View Javadoc

1   /*
2    * Copyright 2011 The Apache Software Foundation
3    *
4    * Licensed to the Apache Software Foundation (ASF) under one
5    * or more contributor license agreements.  See the NOTICE file
6    * distributed with this work for additional information
7    * regarding copyright ownership.  The ASF licenses this file
8    * to you under the Apache License, Version 2.0 (the
9    * "License"); you may not use this file except in compliance
10   * with the License.  You may obtain a copy of the License at
11   *
12   *     http://www.apache.org/licenses/LICENSE-2.0
13   *
14   * Unless required by applicable law or agreed to in writing, software
15   * distributed under the License is distributed on an "AS IS" BASIS,
16   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17   * See the License for the specific language governing permissions and
18   * limitations under the License.
19   */
20  package org.apache.hadoop.hbase.io.hfile;
21  
22  import java.io.ByteArrayInputStream;
23  import java.io.DataInput;
24  import java.io.DataInputStream;
25  import java.io.IOException;
26  import java.nio.ByteBuffer;
27  
28  import org.apache.commons.logging.Log;
29  import org.apache.commons.logging.LogFactory;
30  import org.apache.hadoop.fs.FSDataInputStream;
31  import org.apache.hadoop.fs.Path;
32  import org.apache.hadoop.hbase.KeyValue;
33  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
34  import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
35  import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
36  import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
37  import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
38  import org.apache.hadoop.hbase.util.Bytes;
39  import org.apache.hadoop.io.IOUtils;
40  import org.apache.hadoop.io.RawComparator;
41  
42  import com.google.common.base.Preconditions;
43  
44  /**
45   * {@link HFile} reader for version 1. Does not support data block encoding,
46   * even in cache only, i.e. HFile v1 blocks are always brought into cache
47   * unencoded.
48   */
49  public class HFileReaderV1 extends AbstractHFileReader {
50    private static final Log LOG = LogFactory.getLog(HFileReaderV1.class);
51  
52    private volatile boolean fileInfoLoaded = false;
53  
  /**
   * Opens a HFile.  You must load the index before you can
   * use it by calling {@link #loadFileInfo()}.
   *
   * @param path path of the file, used for cache keys and log messages
   * @param trailer fixed file trailer, already read; must be major version 1
   * @param fsdis input stream.  Caller is responsible for closing the passed
   * stream.
   * @param size Length of the stream.
   * @param closeIStream whether {@link #close()} should close the stream
   * @param cacheConf cache references and configuration
   * @throws IOException if the version-1 block reader cannot be set up
   */
  public HFileReaderV1(Path path, FixedFileTrailer trailer,
      final FSDataInputStream fsdis, final long size,
      final boolean closeIStream,
      final CacheConfig cacheConf) throws IOException {
    super(path, trailer, fsdis, size, closeIStream, cacheConf);

    // This reader only handles major version 1; fail fast on anything else.
    trailer.expectMajorVersion(1);
    // v1 blocks are read via the version-1 block reader (no data block
    // encoding support in this format).
    fsBlockReader = new HFileBlock.FSReaderV1(fsdis, compressAlgo, fileSize);
  }
72  
73    private byte[] readAllIndex(final FSDataInputStream in,
74        final long indexOffset, final int indexSize) throws IOException {
75      byte[] allIndex = new byte[indexSize];
76      in.seek(indexOffset);
77      IOUtils.readFully(in, allIndex, 0, allIndex.length);
78  
79      return allIndex;
80    }
81  
  /**
   * Read in the index and file info.
   *
   * Reads the file-info map, instantiates the comparator named in it, then
   * loads the single-level data and meta block indexes that v1 files keep in
   * the load-on-open section. Idempotent: once loaded, subsequent calls
   * return the cached {@link FileInfo}.
   *
   * @return A map of fileinfo data.
   * @see Writer#appendFileInfo(byte[], byte[])
   * @throws IOException
   */
  @Override
  public FileInfo loadFileInfo() throws IOException {
    if (fileInfoLoaded)
      return fileInfo;

    // Read in the fileinfo and get what we need from it.
    istream.seek(trailer.getFileInfoOffset());
    fileInfo = new FileInfo();
    fileInfo.readFields(istream);
    lastKey = fileInfo.get(FileInfo.LASTKEY);
    avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
    avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));

    // Comparator is stored in the file info in version 1.
    String clazzName = Bytes.toString(fileInfo.get(FileInfo.COMPARATOR));
    comparator = getComparator(clazzName);

    // v1 block indexes are single level, hence tree level 1 for both readers.
    dataBlockIndexReader =
        new HFileBlockIndex.BlockIndexReader(comparator, 1);
    metaBlockIndexReader =
        new HFileBlockIndex.BlockIndexReader(Bytes.BYTES_RAWCOMPARATOR, 1);

    // Pull the entire load-on-open section (data index + meta index) into
    // memory with one read: everything between the load-on-open offset and
    // the trailer.
    int sizeToLoadOnOpen = (int) (fileSize - trailer.getLoadOnOpenDataOffset() -
        trailer.getTrailerSize());
    byte[] dataAndMetaIndex = readAllIndex(istream,
        trailer.getLoadOnOpenDataOffset(), sizeToLoadOnOpen);

    ByteArrayInputStream bis = new ByteArrayInputStream(dataAndMetaIndex);
    DataInputStream dis = new DataInputStream(bis);

    // Read in the data index.
    if (trailer.getDataIndexCount() > 0)
      BlockType.INDEX_V1.readAndCheck(dis);
    dataBlockIndexReader.readRootIndex(dis, trailer.getDataIndexCount());

    // Read in the metadata index.
    if (trailer.getMetaIndexCount() > 0)
      BlockType.INDEX_V1.readAndCheck(dis);
    metaBlockIndexReader.readRootIndex(dis, trailer.getMetaIndexCount());

    fileInfoLoaded = true;
    return fileInfo;
  }
132 
133   /**
134    * Creates comparator from the given class name.
135    *
136    * @param clazzName the comparator class name read from the trailer
137    * @return an instance of the comparator to use
138    * @throws IOException in case comparator class name is invalid
139    */
140   @SuppressWarnings("unchecked")
141   private RawComparator<byte[]> getComparator(final String clazzName)
142   throws IOException {
143     if (clazzName == null || clazzName.length() == 0) {
144       return null;
145     }
146     try {
147       return (RawComparator<byte[]>)Class.forName(clazzName).newInstance();
148     } catch (InstantiationException e) {
149       throw new IOException(e);
150     } catch (IllegalAccessException e) {
151       throw new IOException(e);
152     } catch (ClassNotFoundException e) {
153       throw new IOException(e);
154     }
155   }
156 
  /**
   * Create a Scanner on this file. No seeks or reads are done on creation. Call
   * {@link HFileScanner#seekTo(byte[])} to position an start the read. There is
   * nothing to clean up in a Scanner. Letting go of your references to the
   * scanner is sufficient.
   *
   * @param cacheBlocks True if we should cache blocks read in by this scanner.
   * @param pread Use positional read rather than seek+read if true (pread is
   *          better for random reads, seek+read is better scanning).
   * @param isCompaction is scanner being used for a compaction?
   * @return Scanner on this file.
   */
  @Override
  public HFileScanner getScanner(boolean cacheBlocks, final boolean pread,
                                final boolean isCompaction) {
    // Construction is cheap: the scanner reads nothing until seeked.
    return new ScannerV1(this, cacheBlocks, pread, isCompaction);
  }
174 
  /**
   * @param key Key to search.
   * @param offset offset of the key within the passed array
   * @param length length of the key
   * @return Block number of the block containing the key or -1 if not in this
   * file.
   */
  protected int blockContainingKey(final byte[] key, int offset, int length) {
    // The data block index must have been populated by loadFileInfo() first.
    Preconditions.checkState(!dataBlockIndexReader.isEmpty(),
        "Block index not loaded");
    return dataBlockIndexReader.rootBlockContainingKey(key, offset, length);
  }
185 
  /**
   * Reads a named meta block, consulting the block cache first and
   * populating it on a miss.
   *
   * @param metaBlockName name of the meta block to look up in the meta index
   * @param cacheBlock Add block to cache, if found
   * @return Block wrapped in a ByteBuffer, or null if this file has no meta
   *         blocks or none with the given name
   * @throws IOException
   */
  @Override
  public ByteBuffer getMetaBlock(String metaBlockName, boolean cacheBlock)
      throws IOException {
    if (trailer.getMetaIndexCount() == 0) {
      return null; // there are no meta blocks
    }
    if (metaBlockIndexReader == null) {
      throw new IOException("Meta index not loaded");
    }

    byte[] nameBytes = Bytes.toBytes(metaBlockName);
    int block = metaBlockIndexReader.rootBlockContainingKey(nameBytes, 0,
        nameBytes.length);
    if (block == -1)
      return null;
    long offset = metaBlockIndexReader.getRootBlockOffset(block);
    long nextOffset;
    // On-disk block size is the gap to the next block's offset; the last
    // meta block runs up to the file-info section.
    if (block == metaBlockIndexReader.getRootBlockCount() - 1) {
      nextOffset = trailer.getFileInfoOffset();
    } else {
      nextOffset = metaBlockIndexReader.getRootBlockOffset(block + 1);
    }

    long startTimeNs = System.nanoTime();

    BlockCacheKey cacheKey = new BlockCacheKey(name, offset,
        DataBlockEncoding.NONE, BlockType.META);

    // Bloom filter meta/data blocks are accounted for in their own category.
    BlockCategory effectiveCategory = BlockCategory.META;
    if (metaBlockName.equals(HFileWriterV1.BLOOM_FILTER_META_KEY) ||
        metaBlockName.equals(HFileWriterV1.BLOOM_FILTER_DATA_KEY)) {
      effectiveCategory = BlockCategory.BLOOM;
    }

    // Per meta key from any given file, synchronize reads for said block
    synchronized (metaBlockIndexReader.getRootBlockKey(block)) {
      // Check cache for block.  If found return.
      if (cacheConf.isBlockCacheEnabled()) {
        HFileBlock cachedBlock =
          (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
              cacheConf.shouldCacheBlockOnRead(effectiveCategory), false);
        if (cachedBlock != null) {
          getSchemaMetrics().updateOnCacheHit(effectiveCategory,
              SchemaMetrics.NO_COMPACTION);
          return cachedBlock.getBufferWithoutHeader();
        }
        // Cache Miss, please load.
      }

      // Meta blocks are always read with positional read (last arg true).
      HFileBlock hfileBlock = fsBlockReader.readBlockData(offset,
          nextOffset - offset, metaBlockIndexReader.getRootBlockDataSize(block),
          true);
      passSchemaMetricsTo(hfileBlock);
      hfileBlock.expectType(BlockType.META);

      // Record read latency and the cache miss.
      final long delta = System.nanoTime() - startTimeNs;
      HFile.offerReadLatency(delta, true);
      getSchemaMetrics().updateOnCacheMiss(effectiveCategory,
          SchemaMetrics.NO_COMPACTION, delta);

      // Cache the block
      if (cacheBlock && cacheConf.shouldCacheBlockOnRead(effectiveCategory)) {
        cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock,
            cacheConf.isInMemory());
      }

      return hfileBlock.getBufferWithoutHeader();
    }
  }
261 
  /**
   * Read in a file block.
   * @param block Index of block to read.
   * @param cacheBlock whether to add the block to the block cache after a
   *          filesystem read
   * @param pread Use positional read instead of seek+read (positional is
   * better doing random reads whereas seek+read is better scanning).
   * @param isCompaction is this block being read as part of a compaction
   * @return Block wrapped in a ByteBuffer.
   * @throws IOException
   */
  ByteBuffer readBlockBuffer(int block, boolean cacheBlock,
      final boolean pread, final boolean isCompaction) throws IOException {
    if (dataBlockIndexReader == null) {
      throw new IOException("Block index not loaded");
    }
    if (block < 0 || block >= dataBlockIndexReader.getRootBlockCount()) {
      throw new IOException("Requested block is out of range: " + block +
        ", max: " + dataBlockIndexReader.getRootBlockCount());
    }

    long offset = dataBlockIndexReader.getRootBlockOffset(block);
    BlockCacheKey cacheKey = new BlockCacheKey(name, offset);

    // For any given block from any given file, synchronize reads for said
    // block.
    // Without a cache, this synchronizing is needless overhead, but really
    // the other choice is to duplicate work (which the cache would prevent you
    // from doing).
    synchronized (dataBlockIndexReader.getRootBlockKey(block)) {
      // Check cache for block.  If found return.
      if (cacheConf.isBlockCacheEnabled()) {
        HFileBlock cachedBlock =
          (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
              cacheConf.shouldCacheDataOnRead(), false);
        if (cachedBlock != null) {
          getSchemaMetrics().updateOnCacheHit(
              cachedBlock.getBlockType().getCategory(), isCompaction);
          return cachedBlock.getBufferWithoutHeader();
        }
        // Carry on, please load.
      }

      // Load block from filesystem.
      long startTimeNs = System.nanoTime();
      long nextOffset;

      if (block == dataBlockIndexReader.getRootBlockCount() - 1) {
        // last block!  The end of data block is first meta block if there is
        // one or if there isn't, the fileinfo offset.
        nextOffset = (metaBlockIndexReader.getRootBlockCount() == 0) ?
            this.trailer.getFileInfoOffset() :
            metaBlockIndexReader.getRootBlockOffset(0);
      } else {
        nextOffset = dataBlockIndexReader.getRootBlockOffset(block + 1);
      }

      HFileBlock hfileBlock = fsBlockReader.readBlockData(offset, nextOffset
          - offset, dataBlockIndexReader.getRootBlockDataSize(block), pread);
      passSchemaMetricsTo(hfileBlock);
      hfileBlock.expectType(BlockType.DATA);

      // Record read latency and the cache miss.
      final long delta = System.nanoTime() - startTimeNs;
      HFile.offerReadLatency(delta, pread);
      getSchemaMetrics().updateOnCacheMiss(BlockCategory.DATA, isCompaction,
          delta);

      // Cache the block
      if (cacheBlock && cacheConf.shouldCacheBlockOnRead(
          hfileBlock.getBlockType().getCategory())) {
        cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock,
            cacheConf.isInMemory());
      }
      return hfileBlock.getBufferWithoutHeader();
    }
  }
336 
337   /**
338    * @return Last key in the file.  May be null if file has no entries.
339    * Note that this is not the last rowkey, but rather the byte form of
340    * the last KeyValue.
341    */
342   public byte[] getLastKey() {
343     if (!fileInfoLoaded) {
344       throw new RuntimeException("Load file info first");
345     }
346     return dataBlockIndexReader.isEmpty() ? null : lastKey;
347   }
348 
  /**
   * @return Midkey for this file. We work with block boundaries only so
   *         returned midkey is an approximation only.
   *
   * @throws IOException
   */
  @Override
  public byte[] midkey() throws IOException {
    // Both preconditions are programming errors, hence IllegalStateException
    // via Preconditions rather than IOException.
    Preconditions.checkState(isFileInfoLoaded(), "File info is not loaded");
    Preconditions.checkState(!dataBlockIndexReader.isEmpty(),
        "Data block index is not loaded or is empty");
    return dataBlockIndexReader.midkey();
  }
362 
  /**
   * Closes the reader; whether cached blocks are evicted is decided by the
   * cache configuration's evict-on-close setting.
   */
  @Override
  public void close() throws IOException {
    close(cacheConf.shouldEvictOnClose());
  }
367 
368   @Override
369   public void close(boolean evictOnClose) throws IOException {
370     if (evictOnClose && cacheConf.isBlockCacheEnabled()) {
371       int numEvicted = 0;
372       for (int i = 0; i < dataBlockIndexReader.getRootBlockCount(); i++) {
373         if (cacheConf.getBlockCache().evictBlock(
374             new BlockCacheKey(name,
375                 dataBlockIndexReader.getRootBlockOffset(i),
376                 DataBlockEncoding.NONE, BlockType.DATA))) {
377           numEvicted++;
378         }
379       }
380       LOG.debug("On close of file " + name + " evicted " + numEvicted
381           + " block(s) of " + dataBlockIndexReader.getRootBlockCount()
382           + " total blocks");
383     }
384     if (this.closeIStream && this.istream != null) {
385       this.istream.close();
386       this.istream = null;
387     }
388 
389     getSchemaMetrics().flushMetrics();
390   }
391 
  /**
   * Common scanner logic for version-1 files: maps a key to a block through
   * the single-level data block index, then delegates the in-block search to
   * the concrete subclass via {@link #blockSeek(byte[], int, int, boolean)}.
   */
  protected abstract static class AbstractScannerV1
      extends AbstractHFileReader.Scanner {
    // Index of the data block this scanner is currently positioned in.
    protected int currBlock;

    /**
     * This masks a field with the same name in the superclass and saves us the
     * runtime overhead of casting from abstract reader to reader V1.
     */
    protected HFileReaderV1 reader;

    public AbstractScannerV1(HFileReaderV1 reader, boolean cacheBlocks,
        final boolean pread, final boolean isCompaction) {
      super(reader, cacheBlocks, pread, isCompaction);
      this.reader = (HFileReaderV1) reader;
    }

    /**
     * Within a loaded block, seek looking for the first key
     * that is smaller than (or equal to?) the key we are interested in.
     *
     * A note on the seekBefore - if you have seekBefore = true, AND the
     * first key in the block = key, then you'll get thrown exceptions.
     * @param key to find
     * @param offset offset of the key within the passed array
     * @param length length of the key
     * @param seekBefore find the key before the exact match.
     * @return 0 on an exact match, non-zero otherwise
     */
    protected abstract int blockSeek(byte[] key, int offset, int length,
        boolean seekBefore);

    /**
     * Positions the scanner's buffer on the given block, re-reading from the
     * file/cache only when the block differs from the current one.
     *
     * @param bloc index of the block to load
     * @param rewind when already on the block: rewind to its start if true,
     *          otherwise back up 8 bytes to re-read the current length prefixes
     */
    protected abstract void loadBlock(int bloc, boolean rewind)
        throws IOException;

    @Override
    public int seekTo(byte[] key, int offset, int length) throws IOException {
      int b = reader.blockContainingKey(key, offset, length);
      if (b < 0) return -1; // falls before the beginning of the file! :-(
      // Avoid re-reading the same block (that'd be dumb).
      loadBlock(b, true);
      return blockSeek(key, offset, length, false);
    }

    @Override
    public int reseekTo(byte[] key, int offset, int length)
        throws IOException {
      // reseekTo only ever moves forward: if we are already at or past the
      // sought key, stay where we are.
      if (blockBuffer != null && currKeyLen != 0) {
        ByteBuffer bb = getKey();
        int compared = reader.getComparator().compare(key, offset,
            length, bb.array(), bb.arrayOffset(), bb.limit());
        if (compared < 1) {
          // If the required key is less than or equal to current key, then
          // don't do anything.
          return compared;
        }
      }

      int b = reader.blockContainingKey(key, offset, length);
      if (b < 0) {
        return -1;
      }
      loadBlock(b, false);
      return blockSeek(key, offset, length, false);
    }

    @Override
    public boolean seekBefore(byte[] key, int offset, int length)
        throws IOException {
      int b = reader.blockContainingKey(key, offset, length);
      if (b < 0)
        return false; // key is before the start of the file.

      // Question: does this block begin with 'key'?
      byte[] firstkKey = reader.getDataBlockIndexReader().getRootBlockKey(b);
      if (reader.getComparator().compare(firstkKey, 0, firstkKey.length,
          key, offset, length) == 0) {
        // Ok the key we're interested in is the first of the block, so go back
        // by one.
        if (b == 0) {
          // we have a 'problem', the key we want is the first of the file.
          return false;
        }
        b--;
        // TODO shortcut: seek forward in this block to the last key of the
        // block.
      }
      loadBlock(b, true);
      blockSeek(key, offset, length, true);
      return true;
    }
  }
481 
  /**
   * Implementation of {@link HFileScanner} interface.
   *
   * Entries in a v1 data block are laid out as
   * [4-byte key length][4-byte value length][key bytes][value bytes];
   * the scanner keeps {@code blockBuffer} positioned at the start of the
   * current key, i.e. just past the two length prefixes.
   */

  protected static class ScannerV1 extends AbstractScannerV1 {
    // Masks the superclass field to avoid a cast on every access.
    private HFileReaderV1 reader;

    public ScannerV1(HFileReaderV1 reader, boolean cacheBlocks,
        final boolean pread, final boolean isCompaction) {
      super(reader, cacheBlocks, pread, isCompaction);
      this.reader = reader;
    }

    @Override
    public KeyValue getKeyValue() {
      if (blockBuffer == null) {
        return null;
      }
      // The buffer is positioned after the two 4-byte length prefixes, so
      // the KeyValue (which starts with those prefixes) begins 8 bytes back.
      return new KeyValue(blockBuffer.array(), blockBuffer.arrayOffset()
          + blockBuffer.position() - 8);
    }

    @Override
    public ByteBuffer getKey() {
      Preconditions.checkState(blockBuffer != null && currKeyLen > 0,
          "you need to seekTo() before calling getKey()");

      // Slice off exactly the current key; buffer position is at its start.
      ByteBuffer keyBuff = blockBuffer.slice();
      keyBuff.limit(currKeyLen);
      keyBuff.rewind();
      // Do keyBuff.asReadOnly()?
      return keyBuff;
    }

    @Override
    public ByteBuffer getValue() {
      if (blockBuffer == null || currKeyLen == 0) {
        throw new RuntimeException(
            "you need to seekTo() before calling getValue()");
      }

      // TODO: Could this be done with one ByteBuffer rather than create two?
      // The value immediately follows the current key in the block buffer.
      ByteBuffer valueBuff = blockBuffer.slice();
      valueBuff.position(currKeyLen);
      valueBuff = valueBuff.slice();
      valueBuff.limit(currValueLen);
      valueBuff.rewind();
      return valueBuff;
    }

    @Override
    public boolean next() throws IOException {
      if (blockBuffer == null) {
        throw new IOException("Next called on non-seeked scanner");
      }

      // Skip over the current key/value pair.
      try {
        blockBuffer.position(blockBuffer.position() + currKeyLen
            + currValueLen);
      } catch (IllegalArgumentException e) {
        // Position overran the block limit: log full scanner state for
        // diagnosis before rethrowing.
        LOG.error("Current pos = " + blockBuffer.position() +
                  "; currKeyLen = " + currKeyLen +
                  "; currValLen = " + currValueLen +
                  "; block limit = " + blockBuffer.limit() +
                  "; HFile name = " + reader.getName() +
                  "; currBlock id = " + currBlock, e);
        throw e;
      }
      if (blockBuffer.remaining() <= 0) {
        // Exhausted this block; advance to the next one, if any.
        currBlock++;
        if (currBlock >= reader.getDataBlockIndexReader().getRootBlockCount()) {
          // damn we are at the end
          currBlock = 0;
          blockBuffer = null;
          return false;
        }
        blockBuffer = reader.readBlockBuffer(currBlock, cacheBlocks, pread,
            isCompaction);
        currKeyLen = blockBuffer.getInt();
        currValueLen = blockBuffer.getInt();
        blockFetches++;
        return true;
      }

      // Read the next entry's length prefixes.
      currKeyLen = blockBuffer.getInt();
      currValueLen = blockBuffer.getInt();
      return true;
    }

    @Override
    protected int blockSeek(byte[] key, int offset, int length,
        boolean seekBefore) {
      int klen, vlen;
      int lastLen = 0;
      // Linear scan over the block's entries, comparing the sought key with
      // each entry's key in place.
      do {
        klen = blockBuffer.getInt();
        vlen = blockBuffer.getInt();
        int comp = reader.getComparator().compare(key, offset, length,
            blockBuffer.array(),
            blockBuffer.arrayOffset() + blockBuffer.position(), klen);
        if (comp == 0) {
          if (seekBefore) {
            // Step back over the previous entry (lastLen) plus both entries'
            // 8-byte length prefixes (hence 16), then re-read the lengths.
            blockBuffer.position(blockBuffer.position() - lastLen - 16);
            currKeyLen = blockBuffer.getInt();
            currValueLen = blockBuffer.getInt();
            return 1; // non exact match.
          }
          currKeyLen = klen;
          currValueLen = vlen;
          return 0; // indicate exact match
        }
        if (comp < 0) {
          // go back one key:
          blockBuffer.position(blockBuffer.position() - lastLen - 16);
          currKeyLen = blockBuffer.getInt();
          currValueLen = blockBuffer.getInt();
          return 1;
        }
        blockBuffer.position(blockBuffer.position() + klen + vlen);
        lastLen = klen + vlen;
      } while (blockBuffer.remaining() > 0);

      // ok we are at the end, so go back a littleeeeee....
      // The 8 in the below is intentionally different to the 16s in the above
      // Do the math you you'll figure it.
      blockBuffer.position(blockBuffer.position() - lastLen - 8);
      currKeyLen = blockBuffer.getInt();
      currValueLen = blockBuffer.getInt();
      return 1; // didn't exactly find it.
    }

    @Override
    public String getKeyString() {
      return Bytes.toStringBinary(blockBuffer.array(),
          blockBuffer.arrayOffset() + blockBuffer.position(), currKeyLen);
    }

    @Override
    public String getValueString() {
      return Bytes.toString(blockBuffer.array(), blockBuffer.arrayOffset() +
        blockBuffer.position() + currKeyLen, currValueLen);
    }

    @Override
    public boolean seekTo() throws IOException {
      if (reader.getDataBlockIndexReader().isEmpty()) {
        return false;
      }
      // Already on the first block: just rewind instead of re-reading it.
      if (blockBuffer != null && currBlock == 0) {
        blockBuffer.rewind();
        currKeyLen = blockBuffer.getInt();
        currValueLen = blockBuffer.getInt();
        return true;
      }
      currBlock = 0;
      blockBuffer = reader.readBlockBuffer(currBlock, cacheBlocks, pread,
          isCompaction);
      currKeyLen = blockBuffer.getInt();
      currValueLen = blockBuffer.getInt();
      blockFetches++;
      return true;
    }

    @Override
    protected void loadBlock(int bloc, boolean rewind) throws IOException {
      if (blockBuffer == null) {
        // First load: fetch the requested block.
        blockBuffer = reader.readBlockBuffer(bloc, cacheBlocks, pread,
            isCompaction);
        currBlock = bloc;
        blockFetches++;
      } else {
        if (bloc != currBlock) {
          blockBuffer = reader.readBlockBuffer(bloc, cacheBlocks, pread,
              isCompaction);
          currBlock = bloc;
          blockFetches++;
        } else {
          // we are already in the same block, just rewind to seek again.
          if (rewind) {
            blockBuffer.rewind();
          }
          else {
            // Go back by (size of rowlength + size of valuelength) = 8 bytes
            blockBuffer.position(blockBuffer.position()-8);
          }
        }
      }
    }

  }
672 
673   @Override
674   public HFileBlock readBlock(long offset, long onDiskBlockSize,
675       boolean cacheBlock, boolean pread, boolean isCompaction,
676       BlockType expectedBlockType) {
677     throw new UnsupportedOperationException();
678   }
679 
680   @Override
681   public DataInput getGeneralBloomFilterMetadata() throws IOException {
682     // Shouldn't cache Bloom filter blocks, otherwise server would abort when
683     // splitting, see HBASE-6479
684     ByteBuffer buf = getMetaBlock(HFileWriterV1.BLOOM_FILTER_META_KEY, false);
685     if (buf == null)
686       return null;
687     ByteArrayInputStream bais = new ByteArrayInputStream(buf.array(),
688         buf.arrayOffset(), buf.limit());
689     return new DataInputStream(bais);
690   }
691 
  /**
   * @return always null; this reader exposes no delete family Bloom filter
   *         metadata.
   */
  @Override
  public DataInput getDeleteBloomFilterMetadata() throws IOException {
    return null;
  }
696 
  /** @return true once {@link #loadFileInfo()} has completed successfully. */
  @Override
  public boolean isFileInfoLoaded() {
    return fileInfoLoaded;
  }
701 
702 }