1   /*
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.io.hfile;
20  
21  import java.io.ByteArrayOutputStream;
22  import java.io.DataInput;
23  import java.io.DataInputStream;
24  import java.io.DataOutput;
25  import java.io.DataOutputStream;
26  import java.io.IOException;
27  import java.nio.ByteBuffer;
28  import java.util.ArrayList;
29  import java.util.Arrays;
30  import java.util.Collections;
31  import java.util.List;
32  import java.util.concurrent.atomic.AtomicReference;
33  
34  import org.apache.commons.logging.Log;
35  import org.apache.commons.logging.LogFactory;
36  import org.apache.hadoop.classification.InterfaceAudience;
37  import org.apache.hadoop.conf.Configuration;
38  import org.apache.hadoop.fs.FSDataOutputStream;
39  import org.apache.hadoop.hbase.Cell;
40  import org.apache.hadoop.hbase.HConstants;
41  import org.apache.hadoop.hbase.KeyValue;
42  import org.apache.hadoop.hbase.KeyValue.KVComparator;
43  import org.apache.hadoop.hbase.KeyValueUtil;
44  import org.apache.hadoop.hbase.io.HeapSize;
45  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
46  import org.apache.hadoop.hbase.io.hfile.HFile.CachingBlockReader;
47  import org.apache.hadoop.hbase.util.Bytes;
48  import org.apache.hadoop.hbase.util.ClassSize;
49  import org.apache.hadoop.hbase.util.CompoundBloomFilterWriter;
50  import org.apache.hadoop.io.WritableUtils;
51  import org.apache.hadoop.util.StringUtils;
52  
53  /**
54   * Provides functionality to write ({@link BlockIndexWriter}) and read
55   * ({@link BlockIndexReader}) single-level and multi-level block indexes.
56   *
57   * Examples of how to use the block index writer can be found in
58   * {@link CompoundBloomFilterWriter} and {@link HFileWriterV2}. Examples of how
59   * to use the reader can be found in {@link HFileReaderV2} and
60   * TestHFileBlockIndex.
61   */
62  @InterfaceAudience.Private
63  public class HFileBlockIndex {
64  
65    private static final Log LOG = LogFactory.getLog(HFileBlockIndex.class);
66  
67    static final int DEFAULT_MAX_CHUNK_SIZE = 128 * 1024;
68  
69    /**
70     * The maximum size guideline for index blocks (leaf, intermediate, and
71     * root). If not specified, <code>DEFAULT_MAX_CHUNK_SIZE</code> is used.
72     */
73    public static final String MAX_CHUNK_SIZE_KEY = "hfile.index.block.max.size";
74  
75    /**
76     * The number of bytes stored in each "secondary index" entry in addition to
77     * key bytes in the non-root index block format. The first long is the file
78     * offset of the deeper-level block the entry points to, and the int that
79     * follows is that block's on-disk size without including header.
80     */
81    static final int SECONDARY_INDEX_ENTRY_OVERHEAD = Bytes.SIZEOF_INT
82        + Bytes.SIZEOF_LONG;
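  // Layout sketch of a non-root index block, matching what
  // BlockIndexChunk#writeNonRoot(DataOutput) below produces (a summary for
  // orientation, not a normative format spec):
  //
  //   int    numEntries
  //   int[]  secondaryIndex   // numEntries + 1 offsets, each relative to
  //                           // the end of the secondary index itself
  //   then, for each entry:
  //     long   offset         // file offset of the deeper-level block
  //     int    onDiskSize     // on-disk size, header not included
  //     byte[] firstKey       // not length-prefixed; lengths are derived by
  //                           // subtracting consecutive secondary index values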
83  
84    /**
85     * Error message when trying to use inline block API in single-level mode.
86     */
87    private static final String INLINE_BLOCKS_NOT_ALLOWED =
88        "Inline blocks are not allowed in the single-level-only mode";
89  
90    /**
91     * The size of a meta-data record used for finding the mid-key in a
92     * multi-level index. Consists of the middle leaf-level index block offset
93     * (long), its on-disk size without header included (int), and the mid-key
94     * entry's zero-based index in that leaf index block.
95     */
96    private static final int MID_KEY_METADATA_SIZE = Bytes.SIZEOF_LONG +
97        2 * Bytes.SIZEOF_INT;
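  // Serialized by BlockIndexChunk#getMidKeyMetadata() as: long
  // midLeafBlockOffset; int midLeafBlockOnDiskSize; int midKeyEntry
  // (16 bytes total, appended directly after the root-level index).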
98  
99    /**
100    * The reader will always hold the root level index in memory. Index
101    * blocks at all other levels will be cached in the LRU cache in practice,
102    * although this API does not enforce that.
103    *
104    * All non-root (leaf and intermediate) index blocks contain what we call a
105    * "secondary index": an array of offsets to the entries within the block.
106    * This allows us to do binary search for the entry corresponding to the
107    * given key without having to deserialize the block.
108    */
109   public static class BlockIndexReader implements HeapSize {
110     /** Needed for doing lookups on blocks. */
111     private final KVComparator comparator;
112 
113     // Root-level data.
114     private byte[][] blockKeys;
115     private long[] blockOffsets;
116     private int[] blockDataSizes;
117     private int rootCount = 0;
118 
119     // Mid-key metadata.
120     private long midLeafBlockOffset = -1;
121     private int midLeafBlockOnDiskSize = -1;
122     private int midKeyEntry = -1;
123 
124     /** Pre-computed mid-key */
125     private AtomicReference<byte[]> midKey = new AtomicReference<byte[]>();
126 
127     /**
128      * The number of levels in the block index tree. One if there is only a
129      * root level, two for root and leaf levels, etc.
130      */
131     private int searchTreeLevel;
132 
133     /** A way to read {@link HFile} blocks at a given offset */
134     private CachingBlockReader cachingBlockReader;
135 
136     public BlockIndexReader(final KVComparator c, final int treeLevel,
137         final CachingBlockReader cachingBlockReader) {
138       this(c, treeLevel);
139       this.cachingBlockReader = cachingBlockReader;
140     }
141 
142     public BlockIndexReader(final KVComparator c, final int treeLevel)
143     {
144       comparator = c;
145       searchTreeLevel = treeLevel;
146     }
147 
148     /**
149      * @return true if the block index is empty.
150      */
151     public boolean isEmpty() {
152       return blockKeys.length == 0;
153     }
154 
155     /**
156      * Verifies that the block index is non-empty and throws an
157      * {@link IllegalStateException} otherwise.
158      */
159     public void ensureNonEmpty() {
160       if (blockKeys.length == 0) {
161         throw new IllegalStateException("Block index is empty or not loaded");
162       }
163     }
164 
165     /**
166      * Return the data block which contains this key. This function will only
167      * be called when the HFile version is larger than 1.
168      *
169      * @param key the key we are looking for
170      * @param currentBlock the current block, to avoid re-reading the same block
171      * @param cacheBlocks whether the blocks read may be added to the cache
172      * @param pread whether to use positional read
173      * @param isCompaction whether the read is part of a compaction
174      * @param expectedDataBlockEncoding the data block encoding the caller is
175      *          expecting the data block to be in, or null to not perform this
176      *          check and return the block irrespective of the encoding
177      * @return the data block containing the given key, or null if not found
178      * @throws IOException
179      */
180     public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boolean cacheBlocks,
181         boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding)
182         throws IOException {
183       BlockWithScanInfo blockWithScanInfo = loadDataBlockWithScanInfo(key, currentBlock,
184           cacheBlocks,
185           pread, isCompaction, expectedDataBlockEncoding);
186       if (blockWithScanInfo == null) {
187         return null;
188       } else {
189         return blockWithScanInfo.getHFileBlock();
190       }
191     }
192 
193     /**
194      * Return the BlockWithScanInfo which contains the DataBlock with other scan
195      * info such as nextIndexedKey. This function will only be called when the
196      * HFile version is larger than 1.
197      * 
198      * @param key
199      *          the key we are looking for
200      * @param currentBlock
201      *          the current block, to avoid re-reading the same block
202      * @param cacheBlocks whether the blocks read may be added to the cache
203      * @param pread whether to use positional read
204      * @param isCompaction whether the read is part of a compaction
205      * @param expectedDataBlockEncoding the data block encoding the caller is
206      *          expecting the data block to be in, or null to not perform this
207      *          check and return the block irrespective of the encoding.
208      * @return the BlockWithScanInfo which contains the DataBlock with other
209      *         scan info such as nextIndexedKey.
210      * @throws IOException
211      */
212     public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock,
213         boolean cacheBlocks,
214         boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding)
215         throws IOException {
216       int rootLevelIndex = rootBlockContainingKey(key);
217       if (rootLevelIndex < 0 || rootLevelIndex >= blockOffsets.length) {
218         return null;
219       }
220 
221       // the next indexed key
222       byte[] nextIndexedKey = null;
223 
224       // Read the next-level (intermediate or leaf) index block.
225       long currentOffset = blockOffsets[rootLevelIndex];
226       int currentOnDiskSize = blockDataSizes[rootLevelIndex];
227 
228       if (rootLevelIndex < blockKeys.length - 1) {
229         nextIndexedKey = blockKeys[rootLevelIndex + 1];
230       } else {
231         nextIndexedKey = HConstants.NO_NEXT_INDEXED_KEY;
232       }
233 
234       int lookupLevel = 1; // How many levels deep we are in our lookup.
235       int index = -1;
236 
237       HFileBlock block;
238       while (true) {
239 
240         if (currentBlock != null && currentBlock.getOffset() == currentOffset)
241         {
242           // Avoid reading the same block again, even with caching turned off.
243           // This is crucial for compaction-type workload which might have
244           // caching turned off. This is like a one-block cache inside the
245           // scanner.
246           block = currentBlock;
247         } else {
248           // Call HFile's caching block reader API. We always cache index
249           // blocks, otherwise we might get terrible performance.
250           boolean shouldCache = cacheBlocks || (lookupLevel < searchTreeLevel);
251           BlockType expectedBlockType;
252           if (lookupLevel < searchTreeLevel - 1) {
253             expectedBlockType = BlockType.INTERMEDIATE_INDEX;
254           } else if (lookupLevel == searchTreeLevel - 1) {
255             expectedBlockType = BlockType.LEAF_INDEX;
256           } else {
257             // this also accounts for ENCODED_DATA
258             expectedBlockType = BlockType.DATA;
259           }
260           block = cachingBlockReader.readBlock(currentOffset,
261               currentOnDiskSize, shouldCache, pread, isCompaction, true,
262               expectedBlockType, expectedDataBlockEncoding);
263         }
264 
265         if (block == null) {
266           throw new IOException("Failed to read block at offset " +
267               currentOffset + ", onDiskSize=" + currentOnDiskSize);
268         }
269 
270         // Found a data block, break the loop and check our level in the tree.
271         if (block.getBlockType().isData()) {
272           break;
273         }
274 
275         // Not a data block. This must be a leaf-level or intermediate-level
276         // index block. We don't allow going deeper than searchTreeLevel.
277         if (++lookupLevel > searchTreeLevel) {
278           throw new IOException("Search Tree Level overflow: lookupLevel="+
279               lookupLevel + ", searchTreeLevel=" + searchTreeLevel);
280         }
281 
282         // Locate the entry corresponding to the given key in the non-root
283         // (leaf or intermediate-level) index block.
284         ByteBuffer buffer = block.getBufferWithoutHeader();
285         index = locateNonRootIndexEntry(buffer, key, comparator);
286         if (index == -1) {
287           // This has to be changed
288           // For now change this to key value
289           KeyValue kv = KeyValueUtil.ensureKeyValue(key);
290           throw new IOException("The key "
291               + Bytes.toStringBinary(kv.getKey(), kv.getKeyOffset(), kv.getKeyLength())
292               + " is before the" + " first key of the non-root index block "
293               + block);
294         }
295 
296         currentOffset = buffer.getLong();
297         currentOnDiskSize = buffer.getInt();
298 
299         // Only update next indexed key if there is a next indexed key in the current level
300         byte[] tmpNextIndexedKey = getNonRootIndexedKey(buffer, index + 1);
301         if (tmpNextIndexedKey != null) {
302           nextIndexedKey = tmpNextIndexedKey;
303         }
304       }
305 
306       if (lookupLevel != searchTreeLevel) {
307         throw new IOException("Reached a data block at level " + lookupLevel +
308             " but the number of levels is " + searchTreeLevel);
309       }
310 
311       // set the next indexed key for the current block.
312       BlockWithScanInfo blockWithScanInfo = new BlockWithScanInfo(block, nextIndexedKey);
313       return blockWithScanInfo;
314     }
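    // Lookup flow sketch: starting from the in-memory root level, each
    // iteration of the loop in loadDataBlockWithScanInfo above reads one
    // deeper index block (cached where possible), binary-searches its
    // secondary index, and follows the (offset, onDiskSize) pair it finds,
    // until a data block is reached at level searchTreeLevel.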
315 
316     /**
317      * An approximation to the {@link HFile}'s mid-key. Operates on block
318      * boundaries, and does not go inside blocks. In other words, returns the
319      * first key of the middle block of the file.
320      *
321      * @return the first key of the middle block
322      */
323     public byte[] midkey() throws IOException {
324       if (rootCount == 0)
325         throw new IOException("HFile empty");
326 
327       byte[] targetMidKey = this.midKey.get();
328       if (targetMidKey != null) {
329         return targetMidKey;
330       }
331 
332       if (midLeafBlockOffset >= 0) {
333         if (cachingBlockReader == null) {
334           throw new IOException("Have to read the middle leaf block but " +
335               "no block reader available");
336         }
337 
338         // Caching, using pread, assuming this is not a compaction.
339         HFileBlock midLeafBlock = cachingBlockReader.readBlock(
340             midLeafBlockOffset, midLeafBlockOnDiskSize, true, true, false, true,
341             BlockType.LEAF_INDEX, null);
342 
343         ByteBuffer b = midLeafBlock.getBufferWithoutHeader();
344         int numDataBlocks = b.getInt();
345         int keyRelOffset = b.getInt(Bytes.SIZEOF_INT * (midKeyEntry + 1));
346         int keyLen = b.getInt(Bytes.SIZEOF_INT * (midKeyEntry + 2)) -
347             keyRelOffset;
348         int keyOffset = b.arrayOffset() +
349             Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset +
350             SECONDARY_INDEX_ENTRY_OVERHEAD;
351         targetMidKey = Arrays.copyOfRange(b.array(), keyOffset, keyOffset + keyLen);
352       } else {
353         // The middle of the root-level index.
354         targetMidKey = blockKeys[rootCount / 2];
355       }
356 
357       this.midKey.set(targetMidKey);
358       return targetMidKey;
359     }
360 
361     /**
362      * @param i from 0 to {@link #getRootBlockCount()} - 1
363      */
364     public byte[] getRootBlockKey(int i) {
365       return blockKeys[i];
366     }
367 
368     /**
369      * @param i from 0 to {@link #getRootBlockCount()} - 1
370      */
371     public long getRootBlockOffset(int i) {
372       return blockOffsets[i];
373     }
374 
375     /**
376      * @param i zero-based index of a root-level block
377      * @return the on-disk size of the root-level block for version 2, or the
378      *         uncompressed size for version 1
379      */
380     public int getRootBlockDataSize(int i) {
381       return blockDataSizes[i];
382     }
383 
384     /**
385      * @return the number of root-level blocks in this block index
386      */
387     public int getRootBlockCount() {
388       return rootCount;
389     }
390 
391     /**
392      * Finds the root-level index block containing the given key.
393      *
394      * @param key
395      *          Key to find
396      * @return Index of the root-level block containing <code>key</code>
397      *         (between 0 and the number of blocks - 1), or -1 if this file
398      *         does not contain the requested key.
399      */
400     public int rootBlockContainingKey(final byte[] key, int offset, int length) {
401       int pos = Bytes.binarySearch(blockKeys, key, offset, length, comparator);
402       // pos is between -(blockKeys.length + 1) and blockKeys.length - 1, see
403       // binarySearch's javadoc.
404 
405       if (pos >= 0) {
406         // This means this is an exact match with an element of blockKeys.
407         assert pos < blockKeys.length;
408         return pos;
409       }
410 
411       // Otherwise, pos = -(i + 1), where blockKeys[i - 1] < key < blockKeys[i],
412       // and i is in [0, blockKeys.length]. We are returning j = i - 1 such that
413       // blockKeys[j] <= key < blockKeys[j + 1]. In particular, j = -1 if
414       // key < blockKeys[0], meaning the file does not contain the given key.
415 
416       int i = -pos - 1;
417       assert 0 <= i && i <= blockKeys.length;
418       return i - 1;
419     }
420 
421     /**
422      * Finds the root-level index block containing the given key.
423      *
424      * @param key
425      *          Key to find
426      */
427     public int rootBlockContainingKey(final Cell key) {
428       int pos = Bytes.binarySearch(blockKeys, key, comparator);
429       // pos is between -(blockKeys.length + 1) and blockKeys.length - 1, see
430       // binarySearch's javadoc.
431 
432       if (pos >= 0) {
433         // This means this is an exact match with an element of blockKeys.
434         assert pos < blockKeys.length;
435         return pos;
436       }
437 
438       // Otherwise, pos = -(i + 1), where blockKeys[i - 1] < key < blockKeys[i],
439       // and i is in [0, blockKeys.length]. We are returning j = i - 1 such that
440       // blockKeys[j] <= key < blockKeys[j + 1]. In particular, j = -1 if
441       // key < blockKeys[0], meaning the file does not contain the given key.
442 
443       int i = -pos - 1;
444       assert 0 <= i && i <= blockKeys.length;
445       return i - 1;
446     }
447 
448     /**
449      * Adds a new entry to the root block index. Only used when reading.
450      *
451      * @param key Last key in the block
452      * @param offset file offset where the block is stored
453      * @param dataSize the uncompressed data size
454      */
455     private void add(final byte[] key, final long offset, final int dataSize) {
456       blockOffsets[rootCount] = offset;
457       blockKeys[rootCount] = key;
458       blockDataSizes[rootCount] = dataSize;
459       rootCount++;
460     }
461 
462     /**
463      * The indexed key at the ith position in the nonRootIndex. The position starts at 0.
464      * @param nonRootIndex the non-root index block buffer
465      * @param i the ith position
466      * @return The indexed key at the ith position in the nonRootIndex.
467      */
468     private byte[] getNonRootIndexedKey(ByteBuffer nonRootIndex, int i) {
469       int numEntries = nonRootIndex.getInt(0);
470       if (i < 0 || i >= numEntries) {
471         return null;
472       }
473 
474       // Entries start after the number of entries and the secondary index.
475       // The secondary index takes numEntries + 1 ints.
476       int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2);
477       // The target key's offset relative to the end of the secondary index
478       int targetKeyRelOffset = nonRootIndex.getInt(
479           Bytes.SIZEOF_INT * (i + 1));
480 
481       // The offset of the target key in the blockIndex buffer
482       int targetKeyOffset = entriesOffset     // Skip secondary index
483           + targetKeyRelOffset               // Skip all entries until mid
484           + SECONDARY_INDEX_ENTRY_OVERHEAD;  // Skip offset and on-disk-size
485 
486       // We subtract the two consecutive secondary index elements, which
487       // gives us the size of the whole (offset, onDiskSize, key) tuple. We
488       // then need to subtract the overhead of offset and onDiskSize.
489       int targetKeyLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 2)) -
490         targetKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD;
491 
492       int from = nonRootIndex.arrayOffset() + targetKeyOffset;
493       int to = from + targetKeyLength;
494       return Arrays.copyOfRange(nonRootIndex.array(), from, to);
495     }
496 
497     /**
498      * Performs a binary search over a non-root level index block. Utilizes the
499      * secondary index, which records the offsets of (offset, onDiskSize,
500      * firstKey) tuples of all entries.
501      * 
502      * @param key
503      *          the key we are searching for offsets to individual entries in
504      *          the blockIndex buffer
505      * @param nonRootIndex
506      *          the non-root index block buffer, starting with the secondary
507      *          index. The position is ignored.
508      * @return the index i in [0, numEntries - 1] such that keys[i] <= key <
509      *         keys[i + 1], if keys is the array of all keys being searched, or
510      *         -1 otherwise
511      * @throws IOException
512      */
513     static int binarySearchNonRootIndex(Cell key, ByteBuffer nonRootIndex,
514         KVComparator comparator) {
515 
516       int numEntries = nonRootIndex.getInt(0);
517       int low = 0;
518       int high = numEntries - 1;
519       int mid = 0;
520 
521       // Entries start after the number of entries and the secondary index.
522       // The secondary index takes numEntries + 1 ints.
523       int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2);
524 
525       // If we imagine that keys[-1] = -Infinity and
526       // keys[numEntries] = Infinity, then we are maintaining an invariant that
527       // keys[low - 1] < key < keys[high + 1] while narrowing down the range.
528       KeyValue.KeyOnlyKeyValue nonRootIndexKV = new KeyValue.KeyOnlyKeyValue();
529       while (low <= high) {
530         mid = (low + high) >>> 1;
531 
532         // Midkey's offset relative to the end of secondary index
533         int midKeyRelOffset = nonRootIndex.getInt(
534             Bytes.SIZEOF_INT * (mid + 1));
535 
536         // The offset of the middle key in the blockIndex buffer
537         int midKeyOffset = entriesOffset       // Skip secondary index
538             + midKeyRelOffset                  // Skip all entries until mid
539             + SECONDARY_INDEX_ENTRY_OVERHEAD;  // Skip offset and on-disk-size
540 
541         // We subtract the two consecutive secondary index elements, which
542         // gives us the size of the whole (offset, onDiskSize, key) tuple. We
543         // then need to subtract the overhead of offset and onDiskSize.
544         int midLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (mid + 2)) -
545             midKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD;
546 
547         // we have to compare in this order, because the comparator order
548         // has special logic when the 'left side' is a special key.
549         nonRootIndexKV.setKey(nonRootIndex.array(),
550             nonRootIndex.arrayOffset() + midKeyOffset, midLength);
551         int cmp = comparator.compareOnlyKeyPortion(key, nonRootIndexKV);
552 
553         // key lives above the midpoint
554         if (cmp > 0)
555           low = mid + 1; // Maintain the invariant that keys[low - 1] < key
556         // key lives below the midpoint
557         else if (cmp < 0)
558           high = mid - 1; // Maintain the invariant that key < keys[high + 1]
559         else
560           return mid; // exact match
561       }
562 
563       // As per our invariant, keys[low - 1] < key < keys[high + 1], meaning
564       // that low - 1 < high + 1 and (low - high) <= 1. As per the loop break
565       // condition, low >= high + 1. Therefore, low = high + 1.
566 
567       if (low != high + 1) {
568         throw new IllegalStateException("Binary search broken: low=" + low
569             + " " + "instead of " + (high + 1));
570       }
571 
572       // OK, our invariant says that keys[low - 1] < key < keys[low]. We need to
573       // return i such that keys[i] <= key < keys[i + 1]. Therefore i = low - 1.
574       int i = low - 1;
575 
576       // Some extra validation on the result.
577       if (i < -1 || i >= numEntries) {
578         throw new IllegalStateException("Binary search broken: result is " +
579             i + " but expected to be between -1 and (numEntries - 1) = " +
580             (numEntries - 1));
581       }
582 
583       return i;
584     }
585 
586     /**
587      * Search for one key using the secondary index in a non-root block. In case
588      * of success, positions the provided buffer at the entry of interest, where
589      * the file offset and the on-disk-size can be read.
590      *
591      * @param nonRootBlock
592      *          a non-root block without header. Initial position does not
593      *          matter.
594      * @param key
595      *          the key to search for
596      * @return the index position where the given key was found, otherwise
597      *         return -1 in the case the given key is before the first key.
598      *
599      */
600     static int locateNonRootIndexEntry(ByteBuffer nonRootBlock, Cell key,
601         KVComparator comparator) {
602       int entryIndex = binarySearchNonRootIndex(key, nonRootBlock, comparator);
603 
604       if (entryIndex != -1) {
605         int numEntries = nonRootBlock.getInt(0);
606 
607         // The end of secondary index and the beginning of entries themselves.
608         int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2);
609 
610         // The offset of the entry we are interested in relative to the end of
611         // the secondary index.
612         int entryRelOffset = nonRootBlock.getInt(Bytes.SIZEOF_INT * (1 + entryIndex));
613 
614         nonRootBlock.position(entriesOffset + entryRelOffset);
615       }
616 
617       return entryIndex;
618     }
619 
620     /**
621      * Read in the root-level index from the given input stream. Must match
622      * what was written into the root level by
623      * {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the
624      * offset that function returned.
625      *
626      * @param in the buffered input stream or wrapped byte input stream
627      * @param numEntries the number of root-level index entries
628      * @throws IOException
629      */
630     public void readRootIndex(DataInput in, final int numEntries)
631         throws IOException {
632       blockOffsets = new long[numEntries];
633       blockKeys = new byte[numEntries][];
634       blockDataSizes = new int[numEntries];
635 
636       // If index size is zero, no index was written.
637       if (numEntries > 0) {
638         for (int i = 0; i < numEntries; ++i) {
639           long offset = in.readLong();
640           int dataSize = in.readInt();
641           byte[] key = Bytes.readByteArray(in);
642           add(key, offset, dataSize);
643         }
644       }
645     }
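    // Root-level entry layout, matching BlockIndexChunk#writeRoot(DataOutput):
    // for each entry, a long offset, an int onDiskDataSize, and the key as
    // written by Bytes.writeByteArray, i.e. a vInt length followed by the
    // key bytes.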
646     
647     /**
648      * Read in the root-level index from the given input stream. Must match
649      * what was written into the root level by
650      * {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the
651      * offset that function returned.
652      *
653      * @param blk the HFile block
654      * @param numEntries the number of root-level index entries
655      * @return the buffered input stream or wrapped byte input stream
656      * @throws IOException
657      */
658     public DataInputStream readRootIndex(HFileBlock blk, final int numEntries) throws IOException {
659       DataInputStream in = blk.getByteStream();
660       readRootIndex(in, numEntries);
661       return in;
662     }
663 
664     /**
665      * Read the root-level metadata of a multi-level block index. Based on
666      * {@link #readRootIndex(DataInput, int)}, but also reads metadata
667      * necessary to compute the mid-key in a multi-level index.
668      *
669      * @param blk the HFile block
670      * @param numEntries the number of root-level index entries
671      * @throws IOException
672      */
673     public void readMultiLevelIndexRoot(HFileBlock blk,
674         final int numEntries) throws IOException {
675       DataInputStream in = readRootIndex(blk, numEntries);
676       // after reading the root index the checksum bytes have to
677       // be subtracted to know if the mid key exists.
678       int checkSumBytes = blk.totalChecksumBytes();
679       if ((in.available() - checkSumBytes) < MID_KEY_METADATA_SIZE) {
680         // No mid-key metadata available.
681         return;
682       }
683       midLeafBlockOffset = in.readLong();
684       midLeafBlockOnDiskSize = in.readInt();
685       midKeyEntry = in.readInt();
686     }
687 
688     @Override
689     public String toString() {
690       StringBuilder sb = new StringBuilder();
691       sb.append("size=" + rootCount).append("\n");
692       for (int i = 0; i < rootCount; i++) {
693         sb.append("key=").append(KeyValue.keyToString(blockKeys[i]))
694             .append("\n  offset=").append(blockOffsets[i])
695             .append(", dataSize=" + blockDataSizes[i]).append("\n");
696       }
697       return sb.toString();
698     }
699 
700     @Override
701     public long heapSize() {
702       long heapSize = ClassSize.align(6 * ClassSize.REFERENCE +
703           2 * Bytes.SIZEOF_INT + ClassSize.OBJECT);
704 
705       // Mid-key metadata.
706       heapSize += MID_KEY_METADATA_SIZE;
707 
708       // Calculating the size of blockKeys
709       if (blockKeys != null) {
710         // Adding array + references overhead
711         heapSize += ClassSize.align(ClassSize.ARRAY + blockKeys.length
712             * ClassSize.REFERENCE);
713 
714         // Adding bytes
715         for (byte[] key : blockKeys) {
716           heapSize += ClassSize.align(ClassSize.ARRAY + key.length);
717         }
718       }
719 
720       if (blockOffsets != null) {
721         heapSize += ClassSize.align(ClassSize.ARRAY + blockOffsets.length
722             * Bytes.SIZEOF_LONG);
723       }
724 
725       if (blockDataSizes != null) {
726         heapSize += ClassSize.align(ClassSize.ARRAY + blockDataSizes.length
727             * Bytes.SIZEOF_INT);
728       }
729 
730       return ClassSize.align(heapSize);
731     }
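    /**
     * A round-trip sketch (illustrative only, not used by HBase itself;
     * assumes the bytes were produced in the root format with raw
     * byte-ordered keys, e.g. by
     * {@link BlockIndexWriter#writeSingleLevelIndex(DataOutput, String)}):
     * loads a single-level root index from a byte array.
     */
    static BlockIndexReader exampleLoadRootIndex(byte[] serialized,
        int numEntries) throws IOException {
      // One-level tree: root entries point directly at data blocks.
      BlockIndexReader reader =
          new BlockIndexReader(KeyValue.RAW_COMPARATOR, 1);
      reader.readRootIndex(new DataInputStream(
          new java.io.ByteArrayInputStream(serialized)), numEntries);
      return reader;
    }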
732 
733   }
734 
735   /**
736    * Writes the block index into the output stream. Generates the tree from
737    * the bottom up. The leaf level is written to disk as a sequence of inline
738    * blocks, if it is larger than a certain number of bytes. If the leaf level
739    * is not large enough, we write all entries to the root level instead.
740    *
741    * After all leaf blocks have been written, we end up with an index
742    * referencing the resulting leaf index blocks. If that index is larger than
743    * the allowed root index size, the writer will break it up into
744    * reasonable-size intermediate-level index block chunks, write those chunks
745    * out, and create another index referencing those chunks. This will be
746    * repeated until the remaining index is small enough to become the root
747    * index. However, in most practical cases we will only have leaf-level
748    * blocks and the root index, or just the root index.
749    */
750   public static class BlockIndexWriter implements InlineBlockWriter {
751     /**
752      * While the index is being written, this represents the current block
753      * index referencing all leaf blocks, with one exception. If the file is
754      * being closed and there are not enough blocks to complete even a single
755      * leaf block, no leaf blocks get written and this contains the entire
756      * block index. After all levels of the index were written by
757      * {@link #writeIndexBlocks(FSDataOutputStream)}, this contains the final
758      * root-level index.
759      */
760     private BlockIndexChunk rootChunk = new BlockIndexChunk();
761 
762     /**
763      * Current leaf-level chunk. New entries referencing data blocks get added
764      * to this chunk until it grows large enough to be written to disk.
765      */
766     private BlockIndexChunk curInlineChunk = new BlockIndexChunk();
767 
768     /**
769      * The number of block index levels. This is one if there is only a root
770      * level (even empty), two if there are a leaf level and a root level, and is
771      * higher if there are intermediate levels. This is only final after
772      * {@link #writeIndexBlocks(FSDataOutputStream)} has been called. The
773      * initial value accounts for the root level, and will be increased to two
774      * as soon as we find out there is a leaf-level in
775      * {@link #blockWritten(long, int)}.
776      */
777     private int numLevels = 1;
778 
779     private HFileBlock.Writer blockWriter;
780     private byte[] firstKey = null;
781 
782     /**
783      * The total number of leaf-level entries, i.e. entries referenced by
784      * leaf-level blocks. For the data block index this is equal to the number
785      * of data blocks.
786      */
787     private long totalNumEntries;
788 
789     /** Total compressed size of all index blocks. */
790     private long totalBlockOnDiskSize;
791 
792     /** Total uncompressed size of all index blocks. */
793     private long totalBlockUncompressedSize;
794 
795     /** The maximum size guideline of all multi-level index blocks. */
796     private int maxChunkSize;
797 
798     /** Whether we require this block index to always be single-level. */
799     private boolean singleLevelOnly;
800 
801     /** Block cache, or null if cache-on-write is disabled */
802     private BlockCache blockCache;
803 
804     /** Name to use for computing cache keys */
805     private String nameForCaching;
806 
807     /** Creates a single-level block index writer */
808     public BlockIndexWriter() {
809       this(null, null, null);
810       singleLevelOnly = true;
811     }
812 
813     /**
814      * Creates a multi-level block index writer.
815      *
816      * @param blockWriter the block writer to use to write index blocks
817      * @param blockCache if this is not null, index blocks will be cached
818      *    on write into this block cache.
819      */
820     public BlockIndexWriter(HFileBlock.Writer blockWriter,
821         BlockCache blockCache, String nameForCaching) {
822       if ((blockCache == null) != (nameForCaching == null)) {
823         throw new IllegalArgumentException("Block cache and file name for " +
824             "caching must be both specified or both null");
825       }
826 
827       this.blockWriter = blockWriter;
828       this.blockCache = blockCache;
829       this.nameForCaching = nameForCaching;
830       this.maxChunkSize = HFileBlockIndex.DEFAULT_MAX_CHUNK_SIZE;
831     }
832 
833     public void setMaxChunkSize(int maxChunkSize) {
834       if (maxChunkSize <= 0) {
835         throw new IllegalArgumentException("Invalid maximum index block size");
836       }
837       this.maxChunkSize = maxChunkSize;
838     }
839 
840     /**
841      * Writes the root level and intermediate levels of the block index into
842      * the output stream, generating the tree from bottom up. Assumes that the
843      * leaf level has been inline-written to the disk if there is enough data
844      * for more than one leaf block. We iterate by breaking the current level
845      * of the block index, starting with the index of all leaf-level blocks,
846      * into chunks small enough to be written to disk, and generate its parent
847      * level, until we end up with a level small enough to become the root
848      * level.
849      *
850      * If the leaf level is not large enough, there is no inline block index
851      * anymore, so we only write that level of block index to disk as the root
852      * level.
853      *
854      * @param out FSDataOutputStream
855      * @return position at which we entered the root-level index.
856      * @throws IOException
857      */
858     public long writeIndexBlocks(FSDataOutputStream out) throws IOException {
859       if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) {
860         throw new IOException("Trying to write a multi-level block index, " +
861             "but there are " + curInlineChunk.getNumEntries() + " entries in the " +
862             "last inline chunk.");
863       }
864 
865       // We need to get mid-key metadata before we create intermediate
866       // indexes and overwrite the root chunk.
867       byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata()
868           : null;
869 
870       if (curInlineChunk != null) {
871         while (rootChunk.getRootSize() > maxChunkSize) {
872           rootChunk = writeIntermediateLevel(out, rootChunk);
873           numLevels += 1;
874         }
875       }
876 
877       // write the root level
878       long rootLevelIndexPos = out.getPos();
879 
880       {
881         DataOutput blockStream =
882             blockWriter.startWriting(BlockType.ROOT_INDEX);
883         rootChunk.writeRoot(blockStream);
884         if (midKeyMetadata != null)
885           blockStream.write(midKeyMetadata);
886         blockWriter.writeHeaderAndData(out);
887       }
888 
889       // Add root index block size
890       totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader();
891       totalBlockUncompressedSize +=
892           blockWriter.getUncompressedSizeWithoutHeader();
893 
894       if (LOG.isTraceEnabled()) {
895         LOG.trace("Wrote a " + numLevels + "-level index with root level at pos "
896           + rootLevelIndexPos + ", " + rootChunk.getNumEntries()
897           + " root-level entries, " + totalNumEntries + " total entries, "
898           + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) +
899           " on-disk size, "
900           + StringUtils.humanReadableInt(totalBlockUncompressedSize) +
901           " total uncompressed size.");
902       }
903       return rootLevelIndexPos;
904     }
905 
906     /**
907      * Writes the block index data as a single level only. Does not do any
908      * block framing.
909      *
910      * @param out the buffered output stream to write the index to. Typically a
911      *          stream writing into an {@link HFile} block.
912      * @param description a short description of the index being written. Used
913      *          in a log message.
914      * @throws IOException
915      */
916     public void writeSingleLevelIndex(DataOutput out, String description)
917         throws IOException {
918       expectNumLevels(1);
919 
920       if (!singleLevelOnly)
921         throw new IOException("Single-level mode is turned off");
922 
923       if (rootChunk.getNumEntries() > 0)
924         throw new IOException("Root-level entries already added in " +
925             "single-level mode");
926 
927       rootChunk = curInlineChunk;
928       curInlineChunk = new BlockIndexChunk();
929 
930       if (LOG.isTraceEnabled()) {
931         LOG.trace("Wrote a single-level " + description + " index with "
932           + rootChunk.getNumEntries() + " entries, " + rootChunk.getRootSize()
933           + " bytes");
934       }
935       rootChunk.writeRoot(out);
936     }
937 
938     /**
939      * Split the current level of the block index into intermediate index
940      * blocks of permitted size and write those blocks to disk. Return the next
941      * level of the block index referencing those intermediate-level blocks.
942      *
943      * @param out the output stream to write intermediate index blocks to
944      * @param currentLevel the current level of the block index, such as a
945      *          chunk referencing all leaf-level index blocks
946      * @return the parent level block index, which becomes the root index after
947      *         a few (usually zero) iterations
948      * @throws IOException
949      */
950     private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out,
951         BlockIndexChunk currentLevel) throws IOException {
952       // Entries referencing intermediate-level blocks we are about to create.
953       BlockIndexChunk parent = new BlockIndexChunk();
954 
955       // The current intermediate-level block index chunk.
956       BlockIndexChunk curChunk = new BlockIndexChunk();
957 
958       for (int i = 0; i < currentLevel.getNumEntries(); ++i) {
959         curChunk.add(currentLevel.getBlockKey(i),
960             currentLevel.getBlockOffset(i), currentLevel.getOnDiskDataSize(i));
961 
962         if (curChunk.getRootSize() >= maxChunkSize)
963           writeIntermediateBlock(out, parent, curChunk);
964       }
965 
966       if (curChunk.getNumEntries() > 0) {
967         writeIntermediateBlock(out, parent, curChunk);
968       }
969 
970       return parent;
971     }
972 
973     private void writeIntermediateBlock(FSDataOutputStream out,
974         BlockIndexChunk parent, BlockIndexChunk curChunk) throws IOException {
975       long beginOffset = out.getPos();
976       DataOutputStream dos = blockWriter.startWriting(
977           BlockType.INTERMEDIATE_INDEX);
978       curChunk.writeNonRoot(dos);
979       byte[] curFirstKey = curChunk.getBlockKey(0);
980       blockWriter.writeHeaderAndData(out);
981 
982       if (blockCache != null) {
983         HFileBlock blockForCaching = blockWriter.getBlockForCaching();
984         blockCache.cacheBlock(new BlockCacheKey(nameForCaching,
985           beginOffset), blockForCaching);
986       }
987 
988       // Add intermediate index block size
989       totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader();
990       totalBlockUncompressedSize +=
991           blockWriter.getUncompressedSizeWithoutHeader();
992 
993       // OFFSET is the beginning offset of the chunk of block index entries.
994       // SIZE is the total byte size of the chunk of block index entries
995       // + the secondary index size
996       // FIRST_KEY is the first key in the chunk of block index
997       // entries.
998       parent.add(curFirstKey, beginOffset,
999           blockWriter.getOnDiskSizeWithHeader());
1000 
1001       // clear current block index chunk
1002       curChunk.clear();
1003       curFirstKey = null;
1004     }
1005 
1006     /**
1007      * @return how many block index entries there are in the root level
1008      */
1009     public final int getNumRootEntries() {
1010       return rootChunk.getNumEntries();
1011     }
1012 
1013     /**
1014      * @return the number of levels in this block index.
1015      */
1016     public int getNumLevels() {
1017       return numLevels;
1018     }
1019 
1020     private void expectNumLevels(int expectedNumLevels) {
1021       if (numLevels != expectedNumLevels) {
1022         throw new IllegalStateException("Number of block index levels is "
1023             + numLevels + " but is expected to be " + expectedNumLevels);
1024       }
1025     }
1026 
1027     /**
1028      * Whether there is an inline block ready to be written. In general, we
1029      * write a leaf-level index block as an inline block as soon as its size
1030      * as serialized in the non-root format reaches a certain threshold.
1031      */
1032     @Override
1033     public boolean shouldWriteBlock(boolean closing) {
1034       if (singleLevelOnly) {
1035         throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
1036       }
1037 
1038       if (curInlineChunk == null) {
1039         throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been " +
1040             "called with closing=true and then called again?");
1041       }
1042 
1043       if (curInlineChunk.getNumEntries() == 0) {
1044         return false;
1045       }
1046 
1047       // We do have some entries in the current inline chunk.
1048       if (closing) {
1049         if (rootChunk.getNumEntries() == 0) {
1050           // We did not add any leaf-level blocks yet. Instead of creating a
1051           // leaf level with one block, move these entries to the root level.
1052 
1053           expectNumLevels(1);
1054           rootChunk = curInlineChunk;
1055           curInlineChunk = null;  // Disallow adding any more index entries.
1056           return false;
1057         }
1058 
1059         return true;
1060       } else {
1061         return curInlineChunk.getNonRootSize() >= maxChunkSize;
1062       }
1063     }
1064 
1065     /**
1066      * Write out the current inline index block. Inline blocks are non-root
1067      * blocks, so the non-root index format is used.
1068      *
1069      * @param out
1070      */
1071     @Override
1072     public void writeInlineBlock(DataOutput out) throws IOException {
1073       if (singleLevelOnly)
1074         throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
1075 
1076       // Write the inline block index to the output stream in the non-root
1077       // index block format.
1078       curInlineChunk.writeNonRoot(out);
1079 
1080       // Save the first key of the inline block so that we can add it to the
1081       // parent-level index.
1082       firstKey = curInlineChunk.getBlockKey(0);
1083 
1084       // Start a new inline index block
1085       curInlineChunk.clear();
1086     }
1087 
1088     /**
1089      * Called after an inline block has been written so that we can add an
1090      * entry referring to that block to the parent-level index.
1091      */
1092     @Override
1093     public void blockWritten(long offset, int onDiskSize, int uncompressedSize)
1094     {
1095       // Add leaf index block size
1096       totalBlockOnDiskSize += onDiskSize;
1097       totalBlockUncompressedSize += uncompressedSize;
1098 
1099       if (singleLevelOnly)
1100         throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
1101 
1102       if (firstKey == null) {
1103         throw new IllegalStateException("Trying to add second-level index " +
1104             "entry with offset=" + offset + " and onDiskSize=" + onDiskSize +
1105             " but the first key was not set in writeInlineBlock");
1106       }
1107 
1108       if (rootChunk.getNumEntries() == 0) {
1109         // We are writing the first leaf block, so increase index level.
1110         expectNumLevels(1);
1111         numLevels = 2;
1112       }
1113 
1114       // Add another entry to the second-level index. Include the number of
1115       // entries in all previous leaf-level chunks for mid-key calculation.
1116       rootChunk.add(firstKey, offset, onDiskSize, totalNumEntries);
1117       firstKey = null;
1118     }
1119 
1120     @Override
1121     public BlockType getInlineBlockType() {
1122       return BlockType.LEAF_INDEX;
1123     }
1124 
1125     /**
1126      * Add one index entry to the current leaf-level block. When the leaf-level
1127      * block gets large enough, it will be flushed to disk as an inline block.
1128      *
1129      * @param firstKey the first key of the data block
1130      * @param blockOffset the offset of the data block
1131      * @param blockDataSize the on-disk size of the data block ({@link HFile}
1132      *          format version 2), or the uncompressed size of the data block (
1133      *          {@link HFile} format version 1).
1134      */
1135     public void addEntry(byte[] firstKey, long blockOffset, int blockDataSize)
1136     {
1137       curInlineChunk.add(firstKey, blockOffset, blockDataSize);
1138       ++totalNumEntries;
1139     }
1140 
1141     /**
1142      * @throws IOException if we happened to write a multi-level index.
1143      */
1144     public void ensureSingleLevel() throws IOException {
1145       if (numLevels > 1) {
1146         throw new IOException ("Wrote a " + numLevels + "-level index with " +
1147             rootChunk.getNumEntries() + " root-level entries, but " +
1148             "this is expected to be a single-level block index.");
1149       }
1150     }
1151 
1152     /**
1153      * @return true if we are using cache-on-write. This is configured by the
1154      *         caller of the constructor by either passing a valid block cache
1155      *         or null.
1156      */
1157     @Override
1158     public boolean getCacheOnWrite() {
1159       return blockCache != null;
1160     }
1161 
1162     /**
1163      * The total uncompressed size of the root index block, intermediate-level
1164      * index blocks, and leaf-level index blocks.
1165      *
1166      * @return the total uncompressed size of all index blocks
1167      */
1168     public long getTotalUncompressedSize() {
1169       return totalBlockUncompressedSize;
1170     }
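    /**
     * A minimal usage sketch (illustrative only; the keys, offsets, and
     * sizes are made up): builds a single-level index over three
     * hypothetical data blocks and serializes it in the root format.
     */
    static byte[] exampleSingleLevelIndex() throws IOException {
      BlockIndexWriter indexWriter = new BlockIndexWriter(); // single-level
      long offset = 0;
      for (byte[] firstKey : new byte[][] { Bytes.toBytes("apple"),
          Bytes.toBytes("mango"), Bytes.toBytes("tomato") }) {
        indexWriter.addEntry(firstKey, offset, 4096); // made-up 4 KB blocks
        offset += 4096;
      }
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      indexWriter.writeSingleLevelIndex(new DataOutputStream(baos),
          "example");
      return baos.toByteArray();
    }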
1171 
1172   }
1173 
1174   /**
1175    * A single chunk of the block index in the process of writing. The data in
1176    * this chunk can become a leaf-level, intermediate-level, or root index
1177    * block.
1178    */
1179   static class BlockIndexChunk {
1180 
1181     /** First keys of the key range corresponding to each index entry. */
1182     private final List<byte[]> blockKeys = new ArrayList<byte[]>();
1183 
1184     /** Block offset in backing stream. */
1185     private final List<Long> blockOffsets = new ArrayList<Long>();
1186 
1187     /** On-disk data sizes of lower-level data or index blocks. */
1188     private final List<Integer> onDiskDataSizes = new ArrayList<Integer>();
1189 
1190     /**
1191      * The cumulative number of sub-entries, i.e. the number of entries in
1192      * deeper-level blocks. numSubEntriesAt[i] is the number of sub-entries in the
1193      * blocks corresponding to this chunk's entries #0 through #i inclusively.
1194      */
1195     private final List<Long> numSubEntriesAt = new ArrayList<Long>();
1196 
1197     /**
1198      * The offset of the next entry to be added, relative to the end of the
1199      * "secondary index" in the "non-root" format representation of this index
1200      * chunk. This is the next value to be added to the secondary index.
1201      */
1202     private int curTotalNonRootEntrySize = 0;
1203 
1204     /**
1205      * The accumulated size of this chunk if stored in the root index format.
1206      */
1207     private int curTotalRootSize = 0;
1208 
1209     /**
1210      * The "secondary index" used for binary search over variable-length
1211      * records in a "non-root" format block. These offsets are relative to the
1212      * end of this secondary index.
1213      */
1214     private final List<Integer> secondaryIndexOffsetMarks =
1215         new ArrayList<Integer>();
1216 
1217     /**
1218      * Adds a new entry to this block index chunk.
1219      *
1220      * @param firstKey the first key in the block pointed to by this entry
1221      * @param blockOffset the offset of the next-level block pointed to by this
1222      *          entry
1223      * @param onDiskDataSize the on-disk data size of the block pointed to by this
1224      *          entry, including header size
1225      * @param curTotalNumSubEntries if this chunk is the root index chunk under
1226      *          construction, this specifies the current total number of
1227      *          sub-entries in all leaf-level chunks, including the one
1228      *          corresponding to the second-level entry being added.
1229      */
1230     void add(byte[] firstKey, long blockOffset, int onDiskDataSize,
1231         long curTotalNumSubEntries) {
1232       // Record the offset for the secondary index
1233       secondaryIndexOffsetMarks.add(curTotalNonRootEntrySize);
1234       curTotalNonRootEntrySize += SECONDARY_INDEX_ENTRY_OVERHEAD
1235           + firstKey.length;
1236 
1237       curTotalRootSize += Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT
1238           + WritableUtils.getVIntSize(firstKey.length) + firstKey.length;
1239 
1240       blockKeys.add(firstKey);
1241       blockOffsets.add(blockOffset);
1242       onDiskDataSizes.add(onDiskDataSize);
1243 
1244       if (curTotalNumSubEntries != -1) {
1245         numSubEntriesAt.add(curTotalNumSubEntries);
1246 
1247         // Make sure the parallel arrays are in sync.
1248         if (numSubEntriesAt.size() != blockKeys.size()) {
1249           throw new IllegalStateException("Only have key/value count " +
1250               "stats for " + numSubEntriesAt.size() + " block index " +
1251               "entries out of " + blockKeys.size());
1252         }
1253       }
1254     }
1255 
1256     /**
1257      * The same as {@link #add(byte[], long, int, long)} but does not take the
1258      * key/value into account. Used for single-level indexes.
1259      *
1260      * @see #add(byte[], long, int, long)
1261      */
1262     public void add(byte[] firstKey, long blockOffset, int onDiskDataSize) {
1263       add(firstKey, blockOffset, onDiskDataSize, -1);
1264     }
1265 
1266     public void clear() {
1267       blockKeys.clear();
1268       blockOffsets.clear();
1269       onDiskDataSizes.clear();
1270       secondaryIndexOffsetMarks.clear();
1271       numSubEntriesAt.clear();
1272       curTotalNonRootEntrySize = 0;
1273       curTotalRootSize = 0;
1274     }
1275 
1276     /**
1277      * Finds the entry corresponding to the deeper-level index block containing
1278      * the given deeper-level entry (a "sub-entry"), assuming a global 0-based
1279      * ordering of sub-entries.
1280      *
1281      * <p>
1282      * <i> Implementation note. </i> We are looking for i such that
1283      * numSubEntriesAt[i - 1] <= k < numSubEntriesAt[i], because a deeper-level
1284      * block #i (0-based) contains sub-entries # numSubEntriesAt[i - 1]'th
1285      * through numSubEntriesAt[i] - 1, assuming a global 0-based ordering of
1286      * sub-entries. i is by definition the insertion point of k in
1287      * numSubEntriesAt.
1288      *
1289      * @param k sub-entry index, from 0 to the total number of sub-entries - 1
1290      * @return the 0-based index of the entry corresponding to the given
1291      *         sub-entry
1292      */
1293     public int getEntryBySubEntry(long k) {
1294       // We define mid-key as the key corresponding to k'th sub-entry
1295       // (0-based).
1296 
1297       int i = Collections.binarySearch(numSubEntriesAt, k);
1298 
1299       // Exact match: cumulativeWeight[i] = k. This means chunks #0 through
1300       // #i contain exactly k sub-entries, and the sub-entry #k (0-based)
1301       // is in the (i + 1)'th chunk.
1302       if (i >= 0)
1303         return i + 1;
1304 
1305       // Inexact match. Return the insertion point.
1306       return -i - 1;
1307     }
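    // Worked example (made-up counts): with numSubEntriesAt = {3, 7, 12},
    // chunk #1 holds sub-entries #3 through #6, so getEntryBySubEntry(5)
    // returns 1 (Collections.binarySearch yields -2, encoding insertion
    // point 1).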
1308 
1309     /**
1310      * Used when writing the root block index of a multi-level block index.
1311      * Serializes additional information that allows the mid-key to be
1312      * identified efficiently.
1313      *
1314      * @return a few serialized fields for finding the mid-key
1315      * @throws IOException if could not create metadata for computing mid-key
1316      */
1317     public byte[] getMidKeyMetadata() throws IOException {
1318       ByteArrayOutputStream baos = new ByteArrayOutputStream(
1319           MID_KEY_METADATA_SIZE);
1320       DataOutputStream baosDos = new DataOutputStream(baos);
1321       long totalNumSubEntries = numSubEntriesAt.get(blockKeys.size() - 1);
1322       if (totalNumSubEntries == 0) {
1323         throw new IOException("No leaf-level entries, mid-key unavailable");
1324       }
1325       long midKeySubEntry = (totalNumSubEntries - 1) / 2;
1326       int midKeyEntry = getEntryBySubEntry(midKeySubEntry);
1327 
1328       baosDos.writeLong(blockOffsets.get(midKeyEntry));
1329       baosDos.writeInt(onDiskDataSizes.get(midKeyEntry));
1330 
1331       long numSubEntriesBefore = midKeyEntry > 0
1332           ? numSubEntriesAt.get(midKeyEntry - 1) : 0;
1333       long subEntryWithinEntry = midKeySubEntry - numSubEntriesBefore;
1334       if (subEntryWithinEntry < 0 || subEntryWithinEntry > Integer.MAX_VALUE)
1335       {
1336         throw new IOException("Could not identify mid-key index within the "
1337             + "leaf-level block containing mid-key: out of range ("
1338             + subEntryWithinEntry + ", numSubEntriesBefore="
1339             + numSubEntriesBefore + ", midKeySubEntry=" + midKeySubEntry
1340             + ")");
1341       }
1342 
1343       baosDos.writeInt((int) subEntryWithinEntry);
1344 
1345       if (baosDos.size() != MID_KEY_METADATA_SIZE) {
1346         throw new IOException("Could not write mid-key metadata: size=" +
1347             baosDos.size() + ", correct size: " + MID_KEY_METADATA_SIZE);
1348       }
1349 
1350       // Close just to be good citizens, although this has no effect.
1351       baos.close();
1352 
1353       return baos.toByteArray();
1354     }
1355 
1356     /**
1357      * Writes the block index chunk in the non-root index block format. This
1358      * format contains the number of entries, an index of integer offsets
1359      * for quick binary search on variable-length records, and tuples of
1360      * block offset, on-disk block size, and the first key for each entry.
1361      *
1362      * @param out
1363      * @throws IOException
1364      */
1365     void writeNonRoot(DataOutput out) throws IOException {
1366       // The number of entries in the block.
1367       out.writeInt(blockKeys.size());
1368 
1369       if (secondaryIndexOffsetMarks.size() != blockKeys.size()) {
1370         throw new IOException("Corrupted block index chunk writer: " +
1371             blockKeys.size() + " entries but " +
1372             secondaryIndexOffsetMarks.size() + " secondary index items");
1373       }
1374 
1375       // For each entry, write a "secondary index" of relative offsets to the
1376       // entries from the end of the secondary index. This works, because at
1377       // read time we read the number of entries and know where the secondary
1378       // index ends.
1379       for (int currentSecondaryIndex : secondaryIndexOffsetMarks)
1380         out.writeInt(currentSecondaryIndex);
1381 
1382       // We include one other element in the secondary index to calculate the
1383       // size of each entry more easily by subtracting secondary index elements.
1384       out.writeInt(curTotalNonRootEntrySize);
1385 
1386       for (int i = 0; i < blockKeys.size(); ++i) {
1387         out.writeLong(blockOffsets.get(i));
1388         out.writeInt(onDiskDataSizes.get(i));
1389         out.write(blockKeys.get(i));
1390       }
1391     }
1392 
1393     /**
1394      * @return the size of this chunk if stored in the non-root index block
1395      *         format
1396      */
1397     int getNonRootSize() {
1398       return Bytes.SIZEOF_INT                          // Number of entries
1399           + Bytes.SIZEOF_INT * (blockKeys.size() + 1)  // Secondary index
1400           + curTotalNonRootEntrySize;                  // All entries
1401     }
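    // Example (made-up sizes): two entries with 5-byte keys occupy
    //   4 + 4 * 3 + 2 * (12 + 5) = 50 bytes
    // (the entry count, three secondary-index ints, and two 17-byte entries).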
1402 
1403     /**
1404      * Writes this chunk into the given output stream in the root block index
1405      * format. This format is similar to the {@link HFile} version 1 block
1406      * index format, except that we store on-disk size of the block instead of
1407      * its uncompressed size.
1408      *
1409      * @param out the data output stream to write the block index to. Typically
1410      *          a stream writing into an {@link HFile} block.
1411      * @throws IOException
1412      */
1413     void writeRoot(DataOutput out) throws IOException {
1414       for (int i = 0; i < blockKeys.size(); ++i) {
1415         out.writeLong(blockOffsets.get(i));
1416         out.writeInt(onDiskDataSizes.get(i));
1417         Bytes.writeByteArray(out, blockKeys.get(i));
1418       }
1419     }
1420 
1421     /**
1422      * @return the size of this chunk if stored in the root index block format
1423      */
1424     int getRootSize() {
1425       return curTotalRootSize;
1426     }
1427 
1428     /**
1429      * @return the number of entries in this block index chunk
1430      */
1431     public int getNumEntries() {
1432       return blockKeys.size();
1433     }
1434 
1435     public byte[] getBlockKey(int i) {
1436       return blockKeys.get(i);
1437     }
1438 
1439     public long getBlockOffset(int i) {
1440       return blockOffsets.get(i);
1441     }
1442 
1443     public int getOnDiskDataSize(int i) {
1444       return onDiskDataSizes.get(i);
1445     }
1446 
1447     public long getCumulativeNumKV(int i) {
1448       if (i < 0)
1449         return 0;
1450       return numSubEntriesAt.get(i);
1451     }
1452 
1453   }
1454 
1455   public static int getMaxChunkSize(Configuration conf) {
1456     return conf.getInt(MAX_CHUNK_SIZE_KEY, DEFAULT_MAX_CHUNK_SIZE);
1457   }
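  /**
   * A configuration sketch (illustrative only): lowers the index block size
   * guideline from the 128 KB default to 64 KB.
   */
  static int exampleMaxChunkSize() {
    Configuration conf = new Configuration();
    conf.setInt(MAX_CHUNK_SIZE_KEY, 64 * 1024);
    return getMaxChunkSize(conf); // 65536
  }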
1458 }