/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFile.CachingBlockReader;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.ObjectIntPair;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Provides functionality to write ({@link BlockIndexWriter}) and read (BlockIndexReader)
 * single-level and multi-level block indexes. Examples of how to use the block index writer can be
 * found in {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter} and
 * {@link HFileWriterImpl}. Examples of how to use the reader can be found in
 * {@link HFileReaderImpl} and org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex.
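 * <p>
 * A minimal writer-side usage sketch (hypothetical wiring; the real setup lives in
 * {@link HFileWriterImpl} and is more involved):
 * <pre>{@code
 * // Assumed to exist: an HFileBlock.Writer "blockWriter" and an FSDataOutputStream "out".
 * HFileBlockIndex.BlockIndexWriter indexWriter =
 *   new HFileBlockIndex.BlockIndexWriter(blockWriter, null, null, null);
 * // One entry per data block: first key, block file offset, on-disk block size.
 * indexWriter.addEntry(firstKeyBytes, blockOffset, blockOnDiskSize);
 * long rootIndexOffset = indexWriter.writeIndexBlocks(out); // done once, at file close
 * }</pre>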
 */
@InterfaceAudience.Private
public class HFileBlockIndex {

  private static final Logger LOG = LoggerFactory.getLogger(HFileBlockIndex.class);

  static final int DEFAULT_MAX_CHUNK_SIZE = 128 * 1024;

  /**
   * The maximum size guideline for index blocks (leaf, intermediate, and root). If not specified,
   * <code>DEFAULT_MAX_CHUNK_SIZE</code> is used.
   */
  public static final String MAX_CHUNK_SIZE_KEY = "hfile.index.block.max.size";

  /**
   * Minimum number of entries in a single index block. Even if we are above the
   * hfile.index.block.max.size limit, we will keep writing to the same block unless we have that
   * many entries. We should have at least a few entries so that we don't have too many levels in
   * the multi-level index. This should be at least 2 to make sure there is no infinite recursion.
   */
  public static final String MIN_INDEX_NUM_ENTRIES_KEY = "hfile.index.block.min.entries";

  static final int DEFAULT_MIN_INDEX_NUM_ENTRIES = 16;
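
  /*
   * A hedged configuration sketch: the two keys above are plain Configuration settings; the
   * reading site (assumed to be the HFile writer setup) is not shown here.
   *
   *   Configuration conf = HBaseConfiguration.create();
   *   conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, 64 * 1024); // smaller index chunks
   *   conf.setInt(HFileBlockIndex.MIN_INDEX_NUM_ENTRIES_KEY, 4); // but never below 4 entries
   */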

  /**
   * The number of bytes stored in each "secondary index" entry in addition to key bytes in the
   * non-root index block format. The first long is the file offset of the deeper-level block the
   * entry points to, and the int that follows is that block's on-disk size without including
   * header.
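   * <p>
   * Sketch of a single non-root entry's layout (the key bytes follow the fixed-size prefix):
   * <pre>
   *   long blockOffset | int onDiskSizeWithoutHeader | key bytes
   * </pre>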
   */
  static final int SECONDARY_INDEX_ENTRY_OVERHEAD = Bytes.SIZEOF_INT + Bytes.SIZEOF_LONG;

  /**
   * Error message when trying to use inline block API in single-level mode.
   */
  private static final String INLINE_BLOCKS_NOT_ALLOWED =
    "Inline blocks are not allowed in the single-level-only mode";

  /**
   * The size of a meta-data record used for finding the mid-key in a multi-level index. Consists of
   * the middle leaf-level index block offset (long), its on-disk size without header included
   * (int), and the mid-key entry's zero-based index in that leaf index block.
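   * <p>
   * Layout of the record, matching how {@link BlockIndexReader#readMultiLevelIndexRoot} consumes
   * it:
   * <pre>
   *   long midLeafBlockOffset | int midLeafBlockOnDiskSize | int midKeyEntry
   * </pre>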
   */
  protected static final int MID_KEY_METADATA_SIZE = Bytes.SIZEOF_LONG + 2 * Bytes.SIZEOF_INT;

  /**
   * An implementation of the BlockIndexReader that deals with block keys which are plain byte[],
   * like the MetaBlock or the Bloom Block for ROW bloom. Does not need a comparator; it can work
   * with Bytes.BYTES_RAWCOMPARATOR.
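   * <p>
   * A minimal lookup sketch (hypothetical surrounding variables; assumes the root index was
   * already loaded via {@link BlockIndexReader#readRootIndex(DataInput, int)}):
   * <pre>{@code
   * ByteArrayKeyBlockIndexReader reader = new ByteArrayKeyBlockIndexReader(1);
   * reader.readRootIndex(in, numEntries);
   * int block = reader.rootBlockContainingKey(metaKey, 0, metaKey.length);
   * long offset = block >= 0 ? reader.getRootBlockOffset(block) : -1;
   * }</pre>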
   */
  static class ByteArrayKeyBlockIndexReader extends BlockIndexReader {

    private byte[][] blockKeys;

    public ByteArrayKeyBlockIndexReader(final int treeLevel) {
      // Can be null for METAINDEX block
      searchTreeLevel = treeLevel;
    }

    @Override
    protected long calculateHeapSizeForBlockKeys(long heapSize) {
      // Calculating the size of blockKeys
      if (blockKeys != null) {
        heapSize += ClassSize.REFERENCE;
        // Adding array + references overhead
        heapSize += ClassSize.align(ClassSize.ARRAY + blockKeys.length * ClassSize.REFERENCE);

        // Adding bytes
        for (byte[] key : blockKeys) {
          heapSize += ClassSize.align(ClassSize.ARRAY + key.length);
        }
      }
      return heapSize;
    }

    @Override
    public boolean isEmpty() {
      return blockKeys.length == 0;
    }

    /**
     * @param i from 0 to {@link #getRootBlockCount()} - 1
     */
    public byte[] getRootBlockKey(int i) {
      return blockKeys[i];
    }

    @Override
    public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock,
      boolean cacheBlocks, boolean pread, boolean isCompaction,
      DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader)
      throws IOException {
      // Not needed for this implementation.
      return null;
    }

    @Override
    public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException {
      // Not needed here
      return null;
    }

    @Override
    protected void initialize(int numEntries) {
      blockKeys = new byte[numEntries][];
    }

    @Override
    protected void add(final byte[] key, final long offset, final int dataSize) {
      blockOffsets[rootCount] = offset;
      blockKeys[rootCount] = key;
      blockDataSizes[rootCount] = dataSize;
      rootCount++;
    }

    @Override
    public int rootBlockContainingKey(byte[] key, int offset, int length, CellComparator comp) {
      int pos = Bytes.binarySearch(blockKeys, key, offset, length);
      // pos is between -(blockKeys.length + 1) and blockKeys.length - 1, see
      // binarySearch's javadoc.

      if (pos >= 0) {
        // This means this is an exact match with an element of blockKeys.
        assert pos < blockKeys.length;
        return pos;
      }

      // Otherwise, pos = -(i + 1), where blockKeys[i - 1] < key < blockKeys[i],
      // and i is in [0, blockKeys.length]. We are returning j = i - 1 such that
      // blockKeys[j] <= key < blockKeys[j + 1]. In particular, j = -1 if
      // key < blockKeys[0], meaning the file does not contain the given key.

      int i = -pos - 1;
      assert 0 <= i && i <= blockKeys.length;
      return i - 1;
    }

    @Override
    public int rootBlockContainingKey(Cell key) {
      // Should not be called on this because here it deals only with byte[]
      throw new UnsupportedOperationException(
        "Cannot search for a key that is of Cell type. Only plain byte array keys "
          + "can be searched for");
    }

    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder();
      sb.append("size=" + rootCount).append("\n");
      for (int i = 0; i < rootCount; i++) {
        sb.append("key=").append(KeyValue.keyToString(blockKeys[i])).append("\n  offset=")
          .append(blockOffsets[i]).append(", dataSize=" + blockDataSizes[i]).append("\n");
      }
      return sb.toString();
    }
  }

  /**
   * An implementation of the BlockIndexReader that deals with block keys which are the key part of
   * a cell, like the data block index or the ROW_COL bloom blocks. This needs a comparator to work
   * with the Cells.
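   * <p>
   * A hedged lookup sketch (hypothetical surrounding variables):
   * <pre>{@code
   * CellBasedKeyBlockIndexReader reader =
   *   new CellBasedKeyBlockIndexReader(CellComparator.getInstance(), numLevels);
   * reader.readMultiLevelIndexRoot(rootIndexBlock, numRootEntries);
   * // args: key, currentBlock, cacheBlocks, pread, isCompaction, expected encoding, block reader
   * BlockWithScanInfo info =
   *   reader.loadDataBlockWithScanInfo(keyCell, null, true, true, false, null, cachingBlockReader);
   * }</pre>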
   */
  static class CellBasedKeyBlockIndexReader extends BlockIndexReader {

    private Cell[] blockKeys;
    /** Pre-computed mid-key */
    private AtomicReference<Cell> midKey = new AtomicReference<>();
    /** Needed for doing lookups on blocks. */
    protected CellComparator comparator;

    public CellBasedKeyBlockIndexReader(final CellComparator c, final int treeLevel) {
      // Can be null for METAINDEX block
      comparator = c;
      searchTreeLevel = treeLevel;
    }

    @Override
    protected long calculateHeapSizeForBlockKeys(long heapSize) {
      if (blockKeys != null) {
        heapSize += ClassSize.REFERENCE;
        // Adding array + references overhead
        heapSize += ClassSize.align(ClassSize.ARRAY + blockKeys.length * ClassSize.REFERENCE);

        // Adding blockKeys
        for (Cell key : blockKeys) {
          heapSize += ClassSize.align(key.heapSize());
        }
      }
      // Add the comparator and the midKey atomic reference
      heapSize += 2 * ClassSize.REFERENCE;
      return heapSize;
    }

    @Override
    public boolean isEmpty() {
      return blockKeys.length == 0;
    }

    /**
     * @param i from 0 to {@link #getRootBlockCount()} - 1
     */
    public Cell getRootBlockKey(int i) {
      return blockKeys[i];
    }

    @Override
    public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock,
      boolean cacheBlocks, boolean pread, boolean isCompaction,
      DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader)
      throws IOException {
      int rootLevelIndex = rootBlockContainingKey(key);
      if (rootLevelIndex < 0 || rootLevelIndex >= blockOffsets.length) {
        return null;
      }

      // the next indexed key
      Cell nextIndexedKey = null;

      // Read the next-level (intermediate or leaf) index block.
      long currentOffset = blockOffsets[rootLevelIndex];
      int currentOnDiskSize = blockDataSizes[rootLevelIndex];

      if (rootLevelIndex < blockKeys.length - 1) {
        nextIndexedKey = blockKeys[rootLevelIndex + 1];
      } else {
        nextIndexedKey = KeyValueScanner.NO_NEXT_INDEXED_KEY;
      }

      int lookupLevel = 1; // How many levels deep we are in our lookup.
      int index = -1;

      HFileBlock block = null;
      KeyOnlyKeyValue tmpNextIndexKV = new KeyValue.KeyOnlyKeyValue();
      while (true) {
        try {
          // Must initialize it with null here, because if we don't and an exception happens in
          // readBlock, then we'll release the previously assigned block twice in the finally
          // block. (See HBASE-22422)
          block = null;
          if (currentBlock != null && currentBlock.getOffset() == currentOffset) {
            // Avoid reading the same block again, even with caching turned off.
            // This is crucial for compaction-type workload which might have
            // caching turned off. This is like a one-block cache inside the
            // scanner.
            block = currentBlock;
          } else {
            // Call HFile's caching block reader API. We always cache index
            // blocks, otherwise we might get terrible performance.
            boolean shouldCache = cacheBlocks || (lookupLevel < searchTreeLevel);
            BlockType expectedBlockType;
            if (lookupLevel < searchTreeLevel - 1) {
              expectedBlockType = BlockType.INTERMEDIATE_INDEX;
            } else if (lookupLevel == searchTreeLevel - 1) {
              expectedBlockType = BlockType.LEAF_INDEX;
            } else {
              // this also accounts for ENCODED_DATA
              expectedBlockType = BlockType.DATA;
            }
            block = cachingBlockReader.readBlock(currentOffset, currentOnDiskSize, shouldCache,
              pread, isCompaction, true, expectedBlockType, expectedDataBlockEncoding);
          }

          if (block == null) {
            throw new IOException("Failed to read block at offset " + currentOffset
              + ", onDiskSize=" + currentOnDiskSize);
          }

          // Found a data block, break the loop and check our level in the tree.
          if (block.getBlockType().isData()) {
            break;
          }

          // Not a data block. This must be a leaf-level or intermediate-level
          // index block. We don't allow going deeper than searchTreeLevel.
          if (++lookupLevel > searchTreeLevel) {
            throw new IOException("Search Tree Level overflow: lookupLevel=" + lookupLevel
              + ", searchTreeLevel=" + searchTreeLevel);
          }

          // Locate the entry corresponding to the given key in the non-root
          // (leaf or intermediate-level) index block.
          ByteBuff buffer = block.getBufferWithoutHeader();
          index = locateNonRootIndexEntry(buffer, key, comparator);
          if (index == -1) {
            // TODO: this should be changed to use a more specific exception type, with the key
            // rendered as a KeyValue.
            throw new IOException("The key " + CellUtil.getCellKeyAsString(key) + " is before the"
              + " first key of the non-root index block " + block);
          }

          currentOffset = buffer.getLong();
          currentOnDiskSize = buffer.getInt();

          // Only update the next indexed key if there is a next indexed key in the current level
          byte[] nonRootIndexedKey = getNonRootIndexedKey(buffer, index + 1);
          if (nonRootIndexedKey != null) {
            tmpNextIndexKV.setKey(nonRootIndexedKey, 0, nonRootIndexedKey.length);
            nextIndexedKey = tmpNextIndexKV;
          }
        } finally {
          if (block != null && !block.getBlockType().isData()) {
            // Release the block immediately if it is not the data block
            block.release();
          }
        }
      }

      if (lookupLevel != searchTreeLevel) {
        assert block.getBlockType().isData();
        // Though we have retrieved a data block, we traversed the wrong number of index levels to
        // get there. Release the block so that its ref count can be decremented.
        if (block != null) {
          block.release();
        }
        throw new IOException("Reached a data block at level " + lookupLevel
          + " but the number of levels is " + searchTreeLevel);
      }

      // set the next indexed key for the current block.
      return new BlockWithScanInfo(block, nextIndexedKey);
    }

    @Override
    public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException {
      if (rootCount == 0) throw new IOException("HFile empty");

      Cell targetMidKey = this.midKey.get();
      if (targetMidKey != null) {
        return targetMidKey;
      }

      if (midLeafBlockOffset >= 0) {
        if (cachingBlockReader == null) {
          throw new IOException(
            "Have to read the middle leaf block but no block reader available");
        }

        // Caching, using pread, assuming this is not a compaction.
        HFileBlock midLeafBlock = cachingBlockReader.readBlock(midLeafBlockOffset,
          midLeafBlockOnDiskSize, true, true, false, true, BlockType.LEAF_INDEX, null);
        try {
          ByteBuff b = midLeafBlock.getBufferWithoutHeader();
          int numDataBlocks = b.getIntAfterPosition(0);
          int keyRelOffset = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 1));
          int keyLen = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 2)) - keyRelOffset
            - SECONDARY_INDEX_ENTRY_OVERHEAD;
          int keyOffset =
            Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset + SECONDARY_INDEX_ENTRY_OVERHEAD;
          byte[] bytes = b.toBytes(keyOffset, keyLen);
          targetMidKey = new KeyValue.KeyOnlyKeyValue(bytes, 0, bytes.length);
        } finally {
          midLeafBlock.release();
        }
      } else {
        // The middle of the root-level index.
        targetMidKey = blockKeys[rootCount / 2];
      }

      this.midKey.set(targetMidKey);
      return targetMidKey;
    }

    @Override
    protected void initialize(int numEntries) {
      blockKeys = new Cell[numEntries];
    }

    /**
     * Adds a new entry to the root block index. Only used when reading.
     * @param key      Last key in the block
     * @param offset   file offset where the block is stored
     * @param dataSize the uncompressed data size
     */
    @Override
    protected void add(final byte[] key, final long offset, final int dataSize) {
      blockOffsets[rootCount] = offset;
      // Create the blockKeys as Cells once when the reader is opened
      blockKeys[rootCount] = new KeyValue.KeyOnlyKeyValue(key, 0, key.length);
      blockDataSizes[rootCount] = dataSize;
      rootCount++;
    }

    @Override
    public int rootBlockContainingKey(final byte[] key, int offset, int length,
      CellComparator comp) {
      // This should always be called with a Cell, not with a byte[] key
      throw new UnsupportedOperationException("Cannot search for a plain byte array key. "
        + "Only Cell-based keys can be searched for");
    }

    @Override
    public int rootBlockContainingKey(Cell key) {
      // Here the comparator should not be null as this happens for the root-level block
      int pos = Bytes.binarySearch(blockKeys, key, comparator);
      // pos is between -(blockKeys.length + 1) and blockKeys.length - 1, see
      // binarySearch's javadoc.

      if (pos >= 0) {
        // This means this is an exact match with an element of blockKeys.
        assert pos < blockKeys.length;
        return pos;
      }

      // Otherwise, pos = -(i + 1), where blockKeys[i - 1] < key < blockKeys[i],
      // and i is in [0, blockKeys.length]. We are returning j = i - 1 such that
      // blockKeys[j] <= key < blockKeys[j + 1]. In particular, j = -1 if
      // key < blockKeys[0], meaning the file does not contain the given key.

      int i = -pos - 1;
      assert 0 <= i && i <= blockKeys.length;
      return i - 1;
    }

    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder();
      sb.append("size=" + rootCount).append("\n");
      for (int i = 0; i < rootCount; i++) {
        sb.append("key=").append((blockKeys[i])).append("\n  offset=").append(blockOffsets[i])
          .append(", dataSize=" + blockDataSizes[i]).append("\n");
      }
      return sb.toString();
    }
  }

  static class CellBasedKeyBlockIndexReaderV2 extends CellBasedKeyBlockIndexReader {

    private HFileIndexBlockEncoder indexBlockEncoder;

    private HFileIndexBlockEncoder.EncodedSeeker seeker;

    public CellBasedKeyBlockIndexReaderV2(final CellComparator c, final int treeLevel) {
      this(c, treeLevel, null);
    }

    public CellBasedKeyBlockIndexReaderV2(final CellComparator c, final int treeLevel,
      HFileIndexBlockEncoder indexBlockEncoder) {
      super(c, treeLevel);
      // Can be null for METAINDEX block
      this.indexBlockEncoder =
        indexBlockEncoder != null ? indexBlockEncoder : NoOpIndexBlockEncoder.INSTANCE;
    }

    @Override
    public boolean isEmpty() {
      return seeker.isEmpty();
    }

    @Override
    public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock,
      boolean cacheBlocks, boolean pread, boolean isCompaction,
      DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader)
      throws IOException {
      return seeker.loadDataBlockWithScanInfo(key, currentBlock, cacheBlocks, pread, isCompaction,
        expectedDataBlockEncoding, cachingBlockReader);
    }

    @Override
    public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException {
      return seeker.midkey(cachingBlockReader);
    }

    /**
     * @param i from 0 to {@link #getRootBlockCount()} - 1
     */
    public Cell getRootBlockKey(int i) {
      return seeker.getRootBlockKey(i);
    }

    @Override
    public int getRootBlockCount() {
      return seeker.getRootBlockCount();
    }

    @Override
    public int rootBlockContainingKey(Cell key) {
      return seeker.rootBlockContainingKey(key);
    }

    @Override
    protected long calculateHeapSizeForBlockKeys(long heapSize) {
      heapSize = super.calculateHeapSizeForBlockKeys(heapSize);
      if (seeker != null) {
        heapSize += ClassSize.REFERENCE;
        heapSize += ClassSize.align(seeker.heapSize());
      }
      return heapSize;
    }

    @Override
    public void readMultiLevelIndexRoot(HFileBlock blk, final int numEntries) throws IOException {
      seeker = indexBlockEncoder.createSeeker();
      seeker.initRootIndex(blk, numEntries, comparator, searchTreeLevel);
    }

    @Override
    public String toString() {
      return seeker.toString();
    }
  }

  /**
   * The reader will always hold the root level index in memory. Index blocks at all other levels
   * will be cached in the LRU cache in practice, although this API does not enforce that.
   * <p>
   * All non-root (leaf and intermediate) index blocks contain what we call a "secondary index": an
   * array of offsets to the entries within the block. This allows us to do binary search for the
   * entry corresponding to the given key without having to deserialize the block.
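   * <p>
   * Non-root index block layout, as consumed by {@code binarySearchNonRootIndex} and
   * {@code getNonRootIndexedKey}:
   * <pre>
   *   int numEntries
   *   int[numEntries + 1]  secondary index: entry offsets relative to the end of this array
   *   entries:             (long offset, int onDiskSize, key bytes) per entry
   * </pre>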
   */
  static abstract class BlockIndexReader implements HeapSize {

    protected long[] blockOffsets;
    protected int[] blockDataSizes;
    protected int rootCount = 0;

    // Mid-key metadata.
    protected long midLeafBlockOffset = -1;
    protected int midLeafBlockOnDiskSize = -1;
    protected int midKeyEntry = -1;

    /**
     * The number of levels in the block index tree. One if there is only root level, two for root
     * and leaf levels, etc.
     */
    protected int searchTreeLevel;

    /** Returns true if the block index is empty. */
    public abstract boolean isEmpty();

    /**
     * Verifies that the block index is non-empty and throws an {@link IllegalStateException}
     * otherwise.
     */
    public void ensureNonEmpty() {
      if (isEmpty()) {
        throw new IllegalStateException("Block index is empty or not loaded");
      }
    }

    /**
     * Return the data block which contains this key. This function will only be called when the
     * HFile version is larger than 1.
     * @param key                       the key we are looking for
     * @param currentBlock              the current block, to avoid re-reading the same block
     * @param expectedDataBlockEncoding the data block encoding the caller is expecting the data
     *                                  block to be in, or null to not perform this check and return
     *                                  the block irrespective of the encoding
     * @return the data block which contains this key, or null if this file does not contain it
     */
    public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boolean cacheBlocks,
      boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding,
      CachingBlockReader cachingBlockReader) throws IOException {
      BlockWithScanInfo blockWithScanInfo = loadDataBlockWithScanInfo(key, currentBlock,
        cacheBlocks, pread, isCompaction, expectedDataBlockEncoding, cachingBlockReader);
      if (blockWithScanInfo == null) {
        return null;
      } else {
        return blockWithScanInfo.getHFileBlock();
      }
    }

    /**
     * Return the BlockWithScanInfo, a data structure which contains the Data HFileBlock with other
     * scan info such as the key that starts the next HFileBlock. This function will only be called
     * when the HFile version is larger than 1.
     * @param key                       the key we are looking for
     * @param currentBlock              the current block, to avoid re-reading the same block
     * @param expectedDataBlockEncoding the data block encoding the caller is expecting the data
     *                                  block to be in, or null to not perform this check and return
     *                                  the block irrespective of the encoding.
     * @return the BlockWithScanInfo which contains the DataBlock with other scan info such as
     *         nextIndexedKey.
     */
    public abstract BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock,
      boolean cacheBlocks, boolean pread, boolean isCompaction,
      DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader)
      throws IOException;

    /**
     * An approximation to the {@link HFile}'s mid-key. Operates on block boundaries, and does not
     * go inside blocks. In other words, returns the first key of the middle block of the file.
     * @return the first key of the middle block
     */
    public abstract Cell midkey(CachingBlockReader cachingBlockReader) throws IOException;

    /**
     * @param i from 0 to {@link #getRootBlockCount()} - 1
     */
    public long getRootBlockOffset(int i) {
      return blockOffsets[i];
    }

    /**
     * @param i zero-based index of a root-level block
     * @return the on-disk size of the root-level block for version 2, or the uncompressed size for
     *         version 1
     */
    public int getRootBlockDataSize(int i) {
      return blockDataSizes[i];
    }

    /** Returns the number of root-level blocks in this block index */
    public int getRootBlockCount() {
      return rootCount;
    }

    /**
     * Finds the root-level index block containing the given key.
     * @param key  the key to find
     * @param comp the comparator to be used
     * @return Offset of block containing <code>key</code> (between 0 and the number of blocks - 1)
     *         or -1 if this file does not contain the request.
     */
    // When we want to find the meta index block or bloom block for ROW bloom
    // type Bytes.BYTES_RAWCOMPARATOR would be enough. For the ROW_COL bloom case we need the
    // CellComparator.
    public abstract int rootBlockContainingKey(final byte[] key, int offset, int length,
      CellComparator comp);

    /**
     * Finds the root-level index block containing the given key.
     * @param key the key to find
     * @return Offset of block containing <code>key</code> (between 0 and the number of blocks - 1)
     *         or -1 if this file does not contain the request.
     */
    // When we want to find the meta index block or bloom block for ROW bloom
    // type Bytes.BYTES_RAWCOMPARATOR would be enough. For the ROW_COL bloom case we
    // need the CellComparator.
    public int rootBlockContainingKey(final byte[] key, int offset, int length) {
      return rootBlockContainingKey(key, offset, length, null);
    }

    /**
     * Finds the root-level index block containing the given key.
     * @param key the key to find
     */
    public abstract int rootBlockContainingKey(final Cell key);

    /**
     * The indexed key at the ith position in the nonRootIndex. The position starts at 0.
     * @param nonRootIndex the non-root index block buffer
     * @param i            the ith position
     * @return The indexed key at the ith position in the nonRootIndex.
     */
    protected byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) {
      int numEntries = nonRootIndex.getInt(0);
      if (i < 0 || i >= numEntries) {
        return null;
      }

      // Entries start after the number of entries and the secondary index.
      // The secondary index takes numEntries + 1 ints.
      int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2);
      // Target key's offset relative to the end of secondary index
      int targetKeyRelOffset = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 1));

      // The offset of the target key in the blockIndex buffer
      int targetKeyOffset = entriesOffset // Skip secondary index
        + targetKeyRelOffset // Skip all entries until mid
        + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size

      // We subtract the two consecutive secondary index elements, which
      // gives us the size of the whole (offset, onDiskSize, key) tuple. We
      // then need to subtract the overhead of offset and onDiskSize.
      int targetKeyLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 2)) - targetKeyRelOffset
        - SECONDARY_INDEX_ENTRY_OVERHEAD;

      // TODO check whether we can make BB backed Cell here? So can avoid bytes copy.
      return nonRootIndex.toBytes(targetKeyOffset, targetKeyLength);
    }

    /**
     * Performs a binary search over a non-root level index block. Utilizes the secondary index,
     * which records the offsets of (offset, onDiskSize, firstKey) tuples of all entries.
     * @param key          the key we are searching for
     * @param nonRootIndex the non-root index block buffer, starting with the secondary index. The
     *                     position is ignored.
     * @return the index i in [0, numEntries - 1] such that keys[i] <= key < keys[i + 1], if keys is
     *         the array of all keys being searched, or -1 otherwise
     */
    static int binarySearchNonRootIndex(Cell key, ByteBuff nonRootIndex,
      CellComparator comparator) {

      int numEntries = nonRootIndex.getIntAfterPosition(0);
      int low = 0;
      int high = numEntries - 1;
      int mid = 0;

      // Entries start after the number of entries and the secondary index.
      // The secondary index takes numEntries + 1 ints.
      int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2);

      // If we imagine that keys[-1] = -Infinity and
      // keys[numEntries] = Infinity, then we are maintaining an invariant that
      // keys[low - 1] < key < keys[high + 1] while narrowing down the range.
      ByteBufferKeyOnlyKeyValue nonRootIndexkeyOnlyKV = new ByteBufferKeyOnlyKeyValue();
      ObjectIntPair<ByteBuffer> pair = new ObjectIntPair<>();
      while (low <= high) {
        mid = low + ((high - low) >> 1);

        // Midkey's offset relative to the end of secondary index
        int midKeyRelOffset = nonRootIndex.getIntAfterPosition(Bytes.SIZEOF_INT * (mid + 1));

        // The offset of the middle key in the blockIndex buffer
        int midKeyOffset = entriesOffset // Skip secondary index
          + midKeyRelOffset // Skip all entries until mid
          + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size

        // We subtract the two consecutive secondary index elements, which
        // gives us the size of the whole (offset, onDiskSize, key) tuple. We
        // then need to subtract the overhead of offset and onDiskSize.
        int midLength = nonRootIndex.getIntAfterPosition(Bytes.SIZEOF_INT * (mid + 2))
          - midKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD;

        // We have to compare in this order, because the comparator order
        // has special logic when the 'left side' is a special key.
        // TODO make KeyOnlyKeyValue to be Buffer backed and avoid array() call. This has to be
        // done after HBASE-12224 & HBASE-12282
        // TODO avoid array call.
        nonRootIndex.asSubByteBuffer(midKeyOffset, midLength, pair);
        nonRootIndexkeyOnlyKV.setKey(pair.getFirst(), pair.getSecond(), midLength);
        int cmp = PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, nonRootIndexkeyOnlyKV);

        // key lives above the midpoint
        if (cmp > 0) low = mid + 1; // Maintain the invariant that keys[low - 1] < key
        // key lives below the midpoint
        else if (cmp < 0) high = mid - 1; // Maintain the invariant that key < keys[high + 1]
        else return mid; // exact match
      }

      // As per our invariant, keys[low - 1] < key < keys[high + 1], meaning
      // that low - 1 < high + 1 and (low - high) <= 1. As per the loop break
      // condition, low >= high + 1. Therefore, low = high + 1.

      if (low != high + 1) {
        throw new IllegalStateException(
          "Binary search broken: low=" + low + " instead of " + (high + 1));
      }

      // OK, our invariant says that keys[low - 1] < key < keys[low]. We need to
      // return i such that keys[i] <= key < keys[i + 1]. Therefore i = low - 1.
      int i = low - 1;

      // Some extra validation on the result.
      if (i < -1 || i >= numEntries) {
        throw new IllegalStateException("Binary search broken: result is " + i
          + " but expected to be between -1 and (numEntries - 1) = " + (numEntries - 1));
      }

      return i;
    }

    /**
     * Search for one key using the secondary index in a non-root block. In case of success,
     * positions the provided buffer at the entry of interest, where the file offset and the
     * on-disk-size can be read.
     * @param nonRootBlock a non-root block without header. Initial position does not matter.
     * @param key          the key to search for
     * @return the index position where the given key was found, otherwise return -1 in the case the
     *         given key is before the first key.
     */
    static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key, CellComparator comparator) {
      int entryIndex = binarySearchNonRootIndex(key, nonRootBlock, comparator);

      if (entryIndex != -1) {
        int numEntries = nonRootBlock.getIntAfterPosition(0);

        // The end of secondary index and the beginning of entries themselves.
        int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2);

        // The offset of the entry we are interested in relative to the end of
        // the secondary index.
        int entryRelOffset = nonRootBlock.getIntAfterPosition(Bytes.SIZEOF_INT * (1 + entryIndex));

        nonRootBlock.position(entriesOffset + entryRelOffset);
      }

      return entryIndex;
    }

    /**
     * Read in the root-level index from the given input stream. Must match what was written into
     * the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset
     * that function returned.
     * @param in         the buffered input stream or wrapped byte input stream
     * @param numEntries the number of root-level index entries
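     * <p>
     * On-disk shape of each root-level entry, matching the read loop below:
     * <pre>
     *   long blockOffset | int onDiskDataSize | vint keyLength + key bytes
     * </pre>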
     */
    public void readRootIndex(DataInput in, final int numEntries) throws IOException {
      blockOffsets = new long[numEntries];
      initialize(numEntries);
      blockDataSizes = new int[numEntries];

      // If index size is zero, no index was written.
      if (numEntries > 0) {
        for (int i = 0; i < numEntries; ++i) {
          long offset = in.readLong();
          int dataSize = in.readInt();
          byte[] key = Bytes.readByteArray(in);
          add(key, offset, dataSize);
        }
      }
    }

    protected abstract void initialize(int numEntries);

    protected abstract void add(final byte[] key, final long offset, final int dataSize);

    /**
     * Read in the root-level index from the given input stream. Must match what was written into
     * the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset
     * that function returned.
     * @param blk        the HFile block
     * @param numEntries the number of root-level index entries
     * @return the buffered input stream or wrapped byte input stream
     */
    public DataInputStream readRootIndex(HFileBlock blk, final int numEntries) throws IOException {
      DataInputStream in = blk.getByteStream();
      readRootIndex(in, numEntries);
      return in;
    }

    /**
     * Read the root-level metadata of a multi-level block index. Based on
     * {@link #readRootIndex(DataInput, int)}, but also reads metadata necessary to compute the
     * mid-key in a multi-level index.
     * @param blk        the HFile block
     * @param numEntries the number of root-level index entries
     */
    public void readMultiLevelIndexRoot(HFileBlock blk, final int numEntries) throws IOException {
      DataInputStream in = readRootIndex(blk, numEntries);
      // After reading the root index, the checksum bytes have to be subtracted in order to
      // determine whether the mid-key metadata is present.
      int checkSumBytes = blk.totalChecksumBytes();
      if ((in.available() - checkSumBytes) < MID_KEY_METADATA_SIZE) {
        // No mid-key metadata available.
        return;
      }
      midLeafBlockOffset = in.readLong();
      midLeafBlockOnDiskSize = in.readInt();
      midKeyEntry = in.readInt();
    }

    @Override
    public long heapSize() {
      // The base BlockIndexReader does not have the blockKeys, comparator or the midKey atomic
      // reference; those are accounted for by the subclasses.
      long heapSize =
        ClassSize.align(3 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT + ClassSize.OBJECT);

      // Mid-key metadata.
      heapSize += MID_KEY_METADATA_SIZE;

      heapSize = calculateHeapSizeForBlockKeys(heapSize);

      if (blockOffsets != null) {
        heapSize += ClassSize.align(ClassSize.ARRAY + blockOffsets.length * Bytes.SIZEOF_LONG);
      }

      if (blockDataSizes != null) {
        heapSize += ClassSize.align(ClassSize.ARRAY + blockDataSizes.length * Bytes.SIZEOF_INT);
      }

      return ClassSize.align(heapSize);
    }

    protected abstract long calculateHeapSizeForBlockKeys(long heapSize);
  }

  /**
   * Writes the block index into the output stream. Generates the tree from bottom up. The leaf
   * level is written to disk as a sequence of inline blocks, if it is larger than a certain number
   * of bytes. If the leaf level is not large enough, we write all entries to the root level
   * instead. After all leaf blocks have been written, we end up with an index referencing the
   * resulting leaf index blocks. If that index is larger than the allowed root index size, the
   * writer will break it up into reasonable-size intermediate-level index block chunks, write
   * those chunks out, and create another index referencing those chunks. This will be repeated
   * until the remaining index is small enough to become the root index. However, in most practical
   * cases we will only have leaf-level blocks and the root index, or just the root index.
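   * <p>
   * A hedged construction sketch for a multi-level writer (hypothetical surrounding variables):
   * <pre>{@code
   * BlockIndexWriter writer =
   *   new BlockIndexWriter(hfileBlockWriter, cacheConf, hfileName, null);
   * writer.setMaxChunkSize(conf.getInt(MAX_CHUNK_SIZE_KEY, DEFAULT_MAX_CHUNK_SIZE));
   * writer.setMinIndexNumEntries(conf.getInt(MIN_INDEX_NUM_ENTRIES_KEY,
   *   DEFAULT_MIN_INDEX_NUM_ENTRIES));
   * }</pre>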
   */
  public static class BlockIndexWriter implements InlineBlockWriter {
    /**
     * While the index is being written, this represents the current block index referencing all
     * leaf blocks, with one exception. If the file is being closed and there are not enough blocks
     * to complete even a single leaf block, no leaf blocks get written and this contains the entire
     * block index. After all levels of the index were written by
     * {@link #writeIndexBlocks(FSDataOutputStream)}, this contains the final root-level index.
     */
    private BlockIndexChunk rootChunk = new BlockIndexChunkImpl();

    /**
     * Current leaf-level chunk. New entries referencing data blocks get added to this chunk until
     * it grows large enough to be written to disk.
     */
    private BlockIndexChunk curInlineChunk = new BlockIndexChunkImpl();

    /**
     * The number of block index levels. This is one if there is only the root level (even empty),
     * two if there are a leaf level and a root level, and is higher if there are intermediate
     * levels. This is only final after {@link #writeIndexBlocks(FSDataOutputStream)} has been
     * called. The initial value accounts for the root level, and will be increased to two as soon
     * as we find out there is a leaf-level in {@link #blockWritten(long, int, int)}.
     */
    private int numLevels = 1;

    private HFileBlock.Writer blockWriter;
    private byte[] firstKey = null;

    /**
     * The total number of leaf-level entries, i.e. entries referenced by leaf-level blocks. For the
     * data block index this is equal to the number of data blocks.
     */
    private long totalNumEntries;

    /** Total compressed size of all index blocks. */
    private long totalBlockOnDiskSize;

    /** Total uncompressed size of all index blocks. */
    private long totalBlockUncompressedSize;

    /** The maximum size guideline of all multi-level index blocks. */
    private int maxChunkSize;

    /** The minimum number of entries in each multi-level index block. */
    private int minIndexNumEntries;

    /** Whether we require this block index to always be single-level. */
    private boolean singleLevelOnly;

    /** CacheConfig, or null if cache-on-write is disabled */
    private CacheConfig cacheConf;

    /** Name to use for computing cache keys */
    private String nameForCaching;

    /** Type of encoding used for index blocks in HFile */
    private HFileIndexBlockEncoder indexBlockEncoder;

    /** Creates a single-level block index writer */
    public BlockIndexWriter() {
      this(null, null, null, null);
      singleLevelOnly = true;
    }

    /**
     * Creates a multi-level block index writer.
     * @param blockWriter the block writer to use to write index blocks
     * @param cacheConf   used to determine when and how a block should be cached-on-write.
     */
    public BlockIndexWriter(HFileBlock.Writer blockWriter, CacheConfig cacheConf,
      String nameForCaching, HFileIndexBlockEncoder indexBlockEncoder) {
      if ((cacheConf == null) != (nameForCaching == null)) {
        throw new IllegalArgumentException(
          "Block cache and file name for caching must be both specified or both null");
      }

      this.blockWriter = blockWriter;
      this.cacheConf = cacheConf;
      this.nameForCaching = nameForCaching;
      this.maxChunkSize = HFileBlockIndex.DEFAULT_MAX_CHUNK_SIZE;
      this.minIndexNumEntries = HFileBlockIndex.DEFAULT_MIN_INDEX_NUM_ENTRIES;
      this.indexBlockEncoder =
        indexBlockEncoder != null ? indexBlockEncoder : NoOpIndexBlockEncoder.INSTANCE;
    }

    public void setMaxChunkSize(int maxChunkSize) {
      if (maxChunkSize <= 0) {
        throw new IllegalArgumentException("Invalid maximum index block size");
      }
      this.maxChunkSize = maxChunkSize;
    }

    public void setMinIndexNumEntries(int minIndexNumEntries) {
      if (minIndexNumEntries <= 1) {
        throw new IllegalArgumentException(
          "Invalid minimum number of index block entries, should be >= 2");
      }
      this.minIndexNumEntries = minIndexNumEntries;
    }

    /**
     * Writes the root level and intermediate levels of the block index into the output stream,
     * generating the tree from bottom up. Assumes that the leaf level has been inline-written to
     * the disk if there is enough data for more than one leaf block. We iterate by breaking the
     * current level of the block index, starting with the index of all leaf-level blocks, into
     * chunks small enough to be written to disk, and generate its parent level, until we end up
     * with a level small enough to become the root level. If the leaf level is not large enough,
     * there is no inline block index anymore, so we only write that level of block index to disk as
     * the root level.
     * @param out FSDataOutputStream
     * @return position at which we entered the root-level index.
     */
    public long writeIndexBlocks(FSDataOutputStream out) throws IOException {
      if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) {
        throw new IOException("Trying to write a multi-level block index, but there are "
          + curInlineChunk.getNumEntries() + " entries in the last inline chunk.");
      }

      // We need to get mid-key metadata before we create intermediate
      // indexes and overwrite the root chunk.
      byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata() : null;

      if (curInlineChunk != null) {
        while (
          rootChunk.getRootSize() > maxChunkSize
            // HBASE-16288: if firstKey is larger than maxChunkSize we will loop indefinitely
            && rootChunk.getNumEntries() > minIndexNumEntries
            // Sanity check. We will not hit this: (minIndexNumEntries ^ 16) blocks can be addressed
            && numLevels < 16
        ) {
          rootChunk = writeIntermediateLevel(out, rootChunk);
          numLevels += 1;
        }
      }

      // write the root level
      long rootLevelIndexPos = out.getPos();

      {
        DataOutput blockStream = blockWriter.startWriting(BlockType.ROOT_INDEX);
        indexBlockEncoder.encode(rootChunk, true, blockStream);
        if (midKeyMetadata != null) blockStream.write(midKeyMetadata);
        blockWriter.writeHeaderAndData(out);
        if (cacheConf != null) {
          cacheConf.getBlockCache().ifPresent(cache -> {
            HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
            cache.cacheBlock(new BlockCacheKey(nameForCaching, rootLevelIndexPos, true,
              blockForCaching.getBlockType()), blockForCaching);
          });
        }
      }

      // Add root index block size
      totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader();
      totalBlockUncompressedSize += blockWriter.getUncompressedSizeWithoutHeader();

      if (LOG.isTraceEnabled()) {
        LOG.trace("Wrote a " + numLevels + "-level index with root level at pos "
          + rootLevelIndexPos + ", " + rootChunk.getNumEntries() + " root-level entries, "
          + totalNumEntries + " total entries, "
          + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) + " on-disk size, "
          + StringUtils.humanReadableInt(totalBlockUncompressedSize) + " total uncompressed size.");
      }
      return rootLevelIndexPos;
    }

    /**
     * Writes the block index data as a single level only. Does not do any block framing.
     * @param out         the buffered output stream to write the index to. Typically a stream
     *                    writing into an {@link HFile} block.
     * @param description a short description of the index being written. Used in a log message.
     */
    public void writeSingleLevelIndex(DataOutput out, String description) throws IOException {
      expectNumLevels(1);

      if (!singleLevelOnly) throw new IOException("Single-level mode is turned off");

      if (rootChunk.getNumEntries() > 0)
        throw new IOException("Root-level entries already added in single-level mode");

      rootChunk = curInlineChunk;
      curInlineChunk = new BlockIndexChunkImpl();

      if (LOG.isTraceEnabled()) {
        LOG.trace("Wrote a single-level " + description + " index with " + rootChunk.getNumEntries()
          + " entries, " + rootChunk.getRootSize() + " bytes");
      }
      indexBlockEncoder.encode(rootChunk, true, out);
    }

    /**
     * Split the current level of the block index into intermediate index blocks of permitted size
     * and write those blocks to disk. Return the next level of the block index referencing those
     * intermediate-level blocks.
     * @param currentLevel the current level of the block index, such as a chunk referencing all
     *                     leaf-level index blocks
     * @return the parent level block index, which becomes the root index after a few (usually zero)
     *         iterations
     */
    private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out,
      BlockIndexChunk currentLevel) throws IOException {
      // Entries referencing intermediate-level blocks we are about to create.
      BlockIndexChunk parent = new BlockIndexChunkImpl();

      // The current intermediate-level block index chunk.
      BlockIndexChunk curChunk = new BlockIndexChunkImpl();

      for (int i = 0; i < currentLevel.getNumEntries(); ++i) {
        curChunk.add(currentLevel.getBlockKey(i), currentLevel.getBlockOffset(i),
          currentLevel.getOnDiskDataSize(i));

        // HBASE-16288: We have to have at least minIndexNumEntries (16) items in the index so that
        // we won't end up with too many levels for an index with very large row keys. Also, if the
        // first key is larger than maxChunkSize this will cause infinite recursion.
        if (i >= minIndexNumEntries && curChunk.getRootSize() >= maxChunkSize) {
          writeIntermediateBlock(out, parent, curChunk);
        }
      }

      if (curChunk.getNumEntries() > 0) {
        writeIntermediateBlock(out, parent, curChunk);
      }

      return parent;
    }

    private void writeIntermediateBlock(FSDataOutputStream out, BlockIndexChunk parent,
      BlockIndexChunk curChunk) throws IOException {
      long beginOffset = out.getPos();
      DataOutputStream dos = blockWriter.startWriting(BlockType.INTERMEDIATE_INDEX);
      indexBlockEncoder.encode(curChunk, false, dos);
      byte[] curFirstKey = curChunk.getBlockKey(0);
      blockWriter.writeHeaderAndData(out);

      if (getCacheOnWrite()) {
        cacheConf.getBlockCache().ifPresent(cache -> {
          HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
          cache.cacheBlock(
            new BlockCacheKey(nameForCaching, beginOffset, true, blockForCaching.getBlockType()),
            blockForCaching);
        });
      }

      // Add intermediate index block size
      totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader();
      totalBlockUncompressedSize += blockWriter.getUncompressedSizeWithoutHeader();

      // OFFSET is the beginning offset of the chunk of block index entries.
      // SIZE is the total byte size of the chunk of block index entries
      // + the secondary index size
      // FIRST_KEY is the first key in the chunk of block index
      // entries.
      parent.add(curFirstKey, beginOffset, blockWriter.getOnDiskSizeWithHeader());

      // clear current block index chunk
      curChunk.clear();
      curFirstKey = null;
    }

    /** Returns how many block index entries there are in the root level */
    public final int getNumRootEntries() {
      return rootChunk.getNumEntries();
    }

    /** Returns the number of levels in this block index. */
    public int getNumLevels() {
      return numLevels;
    }

    private void expectNumLevels(int expectedNumLevels) {
      if (numLevels != expectedNumLevels) {
        throw new IllegalStateException("Number of block index levels is " + numLevels
          + " but is expected to be " + expectedNumLevels);
      }
    }

    /**
     * Whether there is an inline block ready to be written. In general, we write a leaf-level
     * index block as an inline block as soon as its size as serialized in the non-root format
     * reaches a certain threshold.
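     * <p>
     * Roughly how the enclosing HFile writer is assumed to drive the inline-block cycle:
     * <pre>{@code
     * if (indexWriter.shouldWriteBlock(false)) {
     *   DataOutput out = blockWriter.startWriting(indexWriter.getInlineBlockType());
     *   indexWriter.writeInlineBlock(out);
     *   blockWriter.writeHeaderAndData(fsOut);
     *   indexWriter.blockWritten(blockOffset, blockWriter.getOnDiskSizeWithHeader(),
     *     blockWriter.getUncompressedSizeWithoutHeader());
     * }
     * }</pre>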
1213     */
1214    @Override
1215    public boolean shouldWriteBlock(boolean closing) {
1216      if (singleLevelOnly) {
1217        throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
1218      }
1219
1220      if (curInlineChunk == null) {
1221        throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been "
1222          + "called with closing=true and then called again?");
1223      }
1224
1225      if (curInlineChunk.getNumEntries() == 0) {
1226        return false;
1227      }
1228
1229      // We do have some entries in the current inline chunk.
1230      if (closing) {
1231        if (rootChunk.getNumEntries() == 0) {
1232          // We did not add any leaf-level blocks yet. Instead of creating a
1233          // leaf level with one block, move these entries to the root level.
1234
1235          expectNumLevels(1);
1236          rootChunk = curInlineChunk;
1237          curInlineChunk = null; // Disallow adding any more index entries.
1238          return false;
1239        }
1240
1241        return true;
1242      } else {
1243        return curInlineChunk.getNonRootSize() >= maxChunkSize;
1244      }
1245    }
1246
1247    /**
1248     * Write out the current inline index block. Inline blocks are non-root blocks, so the non-root
1249     * index format is used.
1250     */
1251    @Override
1252    public void writeInlineBlock(DataOutput out) throws IOException {
1253      if (singleLevelOnly) throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
1254
1255      // Write the inline block index to the output stream in the non-root
1256      // index block format.
1257      indexBlockEncoder.encode(curInlineChunk, false, out);
1258
1259      // Save the first key of the inline block so that we can add it to the
1260      // parent-level index.
1261      firstKey = curInlineChunk.getBlockKey(0);
1262
1263      // Start a new inline index block
1264      curInlineChunk.clear();
1265    }
1266
1267    /**
1268     * Called after an inline block has been written so that we can add an entry referring to that
1269     * block to the parent-level index.
1270     */
1271    @Override
1272    public void blockWritten(long offset, int onDiskSize, int uncompressedSize) {
1273      // Add leaf index block size
1274      totalBlockOnDiskSize += onDiskSize;
1275      totalBlockUncompressedSize += uncompressedSize;
1276
1277      if (singleLevelOnly) throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
1278
1279      if (firstKey == null) {
        throw new IllegalStateException(
          "Trying to add second-level index entry with offset=" + offset + " and onDiskSize="
            + onDiskSize + " but the first key was not set in writeInlineBlock");
1283      }
1284
1285      if (rootChunk.getNumEntries() == 0) {
1286        // We are writing the first leaf block, so increase index level.
1287        expectNumLevels(1);
1288        numLevels = 2;
1289      }
1290
1291      // Add another entry to the second-level index. Include the number of
1292      // entries in all previous leaf-level chunks for mid-key calculation.
1293      rootChunk.add(firstKey, offset, onDiskSize, totalNumEntries);
1294      firstKey = null;
1295    }
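
    // Illustrative example: if three leaf-level index blocks are written
    // holding 10, 15, and 15 data-block entries respectively, the three
    // blockWritten(...) calls record cumulative sub-entry counts of
    // [10, 25, 40] in rootChunk; getMidKeyMetadata() later searches these
    // counts to locate the mid-key.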
1296
1297    @Override
1298    public BlockType getInlineBlockType() {
1299      return BlockType.LEAF_INDEX;
1300    }
1301
1302    /**
1303     * Add one index entry to the current leaf-level block. When the leaf-level block gets large
1304     * enough, it will be flushed to disk as an inline block.
1305     * @param firstKey      the first key of the data block
1306     * @param blockOffset   the offset of the data block
1307     * @param blockDataSize the on-disk size of the data block ({@link HFile} format version 2), or
     *                      the uncompressed size of the data block ({@link HFile} format version
1309     *                      1).
1310     */
1311    public void addEntry(byte[] firstKey, long blockOffset, int blockDataSize) {
1312      curInlineChunk.add(firstKey, blockOffset, blockDataSize);
1313      ++totalNumEntries;
1314    }
1315
1316    /**
1317     * @throws IOException if we happened to write a multi-level index.
1318     */
1319    public void ensureSingleLevel() throws IOException {
1320      if (numLevels > 1) {
        throw new IOException(
          "Wrote a " + numLevels + "-level index with " + rootChunk.getNumEntries()
            + " root-level entries, but this is expected to be a single-level block index.");
1324      }
1325    }
1326
1327    /**
1328     * @return true if we are using cache-on-write. This is configured by the caller of the
     *         constructor by passing either a valid block cache or null.
1330     */
1331    @Override
1332    public boolean getCacheOnWrite() {
1333      return cacheConf != null && cacheConf.shouldCacheIndexesOnWrite();
1334    }
1335
1336    /**
1337     * The total uncompressed size of the root index block, intermediate-level index blocks, and
1338     * leaf-level index blocks.
1339     * @return the total uncompressed size of all index blocks
1340     */
1341    public long getTotalUncompressedSize() {
1342      return totalBlockUncompressedSize;
1343    }
1344
1345  }
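
  // Illustrative usage sketch of the writer above. The flow mirrors what
  // HFileWriterImpl drives; startLeafIndexBlock() and the local variable
  // names are hypothetical stand-ins:
  //
  //   BlockIndexWriter idx = ...; // multi-level writer backed by blockWriter
  //   // For every data block written to the file:
  //   idx.addEntry(firstKeyOfBlock, blockOffset, blockOnDiskSize);
  //   // The inline-block machinery periodically asks:
  //   if (idx.shouldWriteBlock(false)) {
  //     DataOutput out = startLeafIndexBlock(); // hypothetical helper
  //     idx.writeInlineBlock(out);              // non-root format
  //     idx.blockWritten(offset, onDiskSize, uncompressedSize);
  //   }
  //   // At close time, shouldWriteBlock(true) drains the last inline chunk,
  //   // or promotes it to the root if no leaf blocks were ever written.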
1346
1347  /**
   * A single chunk of the block index in the process of being written. The data in this chunk can
   * become a leaf-level, intermediate-level, or root index block.
1350   */
1351  static class BlockIndexChunkImpl implements BlockIndexChunk {
1352
1353    /** First keys of the key range corresponding to each index entry. */
1354    private final List<byte[]> blockKeys = new ArrayList<>();
1355
1356    /** Block offset in backing stream. */
1357    private final List<Long> blockOffsets = new ArrayList<>();
1358
1359    /** On-disk data sizes of lower-level data or index blocks. */
1360    private final List<Integer> onDiskDataSizes = new ArrayList<>();
1361
1362    /**
     * The cumulative number of sub-entries, i.e. entries referenced by deeper-level block index
     * entries. numSubEntriesAt[i] is the number of sub-entries in the blocks corresponding to this
     * chunk's entries #0 through #i inclusively.
1366     */
1367    private final List<Long> numSubEntriesAt = new ArrayList<>();
1368
1369    /**
1370     * The offset of the next entry to be added, relative to the end of the "secondary index" in the
1371     * "non-root" format representation of this index chunk. This is the next value to be added to
1372     * the secondary index.
1373     */
1374    private int curTotalNonRootEntrySize = 0;
1375
1376    /**
1377     * The accumulated size of this chunk if stored in the root index format.
1378     */
1379    private int curTotalRootSize = 0;
1380
1381    /**
1382     * The "secondary index" used for binary search over variable-length records in a "non-root"
1383     * format block. These offsets are relative to the end of this secondary index.
1384     */
1385    private final List<Integer> secondaryIndexOffsetMarks = new ArrayList<>();
1386
1387    /**
1388     * Adds a new entry to this block index chunk.
1389     * @param firstKey              the first key in the block pointed to by this entry
1390     * @param blockOffset           the offset of the next-level block pointed to by this entry
     * @param onDiskDataSize        the on-disk data size of the block pointed to by this entry,
1392     *                              including header size
1393     * @param curTotalNumSubEntries if this chunk is the root index chunk under construction, this
1394     *                              specifies the current total number of sub-entries in all
1395     *                              leaf-level chunks, including the one corresponding to the
1396     *                              second-level entry being added.
1397     */
1398    @Override
1399    public void add(byte[] firstKey, long blockOffset, int onDiskDataSize,
1400      long curTotalNumSubEntries) {
1401      // Record the offset for the secondary index
1402      secondaryIndexOffsetMarks.add(curTotalNonRootEntrySize);
1403      curTotalNonRootEntrySize += SECONDARY_INDEX_ENTRY_OVERHEAD + firstKey.length;
1404
1405      curTotalRootSize += Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT
1406        + WritableUtils.getVIntSize(firstKey.length) + firstKey.length;
1407
1408      blockKeys.add(firstKey);
1409      blockOffsets.add(blockOffset);
1410      onDiskDataSizes.add(onDiskDataSize);
1411
1412      if (curTotalNumSubEntries != -1) {
1413        numSubEntriesAt.add(curTotalNumSubEntries);
1414
1415        // Make sure the parallel arrays are in sync.
1416        if (numSubEntriesAt.size() != blockKeys.size()) {
          throw new IllegalStateException("Only have key/value count stats for "
            + numSubEntriesAt.size() + " block index entries out of " + blockKeys.size());
1419        }
1420      }
1421    }
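
    // Illustrative size accounting for a single entry with a 10-byte
    // firstKey, assuming SECONDARY_INDEX_ENTRY_OVERHEAD is the 12-byte
    // offset-plus-size prefix (SIZEOF_LONG + SIZEOF_INT):
    //   curTotalNonRootEntrySize += 12 + 10; // 22 bytes in non-root format
    //   curTotalRootSize += 8 + 4 + 1 + 10;  // 23 bytes in root format:
    //     // long offset, int on-disk size, one vInt byte encoding key
    //     // length 10, then the key bytes themselves.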
1422
1423    /**
1424     * The same as {@link #add(byte[], long, int, long)} but does not take the key/value into
1425     * account. Used for single-level indexes.
1426     * @see #add(byte[], long, int, long)
1427     */
1428    @Override
1429    public void add(byte[] firstKey, long blockOffset, int onDiskDataSize) {
1430      add(firstKey, blockOffset, onDiskDataSize, -1);
1431    }
1432
1433    @Override
1434    public void clear() {
1435      blockKeys.clear();
1436      blockOffsets.clear();
1437      onDiskDataSizes.clear();
1438      secondaryIndexOffsetMarks.clear();
1439      numSubEntriesAt.clear();
1440      curTotalNonRootEntrySize = 0;
1441      curTotalRootSize = 0;
1442    }
1443
1444    /**
1445     * Finds the entry corresponding to the deeper-level index block containing the given
1446     * deeper-level entry (a "sub-entry"), assuming a global 0-based ordering of sub-entries.
1447     * <p>
     * <i> Implementation note. </i> We are looking for i such that numSubEntriesAt[i - 1] <= k <
     * numSubEntriesAt[i], because a deeper-level block #i (0-based) contains sub-entries
     * numSubEntriesAt[i - 1] through numSubEntriesAt[i] - 1, assuming a global 0-based ordering of
     * sub-entries. i is by definition the insertion point of k in numSubEntriesAt.
     * @param k sub-entry index, from 0 to the total number of sub-entries - 1
1453     * @return the 0-based index of the entry corresponding to the given sub-entry
1454     */
1455    @Override
1456    public int getEntryBySubEntry(long k) {
1457      // We define mid-key as the key corresponding to k'th sub-entry
1458      // (0-based).
1459
1460      int i = Collections.binarySearch(numSubEntriesAt, k);
1461
      // Exact match: numSubEntriesAt[i] = k. This means chunks #0 through
      // #i contain exactly k sub-entries, and the sub-entry #k (0-based)
      // is in the (i + 1)'th chunk.
      if (i >= 0) {
        return i + 1;
      }
1466
1467      // Inexact match. Return the insertion point.
1468      return -i - 1;
1469    }
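
    // Illustrative example with numSubEntriesAt = [10, 25, 40]:
    //   getEntryBySubEntry(25): binarySearch finds an exact match at index 1,
    //     so we return 2 -- chunks #0..#1 hold sub-entries 0..24, making
    //     sub-entry #25 the first one in chunk #2.
    //   getEntryBySubEntry(17): binarySearch returns -2 (insertion point 1),
    //     so we return 1 -- chunk #1 holds sub-entries 10..24.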
1470
    /**
     * Used when writing the root block index of a multi-level block index. Serializes additional
     * information that allows the mid-key to be identified efficiently.
     * @return a few serialized fields for finding the mid-key
     * @throws IOException if the metadata for computing the mid-key could not be created
     */
1477    @Override
1478    public byte[] getMidKeyMetadata() throws IOException {
1479      ByteArrayOutputStream baos = new ByteArrayOutputStream(MID_KEY_METADATA_SIZE);
1480      DataOutputStream baosDos = new DataOutputStream(baos);
1481      long totalNumSubEntries = numSubEntriesAt.get(blockKeys.size() - 1);
1482      if (totalNumSubEntries == 0) {
1483        throw new IOException("No leaf-level entries, mid-key unavailable");
1484      }
1485      long midKeySubEntry = (totalNumSubEntries - 1) / 2;
1486      int midKeyEntry = getEntryBySubEntry(midKeySubEntry);
1487
1488      baosDos.writeLong(blockOffsets.get(midKeyEntry));
1489      baosDos.writeInt(onDiskDataSizes.get(midKeyEntry));
1490
1491      long numSubEntriesBefore = midKeyEntry > 0 ? numSubEntriesAt.get(midKeyEntry - 1) : 0;
1492      long subEntryWithinEntry = midKeySubEntry - numSubEntriesBefore;
1493      if (subEntryWithinEntry < 0 || subEntryWithinEntry > Integer.MAX_VALUE) {
1494        throw new IOException("Could not identify mid-key index within the "
1495          + "leaf-level block containing mid-key: out of range (" + subEntryWithinEntry
1496          + ", numSubEntriesBefore=" + numSubEntriesBefore + ", midKeySubEntry=" + midKeySubEntry
1497          + ")");
1498      }
1499
1500      baosDos.writeInt((int) subEntryWithinEntry);
1501
1502      if (baosDos.size() != MID_KEY_METADATA_SIZE) {
1503        throw new IOException("Could not write mid-key metadata: size=" + baosDos.size()
1504          + ", correct size: " + MID_KEY_METADATA_SIZE);
1505      }
1506
1507      // Close just to be good citizens, although this has no effect.
1508      baos.close();
1509
1510      return baos.toByteArray();
1511    }
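
    // Illustrative example, continuing with numSubEntriesAt = [10, 25, 40]:
    // totalNumSubEntries is 40, so midKeySubEntry is (40 - 1) / 2 = 19 and
    // getEntryBySubEntry(19) returns 1. The serialized metadata is then
    // entry #1's block offset (long), its on-disk size (int), and the
    // mid-key's index within that block, 19 - 10 = 9 (int) -- 16 bytes in
    // total.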
1512
1513    /** Returns the size of this chunk if stored in the non-root index block format */
1514    @Override
1515    public int getNonRootSize() {
1516      return Bytes.SIZEOF_INT // Number of entries
1517        + Bytes.SIZEOF_INT * (blockKeys.size() + 1) // Secondary index
1518        + curTotalNonRootEntrySize; // All entries
1519    }
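
    // Illustrative computation for a chunk holding 3 entries with 10-byte
    // keys, again assuming a 12-byte per-entry offset/size prefix:
    //   4                 // entry count
    //     + 4 * (3 + 1)   // secondary index: one offset per entry plus the
    //                     //   total-size slot
    //     + 3 * (12 + 10) // the entries themselves
    //   = 86 bytes in the non-root format.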
1520
1521    @Override
1522    public int getCurTotalNonRootEntrySize() {
1523      return curTotalNonRootEntrySize;
1524    }
1525
1526    @Override
1527    public List<byte[]> getBlockKeys() {
1528      return blockKeys;
1529    }
1530
1531    @Override
1532    public List<Integer> getSecondaryIndexOffsetMarks() {
1533      return secondaryIndexOffsetMarks;
1534    }
1535
1536    /** Returns the size of this chunk if stored in the root index block format */
1537    @Override
1538    public int getRootSize() {
1539      return curTotalRootSize;
1540    }
1541
1542    /** Returns the number of entries in this block index chunk */
1543    public int getNumEntries() {
1544      return blockKeys.size();
1545    }
1546
1547    public byte[] getBlockKey(int i) {
1548      return blockKeys.get(i);
1549    }
1550
1551    public long getBlockOffset(int i) {
1552      return blockOffsets.get(i);
1553    }
1554
1555    public int getOnDiskDataSize(int i) {
1556      return onDiskDataSizes.get(i);
1557    }
1558
1559    public long getCumulativeNumKV(int i) {
      if (i < 0) {
        return 0;
      }
1561      return numSubEntriesAt.get(i);
1562    }
1563
1564  }
1565
1566  public static int getMaxChunkSize(Configuration conf) {
1567    return conf.getInt(MAX_CHUNK_SIZE_KEY, DEFAULT_MAX_CHUNK_SIZE);
1568  }
1569
1570  public static int getMinIndexNumEntries(Configuration conf) {
1571    return conf.getInt(MIN_INDEX_NUM_ENTRIES_KEY, DEFAULT_MIN_INDEX_NUM_ENTRIES);
1572  }
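
  // Illustrative override of these defaults through the standard Hadoop
  // Configuration API:
  //   Configuration conf = new Configuration();
  //   conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, 64 * 1024);
  //   int maxChunkSize = HFileBlockIndex.getMaxChunkSize(conf); // 65536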
1573}