/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.SequenceInputStream;
import java.security.Key;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.crypto.Cipher;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;

/**
 * Metadata Map of attributes for HFile written out as HFile Trailer. Created by the Writer and
 * added to the tail of the file just before close. Metadata includes core attributes such as the
 * last key seen, the comparator used writing the file, etc. Clients can add their own attributes
 * via {@link #append(byte[], byte[], boolean)} and they'll be persisted and available at read
 * time. The Reader creates the HFileInfo on open by reading the tail of the HFile. Parsing the
 * HFile trailer also creates a {@link HFileContext}, a read-only data structure that includes the
 * bulk of the HFileInfo plus extras, and that is safe to pass around when working on HFiles.
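 * <p>
 * A minimal write-side sketch (the key name here is hypothetical; any key without the reserved
 * "hfile." prefix works):
 *
 * <pre>
 * HFileInfo fileInfo = new HFileInfo();
 * // checkPrefix=true rejects keys that start with the reserved prefix
 * fileInfo.append(Bytes.toBytes("myapp.MY_KEY"), Bytes.toBytes("my-value"), true);
 * </pre>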
 * @see HFileContext
 */
@InterfaceAudience.Private
public class HFileInfo implements SortedMap<byte[], byte[]> {

  private static final Logger LOG = LoggerFactory.getLogger(HFileInfo.class);

  static final String RESERVED_PREFIX = "hfile.";
  static final byte[] RESERVED_PREFIX_BYTES = Bytes.toBytes(RESERVED_PREFIX);
  static final byte[] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY");
  static final byte[] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN");
  static final byte[] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN");
  static final byte[] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS");
  static final byte[] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED");
  static final byte[] KEY_OF_BIGGEST_CELL = Bytes.toBytes(RESERVED_PREFIX + "KEY_OF_BIGGEST_CELL");
  static final byte[] LEN_OF_BIGGEST_CELL = Bytes.toBytes(RESERVED_PREFIX + "LEN_OF_BIGGEST_CELL");
  public static final byte[] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN");
  private final SortedMap<byte[], byte[]> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);

  /**
   * We can read files whose major version is v2 IFF their minor version is at least 3.
   */
  private static final int MIN_V2_MINOR_VERSION_WITH_PB = 3;

  /** Maximum minor version supported by this HFile format */
  // We went to version 2 when we moved to pb'ing fileinfo and the trailer on
  // the file. This version can read Writables version 1.
  static final int MAX_MINOR_VERSION = 3;

  /** Last key in the file. Filled in when we read in the file info */
  private ExtendedCell lastKeyCell = null;
  /** Average key length read from file info */
  private int avgKeyLen = -1;
  /** Average value length read from file info */
  private int avgValueLen = -1;
  /** Biggest Cell in the file, key only. Filled in when we read in the file info */
  private Cell biggestCell = null;
  /** Length of the biggest Cell */
  private long lenOfBiggestCell = -1;
  private boolean includesMemstoreTS = false;
  private boolean decodeMemstoreTS = false;

  /**
   * Blocks read from the load-on-open section, excluding data root index, meta index, and file
   * info.
   */
  private List<HFileBlock> loadOnOpenBlocks = new ArrayList<>();

  /**
   * The iterator tracks all blocks in the load-on-open section. Since we now use
   * {@link org.apache.hadoop.hbase.io.ByteBuffAllocator} to manage the ByteBuffers backing those
   * blocks, we must ensure that all of the ByteBuffers are deallocated in the end.
   */
  private HFileBlock.BlockIterator blockIter;

  private HFileBlockIndex.CellBasedKeyBlockIndexReader dataIndexReader;
  private HFileBlockIndex.ByteArrayKeyBlockIndexReader metaIndexReader;

  private FixedFileTrailer trailer;
  private HFileContext hfileContext;

  public HFileInfo() {
    super();
  }

  public HFileInfo(ReaderContext context, Configuration conf) throws IOException {
    this.initTrailerAndContext(context, conf);
  }

  /**
   * Append the given key/value pair to the file info, optionally checking the key prefix.
   * @param k           key to add
   * @param v           value to add
   * @param checkPrefix whether to check that the provided key does not start with the reserved
   *                    prefix
   * @return this file info object
   * @throws IOException          if {@code checkPrefix} is true and the key starts with the
   *                              reserved prefix
   * @throws NullPointerException if {@code k} or {@code v} is {@code null}
   */
  public HFileInfo append(final byte[] k, final byte[] v, final boolean checkPrefix)
    throws IOException {
    Objects.requireNonNull(k, "key cannot be null");
    Objects.requireNonNull(v, "value cannot be null");

    if (checkPrefix && isReservedFileInfoKey(k)) {
      throw new IOException("Keys with a '" + HFileInfo.RESERVED_PREFIX + "' prefix are reserved");
    }
    put(k, v);
    return this;
  }

  /** Return true if the given file info key is reserved for internal use. */
  public static boolean isReservedFileInfoKey(byte[] key) {
    return Bytes.startsWith(key, HFileInfo.RESERVED_PREFIX_BYTES);
  }

  @Override
  public void clear() {
    this.map.clear();
  }

  @Override
  public Comparator<? super byte[]> comparator() {
    return map.comparator();
  }

  @Override
  public boolean containsKey(Object key) {
    return map.containsKey(key);
  }

  @Override
  public boolean containsValue(Object value) {
    return map.containsValue(value);
  }

  @Override
  public Set<java.util.Map.Entry<byte[], byte[]>> entrySet() {
    return map.entrySet();
  }

  @Override
  public boolean equals(Object o) {
    return map.equals(o);
  }

  @Override
  public byte[] firstKey() {
    return map.firstKey();
  }

  @Override
  public byte[] get(Object key) {
    return map.get(key);
  }

  @Override
  public int hashCode() {
    return map.hashCode();
  }

  @Override
  public SortedMap<byte[], byte[]> headMap(byte[] toKey) {
    return this.map.headMap(toKey);
  }

  @Override
  public boolean isEmpty() {
    return map.isEmpty();
  }

  @Override
  public Set<byte[]> keySet() {
    return map.keySet();
  }

  @Override
  public byte[] lastKey() {
    return map.lastKey();
  }

  @Override
  public byte[] put(byte[] key, byte[] value) {
    return this.map.put(key, value);
  }

  @Override
  public void putAll(Map<? extends byte[], ? extends byte[]> m) {
    this.map.putAll(m);
  }

  @Override
  public byte[] remove(Object key) {
    return this.map.remove(key);
  }

  @Override
  public int size() {
    return map.size();
  }

  @Override
  public SortedMap<byte[], byte[]> subMap(byte[] fromKey, byte[] toKey) {
    return this.map.subMap(fromKey, toKey);
  }

  @Override
  public SortedMap<byte[], byte[]> tailMap(byte[] fromKey) {
    return this.map.tailMap(fromKey);
  }

  @Override
  public Collection<byte[]> values() {
    return map.values();
  }

  /**
   * Write out this instance on the passed in <code>out</code> stream. We write it as a protobuf.
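   * <p>
   * A rough sketch of the resulting on-disk layout, per the code below:
   *
   * <pre>
   * PB_MAGIC bytes ("PBUF")
   * length-delimited HFileProtos.FileInfoProto (map entries as BytesBytesPair)
   * </pre>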
   * @see #read(DataInputStream)
   */
  void write(final DataOutputStream out) throws IOException {
    HFileProtos.FileInfoProto.Builder builder = HFileProtos.FileInfoProto.newBuilder();
    for (Map.Entry<byte[], byte[]> e : this.map.entrySet()) {
      HBaseProtos.BytesBytesPair.Builder bbpBuilder = HBaseProtos.BytesBytesPair.newBuilder();
      bbpBuilder.setFirst(UnsafeByteOperations.unsafeWrap(e.getKey()));
      bbpBuilder.setSecond(UnsafeByteOperations.unsafeWrap(e.getValue()));
      builder.addMapEntry(bbpBuilder.build());
    }
    out.write(ProtobufMagic.PB_MAGIC);
    builder.build().writeDelimitedTo(out);
  }

  /**
   * Populate this instance with what we find on the passed in <code>in</code> stream. Can
   * deserialize either protobuf or the old Writables format.
   * @see #write(DataOutputStream)
   */
  void read(final DataInputStream in) throws IOException {
    // This code is tested over in TestHFileReaderV1 where we read an old hfile w/ this new code.
    int pblen = ProtobufUtil.lengthOfPBMagic();
    byte[] pbuf = new byte[pblen];
    if (in.markSupported()) {
      in.mark(pblen);
    }
    int read = in.read(pbuf);
    if (read != pblen) {
      throw new IOException("read=" + read + ", wanted=" + pblen);
    }
    if (ProtobufUtil.isPBMagicPrefix(pbuf)) {
      parsePB(HFileProtos.FileInfoProto.parseDelimitedFrom(in));
    } else {
      if (in.markSupported()) {
        in.reset();
        parseWritable(in);
      } else {
        // We cannot use BufferedInputStream, it consumes more than we read from the underlying IS
        ByteArrayInputStream bais = new ByteArrayInputStream(pbuf);
        SequenceInputStream sis = new SequenceInputStream(bais, in); // Concatenate input streams
        // TODO: Am I leaking anything here wrapping the passed in stream? We are not calling
        // close on the wrapped streams but they should be let go after we leave this context?
        // I see that we keep a reference to the passed in inputstream but since we no longer
        // have a reference to this after we leave, we should be ok.
        parseWritable(new DataInputStream(sis));
      }
    }
  }

  /**
   * Parse the old Writable format: a count of entries followed by the entries themselves, each a
   * byte [] key and a byte [] value. The old format also wrote a code byte with each entry that
   * identified the value's class. We know it is always byte [] in an hfile, so below we just read
   * and discard that byte.
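   * <p>
   * Roughly, the legacy layout parsed below is:
   *
   * <pre>
   * int entryCount
   * repeated entryCount times:
   *   vint length + key bytes
   *   one code byte (ignored)
   *   vint length + value bytes
   * </pre>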
   */
  void parseWritable(final DataInputStream in) throws IOException {
    // First clear the map.
    // Otherwise we will just accumulate entries every time this method is called.
    this.map.clear();
    // Read the number of entries in the map
    int entries = in.readInt();
    // Then read each key/value pair
    for (int i = 0; i < entries; i++) {
      byte[] key = Bytes.readByteArray(in);
      // We used to read a byte that encoded the class type.
      // Read and ignore it because it is always byte [] in hfile
      in.readByte();
      byte[] value = Bytes.readByteArray(in);
      this.map.put(key, value);
    }
  }

  /**
   * Fill our map with content of the pb we read off disk
   * @param fip protobuf message to read
   */
  void parsePB(final HFileProtos.FileInfoProto fip) {
    this.map.clear();
    for (BytesBytesPair pair : fip.getMapEntryList()) {
      this.map.put(pair.getFirst().toByteArray(), pair.getSecond().toByteArray());
    }
  }

  public void initTrailerAndContext(ReaderContext context, Configuration conf) throws IOException {
    try {
      boolean isHBaseChecksum = context.getInputStreamWrapper().shouldUseHBaseChecksum();
      trailer = FixedFileTrailer.readFromStream(
        context.getInputStreamWrapper().getStream(isHBaseChecksum), context.getFileSize());
      Path path = context.getFilePath();
      checkFileVersion(path);
      this.hfileContext = createHFileContext(path, trailer, conf);
      context.getInputStreamWrapper().unbuffer();
    } catch (Throwable t) {
      IOUtils.closeQuietly(context.getInputStreamWrapper(),
        e -> LOG.warn("failed to close input stream wrapper", e));
      throw new CorruptHFileException(
        "Problem reading HFile Trailer from file " + context.getFilePath(), t);
    }
  }

  /**
   * Should be called after {@link #initTrailerAndContext(ReaderContext, Configuration)}.
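   * <p>
   * A rough open-time sequence (how the {@link HFile.Reader} is obtained varies by caller):
   *
   * <pre>
   * HFileInfo fileInfo = new HFileInfo(context, conf); // reads the trailer, builds the HFileContext
   * HFile.Reader reader = ...; // a reader constructed over the same ReaderContext
   * fileInfo.initMetaAndIndex(reader); // wires block indexes and file info into the reader
   * </pre>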
   */
  public void initMetaAndIndex(HFile.Reader reader) throws IOException {
    ReaderContext context = reader.getContext();
    try {
      HFileBlock.FSReader blockReader = reader.getUncachedBlockReader();
      // Initialize a block iterator, and parse the load-on-open blocks in the following.
      blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(),
        context.getFileSize() - trailer.getTrailerSize());
      // Data index. We also read statistics about the block index written after
      // the root level.
      HFileBlock dataBlockRootIndex = blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX);
      HFileBlock metaBlockIndex = blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX);
      loadMetaInfo(blockIter, hfileContext);

      HFileIndexBlockEncoder indexBlockEncoder =
        HFileIndexBlockEncoderImpl.createFromFileInfo(this);
      this.dataIndexReader = new HFileBlockIndex.CellBasedKeyBlockIndexReaderV2(
        trailer.createComparator(), trailer.getNumDataIndexLevels(), indexBlockEncoder);
      dataIndexReader.readMultiLevelIndexRoot(dataBlockRootIndex, trailer.getDataIndexCount());
      reader.setDataBlockIndexReader(dataIndexReader);
      // Meta index.
      this.metaIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1);
      metaIndexReader.readRootIndex(metaBlockIndex, trailer.getMetaIndexCount());
      reader.setMetaBlockIndexReader(metaIndexReader);

      reader.setDataBlockEncoder(HFileDataBlockEncoderImpl.createFromFileInfo(this));
      // Load-On-Open info
      HFileBlock b;
      while ((b = blockIter.nextBlock()) != null) {
        loadOnOpenBlocks.add(b);
      }
      // close the block reader
      context.getInputStreamWrapper().unbuffer();
    } catch (Throwable t) {
      IOUtils.closeQuietly(context.getInputStreamWrapper(),
        e -> LOG.warn("failed to close input stream wrapper", e));
      throw new CorruptHFileException(
        "Problem reading data index and meta index from file " + context.getFilePath(), t);
    }
  }

  private HFileContext createHFileContext(Path path, FixedFileTrailer trailer, Configuration conf)
    throws IOException {
    HFileContextBuilder builder = new HFileContextBuilder().withHBaseCheckSum(true)
      .withHFileName(path.getName()).withCompression(trailer.getCompressionCodec())
      .withCellComparator(FixedFileTrailer.createComparator(trailer.getComparatorClassName()));
    // Check for any key material available
    byte[] keyBytes = trailer.getEncryptionKey();
    if (keyBytes != null) {
      Encryption.Context cryptoContext = Encryption.newContext(conf);
      Key key = EncryptionUtil.unwrapKey(conf, keyBytes);
      // Use the algorithm the key wants
      Cipher cipher = Encryption.getCipher(conf, key.getAlgorithm());
      if (cipher == null) {
        throw new IOException(
          "Cipher '" + key.getAlgorithm() + "' is not available, path=" + path);
      }
      cryptoContext.setCipher(cipher);
      cryptoContext.setKey(key);
      builder.withEncryptionContext(cryptoContext);
    }
    HFileContext context = builder.build();
    return context;
  }

  private void loadMetaInfo(HFileBlock.BlockIterator blockIter, HFileContext hfileContext)
    throws IOException {
    read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
    byte[] creationTimeBytes = get(HFileInfo.CREATE_TIME_TS);
    hfileContext.setFileCreateTime(creationTimeBytes == null ? 0 : Bytes.toLong(creationTimeBytes));
    byte[] tmp = get(HFileInfo.MAX_TAGS_LEN);
    // If the max tag length is not present in the HFile, tags were not written to the file at all.
    if (tmp != null) {
      hfileContext.setIncludesTags(true);
      tmp = get(HFileInfo.TAGS_COMPRESSED);
      if (tmp != null && Bytes.toBoolean(tmp)) {
        hfileContext.setCompressTags(true);
      }
    }
    // parse meta info
    if (get(HFileInfo.LASTKEY) != null) {
      lastKeyCell = new KeyValue.KeyOnlyKeyValue(get(HFileInfo.LASTKEY));
    }
    if (get(HFileInfo.KEY_OF_BIGGEST_CELL) != null) {
      biggestCell = new KeyValue.KeyOnlyKeyValue(get(HFileInfo.KEY_OF_BIGGEST_CELL));
      lenOfBiggestCell = Bytes.toLong(get(HFileInfo.LEN_OF_BIGGEST_CELL));
    }
    avgKeyLen = Bytes.toInt(get(HFileInfo.AVG_KEY_LEN));
    avgValueLen = Bytes.toInt(get(HFileInfo.AVG_VALUE_LEN));
    byte[] keyValueFormatVersion = get(HFileWriterImpl.KEY_VALUE_VERSION);
    includesMemstoreTS = keyValueFormatVersion != null
      && Bytes.toInt(keyValueFormatVersion) == HFileWriterImpl.KEY_VALUE_VER_WITH_MEMSTORE;
    hfileContext.setIncludesMvcc(includesMemstoreTS);
    if (includesMemstoreTS) {
      decodeMemstoreTS = Bytes.toLong(get(HFileWriterImpl.MAX_MEMSTORE_TS_KEY)) > 0;
    }
  }

  /**
   * The file version check is a little sloppy: we read v3 files, but can also read v2 files if
   * their content has been pb'd (i.e. files written with 0.98).
   */
  private void checkFileVersion(Path path) {
    int majorVersion = trailer.getMajorVersion();
    if (majorVersion == getMajorVersion()) {
      return;
    }
    int minorVersion = trailer.getMinorVersion();
    if (majorVersion == 2 && minorVersion >= MIN_V2_MINOR_VERSION_WITH_PB) {
      return;
    }
    // We can read v3 or v2 versions of hfile.
    throw new IllegalArgumentException("Invalid HFile version: major=" + trailer.getMajorVersion()
      + ", minor=" + trailer.getMinorVersion() + ": expected at least major=2 and minor="
      + MIN_V2_MINOR_VERSION_WITH_PB + ", path=" + path);
  }

  public void close() {
    if (blockIter != null) {
      blockIter.freeBlocks();
    }
  }

  public int getMajorVersion() {
    return 3;
  }

  public void setTrailer(FixedFileTrailer trailer) {
    this.trailer = trailer;
  }

  public FixedFileTrailer getTrailer() {
    return this.trailer;
  }

  public HFileBlockIndex.CellBasedKeyBlockIndexReader getDataBlockIndexReader() {
    return this.dataIndexReader;
  }

  public HFileBlockIndex.ByteArrayKeyBlockIndexReader getMetaBlockIndexReader() {
    return this.metaIndexReader;
  }

  public HFileContext getHFileContext() {
    return this.hfileContext;
  }

  public List<HFileBlock> getLoadOnOpenBlocks() {
    return loadOnOpenBlocks;
  }

  public ExtendedCell getLastKeyCell() {
    return lastKeyCell;
  }

  public int getAvgKeyLen() {
    return avgKeyLen;
  }

  public int getAvgValueLen() {
    return avgValueLen;
  }

  public String getKeyOfBiggestCell() {
    return CellUtil.toString(biggestCell, false);
  }

  public long getLenOfBiggestCell() {
    return lenOfBiggestCell;
  }

  public boolean shouldIncludeMemStoreTS() {
    return includesMemstoreTS;
  }

  public boolean isDecodeMemstoreTS() {
    return decodeMemstoreTS;
  }
}