/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.InnerStoreCellComparator;
import org.apache.hadoop.hbase.MetaCellComparator;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;

/**
 * The {@link HFile} has a fixed trailer which contains offsets to other variable parts of the
 * file, as well as basic metadata about the file. The trailer size is fixed within a given
 * {@link HFile} format version only, but we always store the version number as the last
 * four-byte integer of the file. The version number itself is split into two portions, a major
 * version and a minor version. The last three bytes of the file hold the major version and the
 * single preceding byte holds the minor version. The major version determines which
 * readers/writers to use to read/write an HFile, while the minor version captures smaller
 * changes in the HFile format that do not need a new reader/writer type.
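 * <p>
 * For example, a major version of 3 with a minor version of 0 is stored as the big-endian
 * integer {@code 0x00000003}, so the file ends with the bytes {@code 00 00 00 03}: the first of
 * those four bytes is the minor version and the remaining three hold the major version.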
 */
@InterfaceAudience.Private
public class FixedFileTrailer {
  private static final Logger LOG = LoggerFactory.getLogger(FixedFileTrailer.class);

  /**
   * We store the comparator class name as a fixed-length field in the trailer.
   */
  private static final int MAX_COMPARATOR_NAME_LENGTH = 128;

  /**
   * Offset to the fileinfo data, a small block of vitals. Necessary in v1 but only potentially
   * useful for pretty-printing in v2.
   */
  private long fileInfoOffset;

  /**
   * In version 1, the offset to the data block index. Starting from version 2, the meaning of this
   * field is the offset to the section of the file that should be loaded at the time the file is
   * being opened: i.e. on open we load the root index, file info, etc. See
   * http://hbase.apache.org/book.html#_hfile_format_2 in the reference guide.
   */
  private long loadOnOpenDataOffset;

  /**
   * The number of entries in the root data index.
   */
  private int dataIndexCount;

  /**
   * Total uncompressed size of all blocks of the data index
   */
  private long uncompressedDataIndexSize;

  /**
   * The number of entries in the meta index
   */
  private int metaIndexCount;

  /**
   * The total uncompressed size of keys/values stored in the file.
   */
  private long totalUncompressedBytes;

  /**
   * The number of key/value pairs in the file. This field was int in version 1, but is now long.
   */
  private long entryCount;

  /**
   * The compression codec used for all blocks.
   */
  private Compression.Algorithm compressionCodec = Compression.Algorithm.NONE;

  /**
   * The number of levels in the potentially multi-level data index. Used from version 2 onwards.
   */
  private int numDataIndexLevels;

  /**
   * The offset of the first data block.
   */
  private long firstDataBlockOffset;

  /**
   * It is guaranteed that no key/value data blocks start after this offset in the file.
   */
  private long lastDataBlockOffset;

  /**
   * Raw key comparator class name in version 3
   */
  // We could write the actual class name from 2.0 onwards and handle BC
  private String comparatorClassName =
    InnerStoreCellComparator.INNER_STORE_COMPARATOR.getClass().getName();

  /**
   * The encryption key
   */
  private byte[] encryptionKey;

  /**
   * The {@link HFile} format major version.
   */
  private final int majorVersion;

  /**
   * The {@link HFile} format minor version.
   */
  private final int minorVersion;

  FixedFileTrailer(int majorVersion, int minorVersion) {
    this.majorVersion = majorVersion;
    this.minorVersion = minorVersion;
    HFile.checkFormatVersion(majorVersion);
  }

  private static int[] computeTrailerSizeByVersion() {
    int[] versionToSize = new int[HFile.MAX_FORMAT_VERSION + 1];
    // We support only two major versions now, i.e. V2 and V3.
    versionToSize[2] = 212;
    for (int version = 3; version <= HFile.MAX_FORMAT_VERSION; version++) {
      // The max trailer size for V3 and above is fixed at 4KB to leave room for
      // future enhancements, if any.
      // This holds as long as the trailer never actually exceeds 4KB.
      versionToSize[version] = 1024 * 4;
    }
    return versionToSize;
  }

  private static int getMaxTrailerSize() {
    int maxSize = 0;
    for (int version = HFile.MIN_FORMAT_VERSION; version <= HFile.MAX_FORMAT_VERSION; ++version) {
      maxSize = Math.max(getTrailerSize(version), maxSize);
    }
    return maxSize;
  }

  private static final int[] TRAILER_SIZE = computeTrailerSizeByVersion();
  private static final int MAX_TRAILER_SIZE = getMaxTrailerSize();

  private static final int NOT_PB_SIZE = BlockType.MAGIC_LENGTH + Bytes.SIZEOF_INT;

  static int getTrailerSize(int version) {
    return TRAILER_SIZE[version];
  }

  public int getTrailerSize() {
    return getTrailerSize(majorVersion);
  }

  /**
   * Write the trailer to a data stream. The serialized trailer always occupies exactly
   * {@link #getTrailerSize()} bytes, no matter how large the protobuf encoding of the trailer
   * fields turns out to be.
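   * <p>
   * The resulting layout (a sketch of what this method writes, not a normative format spec):
   *
   * <pre>
   *   TRAILER block magic | protobuf-delimited trailer fields | zero padding | 4-byte version
   * </pre>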
   */
  void serialize(DataOutputStream outputStream) throws IOException {
    HFile.checkFormatVersion(majorVersion);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream baosDos = new DataOutputStream(baos);

    BlockType.TRAILER.write(baosDos);
    serializeAsPB(baosDos);

    // The last 4 bytes of the file encode the major and minor version universally
    baosDos.writeInt(materializeVersion(majorVersion, minorVersion));

    baos.writeTo(outputStream);
  }

  HFileProtos.FileTrailerProto toProtobuf() {
    HFileProtos.FileTrailerProto.Builder builder = HFileProtos.FileTrailerProto.newBuilder()
      .setFileInfoOffset(fileInfoOffset).setLoadOnOpenDataOffset(loadOnOpenDataOffset)
      .setUncompressedDataIndexSize(uncompressedDataIndexSize)
      .setTotalUncompressedBytes(totalUncompressedBytes).setDataIndexCount(dataIndexCount)
      .setMetaIndexCount(metaIndexCount).setEntryCount(entryCount)
      .setNumDataIndexLevels(numDataIndexLevels).setFirstDataBlockOffset(firstDataBlockOffset)
      .setLastDataBlockOffset(lastDataBlockOffset).setComparatorClassName(comparatorClassName)
      .setCompressionCodec(compressionCodec.ordinal());
    if (encryptionKey != null) {
      builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(encryptionKey));
    }
    return builder.build();
  }

  /**
   * Write trailer data as protobuf.
   */
  void serializeAsPB(DataOutputStream output) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // We need this extra copy unfortunately to determine the final size of the
    // delimited output; see the use of baos.size() below.
    toProtobuf().writeDelimitedTo(baos);
    baos.writeTo(output);
    // Pad to make up the difference between the variable PB encoding length and the
    // length when encoded as writable under earlier V2 formats. If we fail to pad
    // properly, or if the PB encoding is too big, the trailer won't be read back
    // correctly by HFile.
    int padding = getTrailerSize() - NOT_PB_SIZE - baos.size();
    if (padding < 0) {
      throw new IOException("Pbuf encoding size exceeded fixed trailer size limit");
    }
    for (int i = 0; i < padding; i++) {
      output.write(0);
    }
  }

  /**
   * Deserialize the fixed file trailer from the given stream. The major and minor versions must
   * already be set on this trailer. Make sure this is consistent with
   * {@link #serialize(DataOutputStream)}.
   */
  void deserialize(DataInputStream inputStream) throws IOException {
    HFile.checkFormatVersion(majorVersion);

    BlockType.TRAILER.readAndCheck(inputStream);

    if (
      majorVersion > 2
        || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION)
    ) {
      deserializeFromPB(inputStream);
    } else {
      deserializeFromWritable(inputStream);
    }

    // The last 4 bytes of the file encode the major and minor version universally
    int version = inputStream.readInt();
    expectMajorVersion(extractMajorVersion(version));
    expectMinorVersion(extractMinorVersion(version));
  }

  /**
   * Deserialize the file trailer as protobuf
   */
  void deserializeFromPB(DataInputStream inputStream) throws IOException {
    // read PB and skip padding
    int start = inputStream.available();
    HFileProtos.FileTrailerProto trailerProto =
      HFileProtos.FileTrailerProto.parser().parseDelimitedFrom(inputStream);
    int size = start - inputStream.available();
    inputStream.skip(getTrailerSize() - NOT_PB_SIZE - size);

    // process the PB
    if (trailerProto.hasFileInfoOffset()) {
      fileInfoOffset = trailerProto.getFileInfoOffset();
    }
    if (trailerProto.hasLoadOnOpenDataOffset()) {
      loadOnOpenDataOffset = trailerProto.getLoadOnOpenDataOffset();
    }
    if (trailerProto.hasUncompressedDataIndexSize()) {
      uncompressedDataIndexSize = trailerProto.getUncompressedDataIndexSize();
    }
    if (trailerProto.hasTotalUncompressedBytes()) {
      totalUncompressedBytes = trailerProto.getTotalUncompressedBytes();
    }
    if (trailerProto.hasDataIndexCount()) {
      dataIndexCount = trailerProto.getDataIndexCount();
    }
    if (trailerProto.hasMetaIndexCount()) {
      metaIndexCount = trailerProto.getMetaIndexCount();
    }
    if (trailerProto.hasEntryCount()) {
      entryCount = trailerProto.getEntryCount();
    }
    if (trailerProto.hasNumDataIndexLevels()) {
      numDataIndexLevels = trailerProto.getNumDataIndexLevels();
    }
    if (trailerProto.hasFirstDataBlockOffset()) {
      firstDataBlockOffset = trailerProto.getFirstDataBlockOffset();
    }
    if (trailerProto.hasLastDataBlockOffset()) {
      lastDataBlockOffset = trailerProto.getLastDataBlockOffset();
    }
    if (trailerProto.hasComparatorClassName()) {
      setComparatorClass(getComparatorClass(trailerProto.getComparatorClassName()));
    }
    if (trailerProto.hasCompressionCodec()) {
      compressionCodec = Compression.Algorithm.values()[trailerProto.getCompressionCodec()];
    } else {
      compressionCodec = Compression.Algorithm.NONE;
    }
    if (trailerProto.hasEncryptionKey()) {
      encryptionKey = trailerProto.getEncryptionKey().toByteArray();
    }
  }

  /**
   * Deserialize the file trailer as writable data
   */
  void deserializeFromWritable(DataInput input) throws IOException {
    fileInfoOffset = input.readLong();
    loadOnOpenDataOffset = input.readLong();
    dataIndexCount = input.readInt();
    uncompressedDataIndexSize = input.readLong();
    metaIndexCount = input.readInt();

    totalUncompressedBytes = input.readLong();
    entryCount = input.readLong();
    compressionCodec = Compression.Algorithm.values()[input.readInt()];
    numDataIndexLevels = input.readInt();
    firstDataBlockOffset = input.readLong();
    lastDataBlockOffset = input.readLong();
    // TODO this is a classname encoded into an HFile's trailer. We are going to need to have
    // some compat code here.
    setComparatorClass(
      getComparatorClass(Bytes.readStringFixedSize(input, MAX_COMPARATOR_NAME_LENGTH)));
  }

  private void append(StringBuilder sb, String s) {
    if (sb.length() > 0) {
      sb.append(", ");
    }
    sb.append(s);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    append(sb, "fileinfoOffset=" + fileInfoOffset);
    append(sb, "loadOnOpenDataOffset=" + loadOnOpenDataOffset);
    append(sb, "dataIndexCount=" + dataIndexCount);
    append(sb, "metaIndexCount=" + metaIndexCount);
    append(sb, "totalUncompressedBytes=" + totalUncompressedBytes);
    append(sb, "entryCount=" + entryCount);
    append(sb, "compressionCodec=" + compressionCodec);
    append(sb, "uncompressedDataIndexSize=" + uncompressedDataIndexSize);
    append(sb, "numDataIndexLevels=" + numDataIndexLevels);
    append(sb, "firstDataBlockOffset=" + firstDataBlockOffset);
    append(sb, "lastDataBlockOffset=" + lastDataBlockOffset);
    append(sb, "comparatorClassName=" + comparatorClassName);
    if (majorVersion >= 3) {
      append(sb, "encryptionKey=" + (encryptionKey != null ? "PRESENT" : "NONE"));
    }
    append(sb, "majorVersion=" + majorVersion);
    append(sb, "minorVersion=" + minorVersion);

    return sb.toString();
  }

  /**
   * Reads a file trailer from the given file.
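   * <p>
   * For example (a sketch; {@code fs} and {@code path} are assumed to be an already-configured
   * {@link org.apache.hadoop.fs.FileSystem} and {@link org.apache.hadoop.fs.Path}):
   *
   * <pre>{@code
   * long fileSize = fs.getFileStatus(path).getLen();
   * try (FSDataInputStream in = fs.open(path)) {
   *   FixedFileTrailer trailer = FixedFileTrailer.readFromStream(in, fileSize);
   * }
   * }</pre>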
   * @param istream  the input stream with the ability to seek. Does not have to be buffered, as
   *                 only one read operation is made.
   * @param fileSize the file size. Can be obtained using
   *                 {@link org.apache.hadoop.fs.FileSystem#getFileStatus( org.apache.hadoop.fs.Path)}.
   * @return the fixed file trailer read
   * @throws IOException if failed to read from the underlying stream, or the trailer is corrupted,
   *                     or the version of the trailer is unsupported
   */
  public static FixedFileTrailer readFromStream(FSDataInputStream istream, long fileSize)
    throws IOException {
    int bufferSize = MAX_TRAILER_SIZE;
    long seekPoint = fileSize - bufferSize;
    if (seekPoint < 0) {
      // It is hard to imagine such a small HFile.
      seekPoint = 0;
      bufferSize = (int) fileSize;
    }

    istream.seek(seekPoint);

    ByteBuffer buf = ByteBuffer.allocate(bufferSize);
    istream.readFully(buf.array(), buf.arrayOffset(), buf.arrayOffset() + buf.limit());

    // Read the version from the last int of the file.
    buf.position(buf.limit() - Bytes.SIZEOF_INT);
    int version = buf.getInt();

    // Extract the major and minor versions.
    int majorVersion = extractMajorVersion(version);
    int minorVersion = extractMinorVersion(version);

    HFile.checkFormatVersion(majorVersion); // throws IAE if invalid

    int trailerSize = getTrailerSize(majorVersion);

    FixedFileTrailer fft = new FixedFileTrailer(majorVersion, minorVersion);
    fft.deserialize(new DataInputStream(new ByteArrayInputStream(buf.array(),
      buf.arrayOffset() + bufferSize - trailerSize, trailerSize)));
    boolean isScanMetricsEnabled = ThreadLocalServerSideScanMetrics.isScanMetricsEnabled();
    if (isScanMetricsEnabled) {
      ThreadLocalServerSideScanMetrics.addBytesReadFromFs(trailerSize);
      ThreadLocalServerSideScanMetrics.addBlockReadOpsCount(1);
    }
    return fft;
  }

  public void expectMajorVersion(int expected) {
    if (majorVersion != expected) {
      throw new IllegalArgumentException(
        "Invalid HFile major version: " + majorVersion + " (expected: " + expected + ")");
    }
  }

  public void expectMinorVersion(int expected) {
    if (minorVersion != expected) {
      throw new IllegalArgumentException(
        "Invalid HFile minor version: " + minorVersion + " (expected: " + expected + ")");
    }
  }

  public void expectAtLeastMajorVersion(int lowerBound) {
    if (majorVersion < lowerBound) {
      throw new IllegalArgumentException("Invalid HFile major version: " + majorVersion
        + " (expected: " + lowerBound + " or higher).");
    }
  }

  public long getFileInfoOffset() {
    return fileInfoOffset;
  }

  public void setFileInfoOffset(long fileInfoOffset) {
    this.fileInfoOffset = fileInfoOffset;
  }

  public long getLoadOnOpenDataOffset() {
    return loadOnOpenDataOffset;
  }

  public void setLoadOnOpenOffset(long loadOnOpenDataOffset) {
    this.loadOnOpenDataOffset = loadOnOpenDataOffset;
  }

  public int getDataIndexCount() {
    return dataIndexCount;
  }

  public void setDataIndexCount(int dataIndexCount) {
    this.dataIndexCount = dataIndexCount;
  }

  public int getMetaIndexCount() {
    return metaIndexCount;
  }

  public void setMetaIndexCount(int metaIndexCount) {
    this.metaIndexCount = metaIndexCount;
  }

  public long getTotalUncompressedBytes() {
    return totalUncompressedBytes;
  }

  public void setTotalUncompressedBytes(long totalUncompressedBytes) {
    this.totalUncompressedBytes = totalUncompressedBytes;
  }

  public long getEntryCount() {
    return entryCount;
  }

  public void setEntryCount(long newEntryCount) {
    entryCount = newEntryCount;
  }

  public Compression.Algorithm getCompressionCodec() {
    return compressionCodec;
  }

  public void setCompressionCodec(Compression.Algorithm compressionCodec) {
    this.compressionCodec = compressionCodec;
  }

  public int getNumDataIndexLevels() {
    expectAtLeastMajorVersion(2);
    return numDataIndexLevels;
  }

  public void setNumDataIndexLevels(int numDataIndexLevels) {
    expectAtLeastMajorVersion(2);
    this.numDataIndexLevels = numDataIndexLevels;
  }

  public long getLastDataBlockOffset() {
    expectAtLeastMajorVersion(2);
    return lastDataBlockOffset;
  }

  public void setLastDataBlockOffset(long lastDataBlockOffset) {
    expectAtLeastMajorVersion(2);
    this.lastDataBlockOffset = lastDataBlockOffset;
  }

  public long getFirstDataBlockOffset() {
    expectAtLeastMajorVersion(2);
    return firstDataBlockOffset;
  }

  public void setFirstDataBlockOffset(long firstDataBlockOffset) {
    expectAtLeastMajorVersion(2);
    this.firstDataBlockOffset = firstDataBlockOffset;
  }

  public String getComparatorClassName() {
    return comparatorClassName;
  }

  /**
   * Returns the major version of this HFile format
   */
  public int getMajorVersion() {
    return majorVersion;
  }

  /**
   * Returns the minor version of this HFile format
   */
  public int getMinorVersion() {
    return minorVersion;
  }

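  /**
   * Set the comparator class name stored in the trailer, first verifying that the class can be
   * instantiated. A {@code null} class (used for the raw bytes comparator) leaves the current
   * name unchanged.
   */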
  public void setComparatorClass(Class<? extends CellComparator> klass) {
    // Is the comparator instantiable?
    try {
      // If null, it should be the Bytes.BYTES_RAWCOMPARATOR
      if (klass != null) {
        // Instantiate the comparator to verify the class is usable; the instance itself is
        // not kept.
        CellComparator comp = klass.getDeclaredConstructor().newInstance();
        this.comparatorClassName = klass.getName();
      }
    } catch (Exception e) {
      throw new RuntimeException("Comparator class " + klass.getName() + " is not instantiable", e);
    }
  }

  @SuppressWarnings("unchecked")
  private static Class<? extends CellComparator> getComparatorClass(String comparatorClassName)
    throws IOException {
    Class<? extends CellComparator> comparatorKlass;
    // For backward compatibility: on 2.x we force the comparator class name to be
    // "KeyValue$KVComparator" or "KeyValue$MetaComparator", even though newer 2.x versions no
    // longer use those classes, so that upgrades and downgrades between different 2.x versions
    // keep working. So here on 3.x we still need to check these two class names although the
    // actual classes have already been purged.
    if (
      comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$KVComparator")
        || comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator")
    ) {
      comparatorKlass = InnerStoreCellComparator.class;
    } else if (
      comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$MetaComparator")
        || comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator")
        || comparatorClassName
          .equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator")
        || comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator")
    ) {
      comparatorKlass = MetaCellComparator.class;
    } else if (
      comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator")
        || comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator")
    ) {
      // When the comparator to be used is Bytes.BYTES_RAWCOMPARATOR, we just return null from
      // here; Bytes.BYTES_RAWCOMPARATOR is not a CellComparator.
      comparatorKlass = null;
    } else {
      // If the name wasn't one of the legacy names, maybe it's a legitimate new comparator.
      try {
        comparatorKlass = (Class<? extends CellComparator>) Class.forName(comparatorClassName);
      } catch (ClassNotFoundException e) {
        throw new IOException(e);
      }
    }
    return comparatorKlass;
  }

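  /**
   * Instantiate the comparator for the given class name. The shared singletons are returned for
   * the two standard comparator class names, legacy names are mapped to their current
   * equivalents, and {@code null} is returned when the name refers to the raw bytes comparator.
   */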
  static CellComparator createComparator(String comparatorClassName) throws IOException {
    if (
      comparatorClassName
        .equals(InnerStoreCellComparator.INNER_STORE_COMPARATOR.getClass().getName())
    ) {
      return InnerStoreCellComparator.INNER_STORE_COMPARATOR;
    } else
      if (comparatorClassName.equals(MetaCellComparator.META_COMPARATOR.getClass().getName())) {
        return MetaCellComparator.META_COMPARATOR;
      }
    try {
      Class<? extends CellComparator> comparatorClass = getComparatorClass(comparatorClassName);
      if (comparatorClass != null) {
        return comparatorClass.getDeclaredConstructor().newInstance();
      }
      LOG.warn("No Comparator class for " + comparatorClassName + ". Returning Null.");
      return null;
    } catch (Exception e) {
      throw new IOException("Comparator class " + comparatorClassName + " is not instantiable", e);
    }
  }

  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
  public CellComparator createComparator() throws IOException {
    expectAtLeastMajorVersion(2);
    return createComparator(comparatorClassName);
  }

  public long getUncompressedDataIndexSize() {
    return uncompressedDataIndexSize;
  }

  public void setUncompressedDataIndexSize(long uncompressedDataIndexSize) {
    expectAtLeastMajorVersion(2);
    this.uncompressedDataIndexSize = uncompressedDataIndexSize;
  }

  public byte[] getEncryptionKey() {
    // This is a v3 feature but if reading a v2 file the encryptionKey will just be null, which
    // is fine for this feature.
    expectAtLeastMajorVersion(2);
    return encryptionKey;
  }

  public void setEncryptionKey(byte[] keyBytes) {
    this.encryptionKey = keyBytes;
  }

  /**
   * Extracts the major version from a 4-byte serialized version. The major version is stored in
   * the 3 least significant bytes.
   */
  private static int extractMajorVersion(int serializedVersion) {
    return (serializedVersion & 0x00ffffff);
  }

  /**
   * Extracts the minor version from a 4-byte serialized version. The minor version is stored in
   * the most significant byte.
   */
  private static int extractMinorVersion(int serializedVersion) {
    return (serializedVersion >>> 24);
  }

  /**
   * Create a 4 byte serialized version number by combining the minor and major version numbers.
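   * <p>
   * For example, {@code materializeVersion(3, 0)} yields {@code 0x00000003}, and
   * {@code materializeVersion(2, 3)} yields {@code 0x03000002}: the minor version occupies the
   * most significant byte and the major version the remaining three bytes.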
   */
  static int materializeVersion(int majorVersion, int minorVersion) {
    return ((majorVersion & 0x00ffffff) | (minorVersion << 24));
  }
}