/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.InnerStoreCellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MetaCellComparator;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;

/**
 * The {@link HFile} has a fixed trailer which contains offsets to other variable parts of the file.
 * It also includes basic metadata on this file. The trailer size is fixed within a given
 * {@link HFile} format version only, but we always store the version number as the last four-byte
 * integer of the file. The version number itself is split into two portions, a major version and a
 * minor version. The last three bytes of the file hold the major version and the single preceding
 * byte holds the minor version. The major version determines which readers/writers to use to
 * read/write an hfile, while the minor version signals smaller changes in hfile format that do not
 * need a new reader/writer type.
 */
@InterfaceAudience.Private
public class FixedFileTrailer {
  private static final Logger LOG = LoggerFactory.getLogger(FixedFileTrailer.class);

  /**
   * We store the comparator class name as a fixed-length field in the trailer.
   */
  private static final int MAX_COMPARATOR_NAME_LENGTH = 128;

  /**
   * Offset to the fileinfo data, a small block of vitals. Necessary in v1 but only potentially
   * useful for pretty-printing in v2.
   */
  private long fileInfoOffset;

  /**
   * In version 1, the offset to the data block index. Starting from version 2, the meaning of this
   * field is the offset to the section of the file that should be loaded at the time the file is
   * being opened: i.e. on open we load the root index, file info, etc. See
   * http://hbase.apache.org/book.html#_hfile_format_2 in the reference guide.
   */
  private long loadOnOpenDataOffset;
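  // A sketch of the on-disk trailer layout this class reads and writes (v2 PB trailers
  // and v3+; sizes assume the constants defined below, illustrative only):
  //
  //   +----------------------+  <- fileSize - getTrailerSize()
  //   | TRAILER block magic  |   BlockType.MAGIC_LENGTH bytes
  //   | delimited PB trailer |   variable length
  //   | zero padding         |   fills out the fixed trailer size
  //   | version int          |   4 bytes
  //   +----------------------+  <- fileSize
  //
  // The version int packs both numbers, e.g. materializeVersion(3, 3) == 0x03000003:
  // the major version occupies the three low-order bytes and the minor version the
  // high-order byte, so extractMajorVersion(0x03000003) == 3 and
  // extractMinorVersion(0x03000003) == 3 (see the methods at the bottom of this class).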
  /**
   * The number of entries in the root data index.
   */
  private int dataIndexCount;

  /**
   * Total uncompressed size of all blocks of the data index.
   */
  private long uncompressedDataIndexSize;

  /**
   * The number of entries in the meta index.
   */
  private int metaIndexCount;

  /**
   * The total uncompressed size of keys/values stored in the file.
   */
  private long totalUncompressedBytes;

  /**
   * The number of key/value pairs in the file. This field was int in version 1, but is now long.
   */
  private long entryCount;

  /**
   * The compression codec used for all blocks.
   */
  private Compression.Algorithm compressionCodec = Compression.Algorithm.NONE;

  /**
   * The number of levels in the potentially multi-level data index. Used from version 2 onwards.
   */
  private int numDataIndexLevels;

  /**
   * The offset of the first data block.
   */
  private long firstDataBlockOffset;

  /**
   * It is guaranteed that no key/value data blocks start after this offset in the file.
   */
  private long lastDataBlockOffset;

  /**
   * Raw key comparator class name in version 3.
   */
  // We could write the actual class name from 2.0 onwards and handle BC
  private String comparatorClassName =
    InnerStoreCellComparator.INNER_STORE_COMPARATOR.getClass().getName();

  /**
   * The encryption key
   */
  private byte[] encryptionKey;

  /**
   * The {@link HFile} format major version.
   */
  private final int majorVersion;

  /**
   * The {@link HFile} format minor version.
   */
  private final int minorVersion;

  FixedFileTrailer(int majorVersion, int minorVersion) {
    this.majorVersion = majorVersion;
    this.minorVersion = minorVersion;
    HFile.checkFormatVersion(majorVersion);
  }

  private static int[] computeTrailerSizeByVersion() {
    int[] versionToSize = new int[HFile.MAX_FORMAT_VERSION + 1];
    // We support only two major versions now, i.e. V2 and V3.
    versionToSize[2] = 212;
    for (int version = 3; version <= HFile.MAX_FORMAT_VERSION; version++) {
      // The max trailer size for V3 and above is taken as 4KB to leave room for future
      // enhancements; this holds as long as the trailer never exceeds 4KB.
      versionToSize[version] = 1024 * 4;
    }
    return versionToSize;
  }

  private static int getMaxTrailerSize() {
    int maxSize = 0;
    for (int version = HFile.MIN_FORMAT_VERSION; version <= HFile.MAX_FORMAT_VERSION; ++version) {
      maxSize = Math.max(getTrailerSize(version), maxSize);
    }
    return maxSize;
  }

  private static final int[] TRAILER_SIZE = computeTrailerSizeByVersion();
  private static final int MAX_TRAILER_SIZE = getMaxTrailerSize();

  private static final int NOT_PB_SIZE = BlockType.MAGIC_LENGTH + Bytes.SIZEOF_INT;

  static int getTrailerSize(int version) {
    return TRAILER_SIZE[version];
  }

  public int getTrailerSize() {
    return getTrailerSize(majorVersion);
  }
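  // Concrete values produced by the size table above (illustrative): getTrailerSize(2)
  // returns 212 bytes, while getTrailerSize(3) and any later version return 4096 bytes,
  // so MAX_TRAILER_SIZE is 4096 as long as HFile.MAX_FORMAT_VERSION >= 3.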
  /**
   * Write the trailer to a data stream. The major version is validated up front, so only
   * currently supported format versions can be written.
   */
  void serialize(DataOutputStream outputStream) throws IOException {
    HFile.checkFormatVersion(majorVersion);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream baosDos = new DataOutputStream(baos);

    BlockType.TRAILER.write(baosDos);
    serializeAsPB(baosDos);

    // The last 4 bytes of the file encode the major and minor version universally
    baosDos.writeInt(materializeVersion(majorVersion, minorVersion));

    baos.writeTo(outputStream);
  }

  HFileProtos.FileTrailerProto toProtobuf() {
    HFileProtos.FileTrailerProto.Builder builder = HFileProtos.FileTrailerProto.newBuilder()
      .setFileInfoOffset(fileInfoOffset).setLoadOnOpenDataOffset(loadOnOpenDataOffset)
      .setUncompressedDataIndexSize(uncompressedDataIndexSize)
      .setTotalUncompressedBytes(totalUncompressedBytes).setDataIndexCount(dataIndexCount)
      .setMetaIndexCount(metaIndexCount).setEntryCount(entryCount)
      .setNumDataIndexLevels(numDataIndexLevels).setFirstDataBlockOffset(firstDataBlockOffset)
      .setLastDataBlockOffset(lastDataBlockOffset).setComparatorClassName(comparatorClassName)
      .setCompressionCodec(compressionCodec.ordinal());
    if (encryptionKey != null) {
      builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(encryptionKey));
    }
    return builder.build();
  }

  /**
   * Write trailer data as protobuf.
   */
  void serializeAsPB(DataOutputStream output) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // We need this extra copy unfortunately to determine the final size of the
    // delimited output, see use of baos.size() below.
    toProtobuf().writeDelimitedTo(baos);
    baos.writeTo(output);
    // Pad to make up the difference between the variable PB encoding length and the
    // length when encoded as a writable under the earlier V2 format. Failure to pad
    // properly, or a PB encoding that is too big, would mean the trailer won't be read
    // back in properly by HFile.
    int padding = getTrailerSize() - NOT_PB_SIZE - baos.size();
    if (padding < 0) {
      throw new IOException("Pbuf encoding size exceeded fixed trailer size limit");
    }
    for (int i = 0; i < padding; i++) {
      output.write(0);
    }
  }
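  // Worked padding arithmetic (hypothetical PB size, illustrative only): for a v3
  // trailer whose delimited PB happens to be 180 bytes, serializeAsPB writes
  //   getTrailerSize() - NOT_PB_SIZE - 180 == 4096 - (BlockType.MAGIC_LENGTH + 4) - 180
  // zero bytes of padding, keeping the on-disk trailer at exactly getTrailerSize() bytes.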
  /**
   * Deserialize the fixed file trailer from the given stream. The version needs to already be
   * specified. Make sure this is consistent with {@link #serialize(DataOutputStream)}.
   */
  void deserialize(DataInputStream inputStream) throws IOException {
    HFile.checkFormatVersion(majorVersion);

    BlockType.TRAILER.readAndCheck(inputStream);

    if (
      majorVersion > 2
        || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION)
    ) {
      deserializeFromPB(inputStream);
    } else {
      deserializeFromWritable(inputStream);
    }

    // The last 4 bytes of the file encode the major and minor version universally
    int version = inputStream.readInt();
    expectMajorVersion(extractMajorVersion(version));
    expectMinorVersion(extractMinorVersion(version));
  }

  /**
   * Deserialize the file trailer as protobuf.
   */
  void deserializeFromPB(DataInputStream inputStream) throws IOException {
    // read PB and skip padding
    int start = inputStream.available();
    HFileProtos.FileTrailerProto trailerProto =
      HFileProtos.FileTrailerProto.parser().parseDelimitedFrom(inputStream);
    int size = start - inputStream.available();
    inputStream.skip(getTrailerSize() - NOT_PB_SIZE - size);

    // process the PB
    if (trailerProto.hasFileInfoOffset()) {
      fileInfoOffset = trailerProto.getFileInfoOffset();
    }
    if (trailerProto.hasLoadOnOpenDataOffset()) {
      loadOnOpenDataOffset = trailerProto.getLoadOnOpenDataOffset();
    }
    if (trailerProto.hasUncompressedDataIndexSize()) {
      uncompressedDataIndexSize = trailerProto.getUncompressedDataIndexSize();
    }
    if (trailerProto.hasTotalUncompressedBytes()) {
      totalUncompressedBytes = trailerProto.getTotalUncompressedBytes();
    }
    if (trailerProto.hasDataIndexCount()) {
      dataIndexCount = trailerProto.getDataIndexCount();
    }
    if (trailerProto.hasMetaIndexCount()) {
      metaIndexCount = trailerProto.getMetaIndexCount();
    }
    if (trailerProto.hasEntryCount()) {
      entryCount = trailerProto.getEntryCount();
    }
    if (trailerProto.hasNumDataIndexLevels()) {
      numDataIndexLevels = trailerProto.getNumDataIndexLevels();
    }
    if (trailerProto.hasFirstDataBlockOffset()) {
      firstDataBlockOffset = trailerProto.getFirstDataBlockOffset();
    }
    if (trailerProto.hasLastDataBlockOffset()) {
      lastDataBlockOffset = trailerProto.getLastDataBlockOffset();
    }
    if (trailerProto.hasComparatorClassName()) {
      setComparatorClass(getComparatorClass(trailerProto.getComparatorClassName()));
    }
    if (trailerProto.hasCompressionCodec()) {
      compressionCodec = Compression.Algorithm.values()[trailerProto.getCompressionCodec()];
    } else {
      compressionCodec = Compression.Algorithm.NONE;
    }
    if (trailerProto.hasEncryptionKey()) {
      encryptionKey = trailerProto.getEncryptionKey().toByteArray();
    }
  }
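  // Note on the padding skip in deserializeFromPB: deserialize() is fed a
  // ByteArrayInputStream over exactly the trailer bytes (see readFromStream below), so
  // available() reliably measures how much parseDelimitedFrom consumed:
  //   consumed = start - inputStream.available();
  //   toSkip   = getTrailerSize() - NOT_PB_SIZE - consumed;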
  /**
   * Deserialize the file trailer as writable data.
   */
  void deserializeFromWritable(DataInput input) throws IOException {
    fileInfoOffset = input.readLong();
    loadOnOpenDataOffset = input.readLong();
    dataIndexCount = input.readInt();
    uncompressedDataIndexSize = input.readLong();
    metaIndexCount = input.readInt();

    totalUncompressedBytes = input.readLong();
    entryCount = input.readLong();
    compressionCodec = Compression.Algorithm.values()[input.readInt()];
    numDataIndexLevels = input.readInt();
    firstDataBlockOffset = input.readLong();
    lastDataBlockOffset = input.readLong();
    // TODO this is a classname encoded into an HFile's trailer. We are going to need to have
    // some compat code here.
    setComparatorClass(
      getComparatorClass(Bytes.readStringFixedSize(input, MAX_COMPARATOR_NAME_LENGTH)));
  }

  private void append(StringBuilder sb, String s) {
    if (sb.length() > 0) {
      sb.append(", ");
    }
    sb.append(s);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    append(sb, "fileinfoOffset=" + fileInfoOffset);
    append(sb, "loadOnOpenDataOffset=" + loadOnOpenDataOffset);
    append(sb, "dataIndexCount=" + dataIndexCount);
    append(sb, "metaIndexCount=" + metaIndexCount);
    append(sb, "totalUncompressedBytes=" + totalUncompressedBytes);
    append(sb, "entryCount=" + entryCount);
    append(sb, "compressionCodec=" + compressionCodec);
    append(sb, "uncompressedDataIndexSize=" + uncompressedDataIndexSize);
    append(sb, "numDataIndexLevels=" + numDataIndexLevels);
    append(sb, "firstDataBlockOffset=" + firstDataBlockOffset);
    append(sb, "lastDataBlockOffset=" + lastDataBlockOffset);
    append(sb, "comparatorClassName=" + comparatorClassName);
    if (majorVersion >= 3) {
      append(sb, "encryptionKey=" + (encryptionKey != null ? "PRESENT" : "NONE"));
    }
    append(sb, "majorVersion=" + majorVersion);
    append(sb, "minorVersion=" + minorVersion);

    return sb.toString();
  }

  /**
   * Reads a file trailer from the given file.
   * @param istream  the input stream with the ability to seek. Does not have to be buffered, as
   *                 only one read operation is made.
   * @param fileSize the file size. Can be obtained using
   *                 {@link org.apache.hadoop.fs.FileSystem#getFileStatus(org.apache.hadoop.fs.Path)}.
   * @return the fixed file trailer read
   * @throws IOException if failed to read from the underlying stream, or the trailer is corrupted,
   *                     or the version of the trailer is unsupported
   */
  public static FixedFileTrailer readFromStream(FSDataInputStream istream, long fileSize)
    throws IOException {
    int bufferSize = MAX_TRAILER_SIZE;
    long seekPoint = fileSize - bufferSize;
    if (seekPoint < 0) {
      // It is hard to imagine such a small HFile.
      seekPoint = 0;
      bufferSize = (int) fileSize;
    }

    istream.seek(seekPoint);

    ByteBuffer buf = ByteBuffer.allocate(bufferSize);
    istream.readFully(buf.array(), buf.arrayOffset(), buf.arrayOffset() + buf.limit());

    // Read the version from the last int of the file.
    buf.position(buf.limit() - Bytes.SIZEOF_INT);
    int version = buf.getInt();

    // Extract the major and minor versions.
    int majorVersion = extractMajorVersion(version);
    int minorVersion = extractMinorVersion(version);

    HFile.checkFormatVersion(majorVersion); // throws IAE if invalid

    int trailerSize = getTrailerSize(majorVersion);

    FixedFileTrailer fft = new FixedFileTrailer(majorVersion, minorVersion);
    fft.deserialize(new DataInputStream(new ByteArrayInputStream(buf.array(),
      buf.arrayOffset() + bufferSize - trailerSize, trailerSize)));
    return fft;
  }

  public void expectMajorVersion(int expected) {
    if (majorVersion != expected) {
      throw new IllegalArgumentException(
        "Invalid HFile major version: " + majorVersion + " (expected: " + expected + ")");
    }
  }

  public void expectMinorVersion(int expected) {
    if (minorVersion != expected) {
      throw new IllegalArgumentException(
        "Invalid HFile minor version: " + minorVersion + " (expected: " + expected + ")");
    }
  }

  public void expectAtLeastMajorVersion(int lowerBound) {
    if (majorVersion < lowerBound) {
      throw new IllegalArgumentException("Invalid HFile major version: " + majorVersion
        + " (expected: " + lowerBound + " or higher).");
    }
  }

  public long getFileInfoOffset() {
    return fileInfoOffset;
  }

  public void setFileInfoOffset(long fileInfoOffset) {
    this.fileInfoOffset = fileInfoOffset;
  }

  public long getLoadOnOpenDataOffset() {
    return loadOnOpenDataOffset;
  }

  public void setLoadOnOpenOffset(long loadOnOpenDataOffset) {
    this.loadOnOpenDataOffset = loadOnOpenDataOffset;
  }

  public int getDataIndexCount() {
    return dataIndexCount;
  }

  public void setDataIndexCount(int dataIndexCount) {
    this.dataIndexCount = dataIndexCount;
  }

  public int getMetaIndexCount() {
    return metaIndexCount;
  }

  public void setMetaIndexCount(int metaIndexCount) {
    this.metaIndexCount = metaIndexCount;
  }

  public long getTotalUncompressedBytes() {
    return totalUncompressedBytes;
  }

  public void setTotalUncompressedBytes(long totalUncompressedBytes) {
    this.totalUncompressedBytes = totalUncompressedBytes;
  }

  public long getEntryCount() {
    return entryCount;
  }

  public void setEntryCount(long newEntryCount) {
    entryCount = newEntryCount;
  }

  public Compression.Algorithm getCompressionCodec() {
    return compressionCodec;
  }

  public void setCompressionCodec(Compression.Algorithm compressionCodec) {
    this.compressionCodec = compressionCodec;
  }

  public int getNumDataIndexLevels() {
    expectAtLeastMajorVersion(2);
    return numDataIndexLevels;
  }

  public void setNumDataIndexLevels(int numDataIndexLevels) {
    expectAtLeastMajorVersion(2);
    this.numDataIndexLevels = numDataIndexLevels;
  }

  public long getLastDataBlockOffset() {
    expectAtLeastMajorVersion(2);
    return lastDataBlockOffset;
  }

  public void setLastDataBlockOffset(long lastDataBlockOffset) {
    expectAtLeastMajorVersion(2);
    this.lastDataBlockOffset = lastDataBlockOffset;
  }

  public long getFirstDataBlockOffset() {
    expectAtLeastMajorVersion(2);
    return firstDataBlockOffset;
  }

  public void setFirstDataBlockOffset(long firstDataBlockOffset) {
    expectAtLeastMajorVersion(2);
    this.firstDataBlockOffset = firstDataBlockOffset;
  }

  public String getComparatorClassName() {
    return comparatorClassName;
  }
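  // A minimal usage sketch for readFromStream (hypothetical path and configuration,
  // illustrative only; not part of the original file):
  //
  //   FileSystem fs = FileSystem.get(conf);
  //   Path path = new Path("/hbase/data/.../myhfile"); // hypothetical
  //   long fileSize = fs.getFileStatus(path).getLen();
  //   try (FSDataInputStream in = fs.open(path)) {
  //     FixedFileTrailer trailer = FixedFileTrailer.readFromStream(in, fileSize);
  //     LOG.info("Read trailer: {}", trailer);
  //   }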
  /**
   * Returns the major version of this HFile format.
   */
  public int getMajorVersion() {
    return majorVersion;
  }

  /**
   * Returns the minor version of this HFile format.
   */
  public int getMinorVersion() {
    return minorVersion;
  }

  public void setComparatorClass(Class<? extends CellComparator> klass) {
    // Is the comparator instantiable?
    try {
      // If null, it should be the Bytes.BYTES_RAWCOMPARATOR
      if (klass != null) {
        // Instantiate once to verify the class has an accessible no-arg constructor.
        klass.getDeclaredConstructor().newInstance();
        this.comparatorClassName = klass.getName();
      }
    } catch (Exception e) {
      throw new RuntimeException("Comparator class " + klass.getName() + " is not instantiable", e);
    }
  }

  @SuppressWarnings("unchecked")
  private static Class<? extends CellComparator> getComparatorClass(String comparatorClassName)
    throws IOException {
    Class<? extends CellComparator> comparatorKlass;
    // for BC
    if (
      comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName())
        || comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName())
        || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator"))
    ) {
      comparatorKlass = InnerStoreCellComparator.class;
    } else if (
      comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName())
        || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName())
        || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator"))
        || (comparatorClassName
          .equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator"))
        || (comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator"))
    ) {
      comparatorKlass = MetaCellComparator.class;
    } else if (
      comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator")
        || comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator")
    ) {
      // When the comparator to be used is Bytes.BYTES_RAWCOMPARATOR we just return null here,
      // since Bytes.BYTES_RAWCOMPARATOR is not a CellComparator.
      comparatorKlass = null;
    } else {
      // If the name wasn't one of the legacy names, maybe it's a legitimate new kind of
      // comparator.
      try {
        comparatorKlass = (Class<? extends CellComparator>) Class.forName(comparatorClassName);
      } catch (ClassNotFoundException e) {
        throw new IOException(e);
      }
    }
    return comparatorKlass;
  }
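  // A few concrete mappings performed by getComparatorClass above, using names that
  // appear literally in its branches (illustrative only):
  //   "org.apache.hadoop.hbase.CellComparator"              -> InnerStoreCellComparator.class
  //   "org.apache.hadoop.hbase.MetaCellComparator"          -> MetaCellComparator.class
  //   "org.apache.hadoop.hbase.KeyValue$RawBytesComparator" -> null (raw bytes, not a CellComparator)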
  static CellComparator createComparator(String comparatorClassName) throws IOException {
    if (
      comparatorClassName
        .equals(InnerStoreCellComparator.INNER_STORE_COMPARATOR.getClass().getName())
    ) {
      return InnerStoreCellComparator.INNER_STORE_COMPARATOR;
    } else
      if (comparatorClassName.equals(MetaCellComparator.META_COMPARATOR.getClass().getName())) {
        return MetaCellComparator.META_COMPARATOR;
      }
    try {
      Class<? extends CellComparator> comparatorClass = getComparatorClass(comparatorClassName);
      if (comparatorClass != null) {
        return comparatorClass.getDeclaredConstructor().newInstance();
      }
      LOG.warn("No comparator class for " + comparatorClassName + ". Returning null.");
      return null;
    } catch (Exception e) {
      throw new IOException("Comparator class " + comparatorClassName + " is not instantiable", e);
    }
  }

  CellComparator createComparator() throws IOException {
    expectAtLeastMajorVersion(2);
    return createComparator(comparatorClassName);
  }

  public long getUncompressedDataIndexSize() {
    return uncompressedDataIndexSize;
  }

  public void setUncompressedDataIndexSize(long uncompressedDataIndexSize) {
    expectAtLeastMajorVersion(2);
    this.uncompressedDataIndexSize = uncompressedDataIndexSize;
  }

  public byte[] getEncryptionKey() {
    // This is a v3 feature, but when reading a v2 file the encryptionKey will just be null,
    // which is fine for this feature.
    expectAtLeastMajorVersion(2);
    return encryptionKey;
  }

  public void setEncryptionKey(byte[] keyBytes) {
    this.encryptionKey = keyBytes;
  }

  /**
   * Extracts the major version from 4-byte serialized version data. The major version is the 3
   * least significant bytes.
   */
  private static int extractMajorVersion(int serializedVersion) {
    return (serializedVersion & 0x00ffffff);
  }

  /**
   * Extracts the minor version from 4-byte serialized version data. The minor version is the
   * most significant byte.
   */
  private static int extractMinorVersion(int serializedVersion) {
    return (serializedVersion >>> 24);
  }

  /**
   * Create a 4-byte serialized version number by combining the minor and major version numbers.
   */
  static int materializeVersion(int majorVersion, int minorVersion) {
    return ((majorVersion & 0x00ffffff) | (minorVersion << 24));
  }
}