/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MetaCellComparator;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;

/**
 * The {@link HFile} has a fixed trailer which contains offsets to other
 * variable parts of the file. Also includes basic metadata on this file. The
 * trailer size is fixed within a given {@link HFile} format version only, but
 * we always store the version number as the last four-byte integer of the file.
 * The version number itself is split into two portions, a major
 * version and a minor version. The last three bytes of a file are the major
 * version and a single preceding byte is the minor number. The major version
 * determines which readers/writers to use to read/write an hfile while a minor
 * version determines smaller changes in hfile format that do not need a new
 * reader/writer type.
 */
@InterfaceAudience.Private
public class FixedFileTrailer {
  private static final Logger LOG = LoggerFactory.getLogger(FixedFileTrailer.class);

  /**
   * We store the comparator class name as a fixed-length field in the trailer.
   */
  private static final int MAX_COMPARATOR_NAME_LENGTH = 128;

  /**
   * Offset to the fileinfo data, a small block of vitals. Necessary in v1 but
   * only potentially useful for pretty-printing in v2.
   */
  private long fileInfoOffset;

  /**
   * In version 1, the offset to the data block index. Starting from version 2,
   * the meaning of this field is the offset to the section of the file that
   * should be loaded at the time the file is being opened: i.e. on open we load
   * the root index, file info, etc. See http://hbase.apache.org/book.html#_hfile_format_2
   * in the reference guide.
   */
  private long loadOnOpenDataOffset;
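  // A minimal sketch of how a reader could consume loadOnOpenDataOffset once a
  // trailer has been parsed. The istream/fileSize variables are hypothetical;
  // the real open path lives in HFileReaderImpl and HFileBlock.
  //
  //   FixedFileTrailer trailer = FixedFileTrailer.readFromStream(istream, fileSize);
  //   // Everything from loadOnOpenDataOffset up to the trailer itself is the
  //   // "load-on-open" section: root index, file info, etc.
  //   long loadOnOpenStart = trailer.getLoadOnOpenDataOffset();
  //   long loadOnOpenLength = fileSize - trailer.getTrailerSize() - loadOnOpenStart;
  //   HFileUtil.seekOnMultipleSources(istream, loadOnOpenStart);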
  /**
   * The number of entries in the root data index.
   */
  private int dataIndexCount;

  /**
   * Total uncompressed size of all blocks of the data index
   */
  private long uncompressedDataIndexSize;

  /**
   * The number of entries in the meta index
   */
  private int metaIndexCount;

  /**
   * The total uncompressed size of keys/values stored in the file.
   */
  private long totalUncompressedBytes;

  /**
   * The number of key/value pairs in the file. This field was int in version 1,
   * but is now long.
   */
  private long entryCount;

  /**
   * The compression codec used for all blocks.
   */
  private Compression.Algorithm compressionCodec = Compression.Algorithm.NONE;

  /**
   * The number of levels in the potentially multi-level data index. Used from
   * version 2 onwards.
   */
  private int numDataIndexLevels;

  /**
   * The offset of the first data block.
   */
  private long firstDataBlockOffset;

  /**
   * It is guaranteed that no key/value data blocks start after this offset in
   * the file.
   */
  private long lastDataBlockOffset;

  /**
   * Raw key comparator class name in version 3
   */
  // We could write the actual class name from 2.0 onwards and handle BC
  private String comparatorClassName = CellComparator.getInstance().getClass().getName();

  /**
   * The encryption key
   */
  private byte[] encryptionKey;

  /**
   * The {@link HFile} format major version.
   */
  private final int majorVersion;

  /**
   * The {@link HFile} format minor version.
   */
  private final int minorVersion;

  FixedFileTrailer(int majorVersion, int minorVersion) {
    this.majorVersion = majorVersion;
    this.minorVersion = minorVersion;
    HFile.checkFormatVersion(majorVersion);
  }

  private static int[] computeTrailerSizeByVersion() {
    int[] versionToSize = new int[HFile.MAX_FORMAT_VERSION + 1];
    // We support only two major versions now, i.e. V2 and V3.
    versionToSize[2] = 212;
    for (int version = 3; version <= HFile.MAX_FORMAT_VERSION; version++) {
      // The max trailer size for V3 and above is fixed at 4KB to leave room for
      // future enhancements, if any; this holds as long as the trailer fits
      // within 4K.
      versionToSize[version] = 1024 * 4;
    }
    return versionToSize;
  }

  private static int getMaxTrailerSize() {
    int maxSize = 0;
    for (int version = HFile.MIN_FORMAT_VERSION; version <= HFile.MAX_FORMAT_VERSION; ++version) {
      maxSize = Math.max(getTrailerSize(version), maxSize);
    }
    return maxSize;
  }

  private static final int[] TRAILER_SIZE = computeTrailerSizeByVersion();
  private static final int MAX_TRAILER_SIZE = getMaxTrailerSize();

  private static final int NOT_PB_SIZE = BlockType.MAGIC_LENGTH + Bytes.SIZEOF_INT;

  static int getTrailerSize(int version) {
    return TRAILER_SIZE[version];
  }

  public int getTrailerSize() {
    return getTrailerSize(majorVersion);
  }
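  // A hedged sketch of how the per-version constant is used to find the trailer:
  // because the size is fixed per major version (212 bytes for V2, 4096 reserved
  // for V3 and above), a reader can locate the trailer from the file length
  // alone. fileSize is a hypothetical variable; readFromStream() below does the
  // real work.
  //
  //   int trailerSize = FixedFileTrailer.getTrailerSize(3); // 4096
  //   long trailerOffset = fileSize - trailerSize;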
  /**
   * Write the trailer to a data stream. Make sure this stays consistent with
   * {@link #deserialize(DataInputStream)}.
   */
  void serialize(DataOutputStream outputStream) throws IOException {
    HFile.checkFormatVersion(majorVersion);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream baosDos = new DataOutputStream(baos);

    BlockType.TRAILER.write(baosDos);
    serializeAsPB(baosDos);

    // The last 4 bytes of the file encode the major and minor version universally
    baosDos.writeInt(materializeVersion(majorVersion, minorVersion));

    baos.writeTo(outputStream);
  }

  HFileProtos.FileTrailerProto toProtobuf() {
    HFileProtos.FileTrailerProto.Builder builder = HFileProtos.FileTrailerProto.newBuilder()
      .setFileInfoOffset(fileInfoOffset)
      .setLoadOnOpenDataOffset(loadOnOpenDataOffset)
      .setUncompressedDataIndexSize(uncompressedDataIndexSize)
      .setTotalUncompressedBytes(totalUncompressedBytes)
      .setDataIndexCount(dataIndexCount)
      .setMetaIndexCount(metaIndexCount)
      .setEntryCount(entryCount)
      .setNumDataIndexLevels(numDataIndexLevels)
      .setFirstDataBlockOffset(firstDataBlockOffset)
      .setLastDataBlockOffset(lastDataBlockOffset)
      .setComparatorClassName(getHBase1CompatibleName(comparatorClassName))
      .setCompressionCodec(compressionCodec.ordinal());
    if (encryptionKey != null) {
      builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(encryptionKey));
    }
    return builder.build();
  }

  /**
   * Write trailer data as protobuf.
   * NOTE: we run a translation on the comparator name and will serialize the old hbase-1.x name
   * where it makes sense. See {@link #getHBase1CompatibleName(String)}.
   */
  void serializeAsPB(DataOutputStream output) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // We need this extra copy unfortunately to determine the final size of the
    // delimited output, see use of baos.size() below.
    toProtobuf().writeDelimitedTo(baos);
    baos.writeTo(output);
    // Pad to make up the difference between the variable PB encoding length and the
    // length when encoded as a writable under the earlier V2 formats. If we fail to
    // pad properly, or if the PB encoding is too big, the trailer won't be read
    // back in properly by HFile.
    int padding = getTrailerSize() - NOT_PB_SIZE - baos.size();
    if (padding < 0) {
      throw new IOException("Pbuf encoding size exceeded fixed trailer size limit");
    }
    for (int i = 0; i < padding; i++) {
      output.write(0);
    }
  }
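  // A minimal round-trip sketch, for illustration only: thanks to the padding
  // above, serializing a trailer always yields exactly getTrailerSize() bytes,
  // regardless of how large the protobuf encoding happens to be.
  //
  //   FixedFileTrailer t = new FixedFileTrailer(3, 0);
  //   ByteArrayOutputStream out = new ByteArrayOutputStream();
  //   t.serialize(new DataOutputStream(out));
  //   assert out.size() == t.getTrailerSize(); // 4096 for major version 3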
  /**
   * Deserialize the fixed file trailer from the given stream. The version needs
   * to already be specified. Make sure this is consistent with
   * {@link #serialize(DataOutputStream)}.
   */
  void deserialize(DataInputStream inputStream) throws IOException {
    HFile.checkFormatVersion(majorVersion);

    BlockType.TRAILER.readAndCheck(inputStream);

    if (majorVersion > 2
      || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION)) {
      deserializeFromPB(inputStream);
    } else {
      deserializeFromWritable(inputStream);
    }

    // The last 4 bytes of the file encode the major and minor version universally
    int version = inputStream.readInt();
    expectMajorVersion(extractMajorVersion(version));
    expectMinorVersion(extractMinorVersion(version));
  }

  /**
   * Deserialize the file trailer as protobuf
   */
  void deserializeFromPB(DataInputStream inputStream) throws IOException {
    // read PB and skip padding
    int start = inputStream.available();
    HFileProtos.FileTrailerProto trailerProto =
      HFileProtos.FileTrailerProto.PARSER.parseDelimitedFrom(inputStream);
    int size = start - inputStream.available();
    inputStream.skip(getTrailerSize() - NOT_PB_SIZE - size);

    // process the PB
    if (trailerProto.hasFileInfoOffset()) {
      fileInfoOffset = trailerProto.getFileInfoOffset();
    }
    if (trailerProto.hasLoadOnOpenDataOffset()) {
      loadOnOpenDataOffset = trailerProto.getLoadOnOpenDataOffset();
    }
    if (trailerProto.hasUncompressedDataIndexSize()) {
      uncompressedDataIndexSize = trailerProto.getUncompressedDataIndexSize();
    }
    if (trailerProto.hasTotalUncompressedBytes()) {
      totalUncompressedBytes = trailerProto.getTotalUncompressedBytes();
    }
    if (trailerProto.hasDataIndexCount()) {
      dataIndexCount = trailerProto.getDataIndexCount();
    }
    if (trailerProto.hasMetaIndexCount()) {
      metaIndexCount = trailerProto.getMetaIndexCount();
    }
    if (trailerProto.hasEntryCount()) {
      entryCount = trailerProto.getEntryCount();
    }
    if (trailerProto.hasNumDataIndexLevels()) {
      numDataIndexLevels = trailerProto.getNumDataIndexLevels();
    }
    if (trailerProto.hasFirstDataBlockOffset()) {
      firstDataBlockOffset = trailerProto.getFirstDataBlockOffset();
    }
    if (trailerProto.hasLastDataBlockOffset()) {
      lastDataBlockOffset = trailerProto.getLastDataBlockOffset();
    }
    if (trailerProto.hasComparatorClassName()) {
      setComparatorClass(getComparatorClass(trailerProto.getComparatorClassName()));
    }
    if (trailerProto.hasCompressionCodec()) {
      compressionCodec = Compression.Algorithm.values()[trailerProto.getCompressionCodec()];
    } else {
      compressionCodec = Compression.Algorithm.NONE;
    }
    if (trailerProto.hasEncryptionKey()) {
      encryptionKey = trailerProto.getEncryptionKey().toByteArray();
    }
  }

  /**
   * Deserialize the file trailer as writable data
   */
  void deserializeFromWritable(DataInput input) throws IOException {
    fileInfoOffset = input.readLong();
    loadOnOpenDataOffset = input.readLong();
    dataIndexCount = input.readInt();
    uncompressedDataIndexSize = input.readLong();
    metaIndexCount = input.readInt();

    totalUncompressedBytes = input.readLong();
    entryCount = input.readLong();
    compressionCodec = Compression.Algorithm.values()[input.readInt()];
    numDataIndexLevels = input.readInt();
    firstDataBlockOffset = input.readLong();
    lastDataBlockOffset = input.readLong();
    // TODO this is a classname encoded into an HFile's trailer. We are going to need to have
    // some compat code here.
    setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input,
      MAX_COMPARATOR_NAME_LENGTH)));
  }
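  // Worked example of the trailer layout that deserializeFromPB() is undoing
  // (sizes hedged: BlockType.MAGIC_LENGTH is the 8-byte block magic in current
  // HBase, so NOT_PB_SIZE = 8 + 4 = 12):
  //
  //   [ 8-byte TRAILER block magic ]
  //   [ length-delimited FileTrailerProto, `size` bytes ]
  //   [ zero padding, getTrailerSize() - NOT_PB_SIZE - size bytes ]
  //   [ 4-byte version int: minor byte first, then 3 major bytes ]
  //
  // For a V2 trailer (212 bytes total) the protobuf plus padding always spans
  // exactly 200 bytes.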
  private void append(StringBuilder sb, String s) {
    if (sb.length() > 0) {
      sb.append(", ");
    }
    sb.append(s);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    append(sb, "fileInfoOffset=" + fileInfoOffset);
    append(sb, "loadOnOpenDataOffset=" + loadOnOpenDataOffset);
    append(sb, "dataIndexCount=" + dataIndexCount);
    append(sb, "metaIndexCount=" + metaIndexCount);
    append(sb, "totalUncompressedBytes=" + totalUncompressedBytes);
    append(sb, "entryCount=" + entryCount);
    append(sb, "compressionCodec=" + compressionCodec);
    append(sb, "uncompressedDataIndexSize=" + uncompressedDataIndexSize);
    append(sb, "numDataIndexLevels=" + numDataIndexLevels);
    append(sb, "firstDataBlockOffset=" + firstDataBlockOffset);
    append(sb, "lastDataBlockOffset=" + lastDataBlockOffset);
    append(sb, "comparatorClassName=" + comparatorClassName);
    if (majorVersion >= 3) {
      append(sb, "encryptionKey=" + (encryptionKey != null ? "PRESENT" : "NONE"));
    }
    append(sb, "majorVersion=" + majorVersion);
    append(sb, "minorVersion=" + minorVersion);

    return sb.toString();
  }

  /**
   * Reads a file trailer from the given file.
   *
   * @param istream  the input stream with the ability to seek. Does not have to
   *                 be buffered, as only one read operation is made.
   * @param fileSize the file size. Can be obtained using
   *                 {@link org.apache.hadoop.fs.FileSystem#getFileStatus(org.apache.hadoop.fs.Path)}.
   * @return the fixed file trailer read
   * @throws IOException if failed to read from the underlying stream, or the
   *                     trailer is corrupted, or the version of the trailer is
   *                     unsupported
   */
  public static FixedFileTrailer readFromStream(FSDataInputStream istream,
    long fileSize) throws IOException {
    int bufferSize = MAX_TRAILER_SIZE;
    long seekPoint = fileSize - bufferSize;
    if (seekPoint < 0) {
      // It is hard to imagine such a small HFile.
      seekPoint = 0;
      bufferSize = (int) fileSize;
    }

    HFileUtil.seekOnMultipleSources(istream, seekPoint);

    ByteBuffer buf = ByteBuffer.allocate(bufferSize);
    istream.readFully(buf.array(), buf.arrayOffset(),
      buf.arrayOffset() + buf.limit());

    // Read the version from the last int of the file.
    buf.position(buf.limit() - Bytes.SIZEOF_INT);
    int version = buf.getInt();

    // Extract the major and minor versions.
    int majorVersion = extractMajorVersion(version);
    int minorVersion = extractMinorVersion(version);

    HFile.checkFormatVersion(majorVersion); // throws IAE if invalid

    int trailerSize = getTrailerSize(majorVersion);

    FixedFileTrailer fft = new FixedFileTrailer(majorVersion, minorVersion);
    fft.deserialize(new DataInputStream(new ByteArrayInputStream(buf.array(),
      buf.arrayOffset() + bufferSize - trailerSize, trailerSize)));
    return fft;
  }
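  // A minimal usage sketch for readFromStream(). The Path and conf values are
  // hypothetical; callers in HBase normally go through HFile/HFileReaderImpl
  // rather than reading the trailer directly.
  //
  //   FileSystem fs = FileSystem.get(conf);
  //   Path path = new Path("/hbase/data/.../myhfile");
  //   long fileSize = fs.getFileStatus(path).getLen();
  //   try (FSDataInputStream in = fs.open(path)) {
  //     FixedFileTrailer trailer = FixedFileTrailer.readFromStream(in, fileSize);
  //     LOG.info("Read trailer: {}", trailer);
  //   }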
  public void expectMajorVersion(int expected) {
    if (majorVersion != expected) {
      throw new IllegalArgumentException("Invalid HFile major version: "
        + majorVersion
        + " (expected: " + expected + ")");
    }
  }

  public void expectMinorVersion(int expected) {
    if (minorVersion != expected) {
      throw new IllegalArgumentException("Invalid HFile minor version: "
        + minorVersion + " (expected: " + expected + ")");
    }
  }

  public void expectAtLeastMajorVersion(int lowerBound) {
    if (majorVersion < lowerBound) {
      throw new IllegalArgumentException("Invalid HFile major version: "
        + majorVersion
        + " (expected: " + lowerBound + " or higher).");
    }
  }

  public long getFileInfoOffset() {
    return fileInfoOffset;
  }

  public void setFileInfoOffset(long fileInfoOffset) {
    this.fileInfoOffset = fileInfoOffset;
  }

  public long getLoadOnOpenDataOffset() {
    return loadOnOpenDataOffset;
  }

  public void setLoadOnOpenOffset(long loadOnOpenDataOffset) {
    this.loadOnOpenDataOffset = loadOnOpenDataOffset;
  }

  public int getDataIndexCount() {
    return dataIndexCount;
  }

  public void setDataIndexCount(int dataIndexCount) {
    this.dataIndexCount = dataIndexCount;
  }

  public int getMetaIndexCount() {
    return metaIndexCount;
  }

  public void setMetaIndexCount(int metaIndexCount) {
    this.metaIndexCount = metaIndexCount;
  }

  public long getTotalUncompressedBytes() {
    return totalUncompressedBytes;
  }

  public void setTotalUncompressedBytes(long totalUncompressedBytes) {
    this.totalUncompressedBytes = totalUncompressedBytes;
  }

  public long getEntryCount() {
    return entryCount;
  }

  public void setEntryCount(long newEntryCount) {
    entryCount = newEntryCount;
  }

  public Compression.Algorithm getCompressionCodec() {
    return compressionCodec;
  }

  public void setCompressionCodec(Compression.Algorithm compressionCodec) {
    this.compressionCodec = compressionCodec;
  }

  public int getNumDataIndexLevels() {
    expectAtLeastMajorVersion(2);
    return numDataIndexLevels;
  }

  public void setNumDataIndexLevels(int numDataIndexLevels) {
    expectAtLeastMajorVersion(2);
    this.numDataIndexLevels = numDataIndexLevels;
  }

  public long getLastDataBlockOffset() {
    expectAtLeastMajorVersion(2);
    return lastDataBlockOffset;
  }

  public void setLastDataBlockOffset(long lastDataBlockOffset) {
    expectAtLeastMajorVersion(2);
    this.lastDataBlockOffset = lastDataBlockOffset;
  }

  public long getFirstDataBlockOffset() {
    expectAtLeastMajorVersion(2);
    return firstDataBlockOffset;
  }

  public void setFirstDataBlockOffset(long firstDataBlockOffset) {
    expectAtLeastMajorVersion(2);
    this.firstDataBlockOffset = firstDataBlockOffset;
  }

  public String getComparatorClassName() {
    return comparatorClassName;
  }

  /**
   * Returns the major version of this HFile format
   */
  public int getMajorVersion() {
    return majorVersion;
  }

  /**
   * Returns the minor version of this HFile format
   */
  public int getMinorVersion() {
    return minorVersion;
  }
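  // A hedged sketch of how a writer populates a trailer before appending it to
  // the end of an hfile. HFileWriterImpl owns the real logic; the right-hand
  // values below are placeholders, not actual writer fields.
  //
  //   FixedFileTrailer trailer = new FixedFileTrailer(3, 3);
  //   trailer.setFileInfoOffset(fileInfoSectionOffset);
  //   trailer.setLoadOnOpenOffset(loadOnOpenSectionOffset);
  //   trailer.setDataIndexCount(rootIndexEntryCount);
  //   trailer.setEntryCount(cellCount);
  //   trailer.setComparatorClass(CellComparatorImpl.class);
  //   trailer.serialize(outputStream); // writes exactly getTrailerSize() bytes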
  public void setComparatorClass(Class<? extends CellComparator> klass) {
    // Is the comparator instantiable?
    try {
      // If null, it should be the Bytes.BYTES_RAWCOMPARATOR
      if (klass != null) {
        // Instantiate once to verify the class has an accessible no-arg constructor.
        klass.getDeclaredConstructor().newInstance();
        // If the name wasn't one of the legacy names, maybe it's a legitimate new
        // kind of comparator.
        this.comparatorClassName = klass.getName();
      }
    } catch (Exception e) {
      throw new RuntimeException("Comparator class " + klass.getName() + " is not instantiable", e);
    }
  }

  /**
   * If a 'standard' Comparator, write the old name for the Comparator when we serialize rather
   * than the new name; writing the new name will make it so newly-written hfiles are not parseable
   * by hbase-1.x, a facility we'd like to preserve across rolling upgrades and for hbase-1.x
   * clusters reading hbase-2.x output.
   * <p>
   * The Comparators in hbase-2.x work the same as they did in hbase-1.x; they compare
   * KeyValues. In hbase-2.x they were renamed making use of the more generic 'Cell'
   * nomenclature to indicate that we intend to move away from KeyValues post hbase-2. A naming
   * change is not reason enough to make it so hbase-1.x cannot read hbase-2.x files given the
   * structure goes unchanged (hfile v3). So, let's write the old names for Comparators into the
   * hfile tails in hbase-2. Here is where we do the translation.
   * {@link #getComparatorClass(String)} does the translation going the other way.
   *
   * <p>The translation is done on the serialized Protobuf only.</p>
   *
   * @param comparator String class name of the Comparator used in this hfile.
   * @return What to store in the trailer as our comparator name.
   * @see #getComparatorClass(String)
   * @since hbase-2.0.0.
   * @deprecated Since hbase-2.0.0. Will be removed in hbase-3.0.0.
   */
  @Deprecated
  private String getHBase1CompatibleName(final String comparator) {
    if (comparator.equals(CellComparatorImpl.class.getName())) {
      return KeyValue.COMPARATOR.getClass().getName();
    }
    if (comparator.equals(MetaCellComparator.class.getName())) {
      return KeyValue.META_COMPARATOR.getClass().getName();
    }
    return comparator;
  }
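  // Illustration of the round trip above (the legacy names are hedged; they are
  // whatever concrete classes back KeyValue.COMPARATOR / KeyValue.META_COMPARATOR):
  //
  //   getHBase1CompatibleName("org.apache.hadoop.hbase.CellComparatorImpl")
  //     -> the hbase-1.x comparator class name, e.g.
  //        "org.apache.hadoop.hbase.KeyValue$KVComparator"
  //   getComparatorClass("org.apache.hadoop.hbase.KeyValue$KVComparator")
  //     -> CellComparatorImpl.class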
  @SuppressWarnings("unchecked")
  private static Class<? extends CellComparator> getComparatorClass(String comparatorClassName)
    throws IOException {
    Class<? extends CellComparator> comparatorKlass;
    // for BC
    if (comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName())
      || comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName())
      || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator"))) {
      comparatorKlass = CellComparatorImpl.class;
    } else if (comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName())
      || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName())
      || (comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator"))) {
      comparatorKlass = MetaCellComparator.class;
    } else if (comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator")
      || comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator")) {
      // When the comparator to be used is Bytes.BYTES_RAWCOMPARATOR, we just return null from
      // here; Bytes.BYTES_RAWCOMPARATOR is not a CellComparator.
      comparatorKlass = null;
    } else {
      // If the name wasn't one of the legacy names, maybe it's a legitimate new kind of
      // comparator.
      try {
        comparatorKlass = (Class<? extends CellComparator>) Class.forName(comparatorClassName);
      } catch (ClassNotFoundException e) {
        throw new IOException(e);
      }
    }
    return comparatorKlass;
  }

  static CellComparator createComparator(String comparatorClassName) throws IOException {
    if (comparatorClassName.equals(CellComparatorImpl.COMPARATOR.getClass().getName())) {
      return CellComparatorImpl.COMPARATOR;
    } else if (comparatorClassName.equals(
      MetaCellComparator.META_COMPARATOR.getClass().getName())) {
      return MetaCellComparator.META_COMPARATOR;
    }
    try {
      Class<? extends CellComparator> comparatorClass = getComparatorClass(comparatorClassName);
      if (comparatorClass != null) {
        return comparatorClass.getDeclaredConstructor().newInstance();
      }
      LOG.warn("No Comparator class for " + comparatorClassName + ". Returning null.");
      return null;
    } catch (Exception e) {
      throw new IOException("Comparator class " + comparatorClassName + " is not instantiable", e);
    }
  }

  CellComparator createComparator() throws IOException {
    expectAtLeastMajorVersion(2);
    return createComparator(comparatorClassName);
  }

  public long getUncompressedDataIndexSize() {
    return uncompressedDataIndexSize;
  }

  public void setUncompressedDataIndexSize(
    long uncompressedDataIndexSize) {
    expectAtLeastMajorVersion(2);
    this.uncompressedDataIndexSize = uncompressedDataIndexSize;
  }

  public byte[] getEncryptionKey() {
    // This is a v3 feature but if reading a v2 file the encryptionKey will just be null, which
    // is fine for this feature.
    expectAtLeastMajorVersion(2);
    return encryptionKey;
  }

  public void setEncryptionKey(byte[] keyBytes) {
    this.encryptionKey = keyBytes;
  }
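  // Worked example of the version packing implemented below: the serialized int
  // stores the minor version in the most significant byte and the major version
  // in the three least significant bytes.
  //
  //   materializeVersion(2, 3)        == 0x03000002
  //   extractMajorVersion(0x03000002) == 2
  //   extractMinorVersion(0x03000002) == 3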
  /**
   * Extracts the major version from 4-byte serialized version data.
   * The major version is stored in the 3 least significant bytes.
   */
  private static int extractMajorVersion(int serializedVersion) {
    return (serializedVersion & 0x00ffffff);
  }

  /**
   * Extracts the minor version from 4-byte serialized version data.
   * The minor version is stored in the most significant byte.
   */
  private static int extractMinorVersion(int serializedVersion) {
    return (serializedVersion >>> 24);
  }

  /**
   * Create a 4-byte serialized version number by combining the
   * minor and major version numbers.
   */
  static int materializeVersion(int majorVersion, int minorVersion) {
    return ((majorVersion & 0x00ffffff) | (minorVersion << 24));
  }
}