/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.InnerStoreCellComparator;
import org.apache.hadoop.hbase.MetaCellComparator;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;

/**
 * The {@link HFile} has a fixed trailer which contains offsets to other variable parts of the file.
 * It also includes basic metadata on this file. The trailer size is fixed within a given
 * {@link HFile} format version only, but we always store the version number as the last four-byte
 * integer of the file. The version number itself is split into two portions, a major version and a
 * minor version. The last three bytes of a file are the major version and the single preceding byte
 * is the minor version. The major version determines which readers/writers to use to read/write an
 * HFile, while the minor version determines smaller changes in HFile format that do not need a new
 * reader/writer type.
 */
@InterfaceAudience.Private
public class FixedFileTrailer {
  private static final Logger LOG = LoggerFactory.getLogger(FixedFileTrailer.class);

  /**
   * We store the comparator class name as a fixed-length field in the trailer.
   */
  private static final int MAX_COMPARATOR_NAME_LENGTH = 128;

  /**
   * Offset to the fileinfo data, a small block of vitals. Necessary in v1 but only potentially
   * useful for pretty-printing in v2.
   */
  private long fileInfoOffset;

  /**
   * In version 1, the offset to the data block index. Starting from version 2, the meaning of this
   * field is the offset to the section of the file that should be loaded at the time the file is
   * being opened: i.e. on open we load the root index, file info, etc. See
   * http://hbase.apache.org/book.html#_hfile_format_2 in the reference guide.
   */
  private long loadOnOpenDataOffset;
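
  // For orientation only (this sketch follows the reference guide's v2/v3 layout description
  // rather than anything enforced in this class): the "load-on-open" section runs from
  // loadOnOpenDataOffset up to the trailer and typically holds the root data index, the meta
  // index, the file info block, and Bloom filter metadata, all read eagerly when the file opens.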

  /**
   * The number of entries in the root data index.
   */
  private int dataIndexCount;

  /**
   * Total uncompressed size of all blocks of the data index.
   */
  private long uncompressedDataIndexSize;

  /**
   * The number of entries in the meta index.
   */
  private int metaIndexCount;

  /**
   * The total uncompressed size of keys/values stored in the file.
   */
  private long totalUncompressedBytes;

  /**
   * The number of key/value pairs in the file. This field was an int in version 1, but is now a
   * long.
   */
  private long entryCount;

  /**
   * The compression codec used for all blocks.
   */
  private Compression.Algorithm compressionCodec = Compression.Algorithm.NONE;

  /**
   * The number of levels in the potentially multi-level data index. Used from version 2 onwards.
   */
  private int numDataIndexLevels;

  /**
   * The offset of the first data block.
   */
  private long firstDataBlockOffset;

  /**
   * It is guaranteed that no key/value data blocks start after this offset in the file.
   */
  private long lastDataBlockOffset;

  /**
   * Raw key comparator class name in version 3.
   */
  // We could write the actual class name from 2.0 onwards and handle BC
  private String comparatorClassName =
    InnerStoreCellComparator.INNER_STORE_COMPARATOR.getClass().getName();

  /**
   * The encryption key.
   */
  private byte[] encryptionKey;

  /**
   * The key namespace.
   */
  private String keyNamespace;

  /**
   * The KEK (key encryption key) checksum.
   */
  private long kekChecksum;

  /**
   * The KEK (key encryption key) metadata.
   */
  private String kekMetadata;

  /**
   * The {@link HFile} format major version.
   */
  private final int majorVersion;

  /**
   * The {@link HFile} format minor version.
   */
  private final int minorVersion;

  FixedFileTrailer(int majorVersion, int minorVersion) {
    this.majorVersion = majorVersion;
    this.minorVersion = minorVersion;
    HFile.checkFormatVersion(majorVersion);
  }

  private static int[] computeTrailerSizeByVersion() {
    int[] versionToSize = new int[HFile.MAX_FORMAT_VERSION + 1];
    // We support only two major versions now, i.e. V2 and V3.
    versionToSize[2] = 212;
    for (int version = 3; version <= HFile.MAX_FORMAT_VERSION; version++) {
      // The max trailer size for V3 and above is fixed at 4 KB to leave room for future
      // additions; this holds as long as the actual trailer content stays under 4 KB.
      versionToSize[version] = 1024 * 4;
    }
    return versionToSize;
  }

  private static int getMaxTrailerSize() {
    int maxSize = 0;
    for (int version = HFile.MIN_FORMAT_VERSION; version <= HFile.MAX_FORMAT_VERSION; ++version) {
      maxSize = Math.max(getTrailerSize(version), maxSize);
    }
    return maxSize;
  }

  private static final int[] TRAILER_SIZE = computeTrailerSizeByVersion();
  private static final int MAX_TRAILER_SIZE = getMaxTrailerSize();

  // Size of the trailer parts that are not protobuf-encoded: the block magic at the front
  // and the version int at the end.
  private static final int NOT_PB_SIZE = BlockType.MAGIC_LENGTH + Bytes.SIZEOF_INT;

  static int getTrailerSize(int version) {
    return TRAILER_SIZE[version];
  }

  public int getTrailerSize() {
    return getTrailerSize(majorVersion);
  }
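
  // Illustrative values given computeTrailerSizeByVersion() above:
  //   getTrailerSize(2) == 212  (the exact fixed v2 trailer size)
  //   getTrailerSize(3) == 4096 (a fixed upper bound, padded out by serializeAsPB())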

  /**
   * Write the trailer to a data stream. The trailer body is written as a protobuf message (see
   * {@link #serializeAsPB(DataOutputStream)}) padded out to the fixed trailer size, followed by
   * the four-byte version int.
   */
  void serialize(DataOutputStream outputStream) throws IOException {
    HFile.checkFormatVersion(majorVersion);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream baosDos = new DataOutputStream(baos);

    BlockType.TRAILER.write(baosDos);
    serializeAsPB(baosDos);

    // The last 4 bytes of the file encode the major and minor version universally
    baosDos.writeInt(materializeVersion(majorVersion, minorVersion));

    baos.writeTo(outputStream);
  }

  HFileProtos.FileTrailerProto toProtobuf() {
    HFileProtos.FileTrailerProto.Builder builder = HFileProtos.FileTrailerProto.newBuilder()
      .setFileInfoOffset(fileInfoOffset).setLoadOnOpenDataOffset(loadOnOpenDataOffset)
      .setUncompressedDataIndexSize(uncompressedDataIndexSize)
      .setTotalUncompressedBytes(totalUncompressedBytes).setDataIndexCount(dataIndexCount)
      .setMetaIndexCount(metaIndexCount).setEntryCount(entryCount)
      .setNumDataIndexLevels(numDataIndexLevels).setFirstDataBlockOffset(firstDataBlockOffset)
      .setLastDataBlockOffset(lastDataBlockOffset).setComparatorClassName(comparatorClassName)
      .setCompressionCodec(compressionCodec.ordinal());
    if (encryptionKey != null) {
      builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(encryptionKey));
    }
    if (keyNamespace != null) {
      builder.setKeyNamespace(keyNamespace);
    }
    if (kekMetadata != null) {
      builder.setKekMetadata(kekMetadata);
    }
    if (kekChecksum != 0) {
      builder.setKekChecksum(kekChecksum);
    }
    return builder.build();
  }

  /**
   * Write trailer data as protobuf.
   */
  void serializeAsPB(DataOutputStream output) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // We need this extra copy unfortunately to determine the final size of the
    // delimited output, see use of baos.size() below.
    toProtobuf().writeDelimitedTo(baos);
    baos.writeTo(output);
    // Pad to make up the difference between the variable PB encoding length and the
    // length when encoded as writable under earlier V2 formats. Failure to pad properly,
    // or a PB encoding that is too big, would mean the trailer won't be read in properly
    // by HFile.
    int padding = getTrailerSize() - NOT_PB_SIZE - baos.size();
    if (padding < 0) {
      throw new IOException("Pbuf encoding size exceeded fixed trailer size limit");
    }
    for (int i = 0; i < padding; i++) {
      output.write(0);
    }
  }
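
  // For illustration, the resulting on-disk trailer layout for a v3 trailer of size 4096,
  // assuming the standard 8-byte block magic (so NOT_PB_SIZE = MAGIC_LENGTH + SIZEOF_INT = 12)
  // and writing 'n' for the length of the delimited protobuf message:
  //
  //   TRAILER block magic            8 bytes
  //   delimited FileTrailerProto     n bytes
  //   zero padding                   4096 - 12 - n bytes
  //   version int (minor | major)    4 bytes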

  /**
   * Deserialize the fixed file trailer from the given stream. The version needs to already be
   * specified. Make sure this is consistent with {@link #serialize(DataOutputStream)}.
   */
  void deserialize(DataInputStream inputStream) throws IOException {
    HFile.checkFormatVersion(majorVersion);

    BlockType.TRAILER.readAndCheck(inputStream);

    if (
      majorVersion > 2
        || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION)
    ) {
      deserializeFromPB(inputStream);
    } else {
      deserializeFromWritable(inputStream);
    }

    // The last 4 bytes of the file encode the major and minor version universally
    int version = inputStream.readInt();
    expectMajorVersion(extractMajorVersion(version));
    expectMinorVersion(extractMinorVersion(version));
  }

  /**
   * Deserialize the file trailer as protobuf.
   */
  void deserializeFromPB(DataInputStream inputStream) throws IOException {
    // Read the PB, then skip the padding. As used from readFromStream(), the stream is backed
    // by an in-memory buffer, so available() tells us how many bytes the delimited parse
    // consumed.
    int start = inputStream.available();
    HFileProtos.FileTrailerProto trailerProto =
      HFileProtos.FileTrailerProto.parser().parseDelimitedFrom(inputStream);
    int size = start - inputStream.available();
    inputStream.skip(getTrailerSize() - NOT_PB_SIZE - size);

    // process the PB
    if (trailerProto.hasFileInfoOffset()) {
      fileInfoOffset = trailerProto.getFileInfoOffset();
    }
    if (trailerProto.hasLoadOnOpenDataOffset()) {
      loadOnOpenDataOffset = trailerProto.getLoadOnOpenDataOffset();
    }
    if (trailerProto.hasUncompressedDataIndexSize()) {
      uncompressedDataIndexSize = trailerProto.getUncompressedDataIndexSize();
    }
    if (trailerProto.hasTotalUncompressedBytes()) {
      totalUncompressedBytes = trailerProto.getTotalUncompressedBytes();
    }
    if (trailerProto.hasDataIndexCount()) {
      dataIndexCount = trailerProto.getDataIndexCount();
    }
    if (trailerProto.hasMetaIndexCount()) {
      metaIndexCount = trailerProto.getMetaIndexCount();
    }
    if (trailerProto.hasEntryCount()) {
      entryCount = trailerProto.getEntryCount();
    }
    if (trailerProto.hasNumDataIndexLevels()) {
      numDataIndexLevels = trailerProto.getNumDataIndexLevels();
    }
    if (trailerProto.hasFirstDataBlockOffset()) {
      firstDataBlockOffset = trailerProto.getFirstDataBlockOffset();
    }
    if (trailerProto.hasLastDataBlockOffset()) {
      lastDataBlockOffset = trailerProto.getLastDataBlockOffset();
    }
    if (trailerProto.hasComparatorClassName()) {
      setComparatorClass(getComparatorClass(trailerProto.getComparatorClassName()));
    }
    if (trailerProto.hasCompressionCodec()) {
      compressionCodec = Compression.Algorithm.values()[trailerProto.getCompressionCodec()];
    } else {
      compressionCodec = Compression.Algorithm.NONE;
    }
    if (trailerProto.hasEncryptionKey()) {
      encryptionKey = trailerProto.getEncryptionKey().toByteArray();
    }
    if (trailerProto.hasKeyNamespace()) {
      keyNamespace = trailerProto.getKeyNamespace();
    }
    if (trailerProto.hasKekMetadata()) {
      kekMetadata = trailerProto.getKekMetadata();
    }
    if (trailerProto.hasKekChecksum()) {
      kekChecksum = trailerProto.getKekChecksum();
    }
  }
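
  // Note: the compression codec is persisted by Compression.Algorithm ordinal (see toProtobuf()
  // and both deserialize paths), so the enum's declaration order is effectively part of the
  // on-disk format: appending new algorithms is safe, but reordering existing ones would make
  // older files resolve to the wrong codec.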

  /**
   * Deserialize the file trailer as writable data.
   */
  void deserializeFromWritable(DataInput input) throws IOException {
    fileInfoOffset = input.readLong();
    loadOnOpenDataOffset = input.readLong();
    dataIndexCount = input.readInt();
    uncompressedDataIndexSize = input.readLong();
    metaIndexCount = input.readInt();

    totalUncompressedBytes = input.readLong();
    entryCount = input.readLong();
    compressionCodec = Compression.Algorithm.values()[input.readInt()];

    numDataIndexLevels = input.readInt();
    firstDataBlockOffset = input.readLong();
    lastDataBlockOffset = input.readLong();
    // TODO this is a classname encoded into an HFile's trailer. We are going to need to have
    // some compat code here.
    setComparatorClass(
      getComparatorClass(Bytes.readStringFixedSize(input, MAX_COMPARATOR_NAME_LENGTH)));
  }

  private void append(StringBuilder sb, String s) {
    if (sb.length() > 0) {
      sb.append(", ");
    }
    sb.append(s);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    append(sb, "fileInfoOffset=" + fileInfoOffset);
    append(sb, "loadOnOpenDataOffset=" + loadOnOpenDataOffset);
    append(sb, "dataIndexCount=" + dataIndexCount);
    append(sb, "metaIndexCount=" + metaIndexCount);
    append(sb, "totalUncompressedBytes=" + totalUncompressedBytes);
    append(sb, "entryCount=" + entryCount);
    append(sb, "compressionCodec=" + compressionCodec);
    append(sb, "uncompressedDataIndexSize=" + uncompressedDataIndexSize);
    append(sb, "numDataIndexLevels=" + numDataIndexLevels);
    append(sb, "firstDataBlockOffset=" + firstDataBlockOffset);
    append(sb, "lastDataBlockOffset=" + lastDataBlockOffset);
    append(sb, "comparatorClassName=" + comparatorClassName);
    if (majorVersion >= 3) {
      append(sb, "encryptionKey=" + (encryptionKey != null ? "PRESENT" : "NONE"));
    }
    if (keyNamespace != null) {
      append(sb, "keyNamespace=" + keyNamespace);
    }
    append(sb, "majorVersion=" + majorVersion);
    append(sb, "minorVersion=" + minorVersion);

    return sb.toString();
  }

  /**
   * Reads a file trailer from the given file.
   * @param istream  the input stream with the ability to seek. Does not have to be buffered, as
   *                 only one read operation is made.
   * @param fileSize the file size. Can be obtained using
   *                 {@link org.apache.hadoop.fs.FileSystem#getFileStatus(org.apache.hadoop.fs.Path)}.
   * @return the fixed file trailer read
   * @throws IOException if failed to read from the underlying stream, or the trailer is corrupted,
   *                     or the version of the trailer is unsupported
   */
  public static FixedFileTrailer readFromStream(FSDataInputStream istream, long fileSize)
    throws IOException {
    int bufferSize = MAX_TRAILER_SIZE;
    long seekPoint = fileSize - bufferSize;
    if (seekPoint < 0) {
      // It is hard to imagine such a small HFile.
      seekPoint = 0;
      bufferSize = (int) fileSize;
    }

    istream.seek(seekPoint);

    ByteBuffer buf = ByteBuffer.allocate(bufferSize);
    istream.readFully(buf.array(), buf.arrayOffset(), buf.arrayOffset() + buf.limit());

    // Read the version from the last int of the file.
    buf.position(buf.limit() - Bytes.SIZEOF_INT);
    int version = buf.getInt();

    // Extract the major and minor versions.
    int majorVersion = extractMajorVersion(version);
    int minorVersion = extractMinorVersion(version);

    HFile.checkFormatVersion(majorVersion); // throws IAE if invalid

    int trailerSize = getTrailerSize(majorVersion);

    FixedFileTrailer fft = new FixedFileTrailer(majorVersion, minorVersion);
    fft.deserialize(new DataInputStream(new ByteArrayInputStream(buf.array(),
      buf.arrayOffset() + bufferSize - trailerSize, trailerSize)));
    boolean isScanMetricsEnabled = ThreadLocalServerSideScanMetrics.isScanMetricsEnabled();
    if (isScanMetricsEnabled) {
      ThreadLocalServerSideScanMetrics.addBytesReadFromFs(trailerSize);
      ThreadLocalServerSideScanMetrics.addBlockReadOpsCount(1);
    }
    return fft;
  }
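
  // A minimal usage sketch; 'fs' and 'path' are hypothetical names for an
  // org.apache.hadoop.fs.FileSystem and a Path pointing at an existing HFile:
  //
  //   long fileSize = fs.getFileStatus(path).getLen();
  //   try (FSDataInputStream in = fs.open(path)) {
  //     FixedFileTrailer trailer = FixedFileTrailer.readFromStream(in, fileSize);
  //     LOG.debug("Read trailer: {}", trailer);
  //   }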

  public void expectMajorVersion(int expected) {
    if (majorVersion != expected) {
      throw new IllegalArgumentException(
        "Invalid HFile major version: " + majorVersion + " (expected: " + expected + ")");
    }
  }

  public void expectMinorVersion(int expected) {
    if (minorVersion != expected) {
      throw new IllegalArgumentException(
        "Invalid HFile minor version: " + minorVersion + " (expected: " + expected + ")");
    }
  }

  public void expectAtLeastMajorVersion(int lowerBound) {
    if (majorVersion < lowerBound) {
      throw new IllegalArgumentException("Invalid HFile major version: " + majorVersion
        + " (expected: " + lowerBound + " or higher).");
    }
  }

  public long getFileInfoOffset() {
    return fileInfoOffset;
  }

  public void setFileInfoOffset(long fileInfoOffset) {
    this.fileInfoOffset = fileInfoOffset;
  }

  public long getLoadOnOpenDataOffset() {
    return loadOnOpenDataOffset;
  }

  public void setLoadOnOpenOffset(long loadOnOpenDataOffset) {
    this.loadOnOpenDataOffset = loadOnOpenDataOffset;
  }

  public int getDataIndexCount() {
    return dataIndexCount;
  }

  public void setDataIndexCount(int dataIndexCount) {
    this.dataIndexCount = dataIndexCount;
  }

  public int getMetaIndexCount() {
    return metaIndexCount;
  }

  public void setMetaIndexCount(int metaIndexCount) {
    this.metaIndexCount = metaIndexCount;
  }

  public long getTotalUncompressedBytes() {
    return totalUncompressedBytes;
  }

  public void setTotalUncompressedBytes(long totalUncompressedBytes) {
    this.totalUncompressedBytes = totalUncompressedBytes;
  }

  public long getEntryCount() {
    return entryCount;
  }

  public void setEntryCount(long newEntryCount) {
    entryCount = newEntryCount;
  }

  public Compression.Algorithm getCompressionCodec() {
    return compressionCodec;
  }

  public void setCompressionCodec(Compression.Algorithm compressionCodec) {
    this.compressionCodec = compressionCodec;
  }

  public int getNumDataIndexLevels() {
    expectAtLeastMajorVersion(2);
    return numDataIndexLevels;
  }

  public void setNumDataIndexLevels(int numDataIndexLevels) {
    expectAtLeastMajorVersion(2);
    this.numDataIndexLevels = numDataIndexLevels;
  }

  public long getLastDataBlockOffset() {
    expectAtLeastMajorVersion(2);
    return lastDataBlockOffset;
  }

  public void setLastDataBlockOffset(long lastDataBlockOffset) {
    expectAtLeastMajorVersion(2);
    this.lastDataBlockOffset = lastDataBlockOffset;
  }

  public long getFirstDataBlockOffset() {
    expectAtLeastMajorVersion(2);
    return firstDataBlockOffset;
  }

  public void setFirstDataBlockOffset(long firstDataBlockOffset) {
    expectAtLeastMajorVersion(2);
    this.firstDataBlockOffset = firstDataBlockOffset;
  }

  public String getComparatorClassName() {
    return comparatorClassName;
  }

  /**
   * Returns the major version of this HFile format.
   */
  public int getMajorVersion() {
    return majorVersion;
  }

  /**
   * Returns the minor version of this HFile format.
   */
  public int getMinorVersion() {
    return minorVersion;
  }

  public void setComparatorClass(Class<? extends CellComparator> klass) {
    // Is the comparator instantiable?
    try {
      // If null, it should be the Bytes.BYTES_RAWCOMPARATOR
      if (klass != null) {
        // Instantiate once purely to verify the class is usable; the instance is discarded.
        klass.getDeclaredConstructor().newInstance();
        // If the name wasn't one of the legacy names, maybe it's a legitimate new kind of
        // comparator.
        this.comparatorClassName = klass.getName();
      }
    } catch (Exception e) {
      throw new RuntimeException("Comparator class " + klass.getName() + " is not instantiable",
        e);
    }
  }
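
  // How getComparatorClass(String) below resolves names, including legacy ones:
  //   "org.apache.hadoop.hbase.KeyValue$KVComparator"       -> InnerStoreCellComparator.class
  //   "org.apache.hadoop.hbase.KeyValue$MetaComparator"     -> MetaCellComparator.class
  //   "org.apache.hadoop.hbase.KeyValue$RawBytesComparator" -> null (Bytes.BYTES_RAWCOMPARATOR
  //                                                            is not a CellComparator)
  //   any other name is loaded reflectively via Class.forName().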
  @SuppressWarnings("unchecked")
  private static Class<? extends CellComparator> getComparatorClass(String comparatorClassName)
    throws IOException {
    Class<? extends CellComparator> comparatorKlass;
    // For backward compatibility: we force the comparator class name to be
    // "KeyValue$KVComparator" and "KeyValue$MetaComparator" on 2.x, although we do not use them
    // on newer 2.x versions, to maintain compatibility while upgrading and downgrading between
    // different 2.x versions. So here on 3.x we still need to check these two class names,
    // although the actual classes have already been purged.
    if (
      comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$KVComparator")
        || comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator")
    ) {
      comparatorKlass = InnerStoreCellComparator.class;
    } else if (
      comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$MetaComparator")
        || comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator")
        || comparatorClassName
          .equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator")
        || comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator")
    ) {
      comparatorKlass = MetaCellComparator.class;
    } else if (
      comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator")
        || comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator")
    ) {
      // When the comparator to be used is Bytes.BYTES_RAWCOMPARATOR, we just return null here,
      // because Bytes.BYTES_RAWCOMPARATOR is not a CellComparator.
      comparatorKlass = null;
    } else {
      // If the name wasn't one of the legacy names, maybe it's a legitimate new kind of
      // comparator.
      try {
        comparatorKlass = (Class<? extends CellComparator>) Class.forName(comparatorClassName);
      } catch (ClassNotFoundException e) {
        throw new IOException(e);
      }
    }
    return comparatorKlass;
  }

  static CellComparator createComparator(String comparatorClassName) throws IOException {
    if (
      comparatorClassName
        .equals(InnerStoreCellComparator.INNER_STORE_COMPARATOR.getClass().getName())
    ) {
      return InnerStoreCellComparator.INNER_STORE_COMPARATOR;
    } else
      if (comparatorClassName.equals(MetaCellComparator.META_COMPARATOR.getClass().getName())) {
        return MetaCellComparator.META_COMPARATOR;
      }
    try {
      Class<? extends CellComparator> comparatorClass = getComparatorClass(comparatorClassName);
      if (comparatorClass != null) {
        return comparatorClass.getDeclaredConstructor().newInstance();
      }
      LOG.warn("No comparator class for " + comparatorClassName + ". Returning null.");
      return null;
    } catch (Exception e) {
      throw new IOException("Comparator class " + comparatorClassName + " is not instantiable",
        e);
    }
  }

  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
  public CellComparator createComparator() throws IOException {
    expectAtLeastMajorVersion(2);
    return createComparator(comparatorClassName);
  }

  public long getUncompressedDataIndexSize() {
    return uncompressedDataIndexSize;
  }

  public void setUncompressedDataIndexSize(long uncompressedDataIndexSize) {
    expectAtLeastMajorVersion(2);
    this.uncompressedDataIndexSize = uncompressedDataIndexSize;
  }

  public byte[] getEncryptionKey() {
    // This is a v3 feature, but when reading a v2 file the encryptionKey will simply be null,
    // which is fine for this feature.
    expectAtLeastMajorVersion(2);
    return encryptionKey;
  }

  public String getKeyNamespace() {
    return keyNamespace;
  }

  public void setKeyNamespace(String keyNamespace) {
    this.keyNamespace = keyNamespace;
  }

  public void setKEKChecksum(long kekChecksum) {
    this.kekChecksum = kekChecksum;
  }

  public long getKEKChecksum() {
    return kekChecksum;
  }

  public void setEncryptionKey(byte[] keyBytes) {
    this.encryptionKey = keyBytes;
  }

  public String getKEKMetadata() {
    return kekMetadata;
  }

  public void setKEKMetadata(String kekMetadata) {
    this.kekMetadata = kekMetadata;
  }
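
  // Worked example of the version int handled by the three helpers below:
  //   materializeVersion(3, 0) == 0x00000003
  //   materializeVersion(2, 3) == 0x03000002
  //   extractMajorVersion(0x03000002) == 2, extractMinorVersion(0x03000002) == 3
  // Written big-endian by DataOutputStream.writeInt(), the minor version byte is therefore the
  // first of the file's last four bytes, followed by the three major version bytes.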

  /**
   * Extracts the major version from a 4-byte serialized version. The major version is in the
   * three least significant bytes.
   */
  private static int extractMajorVersion(int serializedVersion) {
    return (serializedVersion & 0x00ffffff);
  }

  /**
   * Extracts the minor version from a 4-byte serialized version. The minor version is in the
   * single most significant byte.
   */
  private static int extractMinorVersion(int serializedVersion) {
    return (serializedVersion >>> 24);
  }

  /**
   * Create a 4-byte serialized version number by combining the minor and major version numbers.
   */
  static int materializeVersion(int majorVersion, int minorVersion) {
    return ((majorVersion & 0x00ffffff) | (minorVersion << 24));
  }
}