/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

/**
 * Convenience class for building {@link TableDescriptor} instances. Holds the well-known
 * metadata keys (stored as (key, value) pairs on the descriptor) and their defaults.
 * @since 2.0.0
 */
@InterfaceAudience.Public
public class TableDescriptorBuilder {
  public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class);

  /** Metadata key naming the region split policy class for the table. */
  @InterfaceAudience.Private
  public static final String SPLIT_POLICY = "SPLIT_POLICY";
  private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes the maximum size of the store file after which a
   * region split occurs.
   */
  @InterfaceAudience.Private
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final Bytes MAX_FILESIZE_KEY
          = new Bytes(Bytes.toBytes(MAX_FILESIZE));

  /** Metadata key recording the table owner; deprecated along with the owner setters. */
  @InterfaceAudience.Private
  public static final String OWNER = "OWNER";
  @InterfaceAudience.Private
  public static final Bytes OWNER_KEY
          = new Bytes(Bytes.toBytes(OWNER));

  /**
   * Used by rest interface to access this metadata attribute
   * which denotes if the table is Read Only.
   */
  @InterfaceAudience.Private
  public static final String READONLY = "READONLY";
  private static final Bytes READONLY_KEY
          = new Bytes(Bytes.toBytes(READONLY));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is compaction enabled.
   */
  @InterfaceAudience.Private
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final Bytes COMPACTION_ENABLED_KEY
          = new Bytes(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is split enabled.
   */
  @InterfaceAudience.Private
  public static final String SPLIT_ENABLED = "SPLIT_ENABLED";
  private static final Bytes SPLIT_ENABLED_KEY = new Bytes(Bytes.toBytes(SPLIT_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is merge enabled.
   */
  @InterfaceAudience.Private
  public static final String MERGE_ENABLED = "MERGE_ENABLED";
  private static final Bytes MERGE_ENABLED_KEY = new Bytes(Bytes.toBytes(MERGE_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which represents the maximum size of the memstore after which its
   * contents are flushed onto the disk.
   */
  @InterfaceAudience.Private
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final Bytes MEMSTORE_FLUSHSIZE_KEY
          = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  /** Metadata key naming the flush policy class for the table. */
  @InterfaceAudience.Private
  public static final String FLUSH_POLICY = "FLUSH_POLICY";
  private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY));

  /**
   * Used by rest interface to access this metadata attribute
   * which denotes if the table is a catalog table, i.e. <code>hbase:meta</code>.
   */
  @InterfaceAudience.Private
  public static final String IS_META = "IS_META";
  private static final Bytes IS_META_KEY
          = new Bytes(Bytes.toBytes(IS_META));

  /**
   * {@link Durability} setting for the table.
   */
  @InterfaceAudience.Private
  public static final String DURABILITY = "DURABILITY";
  private static final Bytes DURABILITY_KEY
          = new Bytes(Bytes.toBytes("DURABILITY"));

  /**
   * The number of region replicas for the table.
   */
  @InterfaceAudience.Private
  public static final String REGION_REPLICATION = "REGION_REPLICATION";
  private static final Bytes REGION_REPLICATION_KEY
          = new Bytes(Bytes.toBytes(REGION_REPLICATION));

  /**
   * The flag to indicate whether or not the memstore should be
   * replicated for read-replicas (CONSISTENCY =&gt; TIMELINE).
   */
  @InterfaceAudience.Private
  public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
  private static final Bytes REGION_MEMSTORE_REPLICATION_KEY
          = new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));

  // Key mirroring RegionReplicaUtil's configuration name so it can be stored on the descriptor.
  private static final Bytes REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY
          = new Bytes(Bytes.toBytes(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY));

  /**
   * Used by shell/rest interface to access this metadata
   * attribute which denotes if the table should be treated by region
   * normalizer.
   */
  @InterfaceAudience.Private
  public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
  private static final Bytes NORMALIZATION_ENABLED_KEY
          = new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));

  /** Target region count used by the region normalizer when planning for this table. */
  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_COUNT =
      "NORMALIZER_TARGET_REGION_COUNT";
  private static final Bytes NORMALIZER_TARGET_REGION_COUNT_KEY =
      new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT));

  /** Target region size used by the region normalizer when planning for this table. */
  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE";
  private static final Bytes NORMALIZER_TARGET_REGION_SIZE_KEY =
      new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE));

  /**
   * Default durability for HTD is USE_DEFAULT, which defaults to HBase-global
   * default value
   */
  private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;

  /** Metadata key for the table's relative rpc-scheduling priority. */
  @InterfaceAudience.Private
  public static final String PRIORITY = "PRIORITY";
  private static final Bytes PRIORITY_KEY
          = new Bytes(Bytes.toBytes(PRIORITY));

  /**
   * Relative priority of the table used for rpc scheduling
   */
  private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS;

  /**
   * Constant that denotes whether the table is READONLY by default and is false
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes whether the table is split enabled by default
   */
  public static final boolean DEFAULT_SPLIT_ENABLED = true;

  /**
   * Constant that denotes whether the table is merge enabled by default
   */
  public static final boolean DEFAULT_MERGE_ENABLED = true;

  /**
   * Constant that denotes whether the table is normalized by default.
   */
  public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;

  /**
   * Constant that denotes the maximum default size of the memstore in bytes after which
   * the contents are flushed to the store files.
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;

  // Human-readable defaults shown for the well-known keys, plus the reserved key set that
  // user code may not overwrite with arbitrary values.
  private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
  private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();

  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
            String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
            String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
    DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
    // Every key with a default is reserved; IS_META has no default but is reserved too.
    DEFAULT_VALUES.keySet().stream()
            .map(s -> new Bytes(Bytes.toBytes(s))).forEach(RESERVED_KEYWORDS::add);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }

  @InterfaceAudience.Private
  public final static String NAMESPACE_FAMILY_INFO = "info";
  @InterfaceAudience.Private
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  @InterfaceAudience.Private
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /**
   * <pre>
   * Pattern that matches a coprocessor specification. Form is:
   * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]}
   * where arguments are {@code <KEY> '=' <VALUE> [,...]}
   * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
   * </pre>
   */
  private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
    Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");

  private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
  private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
  private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile(
          "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" +
                  CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
  private static final Pattern CP_HTD_ATTR_KEY_PATTERN =
          Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);

  /**
   * Table descriptor for namespace table
   */
  // TODO We used to set CacheDataInL1 for NS table. When we have BucketCache in file mode, now the
  // NS data goes to File mode BC only. Test how that affect the system. If too much, we have to
  // rethink about adding back the setCacheDataInL1 for NS table.
  public static final TableDescriptor NAMESPACE_TABLEDESC
    = TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES)
        // Ten is arbitrary number.  Keep versions to help debugging.
        .setMaxVersions(10)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
        .build())
      .build();

  // The mutable descriptor this builder accumulates state into; build() returns a copy of it.
  private final ModifyableTableDescriptor desc;

  /**
   * @param desc The table descriptor to serialize
   * @return This instance serialized with pb with pb magic prefix
   */
  public static byte[] toByteArray(TableDescriptor desc) {
    if (desc instanceof ModifyableTableDescriptor) {
      return ((ModifyableTableDescriptor) desc).toByteArray();
    }
    return new ModifyableTableDescriptor(desc).toByteArray();
  }

  /**
   * The input should be created by {@link #toByteArray}.
   * @param pbBytes A pb serialized TableDescriptor instance with pb magic prefix
   * @return This instance serialized with pb with pb magic prefix
   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if the bytes cannot be parsed
   */
  public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException {
    return ModifyableTableDescriptor.parseFrom(pbBytes);
  }

  public static TableDescriptorBuilder newBuilder(final TableName name) {
    return new TableDescriptorBuilder(name);
  }

  public static TableDescriptor copy(TableDescriptor desc) {
    return new ModifyableTableDescriptor(desc);
  }

  public static TableDescriptor copy(TableName name, TableDescriptor desc) {
    return new ModifyableTableDescriptor(name, desc);
  }

  /**
   * Copy all values, families, and name from the input.
   * @param desc The desciptor to copy
   * @return A clone of input
   */
  public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) {
    return new TableDescriptorBuilder(desc);
  }

  private TableDescriptorBuilder(final TableName name) {
    this.desc = new ModifyableTableDescriptor(name);
  }

  private TableDescriptorBuilder(final TableDescriptor desc) {
    this.desc = new ModifyableTableDescriptor(desc);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #setCoprocessor(String)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className) throws IOException {
    return addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className, Path jarFilePath,
    int priority, final Map<String, String> kvs) throws IOException {
    desc.setCoprocessor(
      CoprocessorDescriptorBuilder.newBuilder(className)
        .setJarPath(jarFilePath == null ? null : jarFilePath.toString())
        .setPriority(priority)
        .setProperties(kvs == null ? Collections.emptyMap() : kvs)
        .build());
    return this;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessorWithSpec(final String specStr) throws IOException {
    desc.setCoprocessorWithSpec(specStr);
    return this;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #setColumnFamily(ColumnFamilyDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(family);
    return this;
  }

  public TableDescriptorBuilder setCoprocessor(String className) throws IOException {
    return setCoprocessor(CoprocessorDescriptorBuilder.of(className));
  }

  public TableDescriptorBuilder setCoprocessor(CoprocessorDescriptor cpDesc) throws IOException {
    desc.setCoprocessor(Objects.requireNonNull(cpDesc));
    return this;
  }

  public TableDescriptorBuilder setCoprocessors(Collection<CoprocessorDescriptor> cpDescs)
    throws IOException {
    for (CoprocessorDescriptor cpDesc : cpDescs) {
      desc.setCoprocessor(cpDesc);
    }
    return this;
  }

  public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder setColumnFamilies(
    final Collection<ColumnFamilyDescriptor> families) {
    families.forEach(desc::setColumnFamily);
    return this;
  }

  public TableDescriptorBuilder modifyColumnFamily(final ColumnFamilyDescriptor family) {
    desc.modifyColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder removeValue(Bytes key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(byte[] key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeColumnFamily(final byte[] name) {
    desc.removeColumnFamily(name);
    return this;
  }

  public TableDescriptorBuilder removeCoprocessor(String className) {
    desc.removeCoprocessor(className);
    return this;
  }

  public TableDescriptorBuilder setCompactionEnabled(final boolean isEnable) {
    desc.setCompactionEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setSplitEnabled(final boolean isEnable) {
    desc.setSplitEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setMergeEnabled(final boolean isEnable) {
    desc.setMergeEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setDurability(Durability durability) {
    desc.setDurability(durability);
    return this;
  }

  public TableDescriptorBuilder setFlushPolicyClassName(String clazz) {
    desc.setFlushPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(long maxFileSize) {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) {
    desc.setMemStoreFlushSize(memstoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) {
    desc.setNormalizerTargetRegionCount(regionCount);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionSize(final long regionSize) {
    desc.setNormalizerTargetRegionSize(regionSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) {
    desc.setNormalizationEnabled(isEnable);
    return this;
  }

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwner(User owner) {
    desc.setOwner(owner);
    return this;
  }

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwnerString(String ownerString) {
    desc.setOwnerString(ownerString);
    return this;
  }

  public TableDescriptorBuilder setPriority(int priority) {
    desc.setPriority(priority);
    return this;
  }

  public TableDescriptorBuilder setReadOnly(final boolean readOnly) {
    desc.setReadOnly(readOnly);
    return this;
  }

  public TableDescriptorBuilder setRegionMemStoreReplication(boolean memstoreReplication) {
    desc.setRegionMemStoreReplication(memstoreReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionReplication(int regionReplication) {
    desc.setRegionReplication(regionReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionSplitPolicyClassName(String clazz) {
    desc.setRegionSplitPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setValue(final String key, final String value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final Bytes key, final Bytes value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final byte[] key, final byte[] value) {
    desc.setValue(key, value);
    return this;
  }

  /**
   * Sets replication scope all &amp; only the columns already in the builder. Columns added later won't
   * be backfilled with replication scope.
   * @param scope replication scope
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setReplicationScope(int scope) {
    // Iterate over a snapshot of the families; we remove and re-add each one on the live
    // descriptor, which would otherwise be a concurrent modification.
    Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    newFamilies.putAll(desc.families);
    newFamilies
      .forEach((cf, cfDesc) -> {
        desc.removeColumnFamily(cf);
        desc.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope)
          .build());
      });
    return this;
  }

  public TableDescriptor build() {
    return new ModifyableTableDescriptor(desc);
  }

  /**
   * TODO: make this private after removing the HTableDescriptor
   */
  @InterfaceAudience.Private
  public static class ModifyableTableDescriptor
    implements TableDescriptor, Comparable<ModifyableTableDescriptor> {

    private final TableName name;

    /**
     * A map which holds the metadata information of the table. This metadata
     * includes values like IS_META, SPLIT_POLICY, MAX_FILE_SIZE,
     * READONLY, MEMSTORE_FLUSHSIZE etc...
     */
    private final Map<Bytes, Bytes> values = new HashMap<>();

    /**
     * Maps column family name to the respective FamilyDescriptors
     */
    private final Map<byte[], ColumnFamilyDescriptor> families
      = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);

    /**
     * Construct a table descriptor specifying a TableName object
     *
     * @param name Table name.
     * TODO: make this private after removing the HTableDescriptor
     */
    @InterfaceAudience.Private
    public ModifyableTableDescriptor(final TableName name) {
      this(name, Collections.EMPTY_LIST, Collections.EMPTY_MAP);
    }

    private ModifyableTableDescriptor(final TableDescriptor desc) {
      this(desc.getTableName(), Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    /**
     * Construct a table descriptor by cloning the descriptor passed as a
     * parameter.
     * <p>
     * Makes a deep copy of the supplied descriptor.
     * @param name The new name
     * @param desc The descriptor.
     * TODO: make this private after removing the HTableDescriptor
     */
    @InterfaceAudience.Private
    @Deprecated // only used by HTableDescriptor. remove this method if HTD is removed
    public ModifyableTableDescriptor(final TableName name, final TableDescriptor desc) {
      this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    private ModifyableTableDescriptor(final TableName name, final Collection<ColumnFamilyDescriptor> families,
        Map<Bytes, Bytes> values) {
      this.name = name;
      // Deep-copy each family descriptor so later mutation of the source cannot leak in.
      families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c)));
      this.values.putAll(values);
      // IS_META is derived from the table name, never taken from the copied values.
      this.values.put(IS_META_KEY,
        new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME)))));
    }

    /**
     * Checks if this table is <code> hbase:meta </code> region.
     *
     * @return true if this table is <code> hbase:meta </code> region
     */
    @Override
    public boolean isMetaRegion() {
      return getOrDefault(IS_META_KEY, Boolean::valueOf, false);
    }

    /**
     * Checks if the table is a <code>hbase:meta</code> table
     *
     * @return true if table is <code> hbase:meta </code> region.
     */
    @Override
    public boolean isMetaTable() {
      return isMetaRegion();
    }

    @Override
    public Bytes getValue(Bytes key) {
      Bytes rval = values.get(key);
      // Return a defensive copy so callers cannot mutate our stored value.
      return rval == null ? null : new Bytes(rval.copyBytes());
    }

    @Override
    public String getValue(String key) {
      Bytes rval = values.get(new Bytes(Bytes.toBytes(key)));
      return rval == null ? null : Bytes.toString(rval.get(), rval.getOffset(), rval.getLength());
    }

    @Override
    public byte[] getValue(byte[] key) {
      Bytes value = values.get(new Bytes(key));
      return value == null ? null : value.copyBytes();
    }

    // Looks up the key and converts its String form via the given function, or returns
    // defaultValue when the key is absent.
    private <T> T getOrDefault(Bytes key, Function<String, T> function, T defaultValue) {
      Bytes value = values.get(key);
      if (value == null) {
        return defaultValue;
      } else {
        return function.apply(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      }
    }

    /**
     * Getter for fetching an unmodifiable {@link #values} map.
     *
     * @return unmodifiable map {@link #values}.
     * @see #values
     */
    @Override
    public Map<Bytes, Bytes> getValues() {
      // shallow pointer copy
      return Collections.unmodifiableMap(values);
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     * @return the modifyable TD
     * @see #values
     */
    public ModifyableTableDescriptor setValue(byte[] key, byte[] value) {
      return setValue(toBytesOrNull(key, v -> v),
              toBytesOrNull(value, v -> v));
    }

    public ModifyableTableDescriptor setValue(String key, String value) {
      return setValue(toBytesOrNull(key, Bytes::toBytes),
              toBytesOrNull(value, Bytes::toBytes));
    }

    /*
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    private ModifyableTableDescriptor setValue(final Bytes key,
            final String value) {
      return setValue(key, toBytesOrNull(value, Bytes::toBytes));
    }

    /*
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
      if (value == null) {
        values.remove(key);
      } else {
        values.put(key, value);
      }
      return this;
    }

    // Null-safe wrap of t into a Bytes via the supplied serializer.
    private static <T> Bytes toBytesOrNull(T t, Function<T, byte[]> f) {
      if (t == null) {
        return null;
      } else {
        return new Bytes(f.apply(t));
      }
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key Key whose key and value we're to remove from TableDescriptor
     * parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(Bytes key) {
      return setValue(key, (Bytes) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key Key whose key and value we're to remove from TableDescriptor
     * parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final byte[] key) {
      return removeValue(new Bytes(key));
    }

    /**
     * Check if the readOnly flag of the table is set. If the readOnly flag is
     * set then the contents of the table can only be read from but not
     * modified.
     *
     * @return true if all columns in the table should be read only
     */
    @Override
    public boolean isReadOnly() {
      return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY);
    }

    /**
     * Setting the table as read only sets all the columns in the table as read
     * only. By default all tables are modifiable, but if the readOnly flag is
     * set to true then the contents of the table can only be read but not
     * modified.
     *
     * @param readOnly True if all of the columns in the table should be read
     * only.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setReadOnly(final boolean readOnly) {
      return setValue(READONLY_KEY, Boolean.toString(readOnly));
    }

    /**
     * Check if the compaction enable flag of the table is true. If flag is
     * false then no minor/major compactions will be done in real.
     *
     * @return true if table compaction enabled
     */
    @Override
    public boolean isCompactionEnabled() {
      return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED);
    }

    /**
     * Setting the table compaction enable flag.
     *
     * @param isEnable True if enable compaction.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) {
      return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the split enable flag of the table is true. If flag is false then no split will be
     * done.
     *
     * @return true if table region split enabled
     */
    @Override
    public boolean isSplitEnabled() {
      return getOrDefault(SPLIT_ENABLED_KEY, Boolean::valueOf, DEFAULT_SPLIT_ENABLED);
    }

    /**
     * Setting the table region split enable flag.
     * @param isEnable True if enable region split.
     *
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) {
      return setValue(SPLIT_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the region merge enable flag of the table is true. If flag is false then no merge
     * will be done.
     *
     * @return true if table region merge enabled
     */
    @Override
    public boolean isMergeEnabled() {
      return getOrDefault(MERGE_ENABLED_KEY, Boolean::valueOf, DEFAULT_MERGE_ENABLED);
    }

    /**
     * Setting the table region merge enable flag.
     * @param isEnable True if enable region merge.
     *
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) {
      return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if normalization enable flag of the table is true. If the flag is
     * false then the region normalizer won't attempt to normalize this table.
     *
     * @return true if region normalization is enabled for this table
     */
    @Override
    public boolean isNormalizationEnabled() {
      return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, DEFAULT_NORMALIZATION_ENABLED);
    }

    /**
     * Check if there is the target region count. If so, the normalize plan will be calculated based
     * on the target region count.
     * @return target region count after normalize done
     */
    @Override
    public int getNormalizerTargetRegionCount() {
      return getOrDefault(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer::valueOf,
        Integer.valueOf(-1));
    }

    /**
     * Check if there is the target region size. If so, the normalize plan will be calculated based
     * on the target region size.
     * @return target region size after normalize done
     */
    @Override
    public long getNormalizerTargetRegionSize() {
      return getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1));
    }

    /**
     * Setting the table normalization enable flag.
     *
     * @param isEnable True if enable normalization.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) {
      return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Setting the target region count of table normalization .
     * @param regionCount the target region count.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionCount(final int regionCount) {
      return setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer.toString(regionCount));
    }

    /**
     * Setting the target region size of table normalization.
     * @param regionSize the target region size.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long regionSize) {
      return setValue(NORMALIZER_TARGET_REGION_SIZE_KEY, Long.toString(regionSize));
    }

    /**
     * Sets the {@link Durability} setting for the table. This defaults to
     * Durability.USE_DEFAULT.
     *
     * @param durability enum value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setDurability(Durability durability) {
      return setValue(DURABILITY_KEY, durability.name());
    }

    /**
     * Returns the durability setting for the table.
     *
     * @return durability setting for the table.
     */
    @Override
    public Durability getDurability() {
      return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABLITY);
    }

    /**
     * Get the name of the table
     *
     * @return TableName
     */
    @Override
    public TableName getTableName() {
      return name;
    }

    /**
     * This sets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     *
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) {
      return setValue(SPLIT_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     *
     * @return the class name of the region split policy for this table. If this
     * returns null, the default split policy is used.
     */
    @Override
    public String getRegionSplitPolicyClassName() {
      return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Returns the maximum size up to which a region can grow to after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region.
     *
     * @return max hregion size for table, -1 if not set.
     *
     * @see #setMaxFileSize(long)
     */
    @Override
    public long getMaxFileSize() {
      return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Sets the maximum size up to which a region can grow to after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region, i.e. If the biggest store file
     * grows beyond the maxFileSize, then the region split is triggered. This
     * defaults to a value of 256 MB.
     * <p>
     * This is not an absolute value and might vary. Assume that a single row
     * exceeds the maxFileSize then the storeFileSize will be greater than
     * maxFileSize since a single row cannot be split across multiple regions
     * </p>
     *
     * @param maxFileSize The maximum file size that a store file can grow to
     * before a split is triggered.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) {
      return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    }

    /**
     * Returns the size of the memstore after which a flush to filesystem is
     * triggered.
     *
     * @return memory cache flush size for each hregion, -1 if not set.
     *
     * @see #setMemStoreFlushSize(long)
     */
    @Override
    public long getMemStoreFlushSize() {
      return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Represents the maximum size of the memstore after which the contents of
     * the memstore are flushed to the filesystem. This defaults to a size of 64
     * MB.
     *
     * @param memstoreFlushSize memory cache flush size for each hregion
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
      return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    }

    /**
     * This sets the class associated with the flush policy which determines
     * the stores need to be flushed when flushing a region. The
     * class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.FlushPolicy.
     *
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) {
      return setValue(FLUSH_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the flush policy which determines the
     * stores need to be flushed when flushing a region. The class used by
     * default is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy.
     *
     * @return the class name of the flush policy for this table. If this
     * returns null, the default flush policy is used.
     */
    @Override
    public String getFlushPolicyClassName() {
      return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Adds a column family. For the updating purpose please use
     * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead.
     *
     * @param family to add.
1060 * @return the modifyable TD 1061 */ 1062 public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) { 1063 if (family.getName() == null || family.getName().length <= 0) { 1064 throw new IllegalArgumentException("Family name cannot be null or empty"); 1065 } 1066 if (hasColumnFamily(family.getName())) { 1067 throw new IllegalArgumentException("Family '" 1068 + family.getNameAsString() + "' already exists so cannot be added"); 1069 } 1070 return putColumnFamily(family); 1071 } 1072 1073 /** 1074 * Modifies the existing column family. 1075 * 1076 * @param family to update 1077 * @return this (for chained invocation) 1078 */ 1079 public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) { 1080 if (family.getName() == null || family.getName().length <= 0) { 1081 throw new IllegalArgumentException("Family name cannot be null or empty"); 1082 } 1083 if (!hasColumnFamily(family.getName())) { 1084 throw new IllegalArgumentException("Column family '" + family.getNameAsString() 1085 + "' does not exist"); 1086 } 1087 return putColumnFamily(family); 1088 } 1089 1090 private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) { 1091 families.put(family.getName(), family); 1092 return this; 1093 } 1094 1095 /** 1096 * Checks to see if this table contains the given column family 1097 * 1098 * @param familyName Family name or column name. 1099 * @return true if the table contains the specified family name 1100 */ 1101 @Override 1102 public boolean hasColumnFamily(final byte[] familyName) { 1103 return families.containsKey(familyName); 1104 } 1105 1106 /** 1107 * @return Name of this table and then a map of all of the column family descriptors. 
1108 */ 1109 @Override 1110 public String toString() { 1111 StringBuilder s = new StringBuilder(); 1112 s.append('\'').append(Bytes.toString(name.getName())).append('\''); 1113 s.append(getValues(true)); 1114 families.values().forEach(f -> s.append(", ").append(f)); 1115 return s.toString(); 1116 } 1117 1118 /** 1119 * @return Name of this table and then a map of all of the column family 1120 * descriptors (with only the non-default column family attributes) 1121 */ 1122 @Override 1123 public String toStringCustomizedValues() { 1124 StringBuilder s = new StringBuilder(); 1125 s.append('\'').append(Bytes.toString(name.getName())).append('\''); 1126 s.append(getValues(false)); 1127 families.values().forEach(hcd -> s.append(", ").append(hcd.toStringCustomizedValues())); 1128 return s.toString(); 1129 } 1130 1131 /** 1132 * @return map of all table attributes formatted into string. 1133 */ 1134 public String toStringTableAttributes() { 1135 return getValues(true).toString(); 1136 } 1137 1138 private StringBuilder getValues(boolean printDefaults) { 1139 StringBuilder s = new StringBuilder(); 1140 1141 // step 1: set partitioning and pruning 1142 Set<Bytes> reservedKeys = new TreeSet<>(); 1143 Set<Bytes> userKeys = new TreeSet<>(); 1144 for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) { 1145 if (entry.getKey() == null || entry.getKey().get() == null) { 1146 continue; 1147 } 1148 String key = Bytes.toString(entry.getKey().get()); 1149 // in this section, print out reserved keywords + coprocessor info 1150 if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) { 1151 userKeys.add(entry.getKey()); 1152 continue; 1153 } 1154 // only print out IS_META if true 1155 String value = Bytes.toString(entry.getValue().get()); 1156 if (key.equalsIgnoreCase(IS_META)) { 1157 if (Boolean.valueOf(value) == false) { 1158 continue; 1159 } 1160 } 1161 // see if a reserved key is a default value. 
may not want to print it out 1162 if (printDefaults 1163 || !DEFAULT_VALUES.containsKey(key) 1164 || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { 1165 reservedKeys.add(entry.getKey()); 1166 } 1167 } 1168 1169 // early exit optimization 1170 boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty(); 1171 if (!hasAttributes) { 1172 return s; 1173 } 1174 1175 s.append(", {"); 1176 // step 2: printing attributes 1177 if (hasAttributes) { 1178 s.append("TABLE_ATTRIBUTES => {"); 1179 1180 // print all reserved keys first 1181 boolean printCommaForAttr = false; 1182 for (Bytes k : reservedKeys) { 1183 String key = Bytes.toString(k.get()); 1184 String value = Bytes.toStringBinary(values.get(k).get()); 1185 if (printCommaForAttr) { 1186 s.append(", "); 1187 } 1188 printCommaForAttr = true; 1189 s.append(key); 1190 s.append(" => "); 1191 s.append('\'').append(value).append('\''); 1192 } 1193 1194 if (!userKeys.isEmpty()) { 1195 // print all non-reserved as a separate subset 1196 if (printCommaForAttr) { 1197 s.append(", "); 1198 } 1199 s.append(HConstants.METADATA).append(" => "); 1200 s.append("{"); 1201 boolean printCommaForCfg = false; 1202 for (Bytes k : userKeys) { 1203 String key = Bytes.toString(k.get()); 1204 String value = Bytes.toStringBinary(values.get(k).get()); 1205 if (printCommaForCfg) { 1206 s.append(", "); 1207 } 1208 printCommaForCfg = true; 1209 s.append('\'').append(key).append('\''); 1210 s.append(" => "); 1211 s.append('\'').append(value).append('\''); 1212 } 1213 s.append("}"); 1214 } 1215 1216 s.append("}"); 1217 } 1218 1219 s.append("}"); // end METHOD 1220 return s; 1221 } 1222 1223 /** 1224 * Compare the contents of the descriptor with another one passed as a 1225 * parameter. Checks if the obj passed is an instance of ModifyableTableDescriptor, 1226 * if yes then the contents of the descriptors are compared. 
1227 * 1228 * @param obj The object to compare 1229 * @return true if the contents of the the two descriptors exactly match 1230 * 1231 * @see java.lang.Object#equals(java.lang.Object) 1232 */ 1233 @Override 1234 public boolean equals(Object obj) { 1235 if (this == obj) { 1236 return true; 1237 } 1238 if (obj instanceof ModifyableTableDescriptor) { 1239 return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0; 1240 } 1241 return false; 1242 } 1243 1244 /** 1245 * @return hash code 1246 */ 1247 @Override 1248 public int hashCode() { 1249 int result = this.name.hashCode(); 1250 if (this.families.size() > 0) { 1251 for (ColumnFamilyDescriptor e : this.families.values()) { 1252 result ^= e.hashCode(); 1253 } 1254 } 1255 result ^= values.hashCode(); 1256 return result; 1257 } 1258 1259 // Comparable 1260 /** 1261 * Compares the descriptor with another descriptor which is passed as a 1262 * parameter. This compares the content of the two descriptors and not the 1263 * reference. 1264 * 1265 * @param other The MTD to compare 1266 * @return 0 if the contents of the descriptors are exactly matching, 1 if 1267 * there is a mismatch in the contents 1268 */ 1269 @Override 1270 public int compareTo(final ModifyableTableDescriptor other) { 1271 return TableDescriptor.COMPARATOR.compare(this, other); 1272 } 1273 1274 @Override 1275 public ColumnFamilyDescriptor[] getColumnFamilies() { 1276 return families.values().toArray(new ColumnFamilyDescriptor[families.size()]); 1277 } 1278 1279 /** 1280 * Returns the configured replicas per region 1281 */ 1282 @Override 1283 public int getRegionReplication() { 1284 return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION); 1285 } 1286 1287 /** 1288 * Sets the number of replicas per region. 
1289 * 1290 * @param regionReplication the replication factor per region 1291 * @return the modifyable TD 1292 */ 1293 public ModifyableTableDescriptor setRegionReplication(int regionReplication) { 1294 return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication)); 1295 } 1296 1297 /** 1298 * @return true if the read-replicas memstore replication is enabled. 1299 */ 1300 @Override 1301 public boolean hasRegionMemStoreReplication() { 1302 return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, DEFAULT_REGION_MEMSTORE_REPLICATION); 1303 } 1304 1305 /** 1306 * Enable or Disable the memstore replication from the primary region to the 1307 * replicas. The replication will be used only for meta operations (e.g. 1308 * flush, compaction, ...) 1309 * 1310 * @param memstoreReplication true if the new data written to the primary 1311 * region should be replicated. false if the secondaries can tollerate to 1312 * have new data only when the primary flushes the memstore. 1313 * @return the modifyable TD 1314 */ 1315 public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) { 1316 setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication)); 1317 // If the memstore replication is setup, we do not have to wait for observing a flush event 1318 // from primary before starting to serve reads, because gaps from replication is not applicable 1319 return setValue(REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, 1320 Boolean.toString(memstoreReplication)); 1321 } 1322 1323 public ModifyableTableDescriptor setPriority(int priority) { 1324 return setValue(PRIORITY_KEY, Integer.toString(priority)); 1325 } 1326 1327 @Override 1328 public int getPriority() { 1329 return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY); 1330 } 1331 1332 /** 1333 * Returns all the column family names of the current table. The map of 1334 * TableDescriptor contains mapping of family name to ColumnFamilyDescriptor. 
1335 * This returns all the keys of the family map which represents the column 1336 * family names of the table. 1337 * 1338 * @return Immutable sorted set of the keys of the families. 1339 */ 1340 @Override 1341 public Set<byte[]> getColumnFamilyNames() { 1342 return Collections.unmodifiableSet(this.families.keySet()); 1343 } 1344 1345 /** 1346 * Returns the ColumnFamilyDescriptor for a specific column family with name as 1347 * specified by the parameter column. 1348 * 1349 * @param column Column family name 1350 * @return Column descriptor for the passed family name or the family on 1351 * passed in column. 1352 */ 1353 @Override 1354 public ColumnFamilyDescriptor getColumnFamily(final byte[] column) { 1355 return this.families.get(column); 1356 } 1357 1358 /** 1359 * Removes the ColumnFamilyDescriptor with name specified by the parameter column 1360 * from the table descriptor 1361 * 1362 * @param column Name of the column family to be removed. 1363 * @return Column descriptor for the passed family name or the family on 1364 * passed in column. 1365 */ 1366 public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) { 1367 return this.families.remove(column); 1368 } 1369 1370 /** 1371 * Add a table coprocessor to this table. The coprocessor type must be 1372 * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't 1373 * check if the class can be loaded or not. Whether a coprocessor is 1374 * loadable or not will be determined when a region is opened. 1375 * 1376 * @param className Full class name. 1377 * @throws IOException 1378 * @return the modifyable TD 1379 */ 1380 public ModifyableTableDescriptor setCoprocessor(String className) throws IOException { 1381 return setCoprocessor( 1382 CoprocessorDescriptorBuilder.newBuilder(className).setPriority(Coprocessor.PRIORITY_USER) 1383 .build()); 1384 } 1385 1386 /** 1387 * Add a table coprocessor to this table. 
The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @param cp the coprocessor descriptor to attach
     * @throws IOException any illegal parameter key/value, a negative priority,
     * or a coprocessor with the same class name already attached
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp)
            throws IOException {
      checkHasCoprocessor(cp.getClassName());
      if (cp.getPriority() < 0) {
        throw new IOException("Priority must be bigger than or equal with zero, current:"
          + cp.getPriority());
      }
      // Validate parameter kvs and then add key/values to kvString.
      StringBuilder kvString = new StringBuilder();
      for (Map.Entry<String, String> e : cp.getProperties().entrySet()) {
        if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
          throw new IOException("Illegal parameter key = " + e.getKey());
        }
        if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
          throw new IOException("Illegal parameter (" + e.getKey()
            + ") value = " + e.getValue());
        }
        // comma-separate successive key=value pairs
        if (kvString.length() != 0) {
          kvString.append(',');
        }
        kvString.append(e.getKey());
        kvString.append('=');
        kvString.append(e.getValue());
      }

      // Encoded spec form: <jar path>|<class name>|<priority>|<k=v,k=v,...>
      // (jar path may be empty when the class is on the classpath)
      String value = cp.getJarPath().orElse("")
        + "|" + cp.getClassName() + "|" + Integer.toString(cp.getPriority()) + "|"
        + kvString.toString();
      return setCoprocessorToMap(value);
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @param specStr The Coprocessor specification all in in one String
     * @throws IOException if a coprocessor with the same class name is already attached
     * @return the modifyable TD
     * @deprecated used by HTableDescriptor and admin.rb.
     * As of release 2.0.0, this will be removed in HBase 3.0.0.
     */
    @Deprecated
    public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr)
            throws IOException {
      // Parse the legacy "<jar>|<class>|<priority>|<k=v,...>" spec up front so
      // a malformed string is rejected before it is stored.
      CoprocessorDescriptor cpDesc = toCoprocessorDescriptor(specStr).orElseThrow(
        () -> new IllegalArgumentException(
          "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr));
      checkHasCoprocessor(cpDesc.getClassName());
      return setCoprocessorToMap(specStr);
    }

    // Throws if a coprocessor with the given class name is already attached.
    private void checkHasCoprocessor(final String className) throws IOException {
      if (hasCoprocessor(className)) {
        throw new IOException("Coprocessor " + className + " already exists.");
      }
    }

    /**
     * Add coprocessor to values Map
     * @param specStr The Coprocessor specification all in in one String
     * @return Returns <code>this</code>
     */
    private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) {
      if (specStr == null) {
        return this;
      }
      // generate a coprocessor key of the form "coprocessor$<N>", where N is
      // one larger than the highest coprocessor index already in the map
      int maxCoprocessorNumber = 0;
      Matcher keyMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
      }
      maxCoprocessorNumber++;
      String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
      return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
    }

    /**
     * Check if the table has an attached co-processor represented by the name
     * className
     *
     * @param classNameToMatch - Class name of the
co-processor
     * @return true of the table has a co-processor className
     */
    @Override
    public boolean hasCoprocessor(String classNameToMatch) {
      return getCoprocessorDescriptors().stream().anyMatch(cp -> cp.getClassName()
        .equals(classNameToMatch));
    }

    /**
     * Return the list of attached co-processor represented by their name
     * className
     *
     * @return The list of co-processors classNames
     */
    @Override
    public List<CoprocessorDescriptor> getCoprocessorDescriptors() {
      List<CoprocessorDescriptor> result = new ArrayList<>();
      // Every value whose key matches "coprocessor$<N>" holds an encoded
      // coprocessor spec; specs that fail to parse are silently skipped.
      for (Map.Entry<Bytes, Bytes> e: getValues().entrySet()) {
        String key = Bytes.toString(e.getKey().get()).trim();
        if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) {
          toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim())
            .ifPresent(result::add);
        }
      }
      return result;
    }

    /**
     * Remove a coprocessor from those set on the table
     *
     * @param className Class name of the co-processor
     */
    public void removeCoprocessor(String className) {
      Bytes match = null;
      Matcher keyMatcher;
      Matcher valueMatcher;
      // Scan for the "coprocessor$<N>" entry whose encoded spec names the
      // given class; only the first match is removed.
      for (Map.Entry<Bytes, Bytes> e : this.values
          .entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
          .getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
          .toString(e.getValue().get()));
        if (!valueMatcher.matches()) {
          continue;
        }
        // get className and compare
        String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
        // remove the CP if it is present
        if (clazz.equals(className.trim())) {
          match = e.getKey();
          break;
        }
      }
      // if we found a match, remove it
      if (match != null) {
        ModifyableTableDescriptor.this.removeValue(match);
      }
    }

    /**
     * @deprecated since 2.0.0 and
will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    @Deprecated
    public ModifyableTableDescriptor setOwner(User owner) {
      return setOwnerString(owner != null ? owner.getShortName() : null);
    }

    /**
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    // used by admin.rb:alter(table_name,*args) to update owner.
    @Deprecated
    public ModifyableTableDescriptor setOwnerString(String ownerString) {
      return setValue(OWNER_KEY, ownerString);
    }

    /**
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    @Override
    @Deprecated
    public String getOwnerString() {
      // Note that every table should have an owner (i.e. should have OWNER_KEY set).
      // hbase:meta should return system user as owner, not null (see
      // MasterFileSystem.java:bootstrap()).
      return getOrDefault(OWNER_KEY, Function.identity(), null);
    }

    /**
     * @return the bytes in pb format
     */
    private byte[] toByteArray() {
      return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
    }

    /**
     * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance
     * with pb magic prefix
     * @return An instance of {@link ModifyableTableDescriptor} made from
     * <code>bytes</code>
     * @throws DeserializationException if the bytes lack the pb magic prefix
     * or cannot be parsed as a TableSchema
     * @see #toByteArray()
     */
    private static TableDescriptor parseFrom(final byte[] bytes)
            throws DeserializationException {
      if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
        throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor");
      }
      int pblen = ProtobufUtil.lengthOfPBMagic();
      HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
      try {
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        return ProtobufUtil.toTableDescriptor(builder.build());
      } catch (IOException e) {
        // Wrap parse failures so callers deal with a single exception type.
        throw new DeserializationException(e);
      }
    }

    @Override
    public int getColumnFamilyCount() {
      return families.size();
    }
  }

  /**
   * Parses a coprocessor spec string of the form
   * {@code <jar path>|<class name>|<priority>|<key=value,key=value,...>} into
   * a CoprocessorDescriptor. Returns an empty Optional when the spec does not
   * match the expected pattern or names no class.
   */
  private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) {
    Matcher matcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    if (matcher.matches()) {
      // jar file path can be empty if the cp class can be loaded
      // from class loader.
      String path = matcher.group(1).trim().isEmpty() ?
        null : matcher.group(1).trim();
      String className = matcher.group(2).trim();
      if (className.isEmpty()) {
        return Optional.empty();
      }
      String priorityStr = matcher.group(3).trim();
      // an absent priority defaults to PRIORITY_USER
      int priority = priorityStr.isEmpty() ?
        Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr);
      String cfgSpec = null;
      try {
        cfgSpec = matcher.group(4);
      } catch (IndexOutOfBoundsException ex) {
        // ignore: older spec strings carry no configuration group at all
      }
      Map<String, String> ourConf = new TreeMap<>();
      if (cfgSpec != null && !cfgSpec.trim().equals("|")) {
        // drop the leading '|' separator, then collect each key=value pair
        cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1);
        Matcher m = CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec);
        while (m.find()) {
          ourConf.put(m.group(1), m.group(2));
        }
      }
      return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className)
        .setJarPath(path)
        .setPriority(priority)
        .setProperties(ourConf)
        .build());
    }
    return Optional.empty();
  }
}