/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.HBaseException;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PrettyPrinter;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

/**
 * Convenience class for composing an instance of {@link TableDescriptor}.
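 * <p>
 * A minimal usage sketch (the table and family names here are hypothetical):
 * </p>
 *
 * <pre>
 * TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
 *   .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
 *   .setDurability(Durability.SYNC_WAL)
 *   .build();
 * </pre>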
 * @since 2.0.0
 */
@InterfaceAudience.Public
public class TableDescriptorBuilder {
  public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class);
  @InterfaceAudience.Private
  public static final String SPLIT_POLICY = "SPLIT_POLICY";
  private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY));
  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes the maximum size
   * of the store file after which a region split occurs.
   */
  @InterfaceAudience.Private
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final Bytes MAX_FILESIZE_KEY = new Bytes(Bytes.toBytes(MAX_FILESIZE));

  @InterfaceAudience.Private
  public static final String OWNER = "OWNER";
  @InterfaceAudience.Private
  public static final Bytes OWNER_KEY = new Bytes(Bytes.toBytes(OWNER));

  /**
   * Used by the REST interface to access this metadata attribute which denotes if the table is
   * read-only.
   */
  @InterfaceAudience.Private
  public static final String READONLY = "READONLY";
  private static final Bytes READONLY_KEY = new Bytes(Bytes.toBytes(READONLY));

  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes if the table is
   * compaction enabled.
   */
  @InterfaceAudience.Private
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final Bytes COMPACTION_ENABLED_KEY = new Bytes(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes if the table is
   * split enabled.
   */
  @InterfaceAudience.Private
  public static final String SPLIT_ENABLED = "SPLIT_ENABLED";
  private static final Bytes SPLIT_ENABLED_KEY = new Bytes(Bytes.toBytes(SPLIT_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes if the table is
   * merge enabled.
   */
  @InterfaceAudience.Private
  public static final String MERGE_ENABLED = "MERGE_ENABLED";
  private static final Bytes MERGE_ENABLED_KEY = new Bytes(Bytes.toBytes(MERGE_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata attribute which represents the maximum
   * size of the memstore after which its contents are flushed onto the disk.
   */
  @InterfaceAudience.Private
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final Bytes MEMSTORE_FLUSHSIZE_KEY = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  @InterfaceAudience.Private
  public static final String FLUSH_POLICY = "FLUSH_POLICY";
  private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY));
  /**
   * Used by the REST interface to access this metadata attribute which denotes if the table is a
   * catalog table, i.e. <code>hbase:meta</code>.
   */
  @InterfaceAudience.Private
  public static final String IS_META = "IS_META";
  private static final Bytes IS_META_KEY = new Bytes(Bytes.toBytes(IS_META));

  /**
   * {@link Durability} setting for the table.
   */
  @InterfaceAudience.Private
  public static final String DURABILITY = "DURABILITY";
  private static final Bytes DURABILITY_KEY = new Bytes(Bytes.toBytes("DURABILITY"));

  /**
   * The number of region replicas for the table.
   */
  @InterfaceAudience.Private
  public static final String REGION_REPLICATION = "REGION_REPLICATION";
  private static final Bytes REGION_REPLICATION_KEY = new Bytes(Bytes.toBytes(REGION_REPLICATION));

  /**
   * The flag to indicate whether or not the memstore should be replicated for read-replicas
   * (CONSISTENCY => TIMELINE).
   */
  @InterfaceAudience.Private
  public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
  private static final Bytes REGION_MEMSTORE_REPLICATION_KEY =
    new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));

  /**
   * Used by shell/rest interface to access this metadata attribute which denotes if the table
   * should be handled by the region normalizer.
   */
  @InterfaceAudience.Private
  public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
  private static final Bytes NORMALIZATION_ENABLED_KEY =
    new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));
  @InterfaceAudience.Private
  @Deprecated
  public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_COUNT = "NORMALIZER_TARGET_REGION_COUNT";
  private static final Bytes NORMALIZER_TARGET_REGION_COUNT_KEY =
    new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT));

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_SIZE_MB = "NORMALIZER_TARGET_REGION_SIZE_MB";
  private static final Bytes NORMALIZER_TARGET_REGION_SIZE_MB_KEY =
    new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE_MB));
  // TODO: Keeping backward compatibility with the HBASE-25651 change. Can be removed in a later
  // version.
  @InterfaceAudience.Private
  @Deprecated
  public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE";
  @Deprecated
  private static final Bytes NORMALIZER_TARGET_REGION_SIZE_KEY =
    new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE));

  /**
   * Default durability for HTD is USE_DEFAULT, which defaults to the HBase-global default value.
   */
  private static final Durability DEFAULT_DURABILITY = Durability.USE_DEFAULT;

  @InterfaceAudience.Private
  public static final String PRIORITY = "PRIORITY";
  private static final Bytes PRIORITY_KEY = new Bytes(Bytes.toBytes(PRIORITY));

  private static final Bytes RSGROUP_KEY =
    new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP));

  /**
   * Relative priority of the table used for rpc scheduling.
   */
  private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS;

  /**
   * Constant that denotes whether the table is READONLY by default and is false.
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default.
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes whether the table is split enabled by default.
   */
  public static final boolean DEFAULT_SPLIT_ENABLED = true;

  /**
   * Constant that denotes whether the table is merge enabled by default.
   */
  public static final boolean DEFAULT_MERGE_ENABLED = true;

  /**
   * Constant that denotes the maximum default size of the memstore in bytes after which the
   * contents are flushed to the store files.
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;

  private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
  private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();

  static {
    DEFAULT_VALUES.put(MAX_FILESIZE, String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE, String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABILITY.name()); // use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
    DEFAULT_VALUES.keySet().stream().map(s -> new Bytes(Bytes.toBytes(s)))
      .forEach(RESERVED_KEYWORDS::add);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }

  public static PrettyPrinter.Unit getUnit(String key) {
    switch (key) {
      case MAX_FILESIZE:
      case MEMSTORE_FLUSHSIZE:
        return PrettyPrinter.Unit.BYTE;
      default:
        return PrettyPrinter.Unit.NONE;
    }
  }
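
  // Rendering note: getUnit(MAX_FILESIZE) and getUnit(MEMSTORE_FLUSHSIZE) return
  // Unit.BYTE, so those attribute values are pretty-printed as human-readable byte
  // quantities in toString output; every other key falls through to Unit.NONE.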

  @InterfaceAudience.Private
  public final static String NAMESPACE_FAMILY_INFO = "info";
  @InterfaceAudience.Private
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  @InterfaceAudience.Private
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /**
   * <pre>
   * Pattern that matches a coprocessor specification. Form is:
   * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]}
   * where arguments are {@code <KEY> '=' <VALUE> [,...]}
   * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
   * </pre>
   */
  private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
    Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");

  private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
  private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
  private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile("("
    + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" + CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
  private static final Pattern CP_HTD_ATTR_KEY_PATTERN =
    Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);

  /**
   * Table descriptor for namespace table
   */
  // TODO: We used to set CacheDataInL1 for the NS table. When we have BucketCache in file mode,
  // the NS data goes to file-mode BC only. Test how that affects the system. If the impact is too
  // great, we have to rethink adding back setCacheDataInL1 for the NS table.
  // Note: namespace schema is hard-coded. In hbase3, namespace goes away; it is integrated into
  // hbase:meta.
  public static final TableDescriptor NAMESPACE_TABLEDESC =
    TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES)
        // Ten is an arbitrary number. Keep versions to help debugging.
        .setMaxVersions(10).setInMemory(true).setBlocksize(8 * 1024)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL).build())
      .build();

  private final ModifyableTableDescriptor desc;

  /** Returns the given descriptor serialized with pb, with the pb magic prefix. */
  public static byte[] toByteArray(TableDescriptor desc) {
    if (desc instanceof ModifyableTableDescriptor) {
      return ((ModifyableTableDescriptor) desc).toByteArray();
    }
    return new ModifyableTableDescriptor(desc).toByteArray();
  }

  /**
   * The input should be created by {@link #toByteArray}.
   * @param pbBytes A pb serialized TableDescriptor instance with pb magic prefix
   * @return A deserialized {@link TableDescriptor} instance
   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if an error occurred
   */
  public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException {
    return ModifyableTableDescriptor.parseFrom(pbBytes);
  }

  public static TableDescriptorBuilder newBuilder(final TableName name) {
    return new TableDescriptorBuilder(name);
  }

  public static TableDescriptor copy(TableDescriptor desc) {
    return new ModifyableTableDescriptor(desc);
  }

  public static TableDescriptor copy(TableName name, TableDescriptor desc) {
    return new ModifyableTableDescriptor(name, desc);
  }

  /**
   * Copy all values, families, and name from the input.
   * @param desc The descriptor to copy
   * @return A clone of input
   */
  public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) {
    return new TableDescriptorBuilder(desc);
  }

  private TableDescriptorBuilder(final TableName name) {
    this.desc = new ModifyableTableDescriptor(name);
  }

  private TableDescriptorBuilder(final TableDescriptor desc) {
    this.desc = new ModifyableTableDescriptor(desc);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
   *             {@link #setCoprocessor(String)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className) throws IOException {
    return addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
   *             {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className, Path jarFilePath, int priority,
    final Map<String, String> kvs) throws IOException {
    desc.setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className)
      .setJarPath(jarFilePath == null ? null : jarFilePath.toString()).setPriority(priority)
      .setProperties(kvs == null ? Collections.emptyMap() : kvs).build());
    return this;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
   *             {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessorWithSpec(final String specStr) throws IOException {
    desc.setCoprocessorWithSpec(specStr);
    return this;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
   *             {@link #setColumnFamily(ColumnFamilyDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(family);
    return this;
  }

  public TableDescriptorBuilder setCoprocessor(String className) throws IOException {
    return setCoprocessor(CoprocessorDescriptorBuilder.of(className));
  }

  public TableDescriptorBuilder setCoprocessor(CoprocessorDescriptor cpDesc) throws IOException {
    desc.setCoprocessor(Objects.requireNonNull(cpDesc));
    return this;
  }

  public TableDescriptorBuilder setCoprocessors(Collection<CoprocessorDescriptor> cpDescs)
    throws IOException {
    for (CoprocessorDescriptor cpDesc : cpDescs) {
      desc.setCoprocessor(cpDesc);
    }
    return this;
  }

  public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder
    setColumnFamilies(final Collection<ColumnFamilyDescriptor> families) {
    families.forEach(desc::setColumnFamily);
    return this;
  }

  public TableDescriptorBuilder modifyColumnFamily(final ColumnFamilyDescriptor family) {
    desc.modifyColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder removeValue(final String key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(Bytes key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(byte[] key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeColumnFamily(final byte[] name) {
    desc.removeColumnFamily(name);
    return this;
  }

  public TableDescriptorBuilder removeCoprocessor(String className) {
    desc.removeCoprocessor(className);
    return this;
  }

  public TableDescriptorBuilder setCompactionEnabled(final boolean isEnable) {
    desc.setCompactionEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setSplitEnabled(final boolean isEnable) {
    desc.setSplitEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setMergeEnabled(final boolean isEnable) {
    desc.setMergeEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setDurability(Durability durability) {
    desc.setDurability(durability);
    return this;
  }

  public TableDescriptorBuilder setFlushPolicyClassName(String clazz) {
    desc.setFlushPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(long maxFileSize) {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(String maxFileSize) throws HBaseException {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) {
    desc.setMemStoreFlushSize(memstoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(String memStoreFlushSize)
    throws HBaseException {
    desc.setMemStoreFlushSize(memStoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) {
    desc.setNormalizerTargetRegionCount(regionCount);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionSize(final long regionSize) {
    desc.setNormalizerTargetRegionSize(regionSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) {
    desc.setNormalizationEnabled(isEnable);
    return this;
  }

  /**
   * Set the table owner.
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwner(User owner) {
    desc.setOwner(owner);
    return this;
  }

  /**
   * Set the table owner.
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwnerString(String ownerString) {
    desc.setOwnerString(ownerString);
    return this;
  }

  public TableDescriptorBuilder setPriority(int priority) {
    desc.setPriority(priority);
    return this;
  }

  public TableDescriptorBuilder setReadOnly(final boolean readOnly) {
    desc.setReadOnly(readOnly);
    return this;
  }

  public TableDescriptorBuilder setRegionMemStoreReplication(boolean memstoreReplication) {
    desc.setRegionMemStoreReplication(memstoreReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionReplication(int regionReplication) {
    desc.setRegionReplication(regionReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionSplitPolicyClassName(String clazz) {
    desc.setRegionSplitPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setValue(final String key, final String value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final Bytes key, final Bytes value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final byte[] key, final byte[] value) {
    desc.setValue(key, value);
    return this;
  }

  public String getValue(String key) {
    return desc.getValue(key);
  }

  /**
   * Sets the replication scope for all and only the column families already present in the
   * builder. Columns added later won't be backfilled with the replication scope.
   * @param scope replication scope
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setReplicationScope(int scope) {
    Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    newFamilies.putAll(desc.families);
    newFamilies.forEach((cf, cfDesc) -> {
      desc.removeColumnFamily(cf);
      desc
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope).build());
    });
    return this;
  }
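
  // Sketch: enable cluster replication for every family currently in the builder
  // ("td" is a hypothetical existing descriptor); families added afterwards keep
  // their own scope and must be set individually.
  //
  //   TableDescriptorBuilder.newBuilder(td)
  //     .setReplicationScope(HConstants.REPLICATION_SCOPE_GLOBAL)
  //     .build();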

  /**
   * Set the RSGroup for this table; the specified RSGroup must exist before the table is created
   * or modified.
   * @param group rsgroup name
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setRegionServerGroup(String group) {
    desc.setValue(RSGROUP_KEY, group);
    return this;
  }

  public TableDescriptor build() {
    return new ModifyableTableDescriptor(desc);
  }

  /**
   * TODO: make this private after removing the HTableDescriptor
   */
  @InterfaceAudience.Private
  public static class ModifyableTableDescriptor
    implements TableDescriptor, Comparable<ModifyableTableDescriptor> {

    private final TableName name;

    /**
     * A map which holds the metadata information of the table. This metadata includes values like
     * IS_META, SPLIT_POLICY, MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
     */
    private final Map<Bytes, Bytes> values = new HashMap<>();

    /**
     * Maps column family name to the respective FamilyDescriptors
     */
    private final Map<byte[], ColumnFamilyDescriptor> families =
      new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);

    /**
     * Construct a table descriptor specifying a TableName object
     * @param name Table name. TODO: make this private after removing the HTableDescriptor
     */
    @InterfaceAudience.Private
    public ModifyableTableDescriptor(final TableName name) {
      this(name, Collections.EMPTY_LIST, Collections.EMPTY_MAP);
    }

    private ModifyableTableDescriptor(final TableDescriptor desc) {
      this(desc.getTableName(), Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    /**
     * Construct a table descriptor by cloning the descriptor passed as a parameter.
     * <p>
     * Makes a deep copy of the supplied descriptor.
     * @param name The new name
     * @param desc The descriptor. TODO: make this private after removing the HTableDescriptor
     */
    @InterfaceAudience.Private
    @Deprecated // only used by HTableDescriptor. remove this method if HTD is removed
    public ModifyableTableDescriptor(final TableName name, final TableDescriptor desc) {
      this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    private ModifyableTableDescriptor(final TableName name,
      final Collection<ColumnFamilyDescriptor> families, Map<Bytes, Bytes> values) {
      this.name = name;
      families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c)));
      this.values.putAll(values);
      this.values.put(IS_META_KEY,
        new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME)))));
    }

    /**
     * Checks if this table is the <code>hbase:meta</code> region.
     * @return true if this table is the <code>hbase:meta</code> region
     */
    @Override
    public boolean isMetaRegion() {
      return getOrDefault(IS_META_KEY, Boolean::valueOf, false);
    }

    /**
     * Checks if the table is a <code>hbase:meta</code> table.
     * @return true if table is the <code>hbase:meta</code> region.
     */
    @Override
    public boolean isMetaTable() {
      return isMetaRegion();
    }

    @Override
    public Bytes getValue(Bytes key) {
      Bytes rval = values.get(key);
      return rval == null ? null : new Bytes(rval.copyBytes());
    }

    @Override
    public String getValue(String key) {
      Bytes rval = values.get(new Bytes(Bytes.toBytes(key)));
      return rval == null ? null : Bytes.toString(rval.get(), rval.getOffset(), rval.getLength());
    }

    @Override
    public byte[] getValue(byte[] key) {
      Bytes value = values.get(new Bytes(key));
      return value == null ? null : value.copyBytes();
    }

    private <T> T getOrDefault(Bytes key, Function<String, T> function, T defaultValue) {
      Bytes value = values.get(key);
      if (value == null) {
        return defaultValue;
      } else {
        return function.apply(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      }
    }

    /**
     * Getter for fetching an unmodifiable {@link #values} map.
     * @return unmodifiable map {@link #values}.
     * @see #values
     */
    @Override
    public Map<Bytes, Bytes> getValues() {
      // shallow pointer copy
      return Collections.unmodifiableMap(values);
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     * @param key   The key.
     * @param value The value. If null, removes the setting.
     * @return the modifyable TD
     * @see #values
     */
    public ModifyableTableDescriptor setValue(byte[] key, byte[] value) {
      return setValue(toBytesOrNull(key, v -> v), toBytesOrNull(value, v -> v));
    }

    public ModifyableTableDescriptor setValue(String key, String value) {
      return setValue(toBytesOrNull(key, Bytes::toBytes), toBytesOrNull(value, Bytes::toBytes));
    }

    /**
     * @param key   The key.
     * @param value The value. If null, removes the setting.
     */
    private ModifyableTableDescriptor setValue(final Bytes key, final String value) {
      return setValue(key, toBytesOrNull(value, Bytes::toBytes));
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     * @param key   The key.
     * @param value The value. If null, removes the setting.
     */
    public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
      if (value == null || value.getLength() == 0) {
        values.remove(key);
      } else {
        values.put(key, value);
      }
      return this;
    }

    private static <T> Bytes toBytesOrNull(T t, Function<T, byte[]> f) {
      if (t == null) {
        return null;
      } else {
        return new Bytes(f.apply(t));
      }
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     * @param key Key whose key and value we're to remove from TableDescriptor parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final String key) {
      return setValue(key, (String) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     * @param key Key whose key and value we're to remove from TableDescriptor parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(Bytes key) {
      return setValue(key, (Bytes) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     * @param key Key whose key and value we're to remove from TableDescriptor parameters
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor removeValue(final byte[] key) {
      return removeValue(new Bytes(key));
    }
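
    // Note: setValue(Bytes, Bytes) treats a null or zero-length value as a delete,
    // so the removeValue overloads above are just setValue(key, null) in disguise.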

    /**
     * Check if the readOnly flag of the table is set. If the readOnly flag is set then the
     * contents of the table can only be read from but not modified.
     * @return true if all columns in the table should be read only
     */
    @Override
    public boolean isReadOnly() {
      return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY);
    }

    /**
     * Setting the table as read only sets all the columns in the table as read only. By default
     * all tables are modifiable, but if the readOnly flag is set to true then the contents of the
     * table can only be read but not modified.
     * @param readOnly True if all of the columns in the table should be read only.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setReadOnly(final boolean readOnly) {
      return setValue(READONLY_KEY, Boolean.toString(readOnly));
    }

    /**
     * Check if the compaction enable flag of the table is true. If the flag is false then no
     * minor/major compactions will actually be performed.
     * @return true if table compaction enabled
     */
    @Override
    public boolean isCompactionEnabled() {
      return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED);
    }

    /**
     * Setting the table compaction enable flag.
     * @param isEnable True if enable compaction.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) {
      return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the split enable flag of the table is true. If the flag is false then no split
     * will be done.
     * @return true if table region split enabled
     */
    @Override
    public boolean isSplitEnabled() {
      return getOrDefault(SPLIT_ENABLED_KEY, Boolean::valueOf, DEFAULT_SPLIT_ENABLED);
    }

    /**
     * Setting the table region split enable flag.
     * @param isEnable True if enable region split.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) {
      return setValue(SPLIT_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the region merge enable flag of the table is true. If the flag is false then no
     * merge will be done.
     * @return true if table region merge enabled
     */
    @Override
    public boolean isMergeEnabled() {
      return getOrDefault(MERGE_ENABLED_KEY, Boolean::valueOf, DEFAULT_MERGE_ENABLED);
    }

    /**
     * Setting the table region merge enable flag.
     * @param isEnable True if enable region merge.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) {
      return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the normalization enable flag of the table is true. If the flag is false, the
     * region normalizer will not attempt to normalize this table.
     * @return true if region normalization is enabled for this table
     **/
    @Override
    public boolean isNormalizationEnabled() {
      return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, false);
    }

    /**
     * Check if there is a target region count. If so, the normalize plan will be calculated based
     * on the target region count.
     * @return target region count after normalize done
     */
    @Override
    public int getNormalizerTargetRegionCount() {
      return getOrDefault(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer::valueOf,
        Integer.valueOf(-1));
    }

    /**
     * Check if there is a target region size. If so, the normalize plan will be calculated based
     * on the target region size.
     * @return target region size after normalize done
     */
    @Override
    public long getNormalizerTargetRegionSize() {
      long targetRegionSize =
        getOrDefault(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long::valueOf, Long.valueOf(-1));
      return targetRegionSize == Long.valueOf(-1)
        ? getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1))
        : targetRegionSize;
    }

    /**
     * Setting the table normalization enable flag.
     * @param isEnable True if enable normalization.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) {
      return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Setting the target region count of table normalization.
     * @param regionCount the target region count.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionCount(final int regionCount) {
      return setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer.toString(regionCount));
    }

    /**
     * Setting the target region size of table normalization.
     * @param regionSize the target region size.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long regionSize) {
      return setValue(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long.toString(regionSize));
    }

    /**
     * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
     * @param durability enum value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setDurability(Durability durability) {
      return setValue(DURABILITY_KEY, durability.name());
    }

    /**
     * Returns the durability setting for the table.
     * @return durability setting for the table.
     */
    @Override
    public Durability getDurability() {
      return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABILITY);
    }

    /**
     * Get the name of the table.
     */
    @Override
    public TableName getTableName() {
      return name;
    }

    /**
     * This sets the class associated with the region split policy which determines when a region
     * split should occur. The class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) {
      return setValue(SPLIT_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the region split policy which determines when a region
     * split should occur. The class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     * @return the class name of the region split policy for this table. If this returns null, the
     *         default split policy is used.
     */
    @Override
    public String getRegionSplitPolicyClassName() {
      return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null);
    }
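
    // Sketch: pin a table to a fixed split policy by class name (here the stock
    // ConstantSizeRegionSplitPolicy); the class is only loaded when a region opens.
    //
    //   desc.setRegionSplitPolicyClassName(
    //     "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");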

    /**
     * Returns the maximum size up to which a region can grow, after which a region split is
     * triggered. The region size is represented by the size of the biggest store file in that
     * region.
     * @return max hregion size for table, -1 if not set.
     * @see #setMaxFileSize(long)
     */
    @Override
    public long getMaxFileSize() {
      return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Sets the maximum size up to which a region can grow, after which a region split is
     * triggered. The region size is represented by the size of the biggest store file in that
     * region, i.e. if the biggest store file grows beyond the maxFileSize, then the region split
     * is triggered. This defaults to {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
     * <p>
     * This is not an absolute value and might vary. Assume that a single row exceeds the
     * maxFileSize, then the storeFileSize will be greater than maxFileSize since a single row
     * cannot be split across multiple regions.
     * </p>
     * @param maxFileSize The maximum file size that a store file can grow to before a split is
     *                    triggered.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) {
      return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    }

    public ModifyableTableDescriptor setMaxFileSize(String maxFileSize) throws HBaseException {
      return setMaxFileSize(
        Long.parseLong(PrettyPrinter.valueOf(maxFileSize, PrettyPrinter.Unit.BYTE)));
    }

    /**
     * Returns the size of the memstore after which a flush to filesystem is triggered.
     * @return memory cache flush size for each hregion, -1 if not set.
     * @see #setMemStoreFlushSize(long)
     */
    @Override
    public long getMemStoreFlushSize() {
      return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Represents the maximum size of the memstore after which the contents of the memstore are
     * flushed to the filesystem. This defaults to 128 MB
     * ({@link #DEFAULT_MEMSTORE_FLUSH_SIZE}).
     * @param memstoreFlushSize memory cache flush size for each hregion
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
      return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    }

    public ModifyableTableDescriptor setMemStoreFlushSize(String memStoreFlushSize)
      throws HBaseException {
      return setMemStoreFlushSize(
        Long.parseLong(PrettyPrinter.valueOf(memStoreFlushSize, PrettyPrinter.Unit.BYTE)));
    }

    /**
     * This sets the class associated with the flush policy which determines the stores that need
     * to be flushed when flushing a region. The class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.FlushPolicy.
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) {
      return setValue(FLUSH_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the flush policy which determines the stores that need
     * to be flushed when flushing a region. The class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.FlushPolicy.
     * @return the class name of the flush policy for this table. If this returns null, the
     *         default flush policy is used.
     */
    @Override
    public String getFlushPolicyClassName() {
      return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null);
    }
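
    // Sketch: the String overloads above accept human-readable byte quantities via
    // PrettyPrinter (assuming PrettyPrinter's BYTE format, e.g. "128 MB"):
    //
    //   desc.setMaxFileSize("10 GB");
    //   desc.setMemStoreFlushSize("256 MB");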

    /**
     * Adds a column family. For the updating purpose please use
     * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead.
     * @param family to add.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      int flength = family.getName() == null ? 0 : family.getName().length;
      if (flength > Byte.MAX_VALUE) {
        throw new IllegalArgumentException(
          "The length of family name is bigger than " + Byte.MAX_VALUE);
      }
      if (hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException(
          "Family '" + family.getNameAsString() + "' already exists so cannot be added");
      }
      return putColumnFamily(family);
    }

    /**
     * Modifies the existing column family.
     * @param family to update
     * @return this (for chained invocation)
     */
    public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      if (!hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException(
          "Column family '" + family.getNameAsString() + "' does not exist");
      }
      return putColumnFamily(family);
    }

    private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) {
      families.put(family.getName(), family);
      return this;
    }

    /**
     * Checks to see if this table contains the given column family
     * @param familyName Family name or column name.
     * @return true if the table contains the specified family name
     */
    @Override
    public boolean hasColumnFamily(final byte[] familyName) {
      return families.containsKey(familyName);
    }

    /** Returns Name of this table and then a map of all of the column family descriptors. */
    @Override
    public String toString() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(true));
      families.values().forEach(f -> s.append(", ").append(f));
      return s.toString();
    }

    /**
     * @return Name of this table and then a map of all of the column family descriptors (with
     *         only the non-default column family attributes)
     */
    @Override
    public String toStringCustomizedValues() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(false));
      families.values().forEach(hcd -> s.append(", ").append(hcd.toStringCustomizedValues()));
      return s.toString();
    }
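
    // Rough shape of the rendered output (abbreviated; the attribute set varies):
    //   'example_table', {TABLE_ATTRIBUTES => {READONLY => 'true', ...}}, {NAME => 'cf', ...}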

    /** Returns map of all table attributes formatted into string. */
    public String toStringTableAttributes() {
      return getValues(true).toString();
    }

    private StringBuilder getValues(boolean printDefaults) {
      StringBuilder s = new StringBuilder();

      // step 1: set partitioning and pruning
      Set<Bytes> reservedKeys = new TreeSet<>();
      Set<Bytes> userKeys = new TreeSet<>();
      for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
        if (entry.getKey() == null || entry.getKey().get() == null) {
          continue;
        }
        String key = Bytes.toString(entry.getKey().get());
        // in this section, print out reserved keywords + coprocessor info
        if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
          userKeys.add(entry.getKey());
          continue;
        }
        // only print out IS_META if true
        String value = Bytes.toString(entry.getValue().get());
        if (key.equalsIgnoreCase(IS_META)) {
          if (!Boolean.parseBoolean(value)) {
            continue;
          }
        }
        // see if a reserved key is a default value. may not want to print it out
        if (
          printDefaults || !DEFAULT_VALUES.containsKey(key)
            || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)
        ) {
          reservedKeys.add(entry.getKey());
        }
      }

      // early exit optimization
      boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
      if (!hasAttributes) {
        return s;
      }

      s.append(", {");
      // step 2: printing attributes
      if (hasAttributes) {
        s.append("TABLE_ATTRIBUTES => {");

        // print all reserved keys first
        boolean printCommaForAttr = false;
        for (Bytes k : reservedKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForAttr) {
            s.append(", ");
          }
          printCommaForAttr = true;
          s.append(key);
          s.append(" => ");
          s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
        }

        if (!userKeys.isEmpty()) {
          // print all non-reserved as a separate subset
          if (printCommaForAttr) {
            s.append(", ");
          }
          s.append(HConstants.METADATA).append(" => ");
          s.append("{");
          boolean printCommaForCfg = false;
          for (Bytes k : userKeys) {
            String key = Bytes.toString(k.get());
            String value = Bytes.toStringBinary(values.get(k).get());
            if (printCommaForCfg) {
              s.append(", ");
            }
            printCommaForCfg = true;
            s.append('\'').append(key).append('\'');
            s.append(" => ");
            s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
          }
          s.append("}");
        }

        s.append("}");
      }

      s.append("}"); // end METHOD
      return s;
    }

    /**
     * Compare the contents of the descriptor with another one passed as a parameter. Checks if
     * the obj passed is an instance of ModifyableTableDescriptor, if yes then the contents of the
     * descriptors are compared.
     * @param obj The object to compare
     * @return true if the contents of the two descriptors exactly match
     * @see java.lang.Object#equals(java.lang.Object)
     */
    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (obj instanceof ModifyableTableDescriptor) {
        return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0;
      }
      return false;
    }

    /** Returns hash code */
    @Override
    public int hashCode() {
      int result = this.name.hashCode();
      if (this.families.size() > 0) {
        for (ColumnFamilyDescriptor e : this.families.values()) {
          result ^= e.hashCode();
        }
      }
      result ^= values.hashCode();
      return result;
    }

    // Comparable
    /**
     * Compares the descriptor with another descriptor which is passed as a parameter. This
     * compares the content of the two descriptors and not the reference.
     * @param other The MTD to compare
     * @return 0 if the contents of the descriptors exactly match; a non-zero value if there is a
     *         mismatch in the contents
     */
    @Override
    public int compareTo(final ModifyableTableDescriptor other) {
      return TableDescriptor.COMPARATOR.compare(this, other);
    }

    @Override
    public ColumnFamilyDescriptor[] getColumnFamilies() {
      return families.values().toArray(new ColumnFamilyDescriptor[families.size()]);
    }

    /**
     * Returns the configured replicas per region
     */
    @Override
    public int getRegionReplication() {
      return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION);
    }

    /**
     * Sets the number of replicas per region.
     * @param regionReplication the replication factor per region
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionReplication(int regionReplication) {
      return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication));
    }

    /** Returns true if the read-replicas memstore replication is enabled. */
    @Override
    public boolean hasRegionMemStoreReplication() {
      return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf,
        DEFAULT_REGION_MEMSTORE_REPLICATION);
    }

    /**
     * Enable or Disable the memstore replication from the primary region to the replicas. The
     * replication will be used only for meta operations (e.g. flush, compaction, ...)
     * @param memstoreReplication true if the new data written to the primary region should be
     *                            replicated. false if the secondaries can tolerate having new
     *                            data only when the primary flushes the memstore.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) {
      return setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication));
    }

    public ModifyableTableDescriptor setPriority(int priority) {
      return setValue(PRIORITY_KEY, Integer.toString(priority));
    }

    @Override
    public int getPriority() {
      return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY);
    }
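
    // Sketch: serve timeline-consistent reads from three replicas per region,
    // with memstore replication left at its default (on):
    //
    //   desc.setRegionReplication(3).setRegionMemStoreReplication(true);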

    /**
     * Returns all the column family names of the current table. The map of TableDescriptor
     * contains mapping of family name to ColumnFamilyDescriptor. This returns all the keys of the
     * family map which represents the column family names of the table.
     * @return Immutable sorted set of the keys of the families.
     */
    @Override
    public Set<byte[]> getColumnFamilyNames() {
      return Collections.unmodifiableSet(this.families.keySet());
    }

    /**
     * Returns the ColumnFamilyDescriptor for a specific column family with name as specified by
     * the parameter column.
     * @param column Column family name
     * @return Column descriptor for the passed family name or the family on passed in column.
     */
    @Override
    public ColumnFamilyDescriptor getColumnFamily(final byte[] column) {
      return this.families.get(column);
    }

    /**
     * Removes the ColumnFamilyDescriptor with name specified by the parameter column from the
     * table descriptor
     * @param column Name of the column family to be removed.
     * @return Column descriptor for the passed family name or the family on passed in column.
     */
    public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) {
      return this.families.remove(column);
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
     * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
     * region is opened.
     * @param className Full class name.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCoprocessor(String className) throws IOException {
      return setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className)
        .setPriority(Coprocessor.PRIORITY_USER).build());
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
     * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
     * region is opened.
     * @throws IOException any illegal parameter key/value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) throws IOException {
      checkHasCoprocessor(cp.getClassName());
      if (cp.getPriority() < 0) {
        throw new IOException(
          "Priority must be greater than or equal to zero, current:" + cp.getPriority());
      }
      // Validate parameter kvs and then add key/values to kvString.
      StringBuilder kvString = new StringBuilder();
      for (Map.Entry<String, String> e : cp.getProperties().entrySet()) {
        if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
          throw new IOException("Illegal parameter key = " + e.getKey());
        }
        if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
          throw new IOException("Illegal parameter (" + e.getKey() + ") value = " + e.getValue());
        }
        if (kvString.length() != 0) {
          kvString.append(',');
        }
        kvString.append(e.getKey());
        kvString.append('=');
        kvString.append(e.getValue());
      }

      String value = cp.getJarPath().orElse("") + "|" + cp.getClassName() + "|"
        + Integer.toString(cp.getPriority()) + "|" + kvString.toString();
      return setCoprocessorToMap(value);
    }
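
    // Sketch (hypothetical observer class and jar path): attach a coprocessor
    // with an explicit priority and properties; nothing is classloaded here.
    //
    //   desc.setCoprocessor(CoprocessorDescriptorBuilder
    //     .newBuilder("com.example.MyRegionObserver")
    //     .setJarPath("hdfs:///cp/my-observer.jar")
    //     .setPriority(Coprocessor.PRIORITY_USER)
    //     .setProperties(Collections.singletonMap("arg1", "1"))
    //     .build());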

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
     * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
     * region is opened.
     * @param specStr The Coprocessor specification all in one String
     * @return the modifyable TD
     * @deprecated used by HTableDescriptor and admin.rb. As of release 2.0.0, this will be
     *             removed in HBase 3.0.0.
     */
    @Deprecated
    public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr)
      throws IOException {
      CoprocessorDescriptor cpDesc =
        toCoprocessorDescriptor(specStr).orElseThrow(() -> new IllegalArgumentException(
          "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr));
      checkHasCoprocessor(cpDesc.getClassName());
      return setCoprocessorToMap(specStr);
    }

    private void checkHasCoprocessor(final String className) throws IOException {
      if (hasCoprocessor(className)) {
        throw new IOException("Coprocessor " + className + " already exists.");
      }
    }

    /**
     * Add coprocessor to values Map
     * @param specStr The Coprocessor specification all in one String
     * @return Returns <code>this</code>
     */
    private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) {
      if (specStr == null) {
        return this;
      }
      // generate a coprocessor key
      int maxCoprocessorNumber = 0;
      Matcher keyMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        maxCoprocessorNumber =
          Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
      }
      maxCoprocessorNumber++;
      String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
      return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
    }

    /**
     * Check if the table has an attached co-processor represented by the name className
     * @param classNameToMatch - Class name of the co-processor
     * @return true if the table has a co-processor className
     */
    @Override
    public boolean hasCoprocessor(String classNameToMatch) {
      return getCoprocessorDescriptors().stream()
        .anyMatch(cp -> cp.getClassName().equals(classNameToMatch));
    }

    /**
     * Return the list of attached co-processor represented by their name className
     * @return The list of co-processors classNames
     */
    @Override
    public List<CoprocessorDescriptor> getCoprocessorDescriptors() {
      List<CoprocessorDescriptor> result = new ArrayList<>();
      for (Map.Entry<Bytes, Bytes> e : getValues().entrySet()) {
        String key = Bytes.toString(e.getKey().get()).trim();
        if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) {
          toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim())
            .ifPresent(result::add);
        }
      }
      return result;
    }

    /**
     * Remove a coprocessor from those set on the table
     * @param className Class name of the co-processor
     */
    public void removeCoprocessor(String className) {
      Bytes match = null;
      Matcher keyMatcher;
      Matcher valueMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes.toString(e.getValue().get()));
        if (!valueMatcher.matches()) {
          continue;
        }
        // get className and compare
        String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
        // remove the CP if it is present
        if (clazz.equals(className.trim())) {
          match = e.getKey();
          break;
        }
      }
      // if we found a match, remove it
      if (match != null) {
        ModifyableTableDescriptor.this.removeValue(match);
      } else {
        LOG.warn("coprocessor with class name {} was not found in the table attribute",
          className);
      }
    }

    /**
     * Set the table owner.
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    @Deprecated
    public ModifyableTableDescriptor setOwner(User owner) {
      return setOwnerString(owner != null ? owner.getShortName() : null);
    }

    /**
     * Set the table owner.
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    // used by admin.rb:alter(table_name,*args) to update owner.
    @Deprecated
    public ModifyableTableDescriptor setOwnerString(String ownerString) {
      return setValue(OWNER_KEY, ownerString);
    }

    /**
     * Get the table owner.
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    @Override
    @Deprecated
    public String getOwnerString() {
      // Note that every table should have an owner (i.e. should have OWNER_KEY set).
      // hbase:meta should return system user as owner, not null (see
      // MasterFileSystem.java:bootstrap()).
      return getOrDefault(OWNER_KEY, Function.identity(), null);
    }

    /** Returns the bytes in pb format */
    private byte[] toByteArray() {
      return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
    }

    /**
     * Parse the serialized representation of a {@link ModifyableTableDescriptor}
     * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic
     *              prefix
     * @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code>
     * @see #toByteArray()
     */
    private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
      if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
        throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor");
      }
      int pblen = ProtobufUtil.lengthOfPBMagic();
      HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
      try {
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        return ProtobufUtil.toTableDescriptor(builder.build());
      } catch (IOException e) {
        throw new DeserializationException(e);
      }
    }

    @Override
    public int getColumnFamilyCount() {
      return families.size();
    }

    @Override
    public Optional<String> getRegionServerGroup() {
      Bytes value = values.get(RSGROUP_KEY);
      if (value != null) {
        return Optional.of(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      } else {
        return Optional.empty();
      }
    }
  }
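
  // Example of a spec string accepted below (same form as documented on
  // CP_HTD_ATTR_VALUE_PATTERN):
  //   hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2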

  private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) {
    Matcher matcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    if (matcher.matches()) {
      // jar file path can be empty if the cp class can be loaded from class loader.
      String path = matcher.group(1).trim().isEmpty() ? null : matcher.group(1).trim();
      String className = matcher.group(2).trim();
      if (className.isEmpty()) {
        return Optional.empty();
      }
      String priorityStr = matcher.group(3).trim();
      int priority =
        priorityStr.isEmpty() ? Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr);
      String cfgSpec = null;
      try {
        cfgSpec = matcher.group(4);
      } catch (IndexOutOfBoundsException ex) {
        // ignore
      }
      Map<String, String> ourConf = new TreeMap<>();
      if (cfgSpec != null && !cfgSpec.trim().equals("|")) {
        cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1);
        Matcher m = CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec);
        while (m.find()) {
          ourConf.put(m.group(1), m.group(2));
        }
      }
      return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path)
        .setPriority(priority).setProperties(ourConf).build());
    }
    return Optional.empty();
  }
}