/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.BiPredicate;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.HBaseException;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PrettyPrinter;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

/**
 * Convenience class for composing an instance of {@link TableDescriptor}.
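 * <p>
 * A short usage sketch; the table and family names here are only illustrative:
 * </p>
 *
 * <pre>
 * TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
 *   .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
 *   .setCompactionEnabled(true)
 *   .build();
 * </pre>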
 * @since 2.0.0
 */
@InterfaceAudience.Public
public class TableDescriptorBuilder {
  public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class);
  @InterfaceAudience.Private
  public static final String SPLIT_POLICY = "SPLIT_POLICY";
  private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY));
  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes the maximum size
   * of the store file after which a region split occurs.
   */
  @InterfaceAudience.Private
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final Bytes MAX_FILESIZE_KEY = new Bytes(Bytes.toBytes(MAX_FILESIZE));

  /**
   * Used by rest interface to access this metadata attribute which denotes if the table is Read
   * Only.
   */
  @InterfaceAudience.Private
  public static final String READONLY = "READONLY";
  private static final Bytes READONLY_KEY = new Bytes(Bytes.toBytes(READONLY));

  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes if the table is
   * compaction enabled.
   */
  @InterfaceAudience.Private
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final Bytes COMPACTION_ENABLED_KEY = new Bytes(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes if the table is
   * split enabled.
   */
  @InterfaceAudience.Private
  public static final String SPLIT_ENABLED = "SPLIT_ENABLED";
  private static final Bytes SPLIT_ENABLED_KEY = new Bytes(Bytes.toBytes(SPLIT_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes if the table is
   * merge enabled.
   */
  @InterfaceAudience.Private
  public static final String MERGE_ENABLED = "MERGE_ENABLED";
  private static final Bytes MERGE_ENABLED_KEY = new Bytes(Bytes.toBytes(MERGE_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata attribute which represents the maximum
   * size of the memstore after which its contents are flushed onto the disk.
   */
  @InterfaceAudience.Private
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final Bytes MEMSTORE_FLUSHSIZE_KEY = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  @InterfaceAudience.Private
  public static final String FLUSH_POLICY = "FLUSH_POLICY";
  private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY));
  /**
   * Used by rest interface to access this metadata attribute which denotes if it is a catalog
   * table, i.e. <code>hbase:meta</code>.
   */
  @InterfaceAudience.Private
  public static final String IS_META = "IS_META";
  private static final Bytes IS_META_KEY = new Bytes(Bytes.toBytes(IS_META));

  /**
   * {@link Durability} setting for the table.
   */
  @InterfaceAudience.Private
  public static final String DURABILITY = "DURABILITY";
  private static final Bytes DURABILITY_KEY = new Bytes(Bytes.toBytes("DURABILITY"));

  /**
   * The number of region replicas for the table.
   */
  @InterfaceAudience.Private
  public static final String REGION_REPLICATION = "REGION_REPLICATION";
  private static final Bytes REGION_REPLICATION_KEY = new Bytes(Bytes.toBytes(REGION_REPLICATION));

  /**
   * The flag to indicate whether or not the memstore should be replicated for read-replicas
   * (CONSISTENCY => TIMELINE).
   */
  @InterfaceAudience.Private
  public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
  private static final Bytes REGION_MEMSTORE_REPLICATION_KEY =
    new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));

  /**
   * If non-null, the HDFS erasure coding policy to set on the data dir of the table
   */
  public static final String ERASURE_CODING_POLICY = "ERASURE_CODING_POLICY";
  private static final Bytes ERASURE_CODING_POLICY_KEY =
    new Bytes(Bytes.toBytes(ERASURE_CODING_POLICY));

  private static final String DEFAULT_ERASURE_CODING_POLICY = null;
  /**
   * Used by shell/rest interface to access this metadata attribute which denotes if the table
   * should be treated by region normalizer.
   */
  @InterfaceAudience.Private
  public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
  private static final Bytes NORMALIZATION_ENABLED_KEY =
    new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_COUNT = "NORMALIZER_TARGET_REGION_COUNT";
  private static final Bytes NORMALIZER_TARGET_REGION_COUNT_KEY =
    new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT));

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_SIZE_MB = "NORMALIZER_TARGET_REGION_SIZE_MB";
  private static final Bytes NORMALIZER_TARGET_REGION_SIZE_MB_KEY =
    new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE_MB));
  // TODO: Keeping backward compatibility with the HBASE-25651 change. Can be removed in a later
  // version.
  @InterfaceAudience.Private
  @Deprecated
  public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE";
  @Deprecated
  private static final Bytes NORMALIZER_TARGET_REGION_SIZE_KEY =
    new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE));

  /**
   * Default durability for HTD is USE_DEFAULT, which defaults to the HBase-global default value.
   */
  private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;

  @InterfaceAudience.Private
  public static final String PRIORITY = "PRIORITY";
  private static final Bytes PRIORITY_KEY = new Bytes(Bytes.toBytes(PRIORITY));

  private static final Bytes RSGROUP_KEY =
    new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP));

  /**
   * Relative priority of the table used for rpc scheduling
   */
  private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS;

  /**
   * Constant that denotes whether the table is READONLY by default and is false
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes whether the table is split enabled by default
   */
  public static final boolean DEFAULT_SPLIT_ENABLED = true;

  /**
   * Constant that denotes whether the table is merge enabled by default
   */
  public static final boolean DEFAULT_MERGE_ENABLED = true;

  /**
   * Constant that denotes the maximum default size of the memstore in bytes after which the
   * contents are flushed to the store files.
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;

  private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
  private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();

  static {
    DEFAULT_VALUES.put(MAX_FILESIZE, String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE, String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); // use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
    // Setting ERASURE_CODING_POLICY to NULL so that it is not considered as metadata
    DEFAULT_VALUES.put(ERASURE_CODING_POLICY, String.valueOf(DEFAULT_ERASURE_CODING_POLICY));
    DEFAULT_VALUES.keySet().stream().map(s -> new Bytes(Bytes.toBytes(s)))
      .forEach(RESERVED_KEYWORDS::add);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }

  public static PrettyPrinter.Unit getUnit(String key) {
    switch (key) {
      case MAX_FILESIZE:
      case MEMSTORE_FLUSHSIZE:
        return PrettyPrinter.Unit.BYTE;
      default:
        return PrettyPrinter.Unit.NONE;
    }
  }
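
  // Illustrative use of getUnit: size-valued attributes can be pretty-printed for display,
  // e.g. something along the lines of
  //
  //   PrettyPrinter.format("134217728", TableDescriptorBuilder.getUnit(MAX_FILESIZE));
  //
  // which renders the raw byte count in a human-readable form.
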
  /**
   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
   *             any more.
   */
  @InterfaceAudience.Private
  @Deprecated
  public final static String NAMESPACE_FAMILY_INFO = "info";

  /**
   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
   *             any more.
   */
  @InterfaceAudience.Private
  @Deprecated
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);

  /**
   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
   *             any more.
   */
  @InterfaceAudience.Private
  @Deprecated
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /**
   * <pre>
   * Pattern that matches a coprocessor specification. Form is:
   * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]}
   * where arguments are {@code <KEY> '=' <VALUE> [,...]}
   * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
   * </pre>
   */
  private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
    Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");

  private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
  private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
  private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile("("
    + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" + CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
  private static final Pattern CP_HTD_ATTR_KEY_PATTERN =
    Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);

  /**
   * Table descriptor for namespace table
   * @deprecated since 3.0.0 and will be removed in 4.0.0. We have folded the data in namespace
   *             table into meta table, so do not use it any more.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21154">HBASE-21154</a>
   */
  @Deprecated
  public static final TableDescriptor NAMESPACE_TABLEDESC =
    TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES)
        // Ten is an arbitrary number. Keep versions to help debugging.
        .setMaxVersions(10).setInMemory(true).setBlocksize(8 * 1024)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL).build())
      .build();

  private final ModifyableTableDescriptor desc;

  /** Returns the given descriptor serialized with pb, prefixed with the pb magic bytes */
  public static byte[] toByteArray(TableDescriptor desc) {
    if (desc instanceof ModifyableTableDescriptor) {
      return ((ModifyableTableDescriptor) desc).toByteArray();
    }
    return new ModifyableTableDescriptor(desc).toByteArray();
  }

  /**
   * The input should be created by {@link #toByteArray}.
   * @param pbBytes A pb serialized TableDescriptor instance with pb magic prefix
   * @return A deserialized {@link TableDescriptor} instance
   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if an error occurred
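   * <p>
   * A round-trip sketch (the table name is illustrative):
   * </p>
   *
   * <pre>
   * TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo")).build();
   * byte[] pb = TableDescriptorBuilder.toByteArray(td);
   * TableDescriptor restored = TableDescriptorBuilder.parseFrom(pb);
   * </pre>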
   */
  public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException {
    return ModifyableTableDescriptor.parseFrom(pbBytes);
  }

  public static TableDescriptorBuilder newBuilder(final TableName name) {
    return new TableDescriptorBuilder(name);
  }

  public static TableDescriptor copy(TableDescriptor desc) {
    return new ModifyableTableDescriptor(desc);
  }

  public static TableDescriptor copy(TableName name, TableDescriptor desc) {
    return new ModifyableTableDescriptor(name, desc);
  }

  /**
   * Copy all values, families, and name from the input.
   * @param desc The descriptor to copy
   * @return A clone of input
   */
  public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) {
    return new TableDescriptorBuilder(desc);
  }

  private TableDescriptorBuilder(final TableName name) {
    this.desc = new ModifyableTableDescriptor(name);
  }

  private TableDescriptorBuilder(final TableDescriptor desc) {
    this.desc = new ModifyableTableDescriptor(desc);
  }

  public TableDescriptorBuilder setCoprocessor(String className) throws IOException {
    return setCoprocessor(CoprocessorDescriptorBuilder.of(className));
  }

  public TableDescriptorBuilder setCoprocessor(CoprocessorDescriptor cpDesc) throws IOException {
    desc.setCoprocessor(Objects.requireNonNull(cpDesc));
    return this;
  }

  public TableDescriptorBuilder setCoprocessors(Collection<CoprocessorDescriptor> cpDescs)
    throws IOException {
    for (CoprocessorDescriptor cpDesc : cpDescs) {
      desc.setCoprocessor(cpDesc);
    }
    return this;
  }

  public boolean hasCoprocessor(String classNameToMatch) {
    return desc.hasCoprocessor(classNameToMatch);
  }

  public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder
    setColumnFamilies(final Collection<ColumnFamilyDescriptor> families) {
    families.forEach(desc::setColumnFamily);
    return this;
  }

  public TableDescriptorBuilder modifyColumnFamily(final ColumnFamilyDescriptor family) {
    desc.modifyColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder removeValue(final String key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(Bytes key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(byte[] key) {
    desc.removeValue(key);
    return this;
  }

  /**
   * Remove all values whose (key, value) pair matches the given predicate.
   */
  public TableDescriptorBuilder removeValue(BiPredicate<Bytes, Bytes> predicate) {
    List<Bytes> toRemove =
      desc.getValues().entrySet().stream().filter(e -> predicate.test(e.getKey(), e.getValue()))
        .map(Map.Entry::getKey).collect(Collectors.toList());
    for (Bytes key : toRemove) {
      removeValue(key);
    }
    return this;
  }

  public TableDescriptorBuilder removeColumnFamily(final byte[] name) {
    desc.removeColumnFamily(name);
    return this;
  }

  public TableDescriptorBuilder removeCoprocessor(String className) {
    desc.removeCoprocessor(className);
    return this;
  }

  public TableDescriptorBuilder setCompactionEnabled(final boolean isEnable) {
    desc.setCompactionEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setSplitEnabled(final boolean isEnable) {
    desc.setSplitEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setMergeEnabled(final boolean isEnable) {
    desc.setMergeEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setDurability(Durability durability) {
    desc.setDurability(durability);
    return this;
  }

  public TableDescriptorBuilder setFlushPolicyClassName(String clazz) {
    desc.setFlushPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(long maxFileSize) {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }
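
  // Illustrative values for the two setMaxFileSize overloads: both store the same
  // MAX_FILESIZE attribute, but the String overload accepts a human-readable size that is
  // parsed by PrettyPrinter (the exact accepted spelling, e.g. "10 GB", is PrettyPrinter's
  // contract, so treat it as an assumption):
  //
  //   builder.setMaxFileSize(10737418240L);
  //   builder.setMaxFileSize("10 GB");
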
  public TableDescriptorBuilder setMaxFileSize(String maxFileSize) throws HBaseException {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) {
    desc.setMemStoreFlushSize(memstoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(String memStoreFlushSize)
    throws HBaseException {
    desc.setMemStoreFlushSize(memStoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) {
    desc.setNormalizerTargetRegionCount(regionCount);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionSize(final long regionSize) {
    desc.setNormalizerTargetRegionSize(regionSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) {
    desc.setNormalizationEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setPriority(int priority) {
    desc.setPriority(priority);
    return this;
  }

  public TableDescriptorBuilder setReadOnly(final boolean readOnly) {
    desc.setReadOnly(readOnly);
    return this;
  }

  public TableDescriptorBuilder setErasureCodingPolicy(String policy) {
    desc.setErasureCodingPolicy(policy);
    return this;
  }

  public TableDescriptorBuilder setRegionMemStoreReplication(boolean memstoreReplication) {
    desc.setRegionMemStoreReplication(memstoreReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionReplication(int regionReplication) {
    desc.setRegionReplication(regionReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionSplitPolicyClassName(String clazz) {
    desc.setRegionSplitPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setValue(final String key, final String value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final Bytes key, final Bytes value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final byte[] key, final byte[] value) {
    desc.setValue(key, value);
    return this;
  }

  public String getValue(String key) {
    return desc.getValue(key);
  }

  /**
   * Sets the replication scope for all and only the column families already present in the
   * builder. Families added later won't be backfilled with this replication scope.
   * @param scope replication scope
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setReplicationScope(int scope) {
    Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    newFamilies.putAll(desc.families);
    newFamilies.forEach((cf, cfDesc) -> {
      desc.removeColumnFamily(cf);
      desc
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope).build());
    });
    return this;
  }

  public TableDescriptorBuilder setRegionServerGroup(String group) {
    desc.setValue(RSGROUP_KEY, group);
    return this;
  }

  public TableDescriptor build() {
    return new ModifyableTableDescriptor(desc);
  }
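
  // A hedged end-to-end sketch (names are illustrative): compose the descriptor, apply a
  // global replication scope to every family added so far, then freeze it with build().
  //
  //   TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t"))
  //     .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
  //     .setReplicationScope(HConstants.REPLICATION_SCOPE_GLOBAL)
  //     .build();
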
  private static final class ModifyableTableDescriptor
    implements TableDescriptor, Comparable<ModifyableTableDescriptor> {

    private final TableName name;

    /**
     * A map which holds the metadata information of the table. This metadata includes values like
     * IS_META, SPLIT_POLICY, MAX_FILESIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
     */
    private final Map<Bytes, Bytes> values = new HashMap<>();

    /**
     * Maps column family name to the respective FamilyDescriptors
     */
    private final Map<byte[], ColumnFamilyDescriptor> families =
      new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);

    /**
     * Cached hash of the table descriptor. Computed lazily on first access.
     */
    private volatile String descriptorHash;

    /**
     * Construct a table descriptor specifying a TableName object
     * @param name Table name.
     */
    private ModifyableTableDescriptor(final TableName name) {
      this(name, Collections.emptyList(), Collections.emptyMap());
    }

    private ModifyableTableDescriptor(final TableDescriptor desc) {
      this(desc.getTableName(), Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    /**
     * Construct a table descriptor by cloning the descriptor passed as a parameter.
     * <p>
     * Makes a deep copy of the supplied descriptor.
     * @param name The new name
     * @param desc The descriptor.
     */
    private ModifyableTableDescriptor(final TableName name, final TableDescriptor desc) {
      this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    private ModifyableTableDescriptor(final TableName name,
      final Collection<ColumnFamilyDescriptor> families, Map<Bytes, Bytes> values) {
      this.name = name;
      families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c)));
      this.values.putAll(values);
      this.values.put(IS_META_KEY,
        new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME)))));
    }

    /**
     * Checks if this table is the <code>hbase:meta</code> region.
     * @return true if this table is the <code>hbase:meta</code> region
     */
    @Override
    public boolean isMetaRegion() {
      return getOrDefault(IS_META_KEY, Boolean::valueOf, false);
    }

    /**
     * Checks if the table is a <code>hbase:meta</code> table
     * @return true if table is the <code>hbase:meta</code> region.
     */
    @Override
    public boolean isMetaTable() {
      return isMetaRegion();
    }

    @Override
    public Bytes getValue(Bytes key) {
      Bytes rval = values.get(key);
      return rval == null ? null : new Bytes(rval.copyBytes());
    }

    @Override
    public String getValue(String key) {
      Bytes rval = values.get(new Bytes(Bytes.toBytes(key)));
      return rval == null ? null : Bytes.toString(rval.get(), rval.getOffset(), rval.getLength());
    }

    @Override
    public byte[] getValue(byte[] key) {
      Bytes value = values.get(new Bytes(key));
      return value == null ? null : value.copyBytes();
    }

    private <T> T getOrDefault(Bytes key, Function<String, T> function, T defaultValue) {
      Bytes value = values.get(key);
      if (value == null) {
        return defaultValue;
      } else {
        return function.apply(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      }
    }
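
    // For illustration: the typed getters below all funnel through getOrDefault; for
    // example, isReadOnly() reduces to
    //
    //   getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY);
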
    /**
     * Getter for fetching an unmodifiable {@link #values} map.
     * @return unmodifiable map {@link #values}.
     * @see #values
     */
    @Override
    public Map<Bytes, Bytes> getValues() {
      // shallow pointer copy
      return Collections.unmodifiableMap(values);
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     * @param key   The key.
     * @param value The value. If null, removes the setting.
     * @return the modifyable TD
     * @see #values
     */
    public ModifyableTableDescriptor setValue(byte[] key, byte[] value) {
      return setValue(toBytesOrNull(key, v -> v), toBytesOrNull(value, v -> v));
    }

    public ModifyableTableDescriptor setValue(String key, String value) {
      return setValue(toBytesOrNull(key, Bytes::toBytes), toBytesOrNull(value, Bytes::toBytes));
    }

    /**
     * @param key   The key.
     * @param value The value. If null, removes the setting.
     */
    private ModifyableTableDescriptor setValue(final Bytes key, final String value) {
      return setValue(key, toBytesOrNull(value, Bytes::toBytes));
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     * @param key   The key.
     * @param value The value. If null or empty, removes the setting.
     */
    public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
      if (value == null || value.getLength() == 0) {
        values.remove(key);
      } else {
        values.put(key, value);
      }
      return this;
    }
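
    // Note the delete-on-null/empty semantics above. For example (the key is hypothetical):
    //
    //   td.setValue(new Bytes(Bytes.toBytes("MY_KEY")), (Bytes) null); // removes MY_KEY
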
    private static <T> Bytes toBytesOrNull(T t, Function<T, byte[]> f) {
      if (t == null) {
        return null;
      } else {
        return new Bytes(f.apply(t));
      }
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     * @param key Key whose entry is to be removed from the TableDescriptor parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final String key) {
      return setValue(key, (String) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     * @param key Key whose entry is to be removed from the TableDescriptor parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(Bytes key) {
      return setValue(key, (Bytes) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     * @param key Key whose entry is to be removed from the TableDescriptor parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final byte[] key) {
      return removeValue(new Bytes(key));
    }

    /**
     * Check if the readOnly flag of the table is set. If the readOnly flag is set then the contents
     * of the table can only be read from but not modified.
     * @return true if all columns in the table should be read only
     */
    @Override
    public boolean isReadOnly() {
      return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY);
    }

    /**
     * Setting the table as read only sets all the columns in the table as read only. By default all
     * tables are modifiable, but if the readOnly flag is set to true then the contents of the table
     * can only be read but not modified.
     * @param readOnly True if all of the columns in the table should be read only.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setReadOnly(final boolean readOnly) {
      return setValue(READONLY_KEY, Boolean.toString(readOnly));
    }

    /**
     * The HDFS erasure coding policy for a table. This will be set on the data dir of the table,
     * and is an alternative to normal replication which takes less space at the cost of locality.
     * @return the current policy, or null if undefined
     */
    @Override
    public String getErasureCodingPolicy() {
      return getValue(ERASURE_CODING_POLICY);
    }

    /**
     * Sets the HDFS erasure coding policy for the table. This will be propagated to HDFS for the
     * data dir of the table. Erasure coding is an alternative to normal replication which takes
     * less space at the cost of locality. The policy must be available and enabled on the hdfs
     * cluster before being set.
     * @param policy the policy to set, or null to disable erasure coding
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setErasureCodingPolicy(String policy) {
      return setValue(ERASURE_CODING_POLICY_KEY, policy);
    }
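
    // Hedged example: the policy name must match one that is installed and enabled on the
    // backing HDFS cluster, e.g. one of HDFS' built-in policies:
    //
    //   td.setErasureCodingPolicy("RS-6-3-1024k");
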
    /**
     * Check if the compaction enable flag of the table is true. If the flag is false then no
     * minor/major compactions will be performed.
     * @return true if table compaction enabled
     */
    @Override
    public boolean isCompactionEnabled() {
      return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED);
    }

    /**
     * Setting the table compaction enable flag.
     * @param isEnable True if enable compaction.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) {
      return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the split enable flag of the table is true. If the flag is false then no split will
     * be done.
     * @return true if table region split enabled
     */
    @Override
    public boolean isSplitEnabled() {
      return getOrDefault(SPLIT_ENABLED_KEY, Boolean::valueOf, DEFAULT_SPLIT_ENABLED);
    }

    /**
     * Setting the table region split enable flag.
     * @param isEnable True if enable region split.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) {
      return setValue(SPLIT_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the region merge enable flag of the table is true. If the flag is false then no
     * merge will be done.
     * @return true if table region merge enabled
     */
    @Override
    public boolean isMergeEnabled() {
      return getOrDefault(MERGE_ENABLED_KEY, Boolean::valueOf, DEFAULT_MERGE_ENABLED);
    }

    /**
     * Setting the table region merge enable flag.
     * @param isEnable True if enable region merge.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) {
      return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the normalization enable flag of the table is true. If the flag is false then the
     * region normalizer won't attempt to normalize this table.
     * @return true if region normalization is enabled for this table
     **/
    @Override
    public boolean isNormalizationEnabled() {
      return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, false);
    }

    /**
     * Check if there is a target region count. If so, the normalize plan will be calculated based
     * on the target region count.
     * @return target region count after normalize done
     */
    @Override
    public int getNormalizerTargetRegionCount() {
      return getOrDefault(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer::valueOf,
        Integer.valueOf(-1));
    }

    /**
     * Check if there is a target region size. If so, the normalize plan will be calculated based
     * on the target region size.
     * @return target region size after normalize done
     */
    @Override
    public long getNormalizerTargetRegionSize() {
      long targetRegionSize =
        getOrDefault(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long::valueOf, Long.valueOf(-1));
      // Fall back to the deprecated NORMALIZER_TARGET_REGION_SIZE key for backward compatibility.
      return targetRegionSize == -1
        ? getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1))
        : targetRegionSize;
    }

    /**
     * Setting the table normalization enable flag.
     * @param isEnable True if enable normalization.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) {
      return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Setting the target region count of table normalization.
     * @param regionCount the target region count.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionCount(final int regionCount) {
      return setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer.toString(regionCount));
    }

    /**
     * Setting the target region size of table normalization.
     * @param regionSize the target region size.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long regionSize) {
      return setValue(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long.toString(regionSize));
    }

    /**
     * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
     * @param durability enum value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setDurability(Durability durability) {
      return setValue(DURABILITY_KEY, durability.name());
    }

    /**
     * Returns the durability setting for the table.
     * @return durability setting for the table.
     */
    @Override
    public Durability getDurability() {
      return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABLITY);
    }

    /**
     * Get the name of the table
     */
    @Override
    public TableName getTableName() {
      return name;
    }

    /**
     * This sets the class associated with the region split policy which determines when a region
     * split should occur. The class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) {
      return setValue(SPLIT_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the region split policy which determines when a region
     * split should occur. The class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     * @return the class name of the region split policy for this table. If this returns null, the
     *         default split policy is used.
     */
    @Override
    public String getRegionSplitPolicyClassName() {
      return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null);
    }
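
    // For example, pinning a table to one of the shipped policies (fully qualified name):
    //
    //   td.setRegionSplitPolicyClassName(
    //     "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");
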
    /**
     * Returns the maximum size up to which a region can grow, after which a region split is
     * triggered. The region size is represented by the size of the biggest store file in that
     * region.
     * @return max hregion size for table, -1 if not set.
     * @see #setMaxFileSize(long)
     */
    @Override
    public long getMaxFileSize() {
      return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Sets the maximum size up to which a region can grow, after which a region split is
     * triggered. The region size is represented by the size of the biggest store file in that
     * region, i.e. if the biggest store file grows beyond the maxFileSize, then the region split is
     * triggered. If not set, this defaults to {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
     * <p>
     * This is not an absolute value and might vary. Assume that a single row exceeds the
     * maxFileSize then the storeFileSize will be greater than maxFileSize since a single row cannot
     * be split across multiple regions
     * </p>
     * @param maxFileSize The maximum file size that a store file can grow to before a split is
     *                    triggered.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) {
      return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    }

    public ModifyableTableDescriptor setMaxFileSize(String maxFileSize) throws HBaseException {
      return setMaxFileSize(
        Long.parseLong(PrettyPrinter.valueOf(maxFileSize, PrettyPrinter.Unit.BYTE)));
    }

    /**
     * Returns the size of the memstore after which a flush to filesystem is triggered.
     * @return memory cache flush size for each hregion, -1 if not set.
     * @see #setMemStoreFlushSize(long)
     */
    @Override
    public long getMemStoreFlushSize() {
      return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Represents the maximum size of the memstore after which the contents of the memstore are
     * flushed to the filesystem. This defaults to a size of 128 MB
     * ({@link #DEFAULT_MEMSTORE_FLUSH_SIZE}).
     * @param memstoreFlushSize memory cache flush size for each hregion
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
      return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    }

    public ModifyableTableDescriptor setMemStoreFlushSize(String memStoreFlushSize)
      throws HBaseException {
      return setMemStoreFlushSize(
        Long.parseLong(PrettyPrinter.valueOf(memStoreFlushSize, PrettyPrinter.Unit.BYTE)));
    }

    /**
     * This sets the class associated with the flush policy which determines the stores that need
     * to be flushed when flushing a region. The class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.FlushPolicy.
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) {
      return setValue(FLUSH_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the flush policy which determines the stores that need
     * to be flushed when flushing a region. The class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.FlushPolicy.
     * @return the class name of the flush policy for this table. If this returns null, the default
     *         flush policy is used.
     */
    @Override
    public String getFlushPolicyClassName() {
      return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Adds a column family. For the updating purpose please use
     * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead.
     * @param family to add.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      int flength = family.getName().length;
      if (flength > Byte.MAX_VALUE) {
        throw new IllegalArgumentException(
          "The length of family name is bigger than " + Byte.MAX_VALUE);
      }
      if (hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException(
          "Family '" + family.getNameAsString() + "' already exists so cannot be added");
      }
      return putColumnFamily(family);
    }

    /**
     * Modifies the existing column family.
     * @param family to update
     * @return this (for chained invocation)
     */
    public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      if (!hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException(
          "Column family '" + family.getNameAsString() + "' does not exist");
      }
      return putColumnFamily(family);
    }

    private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) {
      families.put(family.getName(), family);
      return this;
    }

    /**
     * Checks to see if this table contains the given column family
     * @param familyName Family name or column name.
     * @return true if the table contains the specified family name
     */
    @Override
    public boolean hasColumnFamily(final byte[] familyName) {
      return families.containsKey(familyName);
    }

    /** Returns Name of this table and then a map of all of the column family descriptors. */
    @Override
    public String toString() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(true));
      families.values().forEach(f -> s.append(", ").append(f));
      return s.toString();
    }

    /**
     * @return Name of this table and then a map of all of the column family descriptors (with only
     *         the non-default column family attributes)
     */
    @Override
    public String toStringCustomizedValues() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(false));
      families.values().forEach(hcd -> s.append(", ").append(hcd.toStringCustomizedValues()));
      return s.toString();
    }
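
    // A hedged illustration of the rendering produced by the toString variants above and
    // built by getValues(boolean) below (the attribute set is abbreviated):
    //
    //   'example', {TABLE_ATTRIBUTES => {READONLY => 'true',
    //     METADATA => {'someKey' => 'someValue'}}}, {NAME => 'cf', ...}
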
    /** Returns map of all table attributes formatted into string. */
    public String toStringTableAttributes() {
      return getValues(true).toString();
    }

    private StringBuilder getValues(boolean printDefaults) {
      StringBuilder s = new StringBuilder();

      // step 1: set partitioning and pruning
      Set<Bytes> reservedKeys = new TreeSet<>();
      Set<Bytes> userKeys = new TreeSet<>();
      for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
        if (entry.getKey() == null || entry.getKey().get() == null) {
          continue;
        }
        String key = Bytes.toString(entry.getKey().get());
        // in this section, print out reserved keywords + coprocessor info
        if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
          userKeys.add(entry.getKey());
          continue;
        }
        // only print out IS_META if true
        String value = Bytes.toString(entry.getValue().get());
        if (key.equalsIgnoreCase(IS_META)) {
          if (!Boolean.parseBoolean(value)) {
            continue;
          }
        }
        // see if a reserved key is a default value. may not want to print it out
        if (
          printDefaults || !DEFAULT_VALUES.containsKey(key)
            || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)
        ) {
          reservedKeys.add(entry.getKey());
        }
      }

      // early exit optimization
      boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
      if (!hasAttributes) {
        return s;
      }

      s.append(", {");
      // step 2: printing attributes (hasAttributes is always true past the early exit above)
      s.append("TABLE_ATTRIBUTES => {");

      // print all reserved keys first
      boolean printCommaForAttr = false;
      for (Bytes k : reservedKeys) {
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printCommaForAttr) {
          s.append(", ");
        }
        printCommaForAttr = true;
        s.append(key);
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
      }

      if (!userKeys.isEmpty()) {
        // print all non-reserved as a separate subset
        if (printCommaForAttr) {
          s.append(", ");
        }
        s.append(HConstants.METADATA).append(" => ");
        s.append("{");
        boolean printCommaForCfg = false;
        for (Bytes k : userKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForCfg) {
            s.append(", ");
          }
          printCommaForCfg = true;
          s.append('\'').append(key).append('\'');
          s.append(" => ");
          s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
        }
        s.append("}");
      }

      s.append("}"); // closes TABLE_ATTRIBUTES
      s.append("}"); // closes the outer '{'
      return s;
    }

    /**
     * Compare the contents of the descriptor with another one passed as a parameter. Checks if the
     * obj passed is an instance of ModifyableTableDescriptor, if yes then the contents of the
     * descriptors are compared.
     * @param obj The object to compare
     * @return true if the contents of the two descriptors exactly match
     * @see java.lang.Object#equals(java.lang.Object)
     */
    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (obj instanceof ModifyableTableDescriptor) {
        return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0;
      }
      return false;
    }

    /** Returns hash code */
    @Override
    public int hashCode() {
      int result = this.name.hashCode();
      if (this.families.size() > 0) {
        for (ColumnFamilyDescriptor e : this.families.values()) {
          result ^= e.hashCode();
        }
      }
      result ^= values.hashCode();
      return result;
    }

    // Comparable
    /**
     * Compares the descriptor with another descriptor which is passed as a parameter. This compares
     * the content of the two descriptors and not the reference.
     * @param other The MTD to compare
     * @return 0 if the contents of the descriptors are exactly matching, a non-zero value if there
     *         is a mismatch in the contents
     */
    @Override
    public int compareTo(final ModifyableTableDescriptor other) {
      return TableDescriptor.COMPARATOR.compare(this, other);
    }

    @Override
    public ColumnFamilyDescriptor[] getColumnFamilies() {
      return families.values().toArray(new ColumnFamilyDescriptor[families.size()]);
    }

    /**
     * Returns the configured replicas per region
     */
    @Override
    public int getRegionReplication() {
      return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION);
    }

    /**
     * Sets the number of replicas per region.
     * @param regionReplication the replication factor per region
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionReplication(int regionReplication) {
      return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication));
    }

    /** Returns true if the read-replicas memstore replication is enabled. */
    @Override
    public boolean hasRegionMemStoreReplication() {
      return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf,
        DEFAULT_REGION_MEMSTORE_REPLICATION);
    }

    /**
     * Enable or disable the memstore replication from the primary region to the replicas. The
     * replication will be used only for meta operations (e.g. flush, compaction, ...)
     * @param memstoreReplication true if the new data written to the primary region should be
     *                            replicated. false if the secondaries can tolerate having new
     *                            data only when the primary flushes the memstore.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) {
      return setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication));
    }
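
    // Hedged sketch: a typical read-replica setup raises the replica count and leaves
    // memstore replication at its default (enabled), so secondaries see fresh data
    // between flushes:
    //
    //   td.setRegionReplication(3);
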
    public ModifyableTableDescriptor setPriority(int priority) {
      return setValue(PRIORITY_KEY, Integer.toString(priority));
    }

    @Override
    public int getPriority() {
      return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY);
    }

    /**
     * Returns all the column family names of the current table. The map of TableDescriptor contains
     * mapping of family name to ColumnFamilyDescriptor. This returns all the keys of the family map
     * which represents the column family names of the table.
     * @return Immutable sorted set of the keys of the families.
     */
    @Override
    public Set<byte[]> getColumnFamilyNames() {
      return Collections.unmodifiableSet(this.families.keySet());
    }

    /**
     * Returns the ColumnFamilyDescriptor for a specific column family with name as specified by the
     * parameter column.
     * @param column Column family name
     * @return Column descriptor for the passed family name, or null if the family is not present.
     */
    @Override
    public ColumnFamilyDescriptor getColumnFamily(final byte[] column) {
      return this.families.get(column);
    }

    /**
     * Removes the ColumnFamilyDescriptor with name specified by the parameter column from the table
     * descriptor
     * @param column Name of the column family to be removed.
     * @return Column descriptor for the removed family, or null if the family was not present.
     */
    public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) {
      return this.families.remove(column);
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
     * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
     * region is opened.
     * @param className Full class name.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCoprocessor(String className) throws IOException {
      return setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className)
        .setPriority(Coprocessor.PRIORITY_USER).build());
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
     * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
     * region is opened.
     * @throws IOException any illegal parameter key/value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) throws IOException {
      checkHasCoprocessor(cp.getClassName());
      if (cp.getPriority() < 0) {
        throw new IOException(
          "Priority must be greater than or equal to zero, current:" + cp.getPriority());
      }
      // Validate parameter kvs and then add key/values to kvString.
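      // For illustration, the value assembled below has the shape
      //   <jarPath>|<className>|<priority>|k1=v1,k2=v2
      // e.g. "hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2".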
      StringBuilder kvString = new StringBuilder();
      for (Map.Entry<String, String> e : cp.getProperties().entrySet()) {
        if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
          throw new IOException("Illegal parameter key = " + e.getKey());
        }
        if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
          throw new IOException("Illegal parameter (" + e.getKey() + ") value = " + e.getValue());
        }
        if (kvString.length() != 0) {
          kvString.append(',');
        }
        kvString.append(e.getKey());
        kvString.append('=');
        kvString.append(e.getValue());
      }

      String value = cp.getJarPath().orElse("") + "|" + cp.getClassName() + "|"
        + Integer.toString(cp.getPriority()) + "|" + kvString.toString();
      return setCoprocessorToMap(value);
    }

    private void checkHasCoprocessor(final String className) throws IOException {
      if (hasCoprocessor(className)) {
        throw new IOException("Coprocessor " + className + " already exists.");
      }
    }

    /**
     * Add coprocessor to values Map
     * @param specStr The coprocessor specification all in one String
     * @return Returns <code>this</code>
     */
    private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) {
      if (specStr == null) {
        return this;
      }
      // generate a coprocessor key
      int maxCoprocessorNumber = 0;
      Matcher keyMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        maxCoprocessorNumber =
          Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
      }
      maxCoprocessorNumber++;
      String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
      return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
    }

    /**
     * Check if the table has an attached co-processor represented by the name className
     * @param classNameToMatch - Class name of the co-processor
     * @return true if the table has a co-processor className
     */
    @Override
    public boolean hasCoprocessor(String classNameToMatch) {
      return getCoprocessorDescriptors().stream()
        .anyMatch(cp -> cp.getClassName().equals(classNameToMatch));
    }

    /**
     * Return the list of attached co-processors represented by their name className
     * @return The list of co-processors classNames
     */
    @Override
    public List<CoprocessorDescriptor> getCoprocessorDescriptors() {
      List<CoprocessorDescriptor> result = new ArrayList<>();
      for (Map.Entry<Bytes, Bytes> e : getValues().entrySet()) {
        String key = Bytes.toString(e.getKey().get()).trim();
        if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) {
          toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim()).ifPresent(result::add);
        }
      }
      return result;
    }

    /**
     * Remove a coprocessor from those set on the table
     * @param className Class name of the co-processor
     */
    public void removeCoprocessor(String className) {
      Bytes match = null;
      Matcher keyMatcher;
      Matcher valueMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        valueMatcher =
          CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes.toString(e.getValue().get()));
        if (!valueMatcher.matches()) {
          continue;
        }
        // get className and compare
        String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
        // remove the CP if it is present
        if (clazz.equals(className.trim())) {
          match = e.getKey();
          break;
        }
      }
      // if we found a match, remove it
      if (match != null) {
        ModifyableTableDescriptor.this.removeValue(match);
      } else {
        throw new IllegalArgumentException(String.format(
          "coprocessor with class name %s was not found in the table attribute", className));
      }
    }

    /** Returns the bytes in pb format */
    private byte[] toByteArray() {
      return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
    }

    /**
     * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic prefix
     * @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code>
     * @see #toByteArray()
     */
    private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
      if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
        throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor");
      }
      int pblen = ProtobufUtil.lengthOfPBMagic();
      HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
      try {
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        return ProtobufUtil.toTableDescriptor(builder.build());
      } catch (IOException e) {
        throw new DeserializationException(e);
      }
    }

    @Override
    public int getColumnFamilyCount() {
      return families.size();
    }

    @Override
    public Optional<String> getRegionServerGroup() {
      Bytes value = values.get(RSGROUP_KEY);
      if (value != null) {
        return Optional.of(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      } else {
        return Optional.empty();
      }
    }

    @Override
    public String getDescriptorHash() {
      if (descriptorHash == null) {
        descriptorHash = TableDescriptor.super.getDescriptorHash();
      }
      return descriptorHash;
    }
  }
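
  // A worked illustration (the spec value is taken from the CP_HTD_ATTR_VALUE_PATTERN
  // Javadoc): parsing
  //
  //   "hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2"
  //
  // yields jarPath "hdfs:///foo.jar", className "com.foo.FooRegionObserver", priority 1001
  // and properties {arg1=1, arg2=2}.
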
  /**
   * This method is mostly intended for internal use. However, it is also relied on by hbase-shell
   * for backwards compatibility.
   */
  private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) {
    Matcher matcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    if (matcher.matches()) {
      // jar file path can be empty if the cp class can be loaded
      // from class loader.
      String path = matcher.group(1).trim().isEmpty() ? null : matcher.group(1).trim();
      String className = matcher.group(2).trim();
      if (className.isEmpty()) {
        return Optional.empty();
      }
      String priorityStr = matcher.group(3).trim();
      int priority =
        priorityStr.isEmpty() ? Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr);
      String cfgSpec = null;
      try {
        cfgSpec = matcher.group(4);
      } catch (IndexOutOfBoundsException ex) {
        // ignore
      }
      Map<String, String> ourConf = new TreeMap<>();
      if (cfgSpec != null && !cfgSpec.trim().equals("|")) {
        cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1);
        Matcher m = CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec);
        while (m.find()) {
          ourConf.put(m.group(1), m.group(2));
        }
      }
      return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path)
        .setPriority(priority).setProperties(ourConf).build());
    }
    return Optional.empty();
  }
}