/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

/**
 * Convenience class for composing an instance of {@link TableDescriptor}.
 * @since 2.0.0
 */
@InterfaceAudience.Public
public class TableDescriptorBuilder {
  public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class);
  @InterfaceAudience.Private
  public static final String SPLIT_POLICY = "SPLIT_POLICY";
  private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY));
  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes the maximum size of the store file after which a
   * region split occurs.
   */
  @InterfaceAudience.Private
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final Bytes MAX_FILESIZE_KEY
      = new Bytes(Bytes.toBytes(MAX_FILESIZE));

  @InterfaceAudience.Private
  public static final String OWNER = "OWNER";
  @InterfaceAudience.Private
  public static final Bytes OWNER_KEY
      = new Bytes(Bytes.toBytes(OWNER));

  /**
   * Used by rest interface to access this metadata attribute
   * which denotes if the table is read-only.
   */
  @InterfaceAudience.Private
  public static final String READONLY = "READONLY";
  private static final Bytes READONLY_KEY
      = new Bytes(Bytes.toBytes(READONLY));
  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is compaction enabled.
   */
  @InterfaceAudience.Private
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final Bytes COMPACTION_ENABLED_KEY
      = new Bytes(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which represents the maximum size of the memstore after which its
   * contents are flushed onto the disk.
   */
  @InterfaceAudience.Private
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final Bytes MEMSTORE_FLUSHSIZE_KEY
      = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  @InterfaceAudience.Private
  public static final String FLUSH_POLICY = "FLUSH_POLICY";
  private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY));
  /**
   * Used by rest interface to access this metadata attribute
   * which denotes if it is a catalog table, i.e. <code>hbase:meta</code>.
   */
  @InterfaceAudience.Private
  public static final String IS_META = "IS_META";
  private static final Bytes IS_META_KEY
      = new Bytes(Bytes.toBytes(IS_META));

  /**
   * {@link Durability} setting for the table.
   */
  @InterfaceAudience.Private
  public static final String DURABILITY = "DURABILITY";
  private static final Bytes DURABILITY_KEY
      = new Bytes(Bytes.toBytes("DURABILITY"));

  /**
   * The number of region replicas for the table.
   */
  @InterfaceAudience.Private
  public static final String REGION_REPLICATION = "REGION_REPLICATION";
  private static final Bytes REGION_REPLICATION_KEY
      = new Bytes(Bytes.toBytes(REGION_REPLICATION));

  /**
   * The flag to indicate whether or not the memstore should be
   * replicated for read-replicas (CONSISTENCY => TIMELINE).
   */
  @InterfaceAudience.Private
  public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
  private static final Bytes REGION_MEMSTORE_REPLICATION_KEY
      = new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));

  private static final Bytes REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY
      = new Bytes(Bytes.toBytes(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY));
  /**
   * Used by shell/rest interface to access this metadata
   * attribute which denotes if the table should be handled by the region
   * normalizer.
   */
  @InterfaceAudience.Private
  public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
  private static final Bytes NORMALIZATION_ENABLED_KEY
      = new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));

  /**
   * Default durability for HTD is USE_DEFAULT, which defers to the
   * HBase-global default value.
   */
  private static final Durability DEFAULT_DURABILITY = Durability.USE_DEFAULT;

  @InterfaceAudience.Private
  public static final String PRIORITY = "PRIORITY";
  private static final Bytes PRIORITY_KEY
      = new Bytes(Bytes.toBytes(PRIORITY));

  /**
   * Relative priority of the table used for rpc scheduling.
   */
  private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS;

  /**
   * Constant that denotes whether the table is READONLY by default; false.
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default.
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes whether the table is normalized by default.
   */
  public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;

  /**
   * Constant that denotes the maximum default size of the memstore in bytes after which
   * the contents are flushed to the store files.
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;

  private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
  private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();

  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
        String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
        String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABILITY.name()); // use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
    DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
    DEFAULT_VALUES.keySet().stream()
        .map(s -> new Bytes(Bytes.toBytes(s))).forEach(RESERVED_KEYWORDS::add);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }

  @InterfaceAudience.Private
  public final static String NAMESPACE_FAMILY_INFO = "info";
  @InterfaceAudience.Private
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  @InterfaceAudience.Private
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
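  // Illustrative note (a sketch, not part of the API): the reserved keys above
  // surface as table attributes in the HBase shell. For example (table name 't1'
  // and the size are placeholders),
  //   alter 't1', MAX_FILESIZE => '10737418240'
  // records the same metadata as
  //   TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
  //       .setMaxFileSize(10737418240L).build();
  // Keys in RESERVED_KEYWORDS are managed through such dedicated setters rather
  // than as arbitrary user metadata.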
  /**
   * <pre>
   * Pattern that matches a coprocessor specification. Form is:
   * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]}
   * where arguments are {@code <KEY> '=' <VALUE> [,...]}
   * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
   * </pre>
   */
  private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
      Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");

  private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
  private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
  private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile(
      "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" +
      CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
  private static final Pattern CP_HTD_ATTR_KEY_PATTERN =
      Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);

  /**
   * Table descriptor for the namespace table.
   */
  // TODO We used to set CacheDataInL1 for the NS table. When we have BucketCache in file mode,
  // the NS data now goes to file-mode BC only. Test how that affects the system. If the impact
  // is too big, we have to rethink adding back setCacheDataInL1 for the NS table.
  public static final TableDescriptor NAMESPACE_TABLEDESC
    = TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES)
        // Ten is an arbitrary number. Keep versions to help debugging.
        .setMaxVersions(10)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
        .build())
      .build();
  private final ModifyableTableDescriptor desc;

  /**
   * @param desc The table descriptor to serialize
   * @return This instance serialized with pb with pb magic prefix
   */
  public static byte[] toByteArray(TableDescriptor desc) {
    if (desc instanceof ModifyableTableDescriptor) {
      return ((ModifyableTableDescriptor) desc).toByteArray();
    }
    return new ModifyableTableDescriptor(desc).toByteArray();
  }

  /**
   * The input should be created by {@link #toByteArray}.
   * @param pbBytes A pb serialized TableDescriptor instance with pb magic prefix
   * @return A deserialized {@link TableDescriptor} instance
   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if the input cannot be parsed
   */
  public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException {
    return ModifyableTableDescriptor.parseFrom(pbBytes);
  }

  public static TableDescriptorBuilder newBuilder(final TableName name) {
    return new TableDescriptorBuilder(name);
  }

  public static TableDescriptor copy(TableDescriptor desc) {
    return new ModifyableTableDescriptor(desc);
  }

  public static TableDescriptor copy(TableName name, TableDescriptor desc) {
    return new ModifyableTableDescriptor(name, desc);
  }
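  // A minimal round-trip sketch for the helpers above; the table name "demo" and
  // family "f" are illustrative:
  //   TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
  //       .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
  //       .build();
  //   byte[] pb = TableDescriptorBuilder.toByteArray(td);      // pb-magic-prefixed bytes
  //   TableDescriptor back = TableDescriptorBuilder.parseFrom(pb);
  //   assert TableDescriptor.COMPARATOR.compare(td, back) == 0;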
  /**
   * Copy all values, families, and name from the input.
   * @param desc The descriptor to copy
   * @return A clone of input
   */
  public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) {
    return new TableDescriptorBuilder(desc);
  }

  private TableDescriptorBuilder(final TableName name) {
    this.desc = new ModifyableTableDescriptor(name);
  }

  private TableDescriptorBuilder(final TableDescriptor desc) {
    this.desc = new ModifyableTableDescriptor(desc);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #setCoprocessor(String)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className) throws IOException {
    return addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className, Path jarFilePath,
      int priority, final Map<String, String> kvs) throws IOException {
    desc.setCoprocessor(
      CoprocessorDescriptorBuilder.newBuilder(className)
        .setJarPath(jarFilePath == null ? null : jarFilePath.toString())
        .setPriority(priority)
        .setProperties(kvs == null ? Collections.emptyMap() : kvs)
        .build());
    return this;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessorWithSpec(final String specStr) throws IOException {
    desc.setCoprocessorWithSpec(specStr);
    return this;
  }
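  // A sketch of the non-deprecated replacement for the addCoprocessor* methods
  // above; the jar path, class name, and property are placeholders:
  //   builder.setCoprocessor(
  //       CoprocessorDescriptorBuilder.newBuilder("com.example.MyRegionObserver")
  //           .setJarPath("hdfs:///cp/my-cp.jar")
  //           .setPriority(Coprocessor.PRIORITY_USER)
  //           .setProperties(Collections.singletonMap("arg1", "1"))
  //           .build());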
  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #setColumnFamily(ColumnFamilyDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(family);
    return this;
  }

  public TableDescriptorBuilder setCoprocessor(String className) throws IOException {
    return setCoprocessor(CoprocessorDescriptorBuilder.of(className));
  }

  public TableDescriptorBuilder setCoprocessor(CoprocessorDescriptor cpDesc) throws IOException {
    desc.setCoprocessor(Objects.requireNonNull(cpDesc));
    return this;
  }

  public TableDescriptorBuilder setCoprocessors(Collection<CoprocessorDescriptor> cpDescs)
      throws IOException {
    for (CoprocessorDescriptor cpDesc : cpDescs) {
      desc.setCoprocessor(cpDesc);
    }
    return this;
  }

  public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder setColumnFamilies(
      final Collection<ColumnFamilyDescriptor> families) {
    families.forEach(desc::setColumnFamily);
    return this;
  }

  public TableDescriptorBuilder modifyColumnFamily(final ColumnFamilyDescriptor family) {
    desc.modifyColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder removeValue(Bytes key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(byte[] key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeColumnFamily(final byte[] name) {
    desc.removeColumnFamily(name);
    return this;
  }

  public TableDescriptorBuilder removeCoprocessor(String className) {
    desc.removeCoprocessor(className);
    return this;
  }

  public TableDescriptorBuilder setCompactionEnabled(final boolean isEnable) {
    desc.setCompactionEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setDurability(Durability durability) {
    desc.setDurability(durability);
    return this;
  }

  public TableDescriptorBuilder setFlushPolicyClassName(String clazz) {
    desc.setFlushPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(long maxFileSize) {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) {
    desc.setMemStoreFlushSize(memstoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) {
    desc.setNormalizationEnabled(isEnable);
    return this;
  }

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwner(User owner) {
    desc.setOwner(owner);
    return this;
  }
  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwnerString(String ownerString) {
    desc.setOwnerString(ownerString);
    return this;
  }

  public TableDescriptorBuilder setPriority(int priority) {
    desc.setPriority(priority);
    return this;
  }

  public TableDescriptorBuilder setReadOnly(final boolean readOnly) {
    desc.setReadOnly(readOnly);
    return this;
  }

  public TableDescriptorBuilder setRegionMemStoreReplication(boolean memstoreReplication) {
    desc.setRegionMemStoreReplication(memstoreReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionReplication(int regionReplication) {
    desc.setRegionReplication(regionReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionSplitPolicyClassName(String clazz) {
    desc.setRegionSplitPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setValue(final String key, final String value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final Bytes key, final Bytes value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final byte[] key, final byte[] value) {
    desc.setValue(key, value);
    return this;
  }

  /**
   * Sets the replication scope for all of (and only) the column families already
   * present in this builder. Column families added later are not backfilled with
   * this replication scope.
   * @param scope replication scope
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setReplicationScope(int scope) {
    Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    newFamilies.putAll(desc.families);
    newFamilies
      .forEach((cf, cfDesc) -> {
        desc.removeColumnFamily(cf);
        desc.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope)
          .build());
      });
    return this;
  }
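  // Sketch of the "no backfill" caveat documented above; family names are
  // illustrative:
  //   TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t"))
  //       .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1"))
  //       .setReplicationScope(HConstants.REPLICATION_SCOPE_GLOBAL) // rewrites f1 only
  //       .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f2"))  // f2 keeps its default scope
  //       .build();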
  public TableDescriptor build() {
    return new ModifyableTableDescriptor(desc);
  }

  /**
   * TODO: make this private after removing the HTableDescriptor
   */
  @InterfaceAudience.Private
  public static class ModifyableTableDescriptor
      implements TableDescriptor, Comparable<ModifyableTableDescriptor> {

    private final TableName name;

    /**
     * A map which holds the metadata information of the table. This metadata
     * includes values like IS_META, SPLIT_POLICY, MAX_FILE_SIZE,
     * READONLY, MEMSTORE_FLUSHSIZE etc...
     */
    private final Map<Bytes, Bytes> values = new HashMap<>();

    /**
     * Maps column family name to the respective ColumnFamilyDescriptor.
     */
    private final Map<byte[], ColumnFamilyDescriptor> families
        = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);

    /**
     * Construct a table descriptor specifying a TableName object.
     *
     * @param name Table name.
     * TODO: make this private after removing the HTableDescriptor
     */
    @InterfaceAudience.Private
    public ModifyableTableDescriptor(final TableName name) {
      this(name, Collections.emptyList(), Collections.emptyMap());
    }

    private ModifyableTableDescriptor(final TableDescriptor desc) {
      this(desc.getTableName(), Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    /**
     * Construct a table descriptor by cloning the descriptor passed as a
     * parameter.
     * <p>
     * Makes a deep copy of the supplied descriptor.
     * @param name The new name
     * @param desc The descriptor.
     * TODO: make this private after removing the HTableDescriptor
     */
    @InterfaceAudience.Private
    @Deprecated // only used by HTableDescriptor. remove this method if HTD is removed
    public ModifyableTableDescriptor(final TableName name, final TableDescriptor desc) {
      this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    private ModifyableTableDescriptor(final TableName name,
        final Collection<ColumnFamilyDescriptor> families, Map<Bytes, Bytes> values) {
      this.name = name;
      families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c)));
      this.values.putAll(values);
      this.values.put(IS_META_KEY,
        new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME)))));
    }

    /**
     * Checks if this table is the <code>hbase:meta</code> region.
     *
     * @return true if this table is the <code>hbase:meta</code> region
     */
    @Override
    public boolean isMetaRegion() {
      return getOrDefault(IS_META_KEY, Boolean::valueOf, false);
    }

    /**
     * Checks if the table is a <code>hbase:meta</code> table.
     *
     * @return true if table is the <code>hbase:meta</code> region.
     */
    @Override
    public boolean isMetaTable() {
      return isMetaRegion();
    }

    @Override
    public Bytes getValue(Bytes key) {
      Bytes rval = values.get(key);
      return rval == null ? null : new Bytes(rval.copyBytes());
    }

    @Override
    public String getValue(String key) {
      Bytes rval = values.get(new Bytes(Bytes.toBytes(key)));
      return rval == null ? null : Bytes.toString(rval.get(), rval.getOffset(), rval.getLength());
    }

    @Override
    public byte[] getValue(byte[] key) {
      Bytes value = values.get(new Bytes(key));
      return value == null ? null : value.copyBytes();
    }

    private <T> T getOrDefault(Bytes key, Function<String, T> function, T defaultValue) {
      Bytes value = values.get(key);
      if (value == null) {
        return defaultValue;
      } else {
        return function.apply(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      }
    }

    /**
     * Getter for fetching an unmodifiable {@link #values} map.
     *
     * @return unmodifiable map {@link #values}.
     * @see #values
     */
    @Override
    public Map<Bytes, Bytes> getValues() {
      // shallow pointer copy
      return Collections.unmodifiableMap(values);
    }

    /**
     * Setter for storing metadata as a (key, value) pair in the {@link #values} map.
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     * @return the modifiable TD
     * @see #values
     */
    public ModifyableTableDescriptor setValue(byte[] key, byte[] value) {
      return setValue(toBytesOrNull(key, v -> v),
          toBytesOrNull(value, v -> v));
    }

    public ModifyableTableDescriptor setValue(String key, String value) {
      return setValue(toBytesOrNull(key, Bytes::toBytes),
          toBytesOrNull(value, Bytes::toBytes));
    }

    /*
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    private ModifyableTableDescriptor setValue(final Bytes key,
        final String value) {
      return setValue(key, toBytesOrNull(value, Bytes::toBytes));
    }
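    // Sketch of the metadata-map semantics implemented above: values are stored
    // as Bytes, a null value removes the entry, and the getters return defensive
    // copies. Key "ENV" is an illustrative user attribute:
    //   ModifyableTableDescriptor td = new ModifyableTableDescriptor(TableName.valueOf("t"));
    //   td.setValue("ENV", "prod");    // stored under new Bytes(Bytes.toBytes("ENV"))
    //   String v = td.getValue("ENV"); // "prod"
    //   td.setValue("ENV", null);      // null value removes the entry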
    /*
     * Setter for storing metadata as a (key, value) pair in the {@link #values} map.
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
      if (value == null) {
        values.remove(key);
      } else {
        values.put(key, value);
      }
      return this;
    }

    private static <T> Bytes toBytesOrNull(T t, Function<T, byte[]> f) {
      if (t == null) {
        return null;
      } else {
        return new Bytes(f.apply(t));
      }
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map.
     *
     * @param key Key whose entry is to be removed from the TableDescriptor
     * parameters.
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor removeValue(Bytes key) {
      return setValue(key, (Bytes) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map.
     *
     * @param key Key whose entry is to be removed from the TableDescriptor
     * parameters.
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor removeValue(final byte[] key) {
      return removeValue(new Bytes(key));
    }

    /**
     * Check if the readOnly flag of the table is set. If the readOnly flag is
     * set then the contents of the table can only be read from but not
     * modified.
     *
     * @return true if all columns in the table should be read only
     */
    @Override
    public boolean isReadOnly() {
      return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY);
    }

    /**
     * Setting the table as read only sets all the columns in the table as read
     * only. By default all tables are modifiable, but if the readOnly flag is
     * set to true then the contents of the table can only be read but not
     * modified.
     *
     * @param readOnly True if all of the columns in the table should be read
     * only.
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor setReadOnly(final boolean readOnly) {
      return setValue(READONLY_KEY, Boolean.toString(readOnly));
    }

    /**
     * Check if the compaction enable flag of the table is true. If the flag is
     * false, no minor/major compactions will be performed.
     *
     * @return true if table compaction is enabled
     */
    @Override
    public boolean isCompactionEnabled() {
      return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED);
    }

    /**
     * Setting the table compaction enable flag.
     *
     * @param isEnable True if enable compaction.
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) {
      return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the normalization enable flag of the table is true. If the flag
     * is false, the region normalizer will not attempt to normalize this table.
     *
     * @return true if region normalization is enabled for this table
     */
    @Override
    public boolean isNormalizationEnabled() {
      return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf,
        DEFAULT_NORMALIZATION_ENABLED);
    }

    /**
     * Setting the table normalization enable flag.
     *
     * @param isEnable True if enable normalization.
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) {
      return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable));
    }
    /**
     * Sets the {@link Durability} setting for the table. This defaults to
     * Durability.USE_DEFAULT.
     *
     * @param durability enum value
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor setDurability(Durability durability) {
      return setValue(DURABILITY_KEY, durability.name());
    }

    /**
     * Returns the durability setting for the table.
     *
     * @return durability setting for the table.
     */
    @Override
    public Durability getDurability() {
      return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABILITY);
    }

    /**
     * Get the name of the table.
     *
     * @return TableName
     */
    @Override
    public TableName getTableName() {
      return name;
    }

    /**
     * This sets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy.
     *
     * @param clazz the class name
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) {
      return setValue(SPLIT_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy.
     *
     * @return the class name of the region split policy for this table. If this
     * returns null, the default split policy is used.
     */
    @Override
    public String getRegionSplitPolicyClassName() {
      return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Returns the maximum size up to which a region can grow, after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region.
     *
     * @return max hregion size for table, -1 if not set.
     *
     * @see #setMaxFileSize(long)
     */
    @Override
    public long getMaxFileSize() {
      return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Sets the maximum size up to which a region can grow, after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region, i.e. if the biggest store file
     * grows beyond the maxFileSize, then the region split is triggered. If not
     * set explicitly, this defaults to {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
     * <p>
     * This is not an absolute value and might vary. If a single row exceeds
     * the maxFileSize, the store file size will be greater than maxFileSize,
     * since a single row cannot be split across multiple regions.
     * </p>
     *
     * @param maxFileSize The maximum file size that a store file can grow to
     * before a split is triggered.
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) {
      return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    }

    /**
     * Returns the size of the memstore after which a flush to filesystem is
     * triggered.
     *
     * @return memory cache flush size for each hregion, -1 if not set.
     *
     * @see #setMemStoreFlushSize(long)
     */
    @Override
    public long getMemStoreFlushSize() {
      return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1);
    }
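    // Sketch: configuring a 10 GB split threshold (the figure is illustrative,
    // not a recommendation):
    //   td.setMaxFileSize(10L * 1024 * 1024 * 1024);
    // Until set explicitly, getMaxFileSize() returns -1 and region servers
    // typically fall back to the cluster-wide hbase.hregion.max.filesize.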
    /**
     * Sets the maximum size of the memstore after which the contents of the
     * memstore are flushed to the filesystem. This defaults to
     * {@link #DEFAULT_MEMSTORE_FLUSH_SIZE} (128 MB).
     *
     * @param memstoreFlushSize memory cache flush size for each hregion
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
      return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    }

    /**
     * This sets the class associated with the flush policy which determines
     * which stores need to be flushed when flushing a region. The class used by
     * default is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy.
     *
     * @param clazz the class name
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) {
      return setValue(FLUSH_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the flush policy which determines
     * which stores need to be flushed when flushing a region. The class used by
     * default is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy.
     *
     * @return the class name of the flush policy for this table. If this
     * returns null, the default flush policy is used.
     */
    @Override
    public String getFlushPolicyClassName() {
      return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Adds a column family. For updating an existing family, use
     * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead.
     *
     * @param family to add.
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      if (hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException("Family '"
            + family.getNameAsString() + "' already exists so cannot be added");
      }
      return putColumnFamily(family);
    }

    /**
     * Modifies the existing column family.
     *
     * @param family to update
     * @return this (for chained invocation)
     */
    public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      if (!hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException("Column family '" + family.getNameAsString()
            + "' does not exist");
      }
      return putColumnFamily(family);
    }

    private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) {
      families.put(family.getName(), family);
      return this;
    }

    /**
     * Checks to see if this table contains the given column family.
     *
     * @param familyName Family name or column name.
     * @return true if the table contains the specified family name
     */
    @Override
    public boolean hasColumnFamily(final byte[] familyName) {
      return families.containsKey(familyName);
    }
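    // Sketch of the add-vs-modify contract above (family name "f" is illustrative):
    //   td.setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"));   // ok: adds new family
    //   td.setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"));   // IllegalArgumentException: exists
    //   td.modifyColumnFamily(ColumnFamilyDescriptorBuilder          // ok: replaces existing "f"
    //       .newBuilder(Bytes.toBytes("f")).setMaxVersions(3).build());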
    /**
     * @return Name of this table and then a map of all of the column family descriptors.
     */
    @Override
    public String toString() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(true));
      families.values().forEach(f -> s.append(", ").append(f));
      return s.toString();
    }

    /**
     * @return Name of this table and then a map of all of the column family
     * descriptors (with only the non-default column family attributes)
     */
    public String toStringCustomizedValues() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(false));
      families.values().forEach(hcd -> s.append(", ").append(hcd.toStringCustomizedValues()));
      return s.toString();
    }

    /**
     * @return map of all table attributes formatted into a string.
     */
    public String toStringTableAttributes() {
      return getValues(true).toString();
    }
    private StringBuilder getValues(boolean printDefaults) {
      StringBuilder s = new StringBuilder();

      // step 1: partition the keys into reserved and user keys, pruning defaults
      Set<Bytes> reservedKeys = new TreeSet<>();
      Set<Bytes> userKeys = new TreeSet<>();
      for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
        if (entry.getKey() == null || entry.getKey().get() == null) {
          continue;
        }
        String key = Bytes.toString(entry.getKey().get());
        // in this section, print out reserved keywords + coprocessor info
        if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
          userKeys.add(entry.getKey());
          continue;
        }
        // only print out IS_META if true
        String value = Bytes.toString(entry.getValue().get());
        if (key.equalsIgnoreCase(IS_META) && !Boolean.parseBoolean(value)) {
          continue;
        }
        // see if a reserved key is a default value; if so, we may not want to print it out
        if (printDefaults
            || !DEFAULT_VALUES.containsKey(key)
            || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
          reservedKeys.add(entry.getKey());
        }
      }

      // early exit optimization
      if (reservedKeys.isEmpty() && userKeys.isEmpty()) {
        return s;
      }

      // step 2: printing attributes
      s.append(", {");
      s.append("TABLE_ATTRIBUTES => {");

      // print all reserved keys first
      boolean printCommaForAttr = false;
      for (Bytes k : reservedKeys) {
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printCommaForAttr) {
          s.append(", ");
        }
        printCommaForAttr = true;
        s.append(key);
        s.append(" => ");
        s.append('\'').append(value).append('\'');
      }

      if (!userKeys.isEmpty()) {
        // print all non-reserved keys as a separate subset
        if (printCommaForAttr) {
          s.append(", ");
        }
        s.append(HConstants.METADATA).append(" => ");
        s.append("{");
        boolean printCommaForCfg = false;
        for (Bytes k : userKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForCfg) {
            s.append(", ");
          }
          printCommaForCfg = true;
          s.append('\'').append(key).append('\'');
          s.append(" => ");
          s.append('\'').append(value).append('\'');
        }
        s.append("}");
      }

      s.append("}"); // end TABLE_ATTRIBUTES
      return s;
    }

    /**
     * Compare the contents of the descriptor with another one passed as a
     * parameter. Checks if the obj passed is an instance of ModifyableTableDescriptor;
     * if yes, then the contents of the descriptors are compared.
     *
     * @param obj The object to compare
     * @return true if the contents of the two descriptors exactly match
     *
     * @see java.lang.Object#equals(java.lang.Object)
     */
    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (obj instanceof ModifyableTableDescriptor) {
        return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0;
      }
      return false;
    }

    /**
     * @return hash code
     */
    @Override
    public int hashCode() {
      int result = this.name.hashCode();
      if (this.families.size() > 0) {
        for (ColumnFamilyDescriptor e : this.families.values()) {
          result ^= e.hashCode();
        }
      }
      result ^= values.hashCode();
      return result;
    }
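    // Illustrative rendering of toString()/getValues(true) for a table 't1' with
    // one family 'f' (the exact attribute set depends on what was configured):
    //   't1', {TABLE_ATTRIBUTES => {READONLY => 'false', ...}, {NAME => 'f', ...}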
    // Comparable
    /**
     * Compares the descriptor with another descriptor which is passed as a
     * parameter. This compares the content of the two descriptors and not the
     * reference.
     *
     * @param other The MTD to compare
     * @return 0 if the contents of the descriptors match exactly; a non-zero
     * value otherwise, as defined by {@link TableDescriptor#COMPARATOR}
     */
    @Override
    public int compareTo(final ModifyableTableDescriptor other) {
      return TableDescriptor.COMPARATOR.compare(this, other);
    }

    @Override
    public ColumnFamilyDescriptor[] getColumnFamilies() {
      return families.values().toArray(new ColumnFamilyDescriptor[families.size()]);
    }

    /**
     * Returns the configured replicas per region.
     */
    @Override
    public int getRegionReplication() {
      return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION);
    }

    /**
     * Sets the number of replicas per region.
     *
     * @param regionReplication the replication factor per region
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor setRegionReplication(int regionReplication) {
      return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication));
    }

    /**
     * @return true if the read-replicas memstore replication is enabled.
     */
    @Override
    public boolean hasRegionMemStoreReplication() {
      return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf,
        DEFAULT_REGION_MEMSTORE_REPLICATION);
    }

    /**
     * Enable or disable the memstore replication from the primary region to the
     * replicas. The replication will be used only for meta operations (e.g.
     * flush, compaction, ...).
     *
     * @param memstoreReplication true if the new data written to the primary
     * region should be replicated; false if the secondaries can tolerate having
     * new data only when the primary flushes the memstore.
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) {
      setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication));
      // If memstore replication is set up, we do not have to wait for observing a flush event
      // from the primary before starting to serve reads, because gaps from replication are not
      // applicable.
      return setValue(REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
          Boolean.toString(memstoreReplication));
    }
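    // Sketch: a table with three replicas per region; with memstore replication
    // enabled (the default), secondaries can serve TIMELINE-consistency reads of
    // un-flushed data:
    //   td.setRegionReplication(3);
    //   td.setRegionMemStoreReplication(true); // also records the wait-for-primary-flush key above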
    public ModifyableTableDescriptor setPriority(int priority) {
      return setValue(PRIORITY_KEY, Integer.toString(priority));
    }

    @Override
    public int getPriority() {
      return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY);
    }

    /**
     * Returns all the column family names of the current table. The map of
     * TableDescriptor contains a mapping of family name to ColumnFamilyDescriptor.
     * This returns all the keys of the family map, which represent the column
     * family names of the table.
     *
     * @return Immutable sorted set of the keys of the families.
     */
    @Override
    public Set<byte[]> getColumnFamilyNames() {
      return Collections.unmodifiableSet(this.families.keySet());
    }

    /**
     * Returns the ColumnFamilyDescriptor for a specific column family with name as
     * specified by the parameter column.
     *
     * @param column Column family name
     * @return Column descriptor for the passed family name, or null if the
     * family is not present in this table.
     */
    @Override
    public ColumnFamilyDescriptor getColumnFamily(final byte[] column) {
      return this.families.get(column);
    }

    /**
     * Removes the ColumnFamilyDescriptor with name specified by the parameter column
     * from the table descriptor.
     *
     * @param column Name of the column family to be removed.
     * @return Column descriptor of the removed family, or null if the family
     * was not present.
     */
    public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) {
      return this.families.remove(column);
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @param className Full class name.
     * @throws IOException if the coprocessor is already registered
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor setCoprocessor(String className) throws IOException {
      return setCoprocessor(
        CoprocessorDescriptorBuilder.newBuilder(className).setPriority(Coprocessor.PRIORITY_USER)
          .build());
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @throws IOException any illegal parameter key/value
     * @return the modifiable TD
     */
    public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp)
        throws IOException {
      checkHasCoprocessor(cp.getClassName());
      if (cp.getPriority() < 0) {
        throw new IOException("Priority must be greater than or equal to zero, current: "
            + cp.getPriority());
      }
      // Validate parameter kvs and then add key/values to kvString.
      StringBuilder kvString = new StringBuilder();
      for (Map.Entry<String, String> e : cp.getProperties().entrySet()) {
        if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
          throw new IOException("Illegal parameter key = " + e.getKey());
        }
        if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
          throw new IOException("Illegal parameter (" + e.getKey()
              + ") value = " + e.getValue());
        }
        if (kvString.length() != 0) {
          kvString.append(',');
        }
        kvString.append(e.getKey());
        kvString.append('=');
        kvString.append(e.getValue());
      }

      String value = cp.getJarPath().orElse("")
          + "|" + cp.getClassName() + "|" + Integer.toString(cp.getPriority()) + "|"
          + kvString.toString();
      return setCoprocessorToMap(value);
    }
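    // Sketch of the serialized form produced above: for a descriptor with a jar
    // path, class name, priority 1001 and one property (all values illustrative),
    // the stored value looks like
    //   hdfs:///cp/my-cp.jar|com.example.MyRegionObserver|1001|arg1=1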
    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @param specStr The coprocessor specification all in one String
     * @throws IOException if the coprocessor is already registered
     * @return the modifiable TD
     * @deprecated used by HTableDescriptor and admin.rb.
     *             As of release 2.0.0, this will be removed in HBase 3.0.0.
     */
    @Deprecated
    public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr)
        throws IOException {
      CoprocessorDescriptor cpDesc = toCoprocessorDescriptor(specStr).orElseThrow(
        () -> new IllegalArgumentException(
          "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr));
      checkHasCoprocessor(cpDesc.getClassName());
      return setCoprocessorToMap(specStr);
    }

    private void checkHasCoprocessor(final String className) throws IOException {
      if (hasCoprocessor(className)) {
        throw new IOException("Coprocessor " + className + " already exists.");
      }
    }

    /**
     * Add coprocessor to values Map.
     * @param specStr The coprocessor specification all in one String
     * @return Returns <code>this</code>
     */
    private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) {
      if (specStr == null) {
        return this;
      }
      // generate a coprocessor key
      int maxCoprocessorNumber = 0;
      Matcher keyMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        maxCoprocessorNumber =
            Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
      }
      maxCoprocessorNumber++;
      String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
      return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
    }
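    // Sketch of the key generation above: each spec is stored under the next free
    // "coprocessor$<N>" key, so two successive registrations yield entries such as
    //   coprocessor$1 => '|com.example.CpOne|1001|'  (empty jar path: classpath loading)
    //   coprocessor$2 => 'hdfs:///cp.jar|com.example.CpTwo|1002|'
    // (class names and paths are placeholders).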
    /**
     * Check if the table has an attached coprocessor represented by the name
     * className.
     *
     * @param classNameToMatch - Class name of the coprocessor
     * @return true if the table has a coprocessor className
     */
    @Override
    public boolean hasCoprocessor(String classNameToMatch) {
      return getCoprocessorDescriptors().stream().anyMatch(cp -> cp.getClassName()
        .equals(classNameToMatch));
    }

    /**
     * Return the list of attached coprocessors represented by their class
     * names.
     *
     * @return The list of coprocessor classNames
     */
    @Override
    public List<CoprocessorDescriptor> getCoprocessorDescriptors() {
      List<CoprocessorDescriptor> result = new ArrayList<>();
      for (Map.Entry<Bytes, Bytes> e : getValues().entrySet()) {
        String key = Bytes.toString(e.getKey().get()).trim();
        if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) {
          toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim())
            .ifPresent(result::add);
        }
      }
      return result;
    }

    /**
     * Remove a coprocessor from those set on the table.
     *
     * @param className Class name of the coprocessor
     */
    public void removeCoprocessor(String className) {
      Bytes match = null;
      Matcher keyMatcher;
      Matcher valueMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes.toString(e.getValue().get()));
        if (!valueMatcher.matches()) {
          continue;
        }
        // get className and compare
        String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
        // remove the CP if it is present
        if (clazz.equals(className.trim())) {
          match = e.getKey();
          break;
        }
      }
      // if we found a match, remove it
      if (match != null) {
        ModifyableTableDescriptor.this.removeValue(match);
      }
    }

    /**
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    @Deprecated
    public ModifyableTableDescriptor setOwner(User owner) {
      return setOwnerString(owner != null ? owner.getShortName() : null);
    }

    /**
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    // used by admin.rb:alter(table_name,*args) to update owner.
    @Deprecated
    public ModifyableTableDescriptor setOwnerString(String ownerString) {
      return setValue(OWNER_KEY, ownerString);
    }

    /**
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    @Override
    @Deprecated
    public String getOwnerString() {
      // Note that every table should have an owner (i.e. should have OWNER_KEY set).
      // hbase:meta should return the system user as owner, not null (see
      // MasterFileSystem.java:bootstrap()).
      return getOrDefault(OWNER_KEY, Function.identity(), null);
    }

    /**
     * @return the bytes in pb format
     */
    private byte[] toByteArray() {
      return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
    }

    /**
     * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance
     * with pb magic prefix
     * @return An instance of {@link ModifyableTableDescriptor} made from
     * <code>bytes</code>
     * @throws DeserializationException if the pb payload cannot be parsed
     * @see #toByteArray()
     */
    private static TableDescriptor parseFrom(final byte[] bytes)
        throws DeserializationException {
      if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
        throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor");
      }
      int pblen = ProtobufUtil.lengthOfPBMagic();
      HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
      try {
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        return ProtobufUtil.toTableDescriptor(builder.build());
      } catch (IOException e) {
        throw new DeserializationException(e);
      }
    }

    @Override
    public int getColumnFamilyCount() {
      return families.size();
    }
  }
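  // Sketch of what the parser below accepts (values illustrative): given
  //   "hdfs:///cp.jar|com.example.Cp|1001|arg1=1,arg2=2"
  // it yields jarPath "hdfs:///cp.jar", className "com.example.Cp", priority 1001,
  // and properties {arg1=1, arg2=2}; an empty jar path or priority falls back to
  // classpath loading and Coprocessor.PRIORITY_USER respectively.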
  private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) {
    Matcher matcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    if (matcher.matches()) {
      // jar file path can be empty if the cp class can be loaded
      // from the class loader.
      String path = matcher.group(1).trim().isEmpty() ?
        null : matcher.group(1).trim();
      String className = matcher.group(2).trim();
      if (className.isEmpty()) {
        return Optional.empty();
      }
      String priorityStr = matcher.group(3).trim();
      int priority = priorityStr.isEmpty() ?
        Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr);
      String cfgSpec = null;
      try {
        cfgSpec = matcher.group(4);
      } catch (IndexOutOfBoundsException ex) {
        // ignore
      }
      Map<String, String> ourConf = new TreeMap<>();
      if (cfgSpec != null && !cfgSpec.trim().equals("|")) {
        cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1);
        Matcher m = CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec);
        while (m.find()) {
          ourConf.put(m.group(1), m.group(2));
        }
      }
      return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className)
        .setJarPath(path)
        .setPriority(priority)
        .setProperties(ourConf)
        .build());
    }
    return Optional.empty();
  }
}