/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.BiPredicate;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.HBaseException;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PrettyPrinter;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

/**
 * Convenience class for composing an instance of {@link TableDescriptor}.
 * @since 2.0.0
 */
@InterfaceAudience.Public
public class TableDescriptorBuilder {
  public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class);
  @InterfaceAudience.Private
  public static final String SPLIT_POLICY = "SPLIT_POLICY";
  private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY));
  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes the maximum size of the store file after which a
   * region split occurs.
   */
  @InterfaceAudience.Private
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final Bytes MAX_FILESIZE_KEY
          = new Bytes(Bytes.toBytes(MAX_FILESIZE));

  /**
   * Used by rest interface to access this metadata attribute
   * which denotes if the table is Read Only.
   */
  @InterfaceAudience.Private
  public static final String READONLY = "READONLY";
  private static final Bytes READONLY_KEY
          = new Bytes(Bytes.toBytes(READONLY));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is compaction enabled.
   */
  @InterfaceAudience.Private
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final Bytes COMPACTION_ENABLED_KEY
          = new Bytes(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is split enabled.
   */
  @InterfaceAudience.Private
  public static final String SPLIT_ENABLED = "SPLIT_ENABLED";
  private static final Bytes SPLIT_ENABLED_KEY = new Bytes(Bytes.toBytes(SPLIT_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is merge enabled.
   */
  @InterfaceAudience.Private
  public static final String MERGE_ENABLED = "MERGE_ENABLED";
  private static final Bytes MERGE_ENABLED_KEY = new Bytes(Bytes.toBytes(MERGE_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which represents the maximum size of the memstore after which its
   * contents are flushed onto the disk.
   */
  @InterfaceAudience.Private
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final Bytes MEMSTORE_FLUSHSIZE_KEY
          = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  @InterfaceAudience.Private
  public static final String FLUSH_POLICY = "FLUSH_POLICY";
  private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY));
  /**
   * Used by rest interface to access this metadata attribute
   * which denotes if it is a catalog table, i.e. <code> hbase:meta </code>.
   */
  @InterfaceAudience.Private
  public static final String IS_META = "IS_META";
  private static final Bytes IS_META_KEY
          = new Bytes(Bytes.toBytes(IS_META));

  /**
   * {@link Durability} setting for the table.
   */
  @InterfaceAudience.Private
  public static final String DURABILITY = "DURABILITY";
  private static final Bytes DURABILITY_KEY
          = new Bytes(Bytes.toBytes(DURABILITY));

  /**
   * The number of region replicas for the table.
   */
  @InterfaceAudience.Private
  public static final String REGION_REPLICATION = "REGION_REPLICATION";
  private static final Bytes REGION_REPLICATION_KEY
          = new Bytes(Bytes.toBytes(REGION_REPLICATION));

  /**
   * The flag to indicate whether or not the memstore should be
   * replicated for read-replicas (CONSISTENCY =&gt; TIMELINE).
   */
  @InterfaceAudience.Private
  public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
  private static final Bytes REGION_MEMSTORE_REPLICATION_KEY
          = new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));

  private static final Bytes REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY = new Bytes(
      Bytes.toBytes(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY));
  /**
   * Used by shell/rest interface to access this metadata
   * attribute which denotes if the table should be treated by region
   * normalizer.
   */
  @InterfaceAudience.Private
  public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
  private static final Bytes NORMALIZATION_ENABLED_KEY
          = new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_COUNT =
      "NORMALIZER_TARGET_REGION_COUNT";
  private static final Bytes NORMALIZER_TARGET_REGION_COUNT_KEY =
      new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT));

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE";
  private static final Bytes NORMALIZER_TARGET_REGION_SIZE_KEY =
      new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE));
  /**
   * Default durability for HTD is USE_DEFAULT, which defaults to the
   * HBase-global default value.
   */
  private static final Durability DEFAULT_DURABILITY = Durability.USE_DEFAULT;

  @InterfaceAudience.Private
  public static final String PRIORITY = "PRIORITY";
  private static final Bytes PRIORITY_KEY
          = new Bytes(Bytes.toBytes(PRIORITY));

  private static final Bytes RSGROUP_KEY =
      new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP));

  /**
   * Relative priority of the table used for rpc scheduling.
   */
  private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS;

  /**
   * Constant that denotes whether the table is READONLY by default (false).
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes whether the table is split enabled by default
   */
  public static final boolean DEFAULT_SPLIT_ENABLED = true;

  /**
   * Constant that denotes whether the table is merge enabled by default
   */
  public static final boolean DEFAULT_MERGE_ENABLED = true;

  /**
   * Constant that denotes whether the table is normalized by default.
   */
  public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;

  /**
   * Constant that denotes the default maximum size of the memstore in bytes
   * after which its contents are flushed to the store files.
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;

  private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
  private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();

  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
            String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
            String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABILITY.name()); // use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
    DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
    DEFAULT_VALUES.keySet().stream()
            .map(s -> new Bytes(Bytes.toBytes(s))).forEach(RESERVED_KEYWORDS::add);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }

  public static PrettyPrinter.Unit getUnit(String key) {
    switch (key) {
      case MAX_FILESIZE:
      case MEMSTORE_FLUSHSIZE:
        return PrettyPrinter.Unit.BYTE;
      default:
        return PrettyPrinter.Unit.NONE;
    }
  }

  /**
   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
   *             any more.
   */
  @InterfaceAudience.Private
  @Deprecated
  public final static String NAMESPACE_FAMILY_INFO = "info";

  /**
   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
   *             any more.
   */
  @InterfaceAudience.Private
  @Deprecated
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);

  /**
   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
   *             any more.
   */
  @InterfaceAudience.Private
  @Deprecated
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /**
   * <pre>
   * Pattern that matches a coprocessor specification. Form is:
   * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]}
   * where arguments are {@code <KEY> '=' <VALUE> [,...]}
   * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
   * </pre>
   */
  private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
    Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");

  private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
  private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
  private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile(
    "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" +
      CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
  private static final Pattern CP_HTD_ATTR_KEY_PATTERN =
    Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);

  /**
   * Table descriptor for namespace table
   * @deprecated since 3.0.0 and will be removed in 4.0.0. We have folded the data in namespace
   *   table into meta table, so do not use it any more.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21154">HBASE-21154</a>
   */
  @Deprecated
  public static final TableDescriptor NAMESPACE_TABLEDESC =
    TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES)
        // Ten is an arbitrary number. Keep versions to help debugging.
        .setMaxVersions(10).setInMemory(true).setBlocksize(8 * 1024)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL).build())
      .build();

  private final ModifyableTableDescriptor desc;

  /**
   * @param desc The table descriptor to serialize
   * @return The passed-in descriptor serialized as protobuf bytes, prefixed with the pb magic
   */
  public static byte[] toByteArray(TableDescriptor desc) {
    if (desc instanceof ModifyableTableDescriptor) {
      return ((ModifyableTableDescriptor) desc).toByteArray();
    }
    return new ModifyableTableDescriptor(desc).toByteArray();
  }

  /**
   * The input should be created by {@link #toByteArray}.
   * @param pbBytes A pb serialized TableDescriptor instance with pb magic prefix
   * @return A deserialized {@link TableDescriptor} instance
   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if the input is malformed
   */
  public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException {
    return ModifyableTableDescriptor.parseFrom(pbBytes);
  }
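
  // A round-trip sketch: for a descriptor produced by this builder, parseFrom
  // restores what toByteArray wrote (the byte[] carries the pb magic prefix).
  // "htd" here is a hypothetical TableDescriptor built elsewhere.
  //
  //   byte[] pb = TableDescriptorBuilder.toByteArray(htd);
  //   TableDescriptor restored = TableDescriptorBuilder.parseFrom(pb);
  //   assert TableDescriptor.COMPARATOR.compare(htd, restored) == 0;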

  public static TableDescriptorBuilder newBuilder(final TableName name) {
    return new TableDescriptorBuilder(name);
  }

  public static TableDescriptor copy(TableDescriptor desc) {
    return new ModifyableTableDescriptor(desc);
  }

  public static TableDescriptor copy(TableName name, TableDescriptor desc) {
    return new ModifyableTableDescriptor(name, desc);
  }

  /**
   * Copy all values, families, and name from the input.
   * @param desc The descriptor to copy
   * @return A clone of input
   */
  public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) {
    return new TableDescriptorBuilder(desc);
  }

  private TableDescriptorBuilder(final TableName name) {
    this.desc = new ModifyableTableDescriptor(name);
  }

  private TableDescriptorBuilder(final TableDescriptor desc) {
    this.desc = new ModifyableTableDescriptor(desc);
  }

  public TableDescriptorBuilder setCoprocessor(String className) throws IOException {
    return setCoprocessor(CoprocessorDescriptorBuilder.of(className));
  }

  public TableDescriptorBuilder setCoprocessor(CoprocessorDescriptor cpDesc) throws IOException {
    desc.setCoprocessor(Objects.requireNonNull(cpDesc));
    return this;
  }
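
  // A hedged sketch of attaching a coprocessor with an explicit jar path,
  // priority, and arguments; the observer class and jar location below are
  // hypothetical placeholders. setJarPath/setPriority/setProperties are the
  // CoprocessorDescriptorBuilder setters already used elsewhere in this file.
  //
  //   builder.setCoprocessor(CoprocessorDescriptorBuilder
  //     .newBuilder("com.example.MyRegionObserver")
  //     .setJarPath("hdfs:///cp/my-observer.jar")
  //     .setPriority(Coprocessor.PRIORITY_USER)
  //     .setProperties(Collections.singletonMap("arg1", "1"))
  //     .build());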

  public TableDescriptorBuilder setCoprocessors(Collection<CoprocessorDescriptor> cpDescs)
    throws IOException {
    for (CoprocessorDescriptor cpDesc : cpDescs) {
      desc.setCoprocessor(cpDesc);
    }
    return this;
  }

  public boolean hasCoprocessor(String classNameToMatch) {
    return desc.hasCoprocessor(classNameToMatch);
  }

  public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder setColumnFamilies(
    final Collection<ColumnFamilyDescriptor> families) {
    families.forEach(desc::setColumnFamily);
    return this;
  }

  public TableDescriptorBuilder modifyColumnFamily(final ColumnFamilyDescriptor family) {
    desc.modifyColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder removeValue(final String key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(Bytes key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(byte[] key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(BiPredicate<Bytes, Bytes> predicate) {
    List<Bytes> toRemove =
      desc.getValues().entrySet().stream().filter(e -> predicate.test(e.getKey(), e.getValue()))
        .map(Map.Entry::getKey).collect(Collectors.toList());
    for (Bytes key : toRemove) {
      removeValue(key);
    }
    return this;
  }

  public TableDescriptorBuilder removeColumnFamily(final byte[] name) {
    desc.removeColumnFamily(name);
    return this;
  }

  public TableDescriptorBuilder removeCoprocessor(String className) {
    desc.removeCoprocessor(className);
    return this;
  }

  public TableDescriptorBuilder setCompactionEnabled(final boolean isEnable) {
    desc.setCompactionEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setSplitEnabled(final boolean isEnable) {
    desc.setSplitEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setMergeEnabled(final boolean isEnable) {
    desc.setMergeEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setDurability(Durability durability) {
    desc.setDurability(durability);
    return this;
  }

  public TableDescriptorBuilder setFlushPolicyClassName(String clazz) {
    desc.setFlushPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(long maxFileSize) {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(String maxFileSize) throws HBaseException {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }
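
  // The String overloads accept human-readable sizes, parsed by
  // PrettyPrinter.valueOf with Unit.BYTE before the raw byte count is stored.
  // The exact accepted formats are whatever PrettyPrinter supports; the
  // literals below are illustrative assumptions, not documented contracts.
  //
  //   builder.setMaxFileSize("10 GB");        // assumed human-readable form
  //   builder.setMemStoreFlushSize("128 MB"); // likewise parsed with Unit.BYTE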

  public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) {
    desc.setMemStoreFlushSize(memstoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(String memStoreFlushSize)
    throws HBaseException {
    desc.setMemStoreFlushSize(memStoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) {
    desc.setNormalizerTargetRegionCount(regionCount);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionSize(final long regionSize) {
    desc.setNormalizerTargetRegionSize(regionSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) {
    desc.setNormalizationEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setPriority(int priority) {
    desc.setPriority(priority);
    return this;
  }

  public TableDescriptorBuilder setReadOnly(final boolean readOnly) {
    desc.setReadOnly(readOnly);
    return this;
  }

  public TableDescriptorBuilder setRegionMemStoreReplication(boolean memstoreReplication) {
    desc.setRegionMemStoreReplication(memstoreReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionReplication(int regionReplication) {
    desc.setRegionReplication(regionReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionSplitPolicyClassName(String clazz) {
    desc.setRegionSplitPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setValue(final String key, final String value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final Bytes key, final Bytes value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final byte[] key, final byte[] value) {
    desc.setValue(key, value);
    return this;
  }

  public String getValue(String key) {
    return desc.getValue(key);
  }

  /**
   * Sets the replication scope for all and only the column families already present in the
   * builder. Column families added later won't be backfilled with this replication scope.
   * @param scope replication scope
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setReplicationScope(int scope) {
    Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    newFamilies.putAll(desc.families);
    newFamilies
        .forEach((cf, cfDesc) -> {
          desc.removeColumnFamily(cf);
          desc.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope)
              .build());
        });
    return this;
  }
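
  // A small usage sketch: mark every column family added so far for
  // cross-cluster replication. HConstants.REPLICATION_SCOPE_GLOBAL is the
  // standard scope value for that; families added to the builder afterwards
  // keep their own scope.
  //
  //   builder.setReplicationScope(HConstants.REPLICATION_SCOPE_GLOBAL);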

  public TableDescriptorBuilder setRegionServerGroup(String group) {
    desc.setValue(RSGROUP_KEY, group);
    return this;
  }

  public TableDescriptor build() {
    return new ModifyableTableDescriptor(desc);
  }
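
  // A minimal end-to-end sketch of the builder; the table and family names
  // are hypothetical placeholders.
  //
  //   TableDescriptor htd = TableDescriptorBuilder
  //     .newBuilder(TableName.valueOf("example"))
  //     .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
  //     .setMaxFileSize(10L * 1024 * 1024 * 1024)  // split regions past 10 GB
  //     .setRegionReplication(2)                   // one extra read replica per region
  //     .build();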

  private static final class ModifyableTableDescriptor
    implements TableDescriptor, Comparable<ModifyableTableDescriptor> {

    private final TableName name;

    /**
     * A map which holds the metadata information of the table. This metadata
     * includes values like IS_META, SPLIT_POLICY, MAX_FILESIZE,
     * READONLY, MEMSTORE_FLUSHSIZE etc...
     */
    private final Map<Bytes, Bytes> values = new HashMap<>();

    /**
     * Maps column family name to the respective FamilyDescriptors
     */
    private final Map<byte[], ColumnFamilyDescriptor> families
            = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);

    /**
     * Construct a table descriptor specifying a TableName object
     *
     * @param name Table name.
     */
    private ModifyableTableDescriptor(final TableName name) {
      this(name, Collections.emptyList(), Collections.emptyMap());
    }

    private ModifyableTableDescriptor(final TableDescriptor desc) {
      this(desc.getTableName(), Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    /**
     * Construct a table descriptor by cloning the descriptor passed as a
     * parameter.
     * <p>
     * Makes a deep copy of the supplied descriptor.
     * @param name The new name
     * @param desc The descriptor.
     */
    private ModifyableTableDescriptor(final TableName name, final TableDescriptor desc) {
      this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    private ModifyableTableDescriptor(final TableName name,
            final Collection<ColumnFamilyDescriptor> families, Map<Bytes, Bytes> values) {
      this.name = name;
      families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c)));
      this.values.putAll(values);
      this.values.put(IS_META_KEY,
        new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME)))));
    }

    /**
     * Checks if this table is the <code> hbase:meta </code> region.
     *
     * @return true if this table is the <code> hbase:meta </code> region
     */
    @Override
    public boolean isMetaRegion() {
      return getOrDefault(IS_META_KEY, Boolean::valueOf, false);
    }

    /**
     * Checks if the table is a <code>hbase:meta</code> table
     *
     * @return true if the table is the <code> hbase:meta </code> region.
     */
    @Override
    public boolean isMetaTable() {
      return isMetaRegion();
    }

    @Override
    public Bytes getValue(Bytes key) {
      Bytes rval = values.get(key);
      return rval == null ? null : new Bytes(rval.copyBytes());
    }

    @Override
    public String getValue(String key) {
      Bytes rval = values.get(new Bytes(Bytes.toBytes(key)));
      return rval == null ? null : Bytes.toString(rval.get(), rval.getOffset(), rval.getLength());
    }

    @Override
    public byte[] getValue(byte[] key) {
      Bytes value = values.get(new Bytes(key));
      return value == null ? null : value.copyBytes();
    }

    private <T> T getOrDefault(Bytes key, Function<String, T> function, T defaultValue) {
      Bytes value = values.get(key);
      if (value == null) {
        return defaultValue;
      } else {
        return function.apply(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      }
    }

    /**
     * Getter for fetching an unmodifiable {@link #values} map.
     *
     * @return unmodifiable map {@link #values}.
     * @see #values
     */
    @Override
    public Map<Bytes, Bytes> getValues() {
      // shallow pointer copy
      return Collections.unmodifiableMap(values);
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     * @return the modifyable TD
     * @see #values
     */
    public ModifyableTableDescriptor setValue(byte[] key, byte[] value) {
      return setValue(toBytesOrNull(key, v -> v),
              toBytesOrNull(value, v -> v));
    }

    public ModifyableTableDescriptor setValue(String key, String value) {
      return setValue(toBytesOrNull(key, Bytes::toBytes),
              toBytesOrNull(value, Bytes::toBytes));
    }

    /*
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    private ModifyableTableDescriptor setValue(final Bytes key,
            final String value) {
      return setValue(key, toBytesOrNull(value, Bytes::toBytes));
    }

    /*
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
      if (value == null) {
        values.remove(key);
      } else {
        values.put(key, value);
      }
      return this;
    }

    private static <T> Bytes toBytesOrNull(T t, Function<T, byte[]> f) {
      if (t == null) {
        return null;
      } else {
        return new Bytes(f.apply(t));
      }
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key The key of the entry to remove from the TableDescriptor
     * parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final String key) {
      return setValue(key, (String) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key The key of the entry to remove from the TableDescriptor
     * parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(Bytes key) {
      return setValue(key, (Bytes) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key The key of the entry to remove from the TableDescriptor
     * parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final byte[] key) {
      return removeValue(new Bytes(key));
    }

    /**
     * Check if the readOnly flag of the table is set. If the readOnly flag is
     * set then the contents of the table can only be read from but not
     * modified.
     *
     * @return true if all columns in the table should be read only
     */
    @Override
    public boolean isReadOnly() {
      return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY);
    }

    /**
     * Setting the table as read only sets all the columns in the table as read
     * only. By default all tables are modifiable, but if the readOnly flag is
     * set to true then the contents of the table can only be read but not
     * modified.
     *
     * @param readOnly True if all of the columns in the table should be read
     * only.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setReadOnly(final boolean readOnly) {
      return setValue(READONLY_KEY, Boolean.toString(readOnly));
    }

    /**
     * Check if the compaction enable flag of the table is true. If the flag is
     * false then no minor/major compactions will actually be performed.
     *
     * @return true if table compaction enabled
     */
    @Override
    public boolean isCompactionEnabled() {
      return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED);
    }

    /**
     * Setting the table compaction enable flag.
     *
     * @param isEnable True if enable compaction.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) {
      return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the split enable flag of the table is true. If the flag is false then no split
     * will be done.
     *
     * @return true if table region split enabled
     */
    @Override
    public boolean isSplitEnabled() {
      return getOrDefault(SPLIT_ENABLED_KEY, Boolean::valueOf, DEFAULT_SPLIT_ENABLED);
    }

    /**
     * Setting the table region split enable flag.
     * @param isEnable True if enable region split.
     *
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) {
      return setValue(SPLIT_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the region merge enable flag of the table is true. If the flag is false then no
     * merge will be done.
     *
     * @return true if table region merge enabled
     */
    @Override
    public boolean isMergeEnabled() {
      return getOrDefault(MERGE_ENABLED_KEY, Boolean::valueOf, DEFAULT_MERGE_ENABLED);
    }

    /**
     * Setting the table region merge enable flag.
     * @param isEnable True if enable region merge.
     *
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) {
      return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the normalization enable flag of the table is true. If the flag
     * is false then the region normalizer won't attempt to normalize this table.
     *
     * @return true if region normalization is enabled for this table
     */
    @Override
    public boolean isNormalizationEnabled() {
      return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf,
        DEFAULT_NORMALIZATION_ENABLED);
    }

    /**
     * Check if there is a target region count. If so, the normalize plan will be calculated
     * based on the target region count.
     * @return target region count after normalization is done, or -1 if not set
     */
    @Override
    public int getNormalizerTargetRegionCount() {
      return getOrDefault(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer::valueOf,
        Integer.valueOf(-1));
    }

    /**
     * Check if there is a target region size. If so, the normalize plan will be calculated
     * based on the target region size.
     * @return target region size after normalization is done, or -1 if not set
     */
    @Override
    public long getNormalizerTargetRegionSize() {
      return getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1));
    }

    /**
     * Setting the table normalization enable flag.
     *
     * @param isEnable True if enable normalization.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) {
      return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Setting the target region count of table normalization.
     * @param regionCount the target region count.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionCount(final int regionCount) {
      return setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer.toString(regionCount));
    }

    /**
     * Setting the target region size of table normalization.
     * @param regionSize the target region size.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long regionSize) {
      return setValue(NORMALIZER_TARGET_REGION_SIZE_KEY, Long.toString(regionSize));
    }

    /**
     * Sets the {@link Durability} setting for the table. This defaults to
     * Durability.USE_DEFAULT.
     *
     * @param durability enum value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setDurability(Durability durability) {
      return setValue(DURABILITY_KEY, durability.name());
    }

    /**
     * Returns the durability setting for the table.
     *
     * @return durability setting for the table.
     */
    @Override
    public Durability getDurability() {
      return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABILITY);
    }

    /**
     * Get the name of the table
     *
     * @return TableName
     */
    @Override
    public TableName getTableName() {
      return name;
    }

    /**
     * This sets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     *
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) {
      return setValue(SPLIT_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     *
     * @return the class name of the region split policy for this table. If this
     * returns null, the default split policy is used.
     */
    @Override
    public String getRegionSplitPolicyClassName() {
      return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Returns the maximum size up to which a region can grow, after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region.
     *
     * @return max hregion size for table, -1 if not set.
     *
     * @see #setMaxFileSize(long)
     */
    @Override
    public long getMaxFileSize() {
      return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Sets the maximum size up to which a region can grow, after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region, i.e. if the biggest store file
     * grows beyond the maxFileSize, then the region split is triggered. If not
     * set, {@link HConstants#DEFAULT_MAX_FILE_SIZE} is used.
     * <p>
     * This is not an absolute value and might vary. Assume that a single row
     * exceeds the maxFileSize, then the storeFileSize will be greater than
     * maxFileSize since a single row cannot be split across multiple regions.
     * </p>
     *
     * @param maxFileSize The maximum file size that a store file can grow to
     * before a split is triggered.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) {
      return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    }

    public ModifyableTableDescriptor setMaxFileSize(String maxFileSize) throws HBaseException {
      return setMaxFileSize(
        Long.parseLong(PrettyPrinter.valueOf(maxFileSize, PrettyPrinter.Unit.BYTE)));
    }

    /**
     * Returns the size of the memstore after which a flush to filesystem is
     * triggered.
     *
     * @return memory cache flush size for each hregion, -1 if not set.
     *
     * @see #setMemStoreFlushSize(long)
     */
    @Override
    public long getMemStoreFlushSize() {
      return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Sets the maximum size of the memstore after which the contents of the
     * memstore are flushed to the filesystem. This defaults to
     * {@link TableDescriptorBuilder#DEFAULT_MEMSTORE_FLUSH_SIZE} (128 MB).
     *
     * @param memstoreFlushSize memory cache flush size for each hregion
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
      return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    }

    public ModifyableTableDescriptor setMemStoreFlushSize(String memStoreFlushSize)
      throws HBaseException {
      return setMemStoreFlushSize(Long.parseLong(PrettyPrinter.valueOf(memStoreFlushSize,
        PrettyPrinter.Unit.BYTE)));
    }

    /**
     * This sets the class associated with the flush policy which determines
     * the stores that need to be flushed when flushing a region. The
     * class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.FlushPolicy.
     *
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) {
      return setValue(FLUSH_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the flush policy which determines
     * the stores that need to be flushed when flushing a region. The class used
     * by default is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy.
     *
     * @return the class name of the flush policy for this table. If this
     * returns null, the default flush policy is used.
     */
    @Override
    public String getFlushPolicyClassName() {
      return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Adds a column family. To update an existing column family, use
     * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead.
     *
     * @param family to add.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      if (hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException("Family '"
                + family.getNameAsString() + "' already exists so cannot be added");
      }
      return putColumnFamily(family);
    }

    /**
     * Modifies the existing column family.
     *
     * @param family to update
     * @return this (for chained invocation)
     */
    public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      if (!hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException("Column family '" + family.getNameAsString()
                + "' does not exist");
      }
      return putColumnFamily(family);
    }

    private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) {
      families.put(family.getName(), family);
      return this;
    }

    /**
     * Checks to see if this table contains the given column family
     *
     * @param familyName Family name or column name.
     * @return true if the table contains the specified family name
     */
    @Override
    public boolean hasColumnFamily(final byte[] familyName) {
      return families.containsKey(familyName);
    }

    /**
     * @return Name of this table and then a map of all of the column family descriptors.
     */
    @Override
    public String toString() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(true));
      families.values().forEach(f -> s.append(", ").append(f));
      return s.toString();
    }

    /**
     * @return Name of this table and then a map of all of the column family
     * descriptors (with only the non-default column family attributes)
     */
    @Override
    public String toStringCustomizedValues() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(false));
      families.values().forEach(hcd -> s.append(", ").append(hcd.toStringCustomizedValues()));
      return s.toString();
    }

    /**
     * @return map of all table attributes formatted into string.
     */
    public String toStringTableAttributes() {
      return getValues(true).toString();
    }

    private StringBuilder getValues(boolean printDefaults) {
      StringBuilder s = new StringBuilder();

      // step 1: set partitioning and pruning
      Set<Bytes> reservedKeys = new TreeSet<>();
      Set<Bytes> userKeys = new TreeSet<>();
      for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
        if (entry.getKey() == null || entry.getKey().get() == null) {
          continue;
        }
        String key = Bytes.toString(entry.getKey().get());
        // in this section, print out reserved keywords + coprocessor info
        if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
          userKeys.add(entry.getKey());
          continue;
        }
        // only print out IS_META if true
        String value = Bytes.toString(entry.getValue().get());
        if (key.equalsIgnoreCase(IS_META) && !Boolean.parseBoolean(value)) {
          continue;
        }
        // see if a reserved key is a default value. may not want to print it out
        if (printDefaults
                || !DEFAULT_VALUES.containsKey(key)
                || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
          reservedKeys.add(entry.getKey());
        }
      }

      // early exit optimization
      boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
      if (!hasAttributes) {
        return s;
      }

      s.append(", {");
      // step 2: printing attributes
      if (hasAttributes) {
        s.append("TABLE_ATTRIBUTES => {");

        // print all reserved keys first
        boolean printCommaForAttr = false;
        for (Bytes k : reservedKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForAttr) {
            s.append(", ");
          }
          printCommaForAttr = true;
          s.append(key);
          s.append(" => ");
          s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
        }

        if (!userKeys.isEmpty()) {
          // print all non-reserved as a separate subset
          if (printCommaForAttr) {
            s.append(", ");
          }
          s.append(HConstants.METADATA).append(" => ");
          s.append("{");
          boolean printCommaForCfg = false;
          for (Bytes k : userKeys) {
            String key = Bytes.toString(k.get());
            String value = Bytes.toStringBinary(values.get(k).get());
            if (printCommaForCfg) {
              s.append(", ");
            }
            printCommaForCfg = true;
            s.append('\'').append(key).append('\'');
            s.append(" => ");
            s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
          }
          s.append("}");
        }

        s.append("}");
      }

      s.append("}"); // end METHOD
      return s;
    }

    /**
     * Compare the contents of the descriptor with another one passed as a
     * parameter. Checks if the obj passed is an instance of ModifyableTableDescriptor,
     * if yes then the contents of the descriptors are compared.
     *
     * @param obj The object to compare
     * @return true if the contents of the two descriptors exactly match
     *
     * @see java.lang.Object#equals(java.lang.Object)
     */
    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (obj instanceof ModifyableTableDescriptor) {
        return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0;
      }
      return false;
    }

    /**
     * @return hash code
     */
    @Override
    public int hashCode() {
      int result = this.name.hashCode();
      if (this.families.size() > 0) {
        for (ColumnFamilyDescriptor e : this.families.values()) {
          result ^= e.hashCode();
        }
      }
      result ^= values.hashCode();
      return result;
    }

    // Comparable
    /**
     * Compares the descriptor with another descriptor which is passed as a
     * parameter. This compares the content of the two descriptors and not the
     * reference.
     *
     * @param other The MTD to compare
     * @return 0 if the contents of the descriptors are exactly matching, 1 if
     * there is a mismatch in the contents
     */
    @Override
    public int compareTo(final ModifyableTableDescriptor other) {
      return TableDescriptor.COMPARATOR.compare(this, other);
    }

    @Override
    public ColumnFamilyDescriptor[] getColumnFamilies() {
      return families.values().toArray(new ColumnFamilyDescriptor[families.size()]);
    }

    /**
     * Returns the configured replicas per region
     */
    @Override
    public int getRegionReplication() {
      return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION);
    }

    /**
     * Sets the number of replicas per region.
     *
     * @param regionReplication the replication factor per region
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionReplication(int regionReplication) {
      return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication));
    }

    /**
     * @return true if the read-replicas memstore replication is enabled.
     */
    @Override
    public boolean hasRegionMemStoreReplication() {
      return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf,
        DEFAULT_REGION_MEMSTORE_REPLICATION);
    }

    /**
     * Enable or Disable the memstore replication from the primary region to the
     * replicas. The replication will be used only for meta operations (e.g.
     * flush, compaction, ...)
     *
     * @param memstoreReplication true if the new data written to the primary
     * region should be replicated. false if the secondaries can tolerate
     * having new data only when the primary flushes the memstore.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) {
      setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication));
      // If memstore replication is set up, we do not have to wait for observing a flush event
      // from the primary before starting to serve reads, because gaps from replication are not
      // applicable.
      return setValue(REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
              Boolean.toString(memstoreReplication));
    }

    public ModifyableTableDescriptor setPriority(int priority) {
      return setValue(PRIORITY_KEY, Integer.toString(priority));
    }

    @Override
    public int getPriority() {
      return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY);
    }

    /**
     * Returns all the column family names of the current table. The map of
     * TableDescriptor contains mapping of family name to ColumnFamilyDescriptor.
     * This returns all the keys of the family map which represents the column
     * family names of the table.
     *
     * @return Immutable sorted set of the keys of the families.
     */
    @Override
    public Set<byte[]> getColumnFamilyNames() {
      return Collections.unmodifiableSet(this.families.keySet());
    }

    /**
     * Returns the ColumnFamilyDescriptor for a specific column family with name as
     * specified by the parameter column.
     *
     * @param column Column family name
     * @return the ColumnFamilyDescriptor for the passed family name, or null if
     * the family is not present.
     */
    @Override
    public ColumnFamilyDescriptor getColumnFamily(final byte[] column) {
      return this.families.get(column);
    }

    /**
     * Removes the ColumnFamilyDescriptor with name specified by the parameter column
     * from the table descriptor
     *
     * @param column Name of the column family to be removed.
     * @return the ColumnFamilyDescriptor that was removed, or null if the family
     * was not present.
     */
    public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) {
      return this.families.remove(column);
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @param className Full class name.
     * @throws IOException if the coprocessor is already attached
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCoprocessor(String className) throws IOException {
      return setCoprocessor(
        CoprocessorDescriptorBuilder.newBuilder(className).setPriority(Coprocessor.PRIORITY_USER)
          .build());
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @throws IOException if the coprocessor is already attached, its priority is
     * negative, or a property key/value is illegal
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp)
            throws IOException {
      checkHasCoprocessor(cp.getClassName());
      if (cp.getPriority() < 0) {
        throw new IOException("Priority must be greater than or equal to zero, current: "
          + cp.getPriority());
      }
      // Validate parameter kvs and then add key/values to kvString.
      StringBuilder kvString = new StringBuilder();
      for (Map.Entry<String, String> e : cp.getProperties().entrySet()) {
        if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
          throw new IOException("Illegal parameter key = " + e.getKey());
        }
        if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
          throw new IOException("Illegal parameter (" + e.getKey()
                  + ") value = " + e.getValue());
        }
        if (kvString.length() != 0) {
          kvString.append(',');
        }
        kvString.append(e.getKey());
        kvString.append('=');
        kvString.append(e.getValue());
      }

      String value = cp.getJarPath().orElse("")
              + "|" + cp.getClassName() + "|" + Integer.toString(cp.getPriority()) + "|"
              + kvString.toString();
      return setCoprocessorToMap(value);
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @param specStr The Coprocessor specification all in one String
     * @throws IOException if the coprocessor is already attached
     * @return the modifyable TD
     * @deprecated used by HTableDescriptor and admin.rb.
     *             As of release 2.0.0, this will be removed in HBase 3.0.0.
     */
    @Deprecated
    public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr)
      throws IOException {
      CoprocessorDescriptor cpDesc = toCoprocessorDescriptor(specStr).orElseThrow(
        () -> new IllegalArgumentException(
          "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr));
      checkHasCoprocessor(cpDesc.getClassName());
      return setCoprocessorToMap(specStr);
    }

    private void checkHasCoprocessor(final String className) throws IOException {
      if (hasCoprocessor(className)) {
        throw new IOException("Coprocessor " + className + " already exists.");
      }
    }

    /**
     * Add coprocessor to values Map
     * @param specStr The Coprocessor specification all in one String
     * @return Returns <code>this</code>
     */
    private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) {
      if (specStr == null) {
        return this;
      }
      // generate a coprocessor key
      int maxCoprocessorNumber = 0;
      Matcher keyMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        maxCoprocessorNumber =
          Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
      }
      maxCoprocessorNumber++;
      String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
      return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
    }

    /**
     * Check if the table has an attached co-processor represented by the name
     * className
     *
     * @param classNameToMatch - Class name of the co-processor
     * @return true if the table has a co-processor className
     */
    @Override
    public boolean hasCoprocessor(String classNameToMatch) {
      return getCoprocessorDescriptors().stream().anyMatch(cp -> cp.getClassName()
        .equals(classNameToMatch));
    }

    /**
     * Return the list of attached co-processors represented by their
     * descriptors
     *
     * @return The list of CoprocessorDescriptors
     */
    @Override
    public List<CoprocessorDescriptor> getCoprocessorDescriptors() {
      List<CoprocessorDescriptor> result = new ArrayList<>();
      for (Map.Entry<Bytes, Bytes> e: getValues().entrySet()) {
        String key = Bytes.toString(e.getKey().get()).trim();
        if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) {
          toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim())
            .ifPresent(result::add);
        }
      }
      return result;
    }

    /**
     * Remove a coprocessor from those set on the table
     *
     * @param className Class name of the co-processor
     */
    public void removeCoprocessor(String className) {
      Bytes match = null;
      Matcher keyMatcher;
      Matcher valueMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes.toString(e.getValue().get()));
        if (!valueMatcher.matches()) {
          continue;
        }
        // get className and compare
        String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
        // remove the CP if it is present
        if (clazz.equals(className.trim())) {
          match = e.getKey();
          break;
        }
      }
      // if we found a match, remove it
      if (match != null) {
        ModifyableTableDescriptor.this.removeValue(match);
      }
    }

    /**
     * @return the bytes in pb format
     */
    private byte[] toByteArray() {
      return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
    }

    /**
     * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance
     * with pb magic prefix
     * @return An instance of {@link ModifyableTableDescriptor} made from
     * <code>bytes</code>
     * @throws DeserializationException if the pb magic prefix is missing or the
     * bytes cannot be parsed
     * @see #toByteArray()
     */
    private static TableDescriptor parseFrom(final byte[] bytes)
            throws DeserializationException {
      if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
        throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor");
      }
      int pblen = ProtobufUtil.lengthOfPBMagic();
      HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
      try {
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        return ProtobufUtil.toTableDescriptor(builder.build());
      } catch (IOException e) {
        throw new DeserializationException(e);
      }
    }

    @Override
    public int getColumnFamilyCount() {
      return families.size();
    }

    @Override
    public Optional<String> getRegionServerGroup() {
      Bytes value = values.get(RSGROUP_KEY);
      if (value != null) {
        return Optional.of(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      } else {
        return Optional.empty();
      }
    }
  }

  /**
   * This method is mostly intended for internal use. However, it is also relied on by hbase-shell
   * for backwards compatibility.
   */
  private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) {
    Matcher matcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    if (matcher.matches()) {
      // jar file path can be empty if the cp class can be loaded
      // from class loader.
      String path = matcher.group(1).trim().isEmpty() ?
        null : matcher.group(1).trim();
      String className = matcher.group(2).trim();
      if (className.isEmpty()) {
        return Optional.empty();
      }
      String priorityStr = matcher.group(3).trim();
      int priority = priorityStr.isEmpty() ?
        Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr);
      String cfgSpec = null;
      try {
        cfgSpec = matcher.group(4);
      } catch (IndexOutOfBoundsException ex) {
        // ignore
      }
      Map<String, String> ourConf = new TreeMap<>();
      if (cfgSpec != null && !cfgSpec.trim().equals("|")) {
        cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1);
        Matcher m = CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec);
        while (m.find()) {
          ourConf.put(m.group(1), m.group(2));
        }
      }
      return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className)
        .setJarPath(path)
        .setPriority(priority)
        .setProperties(ourConf)
        .build());
    }
    return Optional.empty();
  }
}