/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.BiPredicate;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

/**
 * Convenience class for composing an instance of {@link TableDescriptor}.
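 * <p>
 * A minimal usage sketch; the table and column family names below are illustrative,
 * not prescribed by this class:
 * <pre>{@code
 * TableDescriptor desc = TableDescriptorBuilder
 *     .newBuilder(TableName.valueOf("example_table"))
 *     .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
 *     .build();
 * }</pre>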
 * @since 2.0.0
 */
@InterfaceAudience.Public
public class TableDescriptorBuilder {
  public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class);
  @InterfaceAudience.Private
  public static final String SPLIT_POLICY = "SPLIT_POLICY";
  private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY));
  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes the maximum size of the store file after which a
   * region split occurs.
   */
  @InterfaceAudience.Private
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final Bytes MAX_FILESIZE_KEY
          = new Bytes(Bytes.toBytes(MAX_FILESIZE));

  /**
   * Used by rest interface to access this metadata attribute
   * which denotes if the table is Read Only.
   */
  @InterfaceAudience.Private
  public static final String READONLY = "READONLY";
  private static final Bytes READONLY_KEY
          = new Bytes(Bytes.toBytes(READONLY));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is compaction enabled.
   */
  @InterfaceAudience.Private
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final Bytes COMPACTION_ENABLED_KEY
          = new Bytes(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is split enabled.
   */
  @InterfaceAudience.Private
  public static final String SPLIT_ENABLED = "SPLIT_ENABLED";
  private static final Bytes SPLIT_ENABLED_KEY = new Bytes(Bytes.toBytes(SPLIT_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is merge enabled.
   */
  @InterfaceAudience.Private
  public static final String MERGE_ENABLED = "MERGE_ENABLED";
  private static final Bytes MERGE_ENABLED_KEY = new Bytes(Bytes.toBytes(MERGE_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which represents the maximum size of the memstore after which its
   * contents are flushed onto the disk.
   */
  @InterfaceAudience.Private
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final Bytes MEMSTORE_FLUSHSIZE_KEY
          = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  @InterfaceAudience.Private
  public static final String FLUSH_POLICY = "FLUSH_POLICY";
  private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY));
  /**
   * Used by rest interface to access this metadata attribute
   * which denotes if it is a catalog table, i.e. <code>hbase:meta</code>.
   */
  @InterfaceAudience.Private
  public static final String IS_META = "IS_META";
  private static final Bytes IS_META_KEY
          = new Bytes(Bytes.toBytes(IS_META));

  /**
   * {@link Durability} setting for the table.
   */
  @InterfaceAudience.Private
  public static final String DURABILITY = "DURABILITY";
  private static final Bytes DURABILITY_KEY
          = new Bytes(Bytes.toBytes("DURABILITY"));

  /**
   * The number of region replicas for the table.
   */
  @InterfaceAudience.Private
  public static final String REGION_REPLICATION = "REGION_REPLICATION";
  private static final Bytes REGION_REPLICATION_KEY
          = new Bytes(Bytes.toBytes(REGION_REPLICATION));

  /**
   * The flag to indicate whether or not the memstore should be
   * replicated for read-replicas (CONSISTENCY =&gt; TIMELINE).
   */
  @InterfaceAudience.Private
  public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
  private static final Bytes REGION_MEMSTORE_REPLICATION_KEY
          = new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));

  private static final Bytes REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY
          = new Bytes(Bytes.toBytes(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY));
  /**
   * Used by shell/rest interface to access this metadata
   * attribute which denotes if the table should be handled by the region
   * normalizer.
   */
  @InterfaceAudience.Private
  public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
  private static final Bytes NORMALIZATION_ENABLED_KEY
          = new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_COUNT =
      "NORMALIZER_TARGET_REGION_COUNT";
  private static final Bytes NORMALIZER_TARGET_REGION_COUNT_KEY =
      new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT));

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE";
  private static final Bytes NORMALIZER_TARGET_REGION_SIZE_KEY =
      new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE));

  /**
   * Default durability for tables is USE_DEFAULT, which delegates to the
   * HBase-global default value.
   */
  private static final Durability DEFAULT_DURABILITY = Durability.USE_DEFAULT;

  @InterfaceAudience.Private
  public static final String PRIORITY = "PRIORITY";
  private static final Bytes PRIORITY_KEY
          = new Bytes(Bytes.toBytes(PRIORITY));

  private static final Bytes RSGROUP_KEY =
      new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP));

  /**
   * Relative priority of the table used for RPC scheduling.
   */
  private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS;

  /**
   * Constant that denotes whether the table is read-only by default; defaults to false.
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes whether the table is split enabled by default
   */
  public static final boolean DEFAULT_SPLIT_ENABLED = true;

  /**
   * Constant that denotes whether the table is merge enabled by default
   */
  public static final boolean DEFAULT_MERGE_ENABLED = true;

  /**
   * Constant that denotes whether the table is normalized by default.
   */
  public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;

  /**
   * Constant that denotes the default maximum size of the memstore in bytes
   * after which its contents are flushed to the store files.
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;

  private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
  private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();

  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
            String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
            String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABILITY.name()); // use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
    DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
    DEFAULT_VALUES.keySet().stream()
            .map(s -> new Bytes(Bytes.toBytes(s))).forEach(RESERVED_KEYWORDS::add);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }

  /**
   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
   *             any more.
   */
  @InterfaceAudience.Private
  @Deprecated
  public final static String NAMESPACE_FAMILY_INFO = "info";

  /**
   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
   *             any more.
   */
  @InterfaceAudience.Private
  @Deprecated
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);

  /**
   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
   *             any more.
   */
  @InterfaceAudience.Private
  @Deprecated
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /**
   * <pre>
   * Pattern that matches a coprocessor specification. Form is:
   * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]}
   * where arguments are {@code <KEY> '=' <VALUE> [,...]}
   * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
   * </pre>
   */
  private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
    Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");

  private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
  private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
  private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile(
    "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" +
      CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
  private static final Pattern CP_HTD_ATTR_KEY_PATTERN =
    Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);

  /**
   * Table descriptor for namespace table
   * @deprecated since 3.0.0 and will be removed in 4.0.0. We have folded the data in namespace
   *   table into meta table, so do not use it any more.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21154">HBASE-21154</a>
   */
  @Deprecated
  public static final TableDescriptor NAMESPACE_TABLEDESC =
    TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES)
        // Ten is an arbitrary number. Keep versions to help debugging.
        .setMaxVersions(10).setInMemory(true).setBlocksize(8 * 1024)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL).build())
      .build();

  private final ModifyableTableDescriptor desc;

  /**
   * @param desc The table descriptor to serialize
   * @return The descriptor serialized as protobuf, prefixed with the pb magic
   */
  public static byte[] toByteArray(TableDescriptor desc) {
    if (desc instanceof ModifyableTableDescriptor) {
      return ((ModifyableTableDescriptor) desc).toByteArray();
    }
    return new ModifyableTableDescriptor(desc).toByteArray();
  }

  /**
   * The input should be created by {@link #toByteArray}.
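   * <p>
   * A round-trip sketch:
   * <pre>{@code
   * byte[] bytes = TableDescriptorBuilder.toByteArray(desc);
   * TableDescriptor copy = TableDescriptorBuilder.parseFrom(bytes);
   * }</pre>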
   * @param pbBytes A pb serialized TableDescriptor instance with pb magic prefix
   * @return A {@link TableDescriptor} deserialized from <code>pbBytes</code>
   * @throws DeserializationException if the bytes are not prefixed with the pb magic or
   *           cannot be parsed
   */
  public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException {
    return ModifyableTableDescriptor.parseFrom(pbBytes);
  }

  public static TableDescriptorBuilder newBuilder(final TableName name) {
    return new TableDescriptorBuilder(name);
  }

  public static TableDescriptor copy(TableDescriptor desc) {
    return new ModifyableTableDescriptor(desc);
  }

  public static TableDescriptor copy(TableName name, TableDescriptor desc) {
    return new ModifyableTableDescriptor(name, desc);
  }

  /**
   * Copy all values, families, and name from the input.
   * @param desc The descriptor to copy
   * @return A clone of input
   */
  public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) {
    return new TableDescriptorBuilder(desc);
  }

  private TableDescriptorBuilder(final TableName name) {
    this.desc = new ModifyableTableDescriptor(name);
  }

  private TableDescriptorBuilder(final TableDescriptor desc) {
    this.desc = new ModifyableTableDescriptor(desc);
  }

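  /**
   * Add a coprocessor to this table by class name. A sketch; the observer and endpoint
   * class names below are illustrative, and the descriptor variant is the one to use when
   * a jar path, priority, or properties must be supplied:
   * <pre>{@code
   * builder.setCoprocessor("com.example.MyRegionObserver");
   * builder.setCoprocessor(CoprocessorDescriptorBuilder.newBuilder("com.example.MyEndpoint")
   *     .setPriority(Coprocessor.PRIORITY_USER)
   *     .build());
   * }</pre>
   */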
  public TableDescriptorBuilder setCoprocessor(String className) throws IOException {
    return setCoprocessor(CoprocessorDescriptorBuilder.of(className));
  }

  public TableDescriptorBuilder setCoprocessor(CoprocessorDescriptor cpDesc) throws IOException {
    desc.setCoprocessor(Objects.requireNonNull(cpDesc));
    return this;
  }

  public TableDescriptorBuilder setCoprocessors(Collection<CoprocessorDescriptor> cpDescs)
    throws IOException {
    for (CoprocessorDescriptor cpDesc : cpDescs) {
      desc.setCoprocessor(cpDesc);
    }
    return this;
  }

  public boolean hasCoprocessor(String classNameToMatch) {
    return desc.hasCoprocessor(classNameToMatch);
  }

  public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder setColumnFamilies(
    final Collection<ColumnFamilyDescriptor> families) {
    families.forEach(desc::setColumnFamily);
    return this;
  }

  public TableDescriptorBuilder modifyColumnFamily(final ColumnFamilyDescriptor family) {
    desc.modifyColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder removeValue(final String key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(Bytes key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(byte[] key) {
    desc.removeValue(key);
    return this;
  }

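  /**
   * Remove all metadata entries whose key/value pair matches the given predicate. For
   * example, to drop every coprocessor attribute (the {@code coprocessor$} key prefix is
   * the one used internally):
   * <pre>{@code
   * builder.removeValue((k, v) -> Bytes.toString(k.get()).startsWith("coprocessor$"));
   * }</pre>
   */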
  public TableDescriptorBuilder removeValue(BiPredicate<Bytes, Bytes> predicate) {
    List<Bytes> toRemove =
      desc.getValues().entrySet().stream().filter(e -> predicate.test(e.getKey(), e.getValue()))
        .map(Map.Entry::getKey).collect(Collectors.toList());
    for (Bytes key : toRemove) {
      removeValue(key);
    }
    return this;
  }

  public TableDescriptorBuilder removeColumnFamily(final byte[] name) {
    desc.removeColumnFamily(name);
    return this;
  }

  public TableDescriptorBuilder removeCoprocessor(String className) {
    desc.removeCoprocessor(className);
    return this;
  }

  public TableDescriptorBuilder setCompactionEnabled(final boolean isEnable) {
    desc.setCompactionEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setSplitEnabled(final boolean isEnable) {
    desc.setSplitEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setMergeEnabled(final boolean isEnable) {
    desc.setMergeEnabled(isEnable);
    return this;
  }

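  /**
   * Set the {@link Durability} for this table; the default is
   * {@link Durability#USE_DEFAULT}. For example:
   * <pre>{@code
   * builder.setDurability(Durability.ASYNC_WAL);
   * }</pre>
   */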
  public TableDescriptorBuilder setDurability(Durability durability) {
    desc.setDurability(durability);
    return this;
  }

  public TableDescriptorBuilder setFlushPolicyClassName(String clazz) {
    desc.setFlushPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(long maxFileSize) {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) {
    desc.setMemStoreFlushSize(memstoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) {
    desc.setNormalizerTargetRegionCount(regionCount);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionSize(final long regionSize) {
    desc.setNormalizerTargetRegionSize(regionSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) {
    desc.setNormalizationEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setPriority(int priority) {
    desc.setPriority(priority);
    return this;
  }

  public TableDescriptorBuilder setReadOnly(final boolean readOnly) {
    desc.setReadOnly(readOnly);
    return this;
  }

  public TableDescriptorBuilder setRegionMemStoreReplication(boolean memstoreReplication) {
    desc.setRegionMemStoreReplication(memstoreReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionReplication(int regionReplication) {
    desc.setRegionReplication(regionReplication);
    return this;
  }

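  /**
   * Set the split policy class for this table. A sketch, assuming the stock
   * {@code ConstantSizeRegionSplitPolicy} that ships with HBase:
   * <pre>{@code
   * builder.setRegionSplitPolicyClassName(
   *     "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");
   * }</pre>
   */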
  public TableDescriptorBuilder setRegionSplitPolicyClassName(String clazz) {
    desc.setRegionSplitPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setValue(final String key, final String value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final Bytes key, final Bytes value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final byte[] key, final byte[] value) {
    desc.setValue(key, value);
    return this;
  }

  public String getValue(String key) {
    return desc.getValue(key);
  }

  /**
   * Sets the replication scope on all of (and only) the column families already in the
   * builder. Column families added later won't be backfilled with this replication scope.
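   * <p>
   * A sketch enabling global replication scope on every family added so far:
   * <pre>{@code
   * builder.setReplicationScope(HConstants.REPLICATION_SCOPE_GLOBAL);
   * }</pre>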
   * @param scope replication scope
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setReplicationScope(int scope) {
    Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    newFamilies.putAll(desc.families);
    newFamilies
        .forEach((cf, cfDesc) -> {
          desc.removeColumnFamily(cf);
          desc.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope)
              .build());
        });
    return this;
  }

  public TableDescriptorBuilder setRegionServerGroup(String group) {
    desc.setValue(RSGROUP_KEY, group);
    return this;
  }

  public TableDescriptor build() {
    return new ModifyableTableDescriptor(desc);
  }

  private static final class ModifyableTableDescriptor
    implements TableDescriptor, Comparable<ModifyableTableDescriptor> {

    private final TableName name;

    /**
     * A map which holds the metadata information of the table. This metadata
     * includes values like IS_META, SPLIT_POLICY, MAX_FILESIZE,
     * READONLY, MEMSTORE_FLUSHSIZE etc.
     */
    private final Map<Bytes, Bytes> values = new HashMap<>();

    /**
     * Maps column family name to the respective FamilyDescriptors
     */
    private final Map<byte[], ColumnFamilyDescriptor> families
            = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);

    /**
     * Construct a table descriptor specifying a TableName object
     *
     * @param name Table name.
     */
    private ModifyableTableDescriptor(final TableName name) {
      this(name, Collections.emptyList(), Collections.emptyMap());
    }

    private ModifyableTableDescriptor(final TableDescriptor desc) {
      this(desc.getTableName(), Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    /**
     * Construct a table descriptor by cloning the descriptor passed as a
     * parameter.
     * <p>
     * Makes a deep copy of the supplied descriptor.
     * @param name The new name
     * @param desc The descriptor.
     */
    private ModifyableTableDescriptor(final TableName name, final TableDescriptor desc) {
      this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    private ModifyableTableDescriptor(final TableName name, final Collection<ColumnFamilyDescriptor> families,
            Map<Bytes, Bytes> values) {
      this.name = name;
      families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c)));
      this.values.putAll(values);
      this.values.put(IS_META_KEY,
        new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME)))));
    }

    /**
     * Checks if this table is the <code>hbase:meta</code> region.
     *
     * @return true if this table is the <code>hbase:meta</code> region
     */
    @Override
    public boolean isMetaRegion() {
      return getOrDefault(IS_META_KEY, Boolean::valueOf, false);
    }

    /**
     * Checks if the table is a <code>hbase:meta</code> table
     *
     * @return true if table is the <code>hbase:meta</code> region.
     */
    @Override
    public boolean isMetaTable() {
      return isMetaRegion();
    }

    @Override
    public Bytes getValue(Bytes key) {
      Bytes rval = values.get(key);
      return rval == null ? null : new Bytes(rval.copyBytes());
    }

    @Override
    public String getValue(String key) {
      Bytes rval = values.get(new Bytes(Bytes.toBytes(key)));
      return rval == null ? null : Bytes.toString(rval.get(), rval.getOffset(), rval.getLength());
    }

    @Override
    public byte[] getValue(byte[] key) {
      Bytes value = values.get(new Bytes(key));
      return value == null ? null : value.copyBytes();
    }

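    /**
     * Look up a value by key and convert it with the given function, returning
     * {@code defaultValue} when the key is absent. For example,
     * {@code getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY)} parses the
     * stored string into a boolean.
     */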
    private <T> T getOrDefault(Bytes key, Function<String, T> function, T defaultValue) {
      Bytes value = values.get(key);
      if (value == null) {
        return defaultValue;
      } else {
        return function.apply(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      }
    }

    /**
     * Getter for fetching an unmodifiable {@link #values} map.
     *
     * @return unmodifiable map {@link #values}.
     * @see #values
     */
    @Override
    public Map<Bytes, Bytes> getValues() {
      // shallow pointer copy
      return Collections.unmodifiableMap(values);
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     * @return the modifyable TD
     * @see #values
     */
    public ModifyableTableDescriptor setValue(byte[] key, byte[] value) {
      return setValue(toBytesOrNull(key, v -> v),
              toBytesOrNull(value, v -> v));
    }

    public ModifyableTableDescriptor setValue(String key, String value) {
      return setValue(toBytesOrNull(key, Bytes::toBytes),
              toBytesOrNull(value, Bytes::toBytes));
    }

    /*
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    private ModifyableTableDescriptor setValue(final Bytes key,
            final String value) {
      return setValue(key, toBytesOrNull(value, Bytes::toBytes));
    }

    /*
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
      if (value == null) {
        values.remove(key);
      } else {
        values.put(key, value);
      }
      return this;
    }

    private static <T> Bytes toBytesOrNull(T t, Function<T, byte[]> f) {
      if (t == null) {
        return null;
      } else {
        return new Bytes(f.apply(t));
      }
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key The key of the entry to remove from the TableDescriptor's values map.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final String key) {
      return setValue(key, (String) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key The key of the entry to remove from the TableDescriptor's values map.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(Bytes key) {
      return setValue(key, (Bytes) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key The key of the entry to remove from the TableDescriptor's values map.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final byte[] key) {
      return removeValue(new Bytes(key));
    }

    /**
     * Check if the readOnly flag of the table is set. If the readOnly flag is
     * set then the contents of the table can only be read from but not
     * modified.
     *
     * @return true if all columns in the table should be read only
     */
    @Override
    public boolean isReadOnly() {
      return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY);
    }

    /**
     * Setting the table as read only sets all the columns in the table as read
     * only. By default all tables are modifiable, but if the readOnly flag is
     * set to true then the contents of the table can only be read but not
     * modified.
     *
     * @param readOnly True if all of the columns in the table should be read
     * only.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setReadOnly(final boolean readOnly) {
      return setValue(READONLY_KEY, Boolean.toString(readOnly));
    }

    /**
     * Check if the compaction enable flag of the table is true. If the flag is
     * false then no minor/major compactions will be performed.
     *
     * @return true if table compaction enabled
     */
    @Override
    public boolean isCompactionEnabled() {
      return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED);
    }

    /**
     * Setting the table compaction enable flag.
     *
     * @param isEnable True if enable compaction.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) {
      return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the split enable flag of the table is true. If the flag is false then no
     * splits will be done.
     *
     * @return true if table region split enabled
     */
    @Override
    public boolean isSplitEnabled() {
      return getOrDefault(SPLIT_ENABLED_KEY, Boolean::valueOf, DEFAULT_SPLIT_ENABLED);
    }

    /**
     * Setting the table region split enable flag.
     * @param isEnable True if enable region split.
     *
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) {
      return setValue(SPLIT_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the region merge enable flag of the table is true. If the flag is false then
     * no merges will be done.
     *
     * @return true if table region merge enabled
     */
    @Override
    public boolean isMergeEnabled() {
      return getOrDefault(MERGE_ENABLED_KEY, Boolean::valueOf, DEFAULT_MERGE_ENABLED);
    }

    /**
     * Setting the table region merge enable flag.
     * @param isEnable True if enable region merge.
     *
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) {
      return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the normalization enable flag of the table is true. If the flag
     * is false then the region normalizer won't attempt to normalize this table.
     *
     * @return true if region normalization is enabled for this table
     */
    @Override
    public boolean isNormalizationEnabled() {
      return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, DEFAULT_NORMALIZATION_ENABLED);
    }

    /**
     * Check if a target region count is set. If so, the normalization plan will be
     * calculated based on the target region count.
     * @return the target region count for normalization, or -1 if not set
     */
    @Override
    public int getNormalizerTargetRegionCount() {
      return getOrDefault(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer::valueOf,
        Integer.valueOf(-1));
    }

    /**
     * Check if a target region size is set. If so, the normalization plan will be
     * calculated based on the target region size.
     * @return the target region size for normalization, or -1 if not set
     */
    @Override
    public long getNormalizerTargetRegionSize() {
      return getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1));
    }

    /**
     * Setting the table normalization enable flag.
     *
     * @param isEnable True if enable normalization.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) {
      return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Setting the target region count of table normalization.
     * @param regionCount the target region count.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionCount(final int regionCount) {
      return setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer.toString(regionCount));
    }

    /**
     * Setting the target region size of table normalization.
     * @param regionSize the target region size.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long regionSize) {
      return setValue(NORMALIZER_TARGET_REGION_SIZE_KEY, Long.toString(regionSize));
    }

    /**
     * Sets the {@link Durability} setting for the table. This defaults to
     * Durability.USE_DEFAULT.
     *
     * @param durability enum value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setDurability(Durability durability) {
      return setValue(DURABILITY_KEY, durability.name());
    }

    /**
     * Returns the durability setting for the table.
     *
     * @return durability setting for the table.
     */
    @Override
    public Durability getDurability() {
      return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABILITY);
    }

    /**
     * Get the name of the table
     *
     * @return TableName
     */
    @Override
    public TableName getTableName() {
      return name;
    }

    /**
     * This sets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     *
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) {
      return setValue(SPLIT_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     *
     * @return the class name of the region split policy for this table. If this
     * returns null, the default split policy is used.
     */
    @Override
    public String getRegionSplitPolicyClassName() {
      return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Returns the maximum size up to which a region can grow, after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region.
     *
     * @return max hregion size for table, -1 if not set.
     *
     * @see #setMaxFileSize(long)
     */
    @Override
    public long getMaxFileSize() {
      return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Sets the maximum size up to which a region can grow, after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region, i.e. if the biggest store file
     * grows beyond the maxFileSize, then the region split is triggered. The
     * default is given by {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
     * <p>
     * This is not an absolute value and might vary. Assume that a single row
     * exceeds the maxFileSize; then the storeFileSize will be greater than
     * maxFileSize since a single row cannot be split across multiple regions.
     * </p>
     *
     * @param maxFileSize The maximum file size that a store file can grow to
     * before a split is triggered.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) {
      return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    }

    /**
     * Returns the size of the memstore after which a flush to filesystem is
     * triggered.
     *
     * @return memory cache flush size for each hregion, -1 if not set.
     *
     * @see #setMemStoreFlushSize(long)
     */
    @Override
    public long getMemStoreFlushSize() {
      return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Represents the maximum size of the memstore after which the contents of
     * the memstore are flushed to the filesystem. This defaults to
     * {@code DEFAULT_MEMSTORE_FLUSH_SIZE}, 128 MB.
     *
     * @param memstoreFlushSize memory cache flush size for each hregion
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
      return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    }

    /**
     * This sets the class associated with the flush policy which determines
     * which stores need to be flushed when flushing a region. The
     * class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.FlushPolicy.
     *
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) {
      return setValue(FLUSH_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the flush policy which determines which
     * stores need to be flushed when flushing a region. The class used by
     * default is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy.
     *
     * @return the class name of the flush policy for this table. If this
     * returns null, the default flush policy is used.
     */
    @Override
    public String getFlushPolicyClassName() {
      return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Adds a column family. For the updating purpose please use
     * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead.
     *
     * @param family to add.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      if (hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException("Family '"
                + family.getNameAsString() + "' already exists so cannot be added");
      }
      return putColumnFamily(family);
    }

    /**
     * Modifies the existing column family.
     *
     * @param family to update
     * @return this (for chained invocation)
     */
    public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      if (!hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException("Column family '" + family.getNameAsString()
                + "' does not exist");
      }
      return putColumnFamily(family);
    }

    private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) {
      families.put(family.getName(), family);
      return this;
    }

    /**
     * Checks to see if this table contains the given column family
     *
     * @param familyName Family name or column name.
     * @return true if the table contains the specified family name
     */
    @Override
    public boolean hasColumnFamily(final byte[] familyName) {
      return families.containsKey(familyName);
    }

    /**
     * @return Name of this table and then a map of all of the column family descriptors.
     */
    @Override
    public String toString() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(true));
      families.values().forEach(f -> s.append(", ").append(f));
      return s.toString();
    }

    /**
     * @return Name of this table and then a map of all of the column family
     * descriptors (with only the non-default column family attributes)
     */
    @Override
    public String toStringCustomizedValues() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(false));
      families.values().forEach(hcd -> s.append(", ").append(hcd.toStringCustomizedValues()));
      return s.toString();
    }

    /**
     * @return map of all table attributes formatted into string.
     */
    public String toStringTableAttributes() {
      return getValues(true).toString();
    }

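    /**
     * Render the table attributes for {@link #toString()}: reserved keys (plus coprocessor
     * entries) first, then user keys under a METADATA sub-map. When {@code printDefaults}
     * is false, attributes still at their default values are skipped. The result looks
     * roughly like {@code , {TABLE_ATTRIBUTES => {READONLY => 'true', METADATA => {...}}}}
     * (shape shown is illustrative).
     */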
    private StringBuilder getValues(boolean printDefaults) {
      StringBuilder s = new StringBuilder();

      // step 1: partition the keys into reserved and user-defined sets, pruning as we go
      Set<Bytes> reservedKeys = new TreeSet<>();
      Set<Bytes> userKeys = new TreeSet<>();
      for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
        if (entry.getKey() == null || entry.getKey().get() == null) {
          continue;
        }
        String key = Bytes.toString(entry.getKey().get());
        // in this section, print out reserved keywords + coprocessor info
        if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
          userKeys.add(entry.getKey());
          continue;
        }
        // only print out IS_META if true
        String value = Bytes.toString(entry.getValue().get());
        if (key.equalsIgnoreCase(IS_META)) {
          if (!Boolean.parseBoolean(value)) {
            continue;
          }
        }
        // see if a reserved key is a default value. may not want to print it out
        if (printDefaults
                || !DEFAULT_VALUES.containsKey(key)
                || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
          reservedKeys.add(entry.getKey());
        }
      }

      // early exit optimization
      boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
      if (!hasAttributes) {
        return s;
      }

      s.append(", {");
      // step 2: printing attributes
      if (hasAttributes) {
        s.append("TABLE_ATTRIBUTES => {");

        // print all reserved keys first
        boolean printCommaForAttr = false;
        for (Bytes k : reservedKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForAttr) {
            s.append(", ");
          }
          printCommaForAttr = true;
          s.append(key);
          s.append(" => ");
          s.append('\'').append(value).append('\'');
        }

        if (!userKeys.isEmpty()) {
          // print all non-reserved as a separate subset
          if (printCommaForAttr) {
            s.append(", ");
          }
          s.append(HConstants.METADATA).append(" => ");
          s.append("{");
          boolean printCommaForCfg = false;
          for (Bytes k : userKeys) {
            String key = Bytes.toString(k.get());
            String value = Bytes.toStringBinary(values.get(k).get());
            if (printCommaForCfg) {
              s.append(", ");
            }
            printCommaForCfg = true;
            s.append('\'').append(key).append('\'');
            s.append(" => ");
            s.append('\'').append(value).append('\'');
          }
          s.append("}");
        }

        s.append("}");
      }

      s.append("}"); // end METHOD
      return s;
    }

    /**
     * Compare the contents of the descriptor with another one passed as a
     * parameter. Checks if the obj passed is an instance of ModifyableTableDescriptor;
     * if yes then the contents of the descriptors are compared.
     *
     * @param obj The object to compare
     * @return true if the contents of the two descriptors exactly match
     *
     * @see java.lang.Object#equals(java.lang.Object)
     */
    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (obj instanceof ModifyableTableDescriptor) {
        return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0;
      }
      return false;
    }

    /**
     * @return hash code
     */
    @Override
    public int hashCode() {
      int result = this.name.hashCode();
      if (this.families.size() > 0) {
        for (ColumnFamilyDescriptor e : this.families.values()) {
          result ^= e.hashCode();
        }
      }
      result ^= values.hashCode();
      return result;
    }

    // Comparable
    /**
     * Compares the descriptor with another descriptor which is passed as a
     * parameter. This compares the content of the two descriptors and not the
     * reference.
     *
     * @param other The MTD to compare
     * @return 0 if the contents of the descriptors match exactly; a non-zero
     * value if there is a mismatch in the contents
     */
    @Override
    public int compareTo(final ModifyableTableDescriptor other) {
      return TableDescriptor.COMPARATOR.compare(this, other);
    }

    @Override
    public ColumnFamilyDescriptor[] getColumnFamilies() {
      return families.values().toArray(new ColumnFamilyDescriptor[families.size()]);
    }

    /**
     * Returns the configured replicas per region
     */
    @Override
    public int getRegionReplication() {
      return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION);
    }

    /**
     * Sets the number of replicas per region.
     *
     * @param regionReplication the replication factor per region
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionReplication(int regionReplication) {
      return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication));
    }

    /**
     * @return true if the read-replicas memstore replication is enabled.
     */
    @Override
    public boolean hasRegionMemStoreReplication() {
      return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, DEFAULT_REGION_MEMSTORE_REPLICATION);
    }

    /**
     * Enable or disable the memstore replication from the primary region to the
     * replicas. The replication will be used only for meta operations (e.g.
     * flush, compaction, ...)
     *
     * @param memstoreReplication true if the new data written to the primary
     * region should be replicated. false if the secondaries can tolerate
     * having new data only when the primary flushes the memstore.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) {
      setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication));
      // If memstore replication is set up, we do not have to wait for observing a flush event
      // from the primary before starting to serve reads, because gaps from replication are
      // not applicable
      return setValue(REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
              Boolean.toString(memstoreReplication));
    }

    public ModifyableTableDescriptor setPriority(int priority) {
      return setValue(PRIORITY_KEY, Integer.toString(priority));
    }

    @Override
    public int getPriority() {
      return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY);
    }

    /**
     * Returns all the column family names of the current table. The map of
     * TableDescriptor contains mapping of family name to ColumnFamilyDescriptor.
     * This returns all the keys of the family map which represents the column
     * family names of the table.
     *
     * @return Immutable sorted set of the keys of the families.
     */
    @Override
    public Set<byte[]> getColumnFamilyNames() {
      return Collections.unmodifiableSet(this.families.keySet());
    }

    /**
     * Returns the ColumnFamilyDescriptor for the column family with the name
     * specified by the parameter column.
     *
     * @param column Column family name
     * @return Column family descriptor for the passed family name, or null if
     * no such family exists.
     */
    @Override
    public ColumnFamilyDescriptor getColumnFamily(final byte[] column) {
      return this.families.get(column);
    }

    /**
     * Removes the ColumnFamilyDescriptor with the name specified by the parameter
     * column from the table descriptor
     *
     * @param column Name of the column family to be removed.
     * @return The removed column family descriptor, or null if the family was
     * not present.
     */
    public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) {
      return this.families.remove(column);
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @param className Full class name.
     * @throws IOException if a coprocessor with the same class name is already attached
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCoprocessor(String className) throws IOException {
      return setCoprocessor(
        CoprocessorDescriptorBuilder.newBuilder(className).setPriority(Coprocessor.PRIORITY_USER)
          .build());
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @throws IOException any illegal parameter key/value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp)
            throws IOException {
      checkHasCoprocessor(cp.getClassName());
      if (cp.getPriority() < 0) {
        throw new IOException("Priority must be greater than or equal to zero, current:"
          + cp.getPriority());
      }
      // Validate parameter kvs and then add key/values to kvString.
      StringBuilder kvString = new StringBuilder();
      for (Map.Entry<String, String> e : cp.getProperties().entrySet()) {
        if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
          throw new IOException("Illegal parameter key = " + e.getKey());
        }
        if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
          throw new IOException("Illegal parameter (" + e.getKey()
                  + ") value = " + e.getValue());
        }
        if (kvString.length() != 0) {
          kvString.append(',');
        }
        kvString.append(e.getKey());
        kvString.append('=');
        kvString.append(e.getValue());
      }

      String value = cp.getJarPath().orElse("")
              + "|" + cp.getClassName() + "|" + Integer.toString(cp.getPriority()) + "|"
              + kvString.toString();
      return setCoprocessorToMap(value);
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @param specStr The Coprocessor specification all in one String
     * @throws IOException if a coprocessor with the same class name is already attached
     * @return the modifyable TD
     * @deprecated used by HTableDescriptor and admin.rb.
     *                       As of release 2.0.0, this will be removed in HBase 3.0.0.
     */
    @Deprecated
    public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr)
      throws IOException {
      CoprocessorDescriptor cpDesc = toCoprocessorDescriptor(specStr).orElseThrow(
        () -> new IllegalArgumentException(
          "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr));
      checkHasCoprocessor(cpDesc.getClassName());
      return setCoprocessorToMap(specStr);
    }

    private void checkHasCoprocessor(final String className) throws IOException {
      if (hasCoprocessor(className)) {
        throw new IOException("Coprocessor " + className + " already exists.");
      }
    }

    /**
     * Add coprocessor to values Map
     * @param specStr The Coprocessor specification all in one String
     * @return Returns <code>this</code>
     */
    private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) {
      if (specStr == null) {
        return this;
      }
      // generate a coprocessor key
      int maxCoprocessorNumber = 0;
      Matcher keyMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
      }
      maxCoprocessorNumber++;
      String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
      return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
    }

    /**
     * Check if the table has an attached co-processor represented by the name
     * className
     *
     * @param classNameToMatch - Class name of the co-processor
     * @return true if the table has a co-processor className
     */
    @Override
    public boolean hasCoprocessor(String classNameToMatch) {
      return getCoprocessorDescriptors().stream().anyMatch(cp -> cp.getClassName()
        .equals(classNameToMatch));
    }

    /**
     * Return the list of attached co-processors represented by their descriptors.
     *
     * @return The list of {@link CoprocessorDescriptor}s
     */
    @Override
    public List<CoprocessorDescriptor> getCoprocessorDescriptors() {
      List<CoprocessorDescriptor> result = new ArrayList<>();
      for (Map.Entry<Bytes, Bytes> e: getValues().entrySet()) {
        String key = Bytes.toString(e.getKey().get()).trim();
        if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) {
          toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim())
            .ifPresent(result::add);
        }
      }
      return result;
    }

    /**
     * Remove a coprocessor from those set on the table
     *
     * @param className Class name of the co-processor
     */
    public void removeCoprocessor(String className) {
      Bytes match = null;
      Matcher keyMatcher;
      Matcher valueMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values
              .entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
                .getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
                .toString(e.getValue().get()));
        if (!valueMatcher.matches()) {
          continue;
        }
        // get className and compare
        String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
        // remove the CP if it is present
        if (clazz.equals(className.trim())) {
          match = e.getKey();
          break;
        }
      }
      // if we found a match, remove it
      if (match != null) {
        ModifyableTableDescriptor.this.removeValue(match);
      }
    }

    /**
     * @return the bytes in pb format
     */
    private byte[] toByteArray() {
      return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
    }

    /**
     * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance
     * with pb magic prefix
     * @return An instance of {@link ModifyableTableDescriptor} made from
     * <code>bytes</code>
     * @throws DeserializationException
     * @see #toByteArray()
     */
    private static TableDescriptor parseFrom(final byte[] bytes)
            throws DeserializationException {
      if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
        throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor");
      }
      int pblen = ProtobufUtil.lengthOfPBMagic();
      HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
      try {
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        return ProtobufUtil.toTableDescriptor(builder.build());
      } catch (IOException e) {
        throw new DeserializationException(e);
      }
    }

    @Override
    public int getColumnFamilyCount() {
      return families.size();
    }

    @Override
    public Optional<String> getRegionServerGroup() {
      Bytes value = values.get(RSGROUP_KEY);
      if (value != null) {
        return Optional.of(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      } else {
        return Optional.empty();
      }
    }
  }

  /**
   * This method is mostly intended for internal use. However, it is also relied on by hbase-shell
   * for backwards compatibility.
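   * <p>
   * For example, the spec documented on {@code CP_HTD_ATTR_VALUE_PATTERN},
   * {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}, yields a
   * descriptor with that jar path, class name, priority 1001, and properties
   * {@code arg1=1} and {@code arg2=2}.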
   */
  private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) {
    Matcher matcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    if (matcher.matches()) {
      // jar file path can be empty if the cp class can be loaded
      // from class loader.
      String path = matcher.group(1).trim().isEmpty() ?
        null : matcher.group(1).trim();
      String className = matcher.group(2).trim();
      if (className.isEmpty()) {
        return Optional.empty();
      }
      String priorityStr = matcher.group(3).trim();
      int priority = priorityStr.isEmpty() ?
        Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr);
      String cfgSpec = null;
      try {
        cfgSpec = matcher.group(4);
      } catch (IndexOutOfBoundsException ex) {
        // ignore
      }
      Map<String, String> ourConf = new TreeMap<>();
      if (cfgSpec != null && !cfgSpec.trim().equals("|")) {
        cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1);
        Matcher m = CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec);
        while (m.find()) {
          ourConf.put(m.group(1), m.group(2));
        }
      }
      return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className)
        .setJarPath(path)
        .setPriority(priority)
        .setProperties(ourConf)
        .build());
    }
    return Optional.empty();
  }
}