/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

/**
 * Convenience class for composing an instance of {@link TableDescriptor}.
 * @since 2.0.0
 */
@InterfaceAudience.Public
public class TableDescriptorBuilder {
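  // A minimal usage sketch (the table and family names are hypothetical; the
  // builder API used is the one defined in this file and in
  // ColumnFamilyDescriptorBuilder):
  //
  //   TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
  //       .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
  //       .build();
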
  public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class);
  @InterfaceAudience.Private
  public static final String SPLIT_POLICY = "SPLIT_POLICY";
  private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY));
  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes the maximum size of the store file after which a
   * region split occurs.
   */
  @InterfaceAudience.Private
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final Bytes MAX_FILESIZE_KEY
          = new Bytes(Bytes.toBytes(MAX_FILESIZE));

  @InterfaceAudience.Private
  public static final String OWNER = "OWNER";
  @InterfaceAudience.Private
  public static final Bytes OWNER_KEY
          = new Bytes(Bytes.toBytes(OWNER));

  /**
   * Used by rest interface to access this metadata attribute
   * which denotes if the table is Read Only.
   */
  @InterfaceAudience.Private
  public static final String READONLY = "READONLY";
  private static final Bytes READONLY_KEY
          = new Bytes(Bytes.toBytes(READONLY));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is compaction enabled.
   */
  @InterfaceAudience.Private
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final Bytes COMPACTION_ENABLED_KEY
          = new Bytes(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata
   * attribute which represents the maximum size of the memstore after which its
   * contents are flushed onto the disk.
   */
  @InterfaceAudience.Private
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final Bytes MEMSTORE_FLUSHSIZE_KEY
          = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  @InterfaceAudience.Private
  public static final String FLUSH_POLICY = "FLUSH_POLICY";
  private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY));
  /**
   * Used by rest interface to access this metadata attribute
   * which denotes if it is a catalog table, i.e. <code>hbase:meta</code>.
   */
  @InterfaceAudience.Private
  public static final String IS_META = "IS_META";
  private static final Bytes IS_META_KEY
          = new Bytes(Bytes.toBytes(IS_META));

  /**
   * {@link Durability} setting for the table.
   */
  @InterfaceAudience.Private
  public static final String DURABILITY = "DURABILITY";
  private static final Bytes DURABILITY_KEY
          = new Bytes(Bytes.toBytes(DURABILITY));

  /**
   * The number of region replicas for the table.
   */
  @InterfaceAudience.Private
  public static final String REGION_REPLICATION = "REGION_REPLICATION";
  private static final Bytes REGION_REPLICATION_KEY
          = new Bytes(Bytes.toBytes(REGION_REPLICATION));

  /**
   * The flag to indicate whether or not the memstore should be
   * replicated for read-replicas (CONSISTENCY =&gt; TIMELINE).
   */
  @InterfaceAudience.Private
  public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
  private static final Bytes REGION_MEMSTORE_REPLICATION_KEY
          = new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));

  private static final Bytes REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY
          = new Bytes(Bytes.toBytes(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY));
  /**
   * Used by shell/rest interface to access this metadata
   * attribute which denotes if the table should be treated by region
   * normalizer.
   */
  @InterfaceAudience.Private
  public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
  private static final Bytes NORMALIZATION_ENABLED_KEY
          = new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_COUNT =
      "NORMALIZER_TARGET_REGION_COUNT";
  private static final Bytes NORMALIZER_TARGET_REGION_COUNT_KEY =
      new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT));

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE";
  private static final Bytes NORMALIZER_TARGET_REGION_SIZE_KEY =
      new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE));

  /**
   * Default durability for HTD is USE_DEFAULT, which defers to the
   * HBase-global default value.
   */
  private static final Durability DEFAULT_DURABILITY = Durability.USE_DEFAULT;

  @InterfaceAudience.Private
  public static final String PRIORITY = "PRIORITY";
  private static final Bytes PRIORITY_KEY
          = new Bytes(Bytes.toBytes(PRIORITY));

  /**
   * Relative priority of the table used for rpc scheduling
   */
  private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS;

  /**
   * Constant that denotes whether the table is READONLY by default; the default is false.
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes whether the table is normalized by default.
   */
  public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;

  /**
   * Constant that denotes the default maximum size of the memstore in bytes after which
   * its contents are flushed to the store files.
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;

  private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
  private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();

  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
            String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
            String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABILITY.name()); //use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
    DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
    DEFAULT_VALUES.keySet().stream()
            .map(s -> new Bytes(Bytes.toBytes(s))).forEach(RESERVED_KEYWORDS::add);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }

  @InterfaceAudience.Private
  public final static String NAMESPACE_FAMILY_INFO = "info";
  @InterfaceAudience.Private
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  @InterfaceAudience.Private
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /**
   * <pre>
   * Pattern that matches a coprocessor specification. Form is:
   * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]}
   * where arguments are {@code <KEY> '=' <VALUE> [,...]}
   * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
   * </pre>
   */
  private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
    Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");

  private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
  private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
  private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile(
    "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" +
      CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
  private static final Pattern CP_HTD_ATTR_KEY_PATTERN =
    Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);
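
  // For illustration, the example spec from the javadoc above,
  //   hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2
  // is decomposed by CP_HTD_ATTR_VALUE_PATTERN into group(1) = jar path,
  // group(2) = class name, group(3) = priority and group(4) = "|arg1=1,arg2=2";
  // the leading '|' of group(4) is stripped before the arguments are parsed with
  // CP_HTD_ATTR_VALUE_PARAM_PATTERN (see toCoprocessorDescriptor at the bottom
  // of this file).
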
  /**
   * Table descriptor for namespace table
   */
  // TODO We used to set CacheDataInL1 for NS table. When we have BucketCache in file mode, now the
  // NS data goes to File mode BC only. Test how that affects the system. If too much, we have to
  // rethink adding back the setCacheDataInL1 for NS table.
  public static final TableDescriptor NAMESPACE_TABLEDESC
    = TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES)
        // Ten is an arbitrary number.  Keep versions to help debugging.
        .setMaxVersions(10)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
        .build())
      .build();
  private final ModifyableTableDescriptor desc;

  /**
   * @param desc The table descriptor to serialize
   * @return This instance serialized with pb, prefixed with the pb magic bytes
   */
  public static byte[] toByteArray(TableDescriptor desc) {
    if (desc instanceof ModifyableTableDescriptor) {
      return ((ModifyableTableDescriptor) desc).toByteArray();
    }
    return new ModifyableTableDescriptor(desc).toByteArray();
  }

  /**
   * The input should be created by {@link #toByteArray}.
   * @param pbBytes A pb serialized TableDescriptor instance with pb magic prefix
   * @return A deserialized {@link TableDescriptor} instance
   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if deserialization fails
   */
  public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException {
    return ModifyableTableDescriptor.parseFrom(pbBytes);
  }
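
  // Round-trip sketch using the two helpers above (the descriptor "td" is
  // hypothetical): serialize for storage, then restore.
  //
  //   byte[] pb = TableDescriptorBuilder.toByteArray(td);
  //   TableDescriptor restored = TableDescriptorBuilder.parseFrom(pb);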

  public static TableDescriptorBuilder newBuilder(final TableName name) {
    return new TableDescriptorBuilder(name);
  }

  public static TableDescriptor copy(TableDescriptor desc) {
    return new ModifyableTableDescriptor(desc);
  }

  public static TableDescriptor copy(TableName name, TableDescriptor desc) {
    return new ModifyableTableDescriptor(name, desc);
  }

  /**
   * Copy all values, families, and name from the input.
   * @param desc The descriptor to copy
   * @return A clone of input
   */
  public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) {
    return new TableDescriptorBuilder(desc);
  }
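
  // Copy-and-modify sketch: clone an existing descriptor and override a single
  // attribute ("existing" and the size below are illustrative only):
  //
  //   TableDescriptor modified = TableDescriptorBuilder.newBuilder(existing)
  //       .setMaxFileSize(10L * 1024 * 1024 * 1024)
  //       .build();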

  private TableDescriptorBuilder(final TableName name) {
    this.desc = new ModifyableTableDescriptor(name);
  }

  private TableDescriptorBuilder(final TableDescriptor desc) {
    this.desc = new ModifyableTableDescriptor(desc);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *                       Use {@link #setCoprocessor(String)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className) throws IOException {
    return addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *                       Use {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className, Path jarFilePath,
    int priority, final Map<String, String> kvs) throws IOException {
    desc.setCoprocessor(
      CoprocessorDescriptorBuilder.newBuilder(className)
        .setJarPath(jarFilePath == null ? null : jarFilePath.toString())
        .setPriority(priority)
        .setProperties(kvs == null ? Collections.emptyMap() : kvs)
        .build());
    return this;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *                       Use {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessorWithSpec(final String specStr) throws IOException {
    desc.setCoprocessorWithSpec(specStr);
    return this;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *                       Use {@link #setColumnFamily(ColumnFamilyDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(family);
    return this;
  }

  public TableDescriptorBuilder setCoprocessor(String className) throws IOException {
    return setCoprocessor(CoprocessorDescriptorBuilder.of(className));
  }

  public TableDescriptorBuilder setCoprocessor(CoprocessorDescriptor cpDesc) throws IOException {
    desc.setCoprocessor(Objects.requireNonNull(cpDesc));
    return this;
  }
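
  // Sketch of registering a coprocessor through a descriptor (the jar path and
  // class name are hypothetical; the priority constant comes from Coprocessor):
  //
  //   builder.setCoprocessor(CoprocessorDescriptorBuilder.newBuilder("com.example.MyObserver")
  //       .setJarPath("hdfs:///cp/my-cp.jar")
  //       .setPriority(Coprocessor.PRIORITY_USER)
  //       .build());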

  public TableDescriptorBuilder setCoprocessors(Collection<CoprocessorDescriptor> cpDescs)
    throws IOException {
    for (CoprocessorDescriptor cpDesc : cpDescs) {
      desc.setCoprocessor(cpDesc);
    }
    return this;
  }

  public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder setColumnFamilies(
    final Collection<ColumnFamilyDescriptor> families) {
    families.forEach(desc::setColumnFamily);
    return this;
  }

  public TableDescriptorBuilder modifyColumnFamily(final ColumnFamilyDescriptor family) {
    desc.modifyColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder removeValue(Bytes key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(byte[] key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeColumnFamily(final byte[] name) {
    desc.removeColumnFamily(name);
    return this;
  }

  public TableDescriptorBuilder removeCoprocessor(String className) {
    desc.removeCoprocessor(className);
    return this;
  }

  public TableDescriptorBuilder setCompactionEnabled(final boolean isEnable) {
    desc.setCompactionEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setDurability(Durability durability) {
    desc.setDurability(durability);
    return this;
  }

  public TableDescriptorBuilder setFlushPolicyClassName(String clazz) {
    desc.setFlushPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(long maxFileSize) {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) {
    desc.setMemStoreFlushSize(memstoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) {
    desc.setNormalizerTargetRegionCount(regionCount);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionSize(final long regionSize) {
    desc.setNormalizerTargetRegionSize(regionSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) {
    desc.setNormalizationEnabled(isEnable);
    return this;
  }

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwner(User owner) {
    desc.setOwner(owner);
    return this;
  }

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwnerString(String ownerString) {
    desc.setOwnerString(ownerString);
    return this;
  }

  public TableDescriptorBuilder setPriority(int priority) {
    desc.setPriority(priority);
    return this;
  }

  public TableDescriptorBuilder setReadOnly(final boolean readOnly) {
    desc.setReadOnly(readOnly);
    return this;
  }

  public TableDescriptorBuilder setRegionMemStoreReplication(boolean memstoreReplication) {
    desc.setRegionMemStoreReplication(memstoreReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionReplication(int regionReplication) {
    desc.setRegionReplication(regionReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionSplitPolicyClassName(String clazz) {
    desc.setRegionSplitPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setValue(final String key, final String value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final Bytes key, final Bytes value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final byte[] key, final byte[] value) {
    desc.setValue(key, value);
    return this;
  }
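
  // Arbitrary metadata can be attached with the setValue overloads and removed
  // again with removeValue; the attribute name below is a hypothetical example:
  //
  //   builder.setValue("my.custom.attribute", "some-value");
  //   builder.removeValue(Bytes.toBytes("my.custom.attribute"));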

  /**
   * Sets the replication scope for all and only the column families already in the builder.
   * Columns added later won't be backfilled with the replication scope.
   * @param scope replication scope
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setReplicationScope(int scope) {
    Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    newFamilies.putAll(desc.families);
    newFamilies
        .forEach((cf, cfDesc) -> {
          desc.removeColumnFamily(cf);
          desc.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope)
              .build());
        });
    return this;
  }
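
  // For example, to mark every column family added so far for cross-cluster
  // replication (the scope constants live in HConstants):
  //
  //   builder.setReplicationScope(HConstants.REPLICATION_SCOPE_GLOBAL);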

  public TableDescriptor build() {
    return new ModifyableTableDescriptor(desc);
  }

  /**
   * TODO: make this private after removing the HTableDescriptor
   */
  @InterfaceAudience.Private
  public static class ModifyableTableDescriptor
          implements TableDescriptor, Comparable<ModifyableTableDescriptor> {

    private final TableName name;

    /**
     * A map which holds the metadata information of the table. This metadata
     * includes values like IS_META, SPLIT_POLICY, MAX_FILESIZE,
     * READONLY, MEMSTORE_FLUSHSIZE etc...
     */
    private final Map<Bytes, Bytes> values = new HashMap<>();

    /**
     * Maps column family name to the respective FamilyDescriptors
     */
    private final Map<byte[], ColumnFamilyDescriptor> families
            = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);

    /**
     * Construct a table descriptor specifying a TableName object
     *
     * @param name Table name.
     * TODO: make this private after removing the HTableDescriptor
     */
    @InterfaceAudience.Private
    public ModifyableTableDescriptor(final TableName name) {
      this(name, Collections.emptyList(), Collections.emptyMap());
    }

    private ModifyableTableDescriptor(final TableDescriptor desc) {
      this(desc.getTableName(), Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    /**
     * Construct a table descriptor by cloning the descriptor passed as a
     * parameter.
     * <p>
     * Makes a deep copy of the supplied descriptor.
     * @param name The new name
     * @param desc The descriptor.
     * TODO: make this private after removing the HTableDescriptor
     */
    @InterfaceAudience.Private
    @Deprecated // only used by HTableDescriptor. remove this method if HTD is removed
    public ModifyableTableDescriptor(final TableName name, final TableDescriptor desc) {
      this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    private ModifyableTableDescriptor(final TableName name,
            final Collection<ColumnFamilyDescriptor> families, Map<Bytes, Bytes> values) {
      this.name = name;
      families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c)));
      this.values.putAll(values);
      this.values.put(IS_META_KEY,
        new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME)))));
    }

    /**
     * Checks if this table is the <code>hbase:meta</code> region.
     *
     * @return true if this table is the <code>hbase:meta</code> region
     */
    @Override
    public boolean isMetaRegion() {
      return getOrDefault(IS_META_KEY, Boolean::valueOf, false);
    }

    /**
     * Checks if the table is a <code>hbase:meta</code> table
     *
     * @return true if this table is the <code>hbase:meta</code> region
     */
    @Override
    public boolean isMetaTable() {
      return isMetaRegion();
    }

    @Override
    public Bytes getValue(Bytes key) {
      Bytes rval = values.get(key);
      return rval == null ? null : new Bytes(rval.copyBytes());
    }

    @Override
    public String getValue(String key) {
      Bytes rval = values.get(new Bytes(Bytes.toBytes(key)));
      return rval == null ? null : Bytes.toString(rval.get(), rval.getOffset(), rval.getLength());
    }

    @Override
    public byte[] getValue(byte[] key) {
      Bytes value = values.get(new Bytes(key));
      return value == null ? null : value.copyBytes();
    }

    private <T> T getOrDefault(Bytes key, Function<String, T> function, T defaultValue) {
      Bytes value = values.get(key);
      if (value == null) {
        return defaultValue;
      } else {
        return function.apply(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      }
    }

    /**
     * Getter for fetching an unmodifiable {@link #values} map.
     *
     * @return unmodifiable map {@link #values}.
     * @see #values
     */
    @Override
    public Map<Bytes, Bytes> getValues() {
      // shallow pointer copy
      return Collections.unmodifiableMap(values);
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     * @return the modifyable TD
     * @see #values
     */
    public ModifyableTableDescriptor setValue(byte[] key, byte[] value) {
      return setValue(toBytesOrNull(key, v -> v),
              toBytesOrNull(value, v -> v));
    }

    public ModifyableTableDescriptor setValue(String key, String value) {
      return setValue(toBytesOrNull(key, Bytes::toBytes),
              toBytesOrNull(value, Bytes::toBytes));
    }

    /*
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    private ModifyableTableDescriptor setValue(final Bytes key,
            final String value) {
      return setValue(key, toBytesOrNull(value, Bytes::toBytes));
    }

    /*
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
      if (value == null) {
        values.remove(key);
      } else {
        values.put(key, value);
      }
      return this;
    }

    private static <T> Bytes toBytesOrNull(T t, Function<T, byte[]> f) {
      if (t == null) {
        return null;
      } else {
        return new Bytes(f.apply(t));
      }
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key The key to remove from the TableDescriptor parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(Bytes key) {
      return setValue(key, (Bytes) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key The key to remove from the TableDescriptor parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final byte[] key) {
      return removeValue(new Bytes(key));
    }

    /**
     * Check if the readOnly flag of the table is set. If the readOnly flag is
     * set then the contents of the table can only be read from but not
     * modified.
     *
     * @return true if all columns in the table should be read only
     */
    @Override
    public boolean isReadOnly() {
      return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY);
    }

    /**
     * Setting the table as read only sets all the columns in the table as read
     * only. By default all tables are modifiable, but if the readOnly flag is
     * set to true then the contents of the table can only be read but not
     * modified.
     *
     * @param readOnly True if all of the columns in the table should be read
     * only.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setReadOnly(final boolean readOnly) {
      return setValue(READONLY_KEY, Boolean.toString(readOnly));
    }

    /**
     * Check if the compaction enable flag of the table is true. If the flag is
     * false then no minor/major compactions will be performed.
     *
     * @return true if table compaction enabled
     */
    @Override
    public boolean isCompactionEnabled() {
      return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED);
    }

    /**
     * Setting the table compaction enable flag.
     *
     * @param isEnable True if compaction should be enabled.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) {
      return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the normalization enable flag of the table is true. If the flag
     * is false then the region normalizer will not attempt to normalize this table.
     *
     * @return true if region normalization is enabled for this table
     */
    @Override
    public boolean isNormalizationEnabled() {
      return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, DEFAULT_NORMALIZATION_ENABLED);
    }

    /**
     * Check if there is a target region count. If so, the normalize plan will be calculated based
     * on the target region count.
     * @return target region count after normalization is done
     */
    @Override
    public int getNormalizerTargetRegionCount() {
      return getOrDefault(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer::valueOf,
        Integer.valueOf(-1));
    }

    /**
     * Check if there is a target region size. If so, the normalize plan will be calculated based
     * on the target region size.
     * @return target region size after normalization is done
     */
    @Override
    public long getNormalizerTargetRegionSize() {
      return getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1));
    }

    /**
     * Setting the table normalization enable flag.
     *
     * @param isEnable True if normalization should be enabled.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) {
      return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Setting the target region count of table normalization.
     * @param regionCount the target region count.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionCount(final int regionCount) {
      return setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer.toString(regionCount));
    }

    /**
     * Setting the target region size of table normalization.
     * @param regionSize the target region size.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long regionSize) {
      return setValue(NORMALIZER_TARGET_REGION_SIZE_KEY, Long.toString(regionSize));
    }

    /**
     * Sets the {@link Durability} setting for the table. This defaults to
     * Durability.USE_DEFAULT.
     *
     * @param durability enum value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setDurability(Durability durability) {
      return setValue(DURABILITY_KEY, durability.name());
    }

    /**
     * Returns the durability setting for the table.
     *
     * @return durability setting for the table.
     */
    @Override
    public Durability getDurability() {
      return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABILITY);
    }

    /**
     * Get the name of the table
     *
     * @return TableName
     */
    @Override
    public TableName getTableName() {
      return name;
    }

    /**
     * This sets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     *
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) {
      return setValue(SPLIT_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     *
     * @return the class name of the region split policy for this table. If this
     * returns null, the default split policy is used.
     */
    @Override
    public String getRegionSplitPolicyClassName() {
      return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Returns the maximum size up to which a region can grow, after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region.
     *
     * @return max hregion size for table, -1 if not set.
     *
     * @see #setMaxFileSize(long)
     */
    @Override
    public long getMaxFileSize() {
      return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Sets the maximum size up to which a region can grow, after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region, i.e. if the biggest store file
     * grows beyond the maxFileSize, then the region split is triggered. The
     * default is given by {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
     * <p>
     * This is not an absolute value and might vary. Assume that a single row
     * exceeds the maxFileSize, then the storeFileSize will be greater than
     * maxFileSize since a single row cannot be split across multiple regions.
     * </p>
     *
     * @param maxFileSize The maximum file size that a store file can grow to
     * before a split is triggered.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) {
      return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    }

    /**
     * Returns the size of the memstore after which a flush to filesystem is
     * triggered.
     *
     * @return memory cache flush size for each hregion, -1 if not set.
     *
     * @see #setMemStoreFlushSize(long)
     */
    @Override
    public long getMemStoreFlushSize() {
      return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Represents the maximum size of the memstore after which the contents of
     * the memstore are flushed to the filesystem. This defaults to
     * {@code DEFAULT_MEMSTORE_FLUSH_SIZE} (128 MB).
     *
     * @param memstoreFlushSize memory cache flush size for each hregion
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
      return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    }

    /**
     * This sets the class associated with the flush policy which determines
     * which stores need to be flushed when flushing a region. The
     * class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.FlushPolicy.
     *
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) {
      return setValue(FLUSH_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the flush policy which determines
     * which stores need to be flushed when flushing a region. The class used by
     * default is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy.
     *
     * @return the class name of the flush policy for this table. If this
     * returns null, the default flush policy is used.
     */
    @Override
    public String getFlushPolicyClassName() {
      return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Adds a column family. For the updating purpose please use
     * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead.
     *
     * @param family to add.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      if (hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException("Family '"
                + family.getNameAsString() + "' already exists so cannot be added");
      }
      return putColumnFamily(family);
    }

    /**
     * Modifies the existing column family.
     *
     * @param family to update
     * @return this (for chained invocation)
     */
    public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) {
      if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
      }
      if (!hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException("Column family '" + family.getNameAsString()
                + "' does not exist");
      }
      return putColumnFamily(family);
    }

    private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) {
      families.put(family.getName(), family);
      return this;
    }

    /**
     * Checks to see if this table contains the given column family
     *
     * @param familyName Family name or column name.
     * @return true if the table contains the specified family name
     */
    @Override
    public boolean hasColumnFamily(final byte[] familyName) {
      return families.containsKey(familyName);
    }

    /**
     * @return Name of this table and then a map of all of the column family descriptors.
     */
    @Override
    public String toString() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(true));
      families.values().forEach(f -> s.append(", ").append(f));
      return s.toString();
    }
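
    // Illustrative shape of the output produced above (attribute and family
    // details vary with the descriptor's contents):
    //   'example', {TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', ...}}, {NAME => 'cf', ...}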

    /**
     * @return Name of this table and then a map of all of the column family
     * descriptors (with only the non-default column family attributes)
     */
    public String toStringCustomizedValues() {
      StringBuilder s = new StringBuilder();
      s.append('\'').append(Bytes.toString(name.getName())).append('\'');
      s.append(getValues(false));
      families.values().forEach(hcd -> s.append(", ").append(hcd.toStringCustomizedValues()));
      return s.toString();
    }

    /**
     * @return map of all table attributes formatted into string.
     */
    public String toStringTableAttributes() {
      return getValues(true).toString();
    }

    private StringBuilder getValues(boolean printDefaults) {
      StringBuilder s = new StringBuilder();

      // step 1: set partitioning and pruning
      Set<Bytes> reservedKeys = new TreeSet<>();
      Set<Bytes> userKeys = new TreeSet<>();
      for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
        if (entry.getKey() == null || entry.getKey().get() == null) {
          continue;
        }
        String key = Bytes.toString(entry.getKey().get());
        // in this section, print out reserved keywords + coprocessor info
        if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
          userKeys.add(entry.getKey());
          continue;
        }
        // only print out IS_META if true
        String value = Bytes.toString(entry.getValue().get());
        if (key.equalsIgnoreCase(IS_META) && !Boolean.parseBoolean(value)) {
          continue;
        }
        // see if a reserved key is a default value. may not want to print it out
        if (printDefaults
                || !DEFAULT_VALUES.containsKey(key)
                || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
          reservedKeys.add(entry.getKey());
        }
      }

      // early exit optimization
      boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
      if (!hasAttributes) {
        return s;
      }

      s.append(", {");
      // step 2: printing attributes
      if (hasAttributes) {
        s.append("TABLE_ATTRIBUTES => {");

        // print all reserved keys first
        boolean printCommaForAttr = false;
        for (Bytes k : reservedKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForAttr) {
            s.append(", ");
          }
          printCommaForAttr = true;
          s.append(key);
          s.append(" => ");
          s.append('\'').append(value).append('\'');
        }

        if (!userKeys.isEmpty()) {
          // print all non-reserved as a separate subset
          if (printCommaForAttr) {
            s.append(", ");
          }
          s.append(HConstants.METADATA).append(" => ");
          s.append("{");
          boolean printCommaForCfg = false;
          for (Bytes k : userKeys) {
            String key = Bytes.toString(k.get());
            String value = Bytes.toStringBinary(values.get(k).get());
            if (printCommaForCfg) {
              s.append(", ");
            }
            printCommaForCfg = true;
            s.append('\'').append(key).append('\'');
            s.append(" => ");
            s.append('\'').append(value).append('\'');
          }
          s.append("}");
        }
      }

      s.append("}"); // end METHOD
      return s;
    }

    /**
     * Compare the contents of the descriptor with another one passed as a
     * parameter. Checks if the obj passed is an instance of ModifyableTableDescriptor,
     * if yes then the contents of the descriptors are compared.
     *
     * @param obj The object to compare
     * @return true if the contents of the two descriptors exactly match
     *
     * @see java.lang.Object#equals(java.lang.Object)
     */
    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (obj instanceof ModifyableTableDescriptor) {
        return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0;
      }
      return false;
    }

    /**
     * @return hash code
     */
    @Override
    public int hashCode() {
      int result = this.name.hashCode();
      if (this.families.size() > 0) {
        for (ColumnFamilyDescriptor e : this.families.values()) {
          result ^= e.hashCode();
        }
      }
      result ^= values.hashCode();
      return result;
    }

    // Comparable
    /**
     * Compares the descriptor with another descriptor which is passed as a
     * parameter. This compares the content of the two descriptors and not the
     * reference.
     *
     * @param other The MTD to compare
     * @return 0 if the contents of the descriptors match exactly; a non-zero
     * value otherwise
     */
    @Override
    public int compareTo(final ModifyableTableDescriptor other) {
      return TableDescriptor.COMPARATOR.compare(this, other);
    }

    @Override
    public ColumnFamilyDescriptor[] getColumnFamilies() {
      return families.values().toArray(new ColumnFamilyDescriptor[families.size()]);
    }

    /**
     * Returns the configured replicas per region
     */
    @Override
    public int getRegionReplication() {
      return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION);
    }

    /**
     * Sets the number of replicas per region.
     *
     * @param regionReplication the replication factor per region
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionReplication(int regionReplication) {
      return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication));
    }

    /**
     * @return true if the read-replicas memstore replication is enabled.
     */
    @Override
    public boolean hasRegionMemStoreReplication() {
      return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf,
        DEFAULT_REGION_MEMSTORE_REPLICATION);
    }

    /**
     * Enable or Disable the memstore replication from the primary region to the
     * replicas. The replication will be used only for meta operations (e.g.
     * flush, compaction, ...)
     *
     * @param memstoreReplication true if the new data written to the primary
     * region should be replicated. false if the secondaries can tolerate
     * having new data only when the primary flushes the memstore.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) {
      setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication));
      // If memstore replication is set up, we do not have to wait for observing a flush event
      // from the primary before starting to serve reads, because gaps from replication are not
      // applicable.
      return setValue(REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
              Boolean.toString(memstoreReplication));
    }
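
    // Read-replica sketch: three replicas per region, with memstore replication
    // left enabled (its default); the replica count below is illustrative only:
    //
    //   descriptor.setRegionReplication(3).setRegionMemStoreReplication(true);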

    public ModifyableTableDescriptor setPriority(int priority) {
      return setValue(PRIORITY_KEY, Integer.toString(priority));
    }

    @Override
    public int getPriority() {
      return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY);
    }

    /**
     * Returns all the column family names of the current table. The map of
     * TableDescriptor contains mapping of family name to ColumnFamilyDescriptor.
     * This returns all the keys of the family map which represents the column
     * family names of the table.
     *
     * @return Immutable sorted set of the keys of the families.
     */
    @Override
    public Set<byte[]> getColumnFamilyNames() {
      return Collections.unmodifiableSet(this.families.keySet());
    }

    /**
     * Returns the ColumnFamilyDescriptor for a specific column family with name as
     * specified by the parameter column.
     *
     * @param column Column family name
     * @return Column descriptor for the passed family name or the family on
     * passed in column.
     */
    @Override
    public ColumnFamilyDescriptor getColumnFamily(final byte[] column) {
      return this.families.get(column);
    }

    /**
     * Removes the ColumnFamilyDescriptor with name specified by the parameter column
     * from the table descriptor
     *
     * @param column Name of the column family to be removed.
     * @return Column descriptor for the passed family name or the family on
     * passed in column.
     */
    public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) {
      return this.families.remove(column);
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @param className Full class name.
     * @throws IOException
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCoprocessor(String className) throws IOException {
      return setCoprocessor(
        CoprocessorDescriptorBuilder.newBuilder(className).setPriority(Coprocessor.PRIORITY_USER)
          .build());
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @throws IOException any illegal parameter key/value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp)
            throws IOException {
      checkHasCoprocessor(cp.getClassName());
      if (cp.getPriority() < 0) {
        throw new IOException("Priority must be greater than or equal to zero, current:"
          + cp.getPriority());
      }
      // Validate parameter kvs and then add key/values to kvString.
      StringBuilder kvString = new StringBuilder();
      for (Map.Entry<String, String> e : cp.getProperties().entrySet()) {
        if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
          throw new IOException("Illegal parameter key = " + e.getKey());
        }
        if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
          throw new IOException("Illegal parameter (" + e.getKey()
                  + ") value = " + e.getValue());
        }
        if (kvString.length() != 0) {
          kvString.append(',');
        }
        kvString.append(e.getKey());
        kvString.append('=');
        kvString.append(e.getValue());
      }

      String value = cp.getJarPath().orElse("")
              + "|" + cp.getClassName() + "|" + Integer.toString(cp.getPriority()) + "|"
              + kvString.toString();
      return setCoprocessorToMap(value);
    }
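
    // The value composed above follows the CP_HTD_ATTR_VALUE_PATTERN form, e.g.
    // (with hypothetical values): hdfs:///cp/my-cp.jar|com.example.MyObserver|1001|k1=v1,k2=v2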

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     *
     * @param specStr The Coprocessor specification all in one String
     * @throws IOException
     * @return the modifyable TD
     * @deprecated used by HTableDescriptor and admin.rb.
     *                       As of release 2.0.0, this will be removed in HBase 3.0.0.
     */
    @Deprecated
    public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr)
      throws IOException {
      CoprocessorDescriptor cpDesc = toCoprocessorDescriptor(specStr).orElseThrow(
        () -> new IllegalArgumentException(
          "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr));
      checkHasCoprocessor(cpDesc.getClassName());
      return setCoprocessorToMap(specStr);
    }

    private void checkHasCoprocessor(final String className) throws IOException {
      if (hasCoprocessor(className)) {
        throw new IOException("Coprocessor " + className + " already exists.");
      }
    }

    /**
     * Add coprocessor to values Map
     * @param specStr The Coprocessor specification all in one String
     * @return Returns <code>this</code>
     */
    private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) {
      if (specStr == null) {
        return this;
      }
      // generate a coprocessor key
      int maxCoprocessorNumber = 0;
      Matcher keyMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        maxCoprocessorNumber =
          Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
      }
      maxCoprocessorNumber++;
      String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
      return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
    }
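
    // Keys are numbered from the current maximum, so two coprocessors registered
    // in turn land under "coprocessor$1" and "coprocessor$2"; removing one leaves
    // a gap that the max-scan above simply skips past.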

    /**
     * Check if the table has an attached co-processor represented by the name
     * className
     *
     * @param classNameToMatch - Class name of the co-processor
     * @return true if the table has a co-processor className
     */
    @Override
    public boolean hasCoprocessor(String classNameToMatch) {
      return getCoprocessorDescriptors().stream().anyMatch(cp -> cp.getClassName()
        .equals(classNameToMatch));
    }

    /**
     * Return the list of attached co-processors represented by their name
     * className
     *
     * @return The list of co-processor classNames
     */
    @Override
    public List<CoprocessorDescriptor> getCoprocessorDescriptors() {
      List<CoprocessorDescriptor> result = new ArrayList<>();
      for (Map.Entry<Bytes, Bytes> e: getValues().entrySet()) {
        String key = Bytes.toString(e.getKey().get()).trim();
        if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) {
          toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim())
            .ifPresent(result::add);
        }
      }
      return result;
    }

    /**
     * Remove a coprocessor from those set on the table
     *
     * @param className Class name of the co-processor
     */
    public void removeCoprocessor(String className) {
      Bytes match = null;
      Matcher keyMatcher;
      Matcher valueMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values
              .entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
                .getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
                .toString(e.getValue().get()));
        if (!valueMatcher.matches()) {
          continue;
        }
        // get className and compare
        String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
        // remove the CP if it is present
        if (clazz.equals(className.trim())) {
          match = e.getKey();
          break;
        }
      }
      // if we found a match, remove it
      if (match != null) {
        ModifyableTableDescriptor.this.removeValue(match);
      }
    }

    /**
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    @Deprecated
    public ModifyableTableDescriptor setOwner(User owner) {
      return setOwnerString(owner != null ? owner.getShortName() : null);
    }

    /**
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    // used by admin.rb:alter(table_name,*args) to update owner.
    @Deprecated
    public ModifyableTableDescriptor setOwnerString(String ownerString) {
      return setValue(OWNER_KEY, ownerString);
    }

    /**
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    @Override
    @Deprecated
    public String getOwnerString() {
      // Note that every table should have an owner (i.e. should have OWNER_KEY set).
      // hbase:meta should return system user as owner, not null (see
      // MasterFileSystem.java:bootstrap()).
      return getOrDefault(OWNER_KEY, Function.identity(), null);
    }

    /**
     * @return the bytes in pb format
     */
    private byte[] toByteArray() {
      return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
    }

    /**
     * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance
     * with pb magic prefix
     * @return An instance of {@link ModifyableTableDescriptor} made from
     * <code>bytes</code>
     * @throws DeserializationException
     * @see #toByteArray()
     */
    private static TableDescriptor parseFrom(final byte[] bytes)
            throws DeserializationException {
      if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
        throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor");
      }
      int pblen = ProtobufUtil.lengthOfPBMagic();
      HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
      try {
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        return ProtobufUtil.toTableDescriptor(builder.build());
      } catch (IOException e) {
        throw new DeserializationException(e);
      }
    }

    @Override
    public int getColumnFamilyCount() {
      return families.size();
    }
  }

  private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) {
    Matcher matcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    if (matcher.matches()) {
      // jar file path can be empty if the cp class can be loaded
      // from class loader.
      String path = matcher.group(1).trim().isEmpty() ?
        null : matcher.group(1).trim();
      String className = matcher.group(2).trim();
      if (className.isEmpty()) {
        return Optional.empty();
      }
      String priorityStr = matcher.group(3).trim();
      int priority = priorityStr.isEmpty() ?
        Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr);
      String cfgSpec = null;
      try {
        cfgSpec = matcher.group(4);
      } catch (IndexOutOfBoundsException ex) {
        // ignore
      }
      Map<String, String> ourConf = new TreeMap<>();
      if (cfgSpec != null && !cfgSpec.trim().equals("|")) {
        cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1);
        Matcher m = CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec);
        while (m.find()) {
          ourConf.put(m.group(1), m.group(2));
        }
      }
      return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className)
        .setJarPath(path)
        .setPriority(priority)
        .setProperties(ourConf)
        .build());
    }
    return Optional.empty();
  }
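
  // Parsing sketch, reusing the example from the pattern javadoc near the top
  // of this file:
  //
  //   toCoprocessorDescriptor("hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2")
  //
  // yields a descriptor with jar path "hdfs:///foo.jar", class name
  // "com.foo.FooRegionObserver", priority 1001 and properties {arg1=1, arg2=2}.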
}