1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.DataInput;
22  import java.io.DataOutput;
23  import java.io.IOException;
24  import java.util.ArrayList;
25  import java.util.Collection;
26  import java.util.Collections;
27  import java.util.HashMap;
28  import java.util.HashSet;
29  import java.util.Iterator;
30  import java.util.List;
31  import java.util.Map;
32  import java.util.Set;
33  import java.util.TreeMap;
34  import java.util.TreeSet;
35  import java.util.regex.Matcher;
36  
37  import org.apache.hadoop.hbase.util.ByteStringer;
38  import org.apache.commons.logging.Log;
39  import org.apache.commons.logging.LogFactory;
40  import org.apache.hadoop.hbase.classification.InterfaceAudience;
41  import org.apache.hadoop.hbase.classification.InterfaceStability;
42  import org.apache.hadoop.conf.Configuration;
43  import org.apache.hadoop.fs.Path;
44  import org.apache.hadoop.hbase.client.Durability;
45  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
46  import org.apache.hadoop.hbase.exceptions.DeserializationException;
47  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
48  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
49  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
50  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
51  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
52  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
53  import org.apache.hadoop.hbase.regionserver.BloomType;
54  import org.apache.hadoop.hbase.security.User;
55  import org.apache.hadoop.hbase.util.Bytes;
56  import org.apache.hadoop.hbase.util.Writables;
57  import org.apache.hadoop.io.WritableComparable;
58  
59  /**
60   * HTableDescriptor contains the details about an HBase table, such as the descriptors of
61   * all the column families, whether the table is a catalog table (<code> -ROOT- </code> or
62   * <code> hbase:meta </code>), whether the table is read only, the maximum size of the
63   * memstore, when a region split should occur, the coprocessors associated with it, etc.
64   */
65  @InterfaceAudience.Public
66  @InterfaceStability.Evolving
67  public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
68  
69    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
70  
71    /**
72     *  Changes prior to version 3 were not recorded here.
73     *  Version 3 adds metadata as a map where keys and values are byte[].
74     *  Version 4 adds indexes
75     *  Version 5 removed transactional pollution -- e.g. indexes
76     *  Version 6 changed metadata to BytesBytesPair in PB
77     *  Version 7 adds table-level configuration
78     */
79    private static final byte TABLE_DESCRIPTOR_VERSION = 7;
80  
81    private TableName name = null;
82  
83    /**
84     * A map which holds the metadata information of the table. This metadata
85     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
86     * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
87     */
88    private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
89      new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
90  
91    /**
92     * A map which holds the configuration specific to the table.
93     * The keys of the map have the same names as config keys and override the defaults with
94     * table-specific settings. Example usage may be for compactions, etc.
95     */
96    private final Map<String, String> configuration = new HashMap<String, String>();
97  
98    public static final String SPLIT_POLICY = "SPLIT_POLICY";
99  
100   /**
101    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
102    * attribute which denotes the maximum size of the store file after which
103    * a region split occurs
104    *
105    * @see #getMaxFileSize()
106    */
107   public static final String MAX_FILESIZE = "MAX_FILESIZE";
108   private static final ImmutableBytesWritable MAX_FILESIZE_KEY =
109     new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));
110 
111   public static final String OWNER = "OWNER";
112   public static final ImmutableBytesWritable OWNER_KEY =
113     new ImmutableBytesWritable(Bytes.toBytes(OWNER));
114 
115   /**
116    * <em>INTERNAL</em> Used by rest interface to access this metadata
117    * attribute which denotes if the table is read-only
118    *
119    * @see #isReadOnly()
120    */
121   public static final String READONLY = "READONLY";
122   private static final ImmutableBytesWritable READONLY_KEY =
123     new ImmutableBytesWritable(Bytes.toBytes(READONLY));
124 
125   /**
126    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
127    * attribute which denotes whether compaction is enabled for the table
128    *
129    * @see #isCompactionEnabled()
130    */
131   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
132   private static final ImmutableBytesWritable COMPACTION_ENABLED_KEY =
133     new ImmutableBytesWritable(Bytes.toBytes(COMPACTION_ENABLED));
134 
135   /**
136    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
137    * attribute which represents the maximum size of the memstore after which
138    * its contents are flushed onto the disk
139    *
140    * @see #getMemStoreFlushSize()
141    */
142   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
143   private static final ImmutableBytesWritable MEMSTORE_FLUSHSIZE_KEY =
144     new ImmutableBytesWritable(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
145 
146   public static final String FLUSH_POLICY = "FLUSH_POLICY";
147 
148   /**
149    * <em>INTERNAL</em> Used by rest interface to access this metadata
150    * attribute which denotes if the table is a -ROOT- region or not
151    *
152    * @see #isRootRegion()
153    */
154   public static final String IS_ROOT = "IS_ROOT";
155   private static final ImmutableBytesWritable IS_ROOT_KEY =
156     new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT));
157 
158   /**
159    * <em>INTERNAL</em> Used by rest interface to access this metadata
160    * attribute which denotes if it is a catalog table, either
161    * <code> hbase:meta </code> or <code> -ROOT- </code>
162    *
163    * @see #isMetaRegion()
164    */
165   public static final String IS_META = "IS_META";
166   private static final ImmutableBytesWritable IS_META_KEY =
167     new ImmutableBytesWritable(Bytes.toBytes(IS_META));
168 
169   /**
170    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
171    * attribute which denotes if the deferred log flush option is enabled.
172    * @deprecated Use {@link #DURABILITY} instead.
173    */
174   @Deprecated
175   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
176   @Deprecated
177   private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY =
178     new ImmutableBytesWritable(Bytes.toBytes(DEFERRED_LOG_FLUSH));
179 
180   /**
181    * <em>INTERNAL</em> {@link Durability} setting for the table.
182    */
183   public static final String DURABILITY = "DURABILITY";
184   private static final ImmutableBytesWritable DURABILITY_KEY =
185       new ImmutableBytesWritable(Bytes.toBytes("DURABILITY"));
186 
187   /**
188    * <em>INTERNAL</em> number of region replicas for the table.
189    */
190   public static final String REGION_REPLICATION = "REGION_REPLICATION";
191   private static final ImmutableBytesWritable REGION_REPLICATION_KEY =
192       new ImmutableBytesWritable(Bytes.toBytes(REGION_REPLICATION));
193 
194   /**
195    * <em>INTERNAL</em> flag to indicate whether or not the memstore should be replicated
196    * for read-replicas (CONSISTENCY =&gt; TIMELINE).
197    */
198   public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
199   private static final ImmutableBytesWritable REGION_MEMSTORE_REPLICATION_KEY =
200       new ImmutableBytesWritable(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));
201 
202   /**
203    * <em>INTERNAL</em> Used by shell/rest interface to access this metadata
204    * attribute which denotes whether the table should be handled by the region normalizer.
205    *
206    * @see #isNormalizationEnabled()
207    */
208   public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
209   private static final ImmutableBytesWritable NORMALIZATION_ENABLED_KEY =
210     new ImmutableBytesWritable(Bytes.toBytes(NORMALIZATION_ENABLED));
211 
212   /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
213   private static final Durability DEFAULT_DURABILITY = Durability.USE_DEFAULT;
214 
215   /*
216    *  The below are ugly but better than creating them each time till we
217    *  replace booleans being saved as Strings with plain booleans.  Need a
218    *  migration script to do this.  TODO.
219    */
220   private static final ImmutableBytesWritable FALSE =
221     new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString()));
222 
223   private static final ImmutableBytesWritable TRUE =
224     new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));
225 
226   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
227 
228   /**
229    * Constant that denotes the default read-only state of a table, which is false (writable)
230    */
231   public static final boolean DEFAULT_READONLY = false;
232 
233   /**
234    * Constant that denotes whether the table is compaction enabled by default
235    */
236   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
237 
238   /**
239    * Constant that denotes whether the table is normalized by default.
240    */
241   public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;
242 
243   /**
244    * Constant that denotes the default maximum size of the memstore (128 MB) after
245    * which the contents are flushed to the store files
246    */
247   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
248 
249   public static final int DEFAULT_REGION_REPLICATION = 1;
250 
251   public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;
252 
253   private final static Map<String, String> DEFAULT_VALUES
254     = new HashMap<String, String>();
255   private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
256     = new HashSet<ImmutableBytesWritable>();
257   static {
258     DEFAULT_VALUES.put(MAX_FILESIZE,
259         String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
260     DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
261     DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
262         String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
263     DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
264         String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
265     DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABILITY.name()); //use the enum name
266     DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
267     DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
268     for (String s : DEFAULT_VALUES.keySet()) {
269       RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
270     }
271     RESERVED_KEYWORDS.add(IS_ROOT_KEY);
272     RESERVED_KEYWORDS.add(IS_META_KEY);
273   }
274 
275   /**
276    * Cache of whether this is a meta table or not.
277    */
278   private volatile Boolean meta = null;
279   /**
280    * Cache of whether this is root table or not.
281    */
282   private volatile Boolean root = null;
283 
284   /**
285    * Durability setting for the table
286    */
287   private Durability durability = null;
288 
289   /**
290    * Maps column family name to the respective HColumnDescriptors
291    */
292   private final Map<byte [], HColumnDescriptor> families =
293     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
294 
295   /**
296    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
297    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
298    */
299   @InterfaceAudience.Private
300   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
301     setName(name);
302     for(HColumnDescriptor descriptor : families) {
303       this.families.put(descriptor.getName(), descriptor);
304     }
305   }
306 
307   /**
308    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
309    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
310    */
311   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
312       Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
313     setName(name);
314     for(HColumnDescriptor descriptor : families) {
315       this.families.put(descriptor.getName(), descriptor);
316     }
317     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
318         values.entrySet()) {
319       setValue(entry.getKey(), entry.getValue());
320     }
321   }
322 
323   /**
324    * Default constructor which constructs an empty object.
325    * For deserializing an HTableDescriptor instance only.
326    * @deprecated As of release 0.96
327    *             (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
328    *             This will be removed in HBase 2.0.0.
329    *             Used by Writables and Writables are going away.
330    */
331   @Deprecated
332   public HTableDescriptor() {
333     super();
334   }
335 
336   /**
337    * Construct a table descriptor specifying a TableName object
338    * @param name Table name.
339    * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 HBASE: (HBASE-174) Un-openable tablename bug</a>
340    */
341   public HTableDescriptor(final TableName name) {
342     super();
343     setName(name);
344   }
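
  /*
   * Usage sketch (illustrative; the table name, family name, and the "admin"
   * variable, an org.apache.hadoop.hbase.client.Admin instance, are hypothetical):
   *
   *   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("mytable"));
   *   htd.addFamily(new HColumnDescriptor("cf"));
   *   admin.createTable(htd);
   */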
345 
346   /**
347    * Construct a table descriptor specifying a byte array table name
348    * @param name Table name.
349    * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 HBASE: (HBASE-174) Un-openable tablename bug</a>
350    */
351   @Deprecated
352   public HTableDescriptor(final byte[] name) {
353     this(TableName.valueOf(name));
354   }
355 
356   /**
357    * Construct a table descriptor specifying a String table name
358    * @param name Table name.
359    * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 HBASE: (HBASE-174) Un-openable tablename bug</a>
360    */
361   @Deprecated
362   public HTableDescriptor(final String name) {
363     this(TableName.valueOf(name));
364   }
365 
366   /**
367    * Construct a table descriptor by cloning the descriptor passed as a parameter.
368    * <p>
369    * Makes a deep copy of the supplied descriptor.
370    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
371    * @param desc The descriptor.
372    */
373   public HTableDescriptor(final HTableDescriptor desc) {
374     this(desc.name, desc);
375   }
376 
377   /**
378    * Construct a table descriptor by cloning the descriptor passed as a parameter
379    * but using a different table name.
380    * <p>
381    * Makes a deep copy of the supplied descriptor.
382    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
383    * @param name Table name.
384    * @param desc The descriptor.
385    */
386   public HTableDescriptor(final TableName name, final HTableDescriptor desc) {
387     super();
388     setName(name);
389     setMetaFlags(this.name);
390     for (HColumnDescriptor c: desc.families.values()) {
391       this.families.put(c.getName(), new HColumnDescriptor(c));
392     }
393     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
394         desc.values.entrySet()) {
395       setValue(e.getKey(), e.getValue());
396     }
397     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
398       this.configuration.put(e.getKey(), e.getValue());
399     }
400   }
401 
402   /*
403    * Set meta flags on this table.
404    * IS_ROOT_KEY is set if it is a -ROOT- table
405    * IS_META_KEY is set if it is either a -ROOT- or a hbase:meta table
406    * Called by constructors.
407    * @param name
408    */
409   private void setMetaFlags(final TableName name) {
410     setMetaRegion(isRootRegion() ||
411       name.equals(TableName.META_TABLE_NAME));
412   }
413 
414   /**
415    * Check if the descriptor represents a <code> -ROOT- </code> region.
416    *
417    * @return true if this is a <code> -ROOT- </code> region
418    */
419   public boolean isRootRegion() {
420     if (this.root == null) {
421       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
422     }
423     return this.root.booleanValue();
424   }
425 
426   /**
427    * <em> INTERNAL </em> Used to denote if the current table represents
428    * <code> -ROOT- </code> region. This is used internally by the
429    * HTableDescriptor constructors
430    *
431    * @param isRoot true if this is the <code> -ROOT- </code> region
432    */
433   protected void setRootRegion(boolean isRoot) {
434     // TODO: Make the value a boolean rather than String of boolean.
435     setValue(IS_ROOT_KEY, isRoot ? TRUE : FALSE);
436   }
437 
438   /**
439    * Checks if this table is <code> hbase:meta </code>
440    * region.
441    *
442    * @return true if this table is <code> hbase:meta </code>
443    * region
444    */
445   public boolean isMetaRegion() {
446     if (this.meta == null) {
447       this.meta = calculateIsMetaRegion();
448     }
449     return this.meta.booleanValue();
450   }
451 
452   private synchronized Boolean calculateIsMetaRegion() {
453     byte [] value = getValue(IS_META_KEY);
454     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
455   }
456 
457   private boolean isSomething(final ImmutableBytesWritable key,
458       final boolean valueIfNull) {
459     byte [] value = getValue(key);
460     if (value != null) {
461       return Boolean.valueOf(Bytes.toString(value));
462     }
463     return valueIfNull;
464   }
465 
466   /**
467    * <em> INTERNAL </em> Used to denote if the current table represents
468    * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
469    * internally by the HTableDescriptor constructors
470    *
471    * @param isMeta true if it is either <code> -ROOT- </code> or
472    * <code> hbase:meta </code> region
473    */
474   protected void setMetaRegion(boolean isMeta) {
475     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
476   }
477 
478   /**
479    * Checks if the table is a <code>hbase:meta</code> table
480    *
481    * @return true if table is <code> hbase:meta </code> region.
482    */
483   public boolean isMetaTable() {
484     return isMetaRegion() && !isRootRegion();
485   }
486 
487   /**
488    * Getter for accessing the metadata associated with the key
489    *
490    * @param key The key.
491    * @return The value.
492    * @see #values
493    */
494   public byte[] getValue(byte[] key) {
495     return getValue(new ImmutableBytesWritable(key));
496   }
497 
498   private byte[] getValue(final ImmutableBytesWritable key) {
499     ImmutableBytesWritable ibw = values.get(key);
500     if (ibw == null)
501       return null;
502     return ibw.get();
503   }
504 
505   /**
506    * Getter for accessing the metadata associated with the key
507    *
508    * @param key The key.
509    * @return The value.
510    * @see #values
511    */
512   public String getValue(String key) {
513     byte[] value = getValue(Bytes.toBytes(key));
514     if (value == null)
515       return null;
516     return Bytes.toString(value);
517   }
518 
519   /**
520    * Getter for fetching an unmodifiable {@link #values} map.
521    *
522    * @return unmodifiable map {@link #values}.
523    * @see #values
524    */
525   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
526     // shallow pointer copy
527     return Collections.unmodifiableMap(values);
528   }
529 
530   /**
531    * Setter for storing metadata as a (key, value) pair in {@link #values} map
532    *
533    * @param key The key.
534    * @param value The value.
535    * @see #values
536    */
537   public HTableDescriptor setValue(byte[] key, byte[] value) {
538     setValue(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value));
539     return this;
540   }
541 
542   /*
543    * @param key The key.
544    * @param value The value.
545    */
546   private HTableDescriptor setValue(final ImmutableBytesWritable key,
547       final String value) {
548     setValue(key, new ImmutableBytesWritable(Bytes.toBytes(value)));
549     return this;
550   }
551 
552   /*
553    * Setter for storing metadata as a (key, value) pair in {@link #values} map
554    *
555    * @param key The key.
556    * @param value The value.
557    */
558   public HTableDescriptor setValue(final ImmutableBytesWritable key,
559       final ImmutableBytesWritable value) {
560     if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
561       boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
562       LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
563           "use " + DURABILITY + " instead");
564       setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABILITY);
565       return this;
566     }
567     values.put(key, value);
568     return this;
569   }
570 
571   /**
572    * Setter for storing metadata as a (key, value) pair in {@link #values} map
573    *
574    * @param key The key.
575    * @param value The value.
576    * @see #values
577    */
578   public HTableDescriptor setValue(String key, String value) {
579     if (value == null) {
580       remove(key);
581     } else {
582       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
583     }
584     return this;
585   }
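
  /*
   * Example (illustrative; the attribute name is hypothetical). Arbitrary
   * metadata survives a set/get round trip, and a null value removes the key:
   *
   *   htd.setValue("MY_ATTRIBUTE", "my_value");
   *   String v = htd.getValue("MY_ATTRIBUTE"); // "my_value"
   *   htd.setValue("MY_ATTRIBUTE", null);      // removes the entry
   */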
586 
587   /**
588    * Remove metadata represented by the key from the {@link #values} map
589    *
590    * @param key Key of the entry to be removed from the HTableDescriptor
591    * parameters map.
592    */
593   public void remove(final String key) {
594     remove(new ImmutableBytesWritable(Bytes.toBytes(key)));
595   }
596 
597   /**
598    * Remove metadata represented by the key from the {@link #values} map
599    *
600    * @param key Key of the entry to be removed from the HTableDescriptor
601    * parameters map.
602    */
603   public void remove(ImmutableBytesWritable key) {
604     values.remove(key);
605   }
606 
607   /**
608    * Remove metadata represented by the key from the {@link #values} map
609    *
610    * @param key Key of the entry to be removed from the HTableDescriptor
611    * parameters map.
612    */
613   public void remove(final byte [] key) {
614     remove(new ImmutableBytesWritable(key));
615   }
616 
617   /**
618    * Check if the readOnly flag of the table is set. If the readOnly flag is
619    * set then the contents of the table can only be read from but not modified.
620    *
621    * @return true if all columns in the table should be read only
622    */
623   public boolean isReadOnly() {
624     return isSomething(READONLY_KEY, DEFAULT_READONLY);
625   }
626 
627   /**
628    * Setting the table as read only sets all the columns in the table as read
629    * only. By default all tables are modifiable, but if the readOnly flag is
630    * set to true then the contents of the table can only be read but not modified.
631    *
632    * @param readOnly True if all of the columns in the table should be read
633    * only.
634    */
635   public HTableDescriptor setReadOnly(final boolean readOnly) {
636     return setValue(READONLY_KEY, readOnly? TRUE: FALSE);
637   }
638 
639   /**
640    * Check if the compaction enable flag of the table is true. If the flag is
641    * false, then no minor/major compactions will actually be performed.
642    *
643    * @return true if table compaction enabled
644    */
645   public boolean isCompactionEnabled() {
646     return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
647   }
648 
649   /**
650    * Set the table compaction enable flag.
651    *
652    * @param isEnable True to enable compaction.
653    */
654   public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
655     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
656     return this;
657   }
658 
659   /**
660    * Check if the normalization enable flag of the table is true. If the flag
661    * is false, then the region normalizer won't attempt to normalize this table.
662    *
663    * @return true if region normalization is enabled for this table
664    */
665   public boolean isNormalizationEnabled() {
666     return isSomething(NORMALIZATION_ENABLED_KEY, DEFAULT_NORMALIZATION_ENABLED);
667   }
668 
669   /**
670    * Set the table normalization enable flag.
671    *
672    * @param isEnable True to enable normalization.
673    */
674   public HTableDescriptor setNormalizationEnabled(final boolean isEnable) {
675     setValue(NORMALIZATION_ENABLED_KEY, isEnable ? TRUE : FALSE);
676     return this;
677   }
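
  /*
   * Example (illustrative): toggling the maintenance-related flags.
   *
   *   htd.setCompactionEnabled(false);   // suspend minor/major compactions
   *   htd.setNormalizationEnabled(true); // let the region normalizer adjust region sizes
   */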
678 
679   /**
680    * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
681    * @param durability enum value
682    */
683   public HTableDescriptor setDurability(Durability durability) {
684     this.durability = durability;
685     setValue(DURABILITY_KEY, durability.name());
686     return this;
687   }
688 
689   /**
690    * Returns the durability setting for the table.
691    * @return durability setting for the table.
692    */
693   public Durability getDurability() {
694     if (this.durability == null) {
695       byte[] durabilityValue = getValue(DURABILITY_KEY);
696       if (durabilityValue == null) {
697         this.durability = DEFAULT_DURABILITY;
698       } else {
699         try {
700           this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
701         } catch (IllegalArgumentException ex) {
702           LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
703             + " is not known. Durability:" + Bytes.toString(durabilityValue));
704           this.durability = DEFAULT_DURABILITY;
705         }
706       }
707     }
708     return this.durability;
709   }
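
  /*
   * Example (illustrative): trading some durability for write latency.
   *
   *   htd.setDurability(Durability.ASYNC_WAL); // WAL edits are synced asynchronously
   *   Durability d = htd.getDurability();      // ASYNC_WAL
   */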
710 
711   /**
712    * Get the name of the table
713    *
714    * @return TableName
715    */
716   public TableName getTableName() {
717     return name;
718   }
719 
720   /**
721    * Get the name of the table as a byte array.
722    *
723    * @return name of table
724    * @deprecated Use {@link #getTableName()} instead
725    */
726   @Deprecated
727   public byte[] getName() {
728     return name.getName();
729   }
730 
731   /**
732    * Get the name of the table as a String
733    *
734    * @return name of table as a String
735    */
736   public String getNameAsString() {
737     return name.getNameAsString();
738   }
739 
740   /**
741    * This sets the class associated with the region split policy which
742    * determines when a region split should occur.  The class used by
743    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
744    * @param clazz the class name
745    */
746   public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
747     setValue(SPLIT_POLICY, clazz);
748     return this;
749   }
750 
751   /**
752    * This gets the class associated with the region split policy which
753    * determines when a region split should occur.  The class used by
754    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
755    *
756    * @return the class name of the region split policy for this table.
757    * If this returns null, the default split policy is used.
758    */
759    public String getRegionSplitPolicyClassName() {
760     return getValue(SPLIT_POLICY);
761   }
762 
763   /**
764    * Set the name of the table.
765    *
766    * @param name name of table
767    */
768   @Deprecated
769   public HTableDescriptor setName(byte[] name) {
770     setName(TableName.valueOf(name));
771     return this;
772   }
773 
774   @Deprecated
775   public HTableDescriptor setName(TableName name) {
776     this.name = name;
777     setMetaFlags(this.name);
778     return this;
779   }
780 
781   /**
782    * Returns the maximum size up to which a region can grow, after which a region
783    * split is triggered. The region size is represented by the size of the biggest
784    * store file in that region.
785    *
786    * @return max hregion size for table, -1 if not set.
787    *
788    * @see #setMaxFileSize(long)
789    */
790   public long getMaxFileSize() {
791     byte [] value = getValue(MAX_FILESIZE_KEY);
792     if (value != null) {
793       return Long.parseLong(Bytes.toString(value));
794     }
795     return -1;
796   }
797 
798   /**
799    * Sets the maximum size up to which a region can grow, after which a region
800    * split is triggered. The region size is represented by the size of the biggest
801    * store file in that region, i.e. if the biggest store file grows beyond
802    * maxFileSize, then a region split is triggered. The default is given by
803    * HConstants.DEFAULT_MAX_FILE_SIZE.
804    * <p>
805    * This is not an absolute limit and might be exceeded. If a single row exceeds
806    * maxFileSize, the store file size will be greater than maxFileSize, since
807    * a single row cannot be split across multiple regions.
808    * </p>
809    *
810    * @param maxFileSize The maximum file size that a store file can grow to
811    * before a split is triggered.
812    */
813   public HTableDescriptor setMaxFileSize(long maxFileSize) {
814     setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
815     return this;
816   }
817 
818   /**
819    * Returns the size of the memstore after which a flush to filesystem is triggered.
820    *
821    * @return memory cache flush size for each hregion, -1 if not set.
822    *
823    * @see #setMemStoreFlushSize(long)
824    */
825   public long getMemStoreFlushSize() {
826     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
827     if (value != null) {
828       return Long.parseLong(Bytes.toString(value));
829     }
830     return -1;
831   }
832 
833   /**
834    * Sets the maximum size of the memstore after which the contents of the
835    * memstore are flushed to the filesystem. This defaults to a size of 128 MB.
836    *
837    * @param memstoreFlushSize memory cache flush size for each hregion
838    */
839   public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
840     setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
841     return this;
842   }
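
  /*
   * Example (illustrative; the sizes and the split-policy class are one
   * possible choice): tuning when regions split and when memstores flush.
   *
   *   htd.setMaxFileSize(10L * 1024 * 1024 * 1024); // split once the biggest store file passes ~10 GB
   *   htd.setMemStoreFlushSize(256L * 1024 * 1024); // flush memstores at 256 MB
   *   htd.setRegionSplitPolicyClassName(
   *     "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");
   */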
843 
844   /**
845    * This sets the class associated with the flush policy which determines which stores
846    * need to be flushed when flushing a region. The class used by default is defined in
847    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
848    * @param clazz the class name
849    */
850   public HTableDescriptor setFlushPolicyClassName(String clazz) {
851     setValue(FLUSH_POLICY, clazz);
852     return this;
853   }
854 
855   /**
856    * This gets the class associated with the flush policy which determines which stores need to be
857    * flushed when flushing a region. The class used by default is defined in
858    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
859    * @return the class name of the flush policy for this table. If this returns null, the default
860    *         flush policy is used.
861    */
862   public String getFlushPolicyClassName() {
863     return getValue(FLUSH_POLICY);
864   }
865 
866   /**
867    * Adds a column family.
868    * To update an existing column family, use {@link #modifyFamily(HColumnDescriptor)} instead.
869    * @param family HColumnDescriptor of family to add.
870    */
871   public HTableDescriptor addFamily(final HColumnDescriptor family) {
872     if (family.getName() == null || family.getName().length <= 0) {
873       throw new IllegalArgumentException("Family name cannot be null or empty");
874     }
875     if (hasFamily(family.getName())) {
876       throw new IllegalArgumentException("Family '" +
877         family.getNameAsString() + "' already exists so cannot be added");
878     }
879     this.families.put(family.getName(), family);
880     return this;
881   }
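
  /*
   * Example (illustrative; the family name is hypothetical): addFamily rejects
   * an existing family, so updates go through modifyFamily instead.
   *
   *   htd.addFamily(new HColumnDescriptor("cf").setMaxVersions(3));
   *   htd.modifyFamily(new HColumnDescriptor("cf").setMaxVersions(5)); // update in place
   */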
882 
883   /**
884    * Modifies the existing column family.
885    * @param family HColumnDescriptor of family to update
886    * @return this (for chained invocation)
887    */
888   public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
889     if (family.getName() == null || family.getName().length <= 0) {
890       throw new IllegalArgumentException("Family name cannot be null or empty");
891     }
892     if (!hasFamily(family.getName())) {
893       throw new IllegalArgumentException("Column family '" + family.getNameAsString()
894         + "' does not exist");
895     }
896     this.families.put(family.getName(), family);
897     return this;
898   }
899 
900   /**
901    * Checks to see if this table contains the given column family
902    * @param familyName Family name or column name.
903    * @return true if the table contains the specified family name
904    */
905   public boolean hasFamily(final byte [] familyName) {
906     return families.containsKey(familyName);
907   }
908 
909   /**
910    * @return Name of this table and then a map of all of the column family
911    * descriptors.
912    * @see #getNameAsString()
913    */
914   @Override
915   public String toString() {
916     StringBuilder s = new StringBuilder();
917     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
918     s.append(getValues(true));
919     for (HColumnDescriptor f : families.values()) {
920       s.append(", ").append(f);
921     }
922     return s.toString();
923   }
924 
925   /**
926    * @return Name of this table and then a map of all of the column family
927    * descriptors (with only the non-default column family attributes)
928    */
929   public String toStringCustomizedValues() {
930     StringBuilder s = new StringBuilder();
931     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
932     s.append(getValues(false));
933     for(HColumnDescriptor hcd : families.values()) {
934       s.append(", ").append(hcd.toStringCustomizedValues());
935     }
936     return s.toString();
937   }
938 
939   /**
940    * @return map of all table attributes formatted into string.
941    */
942   public String toStringTableAttributes() {
943    return getValues(true).toString();
944   }
945 
946   private StringBuilder getValues(boolean printDefaults) {
947     StringBuilder s = new StringBuilder();
948 
949     // step 1: set partitioning and pruning
950     Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
951     Set<ImmutableBytesWritable> userKeys = new TreeSet<ImmutableBytesWritable>();
952     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry : values.entrySet()) {
953       ImmutableBytesWritable k = entry.getKey();
954       if (k == null || k.get() == null) continue;
955       String key = Bytes.toString(k.get());
956       // in this section, print out reserved keywords + coprocessor info
957       if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
958         userKeys.add(k);
959         continue;
960       }
961       // only print out IS_ROOT/IS_META if true
962       String value = Bytes.toString(entry.getValue().get());
963       if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
964         if (!Boolean.parseBoolean(value)) continue;
965       }
966       // see if a reserved key is a default value. may not want to print it out
967       if (printDefaults
968           || !DEFAULT_VALUES.containsKey(key)
969           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
970         reservedKeys.add(k);
971       }
972     }
973 
974     // early exit optimization
975     boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
976     if (!hasAttributes && configuration.isEmpty()) return s;
977 
978     s.append(", {");
979     // step 2: printing attributes
980     if (hasAttributes) {
981       s.append("TABLE_ATTRIBUTES => {");
982 
983       // print all reserved keys first
984       boolean printCommaForAttr = false;
985       for (ImmutableBytesWritable k : reservedKeys) {
986         String key = Bytes.toString(k.get());
987         String value = Bytes.toStringBinary(values.get(k).get());
988         if (printCommaForAttr) s.append(", ");
989         printCommaForAttr = true;
990         s.append(key);
991         s.append(" => ");
992         s.append('\'').append(value).append('\'');
993       }
994 
995       if (!userKeys.isEmpty()) {
996         // print all non-reserved, advanced config keys as a separate subset
997         if (printCommaForAttr) s.append(", ");
998         printCommaForAttr = true;
999         s.append(HConstants.METADATA).append(" => ");
1000         s.append("{");
1001         boolean printCommaForCfg = false;
1002         for (ImmutableBytesWritable k : userKeys) {
1003           String key = Bytes.toString(k.get());
1004           String value = Bytes.toStringBinary(values.get(k).get());
1005           if (printCommaForCfg) s.append(", ");
1006           printCommaForCfg = true;
1007           s.append('\'').append(key).append('\'');
1008           s.append(" => ");
1009           s.append('\'').append(value).append('\'');
1010         }
1011         s.append("}");
1012       }
1013     }
1014 
1015     // step 3: printing all configuration:
1016     if (!configuration.isEmpty()) {
1017       if (hasAttributes) {
1018         s.append(", ");
1019       }
1020       s.append(HConstants.CONFIGURATION).append(" => ");
1021       s.append('{');
1022       boolean printCommaForConfig = false;
1023       for (Map.Entry<String, String> e : configuration.entrySet()) {
1024         if (printCommaForConfig) s.append(", ");
1025         printCommaForConfig = true;
1026         s.append('\'').append(e.getKey()).append('\'');
1027         s.append(" => ");
1028         s.append('\'').append(e.getValue()).append('\'');
1029       }
1030       s.append("}");
1031     }
1032     s.append("}"); // end METHOD
1033     return s;
1034   }
1035 
1036   /**
1037    * Compare the contents of the descriptor with another one passed as a parameter.
1038    * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
1039    * contents of the descriptors are compared.
1040    *
1041    * @return true if the contents of the two descriptors exactly match
1042    *
1043    * @see java.lang.Object#equals(java.lang.Object)
1044    */
1045   @Override
1046   public boolean equals(Object obj) {
1047     if (this == obj) {
1048       return true;
1049     }
1050     if (obj == null) {
1051       return false;
1052     }
1053     if (!(obj instanceof HTableDescriptor)) {
1054       return false;
1055     }
1056     return compareTo((HTableDescriptor)obj) == 0;
1057   }
1058 
1059   /**
1060    * @see java.lang.Object#hashCode()
1061    */
1062   @Override
1063   public int hashCode() {
1064     int result = this.name.hashCode();
1065     result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode();
1066     if (this.families != null && this.families.size() > 0) {
1067       for (HColumnDescriptor e: this.families.values()) {
1068         result ^= e.hashCode();
1069       }
1070     }
1071     result ^= values.hashCode();
1072     result ^= configuration.hashCode();
1073     return result;
1074   }
1075 
1076   /**
1077    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
1078    * and is used for de-serialization of the HTableDescriptor over RPC
1079    * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
1080    */
1081   @Deprecated
1082   @Override
1083   public void readFields(DataInput in) throws IOException {
1084     int version = in.readInt();
1085     if (version < 3)
1086       throw new IOException("versions < 3 are not supported (and never existed!?)");
1087     // version 3+
1088     name = TableName.valueOf(Bytes.readByteArray(in));
1089     setRootRegion(in.readBoolean());
1090     setMetaRegion(in.readBoolean());
1091     values.clear();
1092     configuration.clear();
1093     int numVals = in.readInt();
1094     for (int i = 0; i < numVals; i++) {
1095       ImmutableBytesWritable key = new ImmutableBytesWritable();
1096       ImmutableBytesWritable value = new ImmutableBytesWritable();
1097       key.readFields(in);
1098       value.readFields(in);
1099       setValue(key, value);
1100     }
1101     families.clear();
1102     int numFamilies = in.readInt();
1103     for (int i = 0; i < numFamilies; i++) {
1104       HColumnDescriptor c = new HColumnDescriptor();
1105       c.readFields(in);
1106       families.put(c.getName(), c);
1107     }
1108     if (version >= 7) {
1109       int numConfigs = in.readInt();
1110       for (int i = 0; i < numConfigs; i++) {
1111         ImmutableBytesWritable key = new ImmutableBytesWritable();
1112         ImmutableBytesWritable value = new ImmutableBytesWritable();
1113         key.readFields(in);
1114         value.readFields(in);
1115         configuration.put(
1116           Bytes.toString(key.get(), key.getOffset(), key.getLength()),
1117           Bytes.toString(value.get(), value.getOffset(), value.getLength()));
1118       }
1119     }
1120   }
1121 
1122   /**
1123    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
1124    * and is used for serialization of the HTableDescriptor over RPC
1125    * @deprecated Writables are going away.
1126    * Use {@link com.google.protobuf.MessageLite#toByteArray} instead.
1127    */
1128   @Deprecated
1129   @Override
1130   public void write(DataOutput out) throws IOException {
1131     out.writeInt(TABLE_DESCRIPTOR_VERSION);
1132     Bytes.writeByteArray(out, name.toBytes());
1133     out.writeBoolean(isRootRegion());
1134     out.writeBoolean(isMetaRegion());
1135     out.writeInt(values.size());
1136     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1137         values.entrySet()) {
1138       e.getKey().write(out);
1139       e.getValue().write(out);
1140     }
1141     out.writeInt(families.size());
1142     for(Iterator<HColumnDescriptor> it = families.values().iterator();
1143         it.hasNext(); ) {
1144       HColumnDescriptor family = it.next();
1145       family.write(out);
1146     }
1147     out.writeInt(configuration.size());
1148     for (Map.Entry<String, String> e : configuration.entrySet()) {
1149       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1150       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1151     }
1152   }
1153 
1154   // Comparable
1155 
1156   /**
1157    * Compares the descriptor with another descriptor which is passed as a parameter.
1158    * This compares the content of the two descriptors and not the reference.
1159    *
1160    * @return 0 if the contents of the descriptors exactly match,
1161    *         a non-zero value if there is a mismatch in the contents
1162    */
1163   @Override
1164   public int compareTo(final HTableDescriptor other) {
1165     int result = this.name.compareTo(other.name);
1166     if (result == 0) {
1167       result = Integer.compare(families.size(), other.families.size());
1168     }
1172     if (result == 0) {
1173       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1174           it2 = other.families.values().iterator(); it.hasNext(); ) {
1175         result = it.next().compareTo(it2.next());
1176         if (result != 0) {
1177           break;
1178         }
1179       }
1180     }
1181     if (result == 0) {
1182       // punt on comparison for ordering, just calculate difference
1183       result = this.values.hashCode() - other.values.hashCode();
1184       if (result < 0)
1185         result = -1;
1186       else if (result > 0)
1187         result = 1;
1188     }
1189     if (result == 0) {
1190       result = this.configuration.hashCode() - other.configuration.hashCode();
1191       if (result < 0)
1192         result = -1;
1193       else if (result > 0)
1194         result = 1;
1195     }
1196     return result;
1197   }
1198 
1199   /**
1200    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1201    * of all the column families of the table.
1202    *
1203    * @return Immutable collection of {@link HColumnDescriptor} of all the
1204    * column families.
1205    */
1206   public Collection<HColumnDescriptor> getFamilies() {
1207     return Collections.unmodifiableCollection(this.families.values());
1208   }
1209 
1210   /**
1211    * Returns the configured replicas per region
1212    */
1213   public int getRegionReplication() {
1214     byte[] val = getValue(REGION_REPLICATION_KEY);
1215     if (val == null || val.length == 0) {
1216       return DEFAULT_REGION_REPLICATION;
1217     }
1218     return Integer.parseInt(Bytes.toString(val));
1219   }
1220 
1221   /**
1222    * Sets the number of replicas per region.
1223    * @param regionReplication the replication factor per region
1224    */
1225   public HTableDescriptor setRegionReplication(int regionReplication) {
1226     setValue(REGION_REPLICATION_KEY,
1227         new ImmutableBytesWritable(Bytes.toBytes(Integer.toString(regionReplication))));
1228     return this;
1229   }
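
  /*
   * Example (illustrative): one primary plus two read replicas per region,
   * for timeline-consistent reads.
   *
   *   htd.setRegionReplication(3);
   */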
1230 
1231   /**
1232    * @return true if the read-replicas memstore replication is enabled.
1233    */
1234   public boolean hasRegionMemstoreReplication() {
1235     return isSomething(REGION_MEMSTORE_REPLICATION_KEY, DEFAULT_REGION_MEMSTORE_REPLICATION);
1236   }
1237 
1238   /**
1239    * Enable or disable the memstore replication from the primary region to the replicas.
1240    * The replication will be used only for meta operations (e.g. flush, compaction, ...)
1241    *
1242    * @param memstoreReplication true if the new data written to the primary region
1243    *                                 should be replicated.
1244    *                            false if the secondaries can tolerate receiving new
1245    *                                  data only when the primary flushes the memstore.
1246    */
1247   public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) {
1248     setValue(REGION_MEMSTORE_REPLICATION_KEY, memstoreReplication ? TRUE : FALSE);
1249     // If the memstore replication is setup, we do not have to wait for observing a flush event
1250     // from primary before starting to serve reads, because gaps from replication is not applicable
1251     setConfiguration(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
1252       Boolean.toString(memstoreReplication));
1253     return this;
1254   }
1255 
1256   /**
1257    * Returns all the column family names of the current table. The map of
1258    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1259    * This returns all the keys of the family map which represents the column
1260    * family names of the table.
1261    *
1262    * @return Immutable sorted set of the keys of the families.
1263    */
1264   public Set<byte[]> getFamiliesKeys() {
1265     return Collections.unmodifiableSet(this.families.keySet());
1266   }
1267 
1268   /**
1269    * Returns an array all the {@link HColumnDescriptor} of the column families
1270    * of the table.
1271    *
1272    * @return Array of all the HColumnDescriptors of the current table
1273    *
1274    * @see #getFamilies()
1275    */
1276   public HColumnDescriptor[] getColumnFamilies() {
1277     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1278     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1279   }
1280 
1281 
1282   /**
1283    * Returns the HColumnDescriptor for a specific column family with name as
1284    * specified by the parameter column.
1285    *
1286    * @param column Column family name
1287    * @return Column descriptor for the passed family name, or null if no
1288    * such family exists.
1289    */
1290   public HColumnDescriptor getFamily(final byte [] column) {
1291     return this.families.get(column);
1292   }
1293 
1294 
1295   /**
1296    * Removes the HColumnDescriptor with name specified by the parameter column
1297    * from the table descriptor
1298    *
1299    * @param column Name of the column family to be removed.
1300    * @return Column descriptor of the removed family, or null if the family
1301    * did not exist.
1302    */
1303   public HColumnDescriptor removeFamily(final byte [] column) {
1304     return this.families.remove(column);
1305   }
1306 
1307   /**
1308    * Add a table coprocessor to this table. The coprocessor
1309    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1310    * or Endpoint.
1311    * It won't check if the class can be loaded or not.
1312    * Whether a coprocessor is loadable or not will be determined when
1313    * a region is opened.
1314    * @param className Full class name.
1315    * @throws IOException
1316    */
1317   public HTableDescriptor addCoprocessor(String className) throws IOException {
1318     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1319     return this;
1320   }
1321 
1322   /**
1323    * Add a table coprocessor to this table. The coprocessor
1324    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1325    * or Endpoint.
1326    * It won't check if the class can be loaded or not.
1327    * Whether a coprocessor is loadable or not will be determined when
1328    * a region is opened.
1329    * @param jarFilePath Path of the jar file. If it's null, the class will be
1330    * loaded from default classloader.
1331    * @param className Full class name.
1332    * @param priority Priority
1333    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1334    * @throws IOException
1335    */
1336   public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
1337                              int priority, final Map<String, String> kvs)
1338   throws IOException {
1339     checkHasCoprocessor(className);
1340 
1341     // Validate parameter kvs and then add key/values to kvString.
1342     StringBuilder kvString = new StringBuilder();
1343     if (kvs != null) {
1344       for (Map.Entry<String, String> e: kvs.entrySet()) {
1345         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1346           throw new IOException("Illegal parameter key = " + e.getKey());
1347         }
1348         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1349           throw new IOException("Illegal parameter (" + e.getKey() +
1350               ") value = " + e.getValue());
1351         }
1352         if (kvString.length() != 0) {
1353           kvString.append(',');
1354         }
1355         kvString.append(e.getKey());
1356         kvString.append('=');
1357         kvString.append(e.getValue());
1358       }
1359     }
1360 
1361     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1362         "|" + className + "|" + Integer.toString(priority) + "|" +
1363         kvString.toString();
1364     return addCoprocessorToMap(value);
1365   }
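
  /*
   * Example (illustrative; the class names and the jar path are hypothetical):
   *
   *   htd.addCoprocessor("org.example.MyRegionObserver"); // loaded from the default classloader
   *   htd.addCoprocessor("org.example.MyEndpoint",
   *     new Path("hdfs:///cp/my-endpoint.jar"), Coprocessor.PRIORITY_USER, null);
   */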
1366 
1367   /**
1368    * Add a table coprocessor to this table. The coprocessor
1369    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1370    * or Endpoint.
1371    * It won't check if the class can be loaded or not.
1372    * Whether a coprocessor is loadable or not will be determined when
1373    * a region is opened.
1374    * @param specStr The Coprocessor specification all in one String, formatted so it matches
1375    * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1376    * @throws IOException
1377    */
1378   public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
1379     String className = getCoprocessorClassNameFromSpecStr(specStr);
1380     if (className == null) {
1381       throw new IllegalArgumentException("Format does not match " +
1382         HConstants.CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr);
1383     }
1384     checkHasCoprocessor(className);
1385     return addCoprocessorToMap(specStr);
1386   }
1387 
1388   private void checkHasCoprocessor(final String className) throws IOException {
1389     if (hasCoprocessor(className)) {
1390       throw new IOException("Coprocessor " + className + " already exists.");
1391     }
1392   }
1393 
1394   /**
1395    * Add coprocessor to values Map
1396    * @param specStr The Coprocessor specification all in one String, formatted so it matches
1397    * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1398    * @return Returns <code>this</code>
1399    */
1400   private HTableDescriptor addCoprocessorToMap(final String specStr) {
1401     if (specStr == null) return this;
1402     // generate a coprocessor key
1403     int maxCoprocessorNumber = 0;
1404     Matcher keyMatcher;
1405     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1406         this.values.entrySet()) {
1407       keyMatcher =
1408           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1409               Bytes.toString(e.getKey().get()));
1410       if (!keyMatcher.matches()) {
1411         continue;
1412       }
1413       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
1414     }
1415     maxCoprocessorNumber++;
1416     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1417     this.values.put(new ImmutableBytesWritable(Bytes.toBytes(key)),
1418       new ImmutableBytesWritable(Bytes.toBytes(specStr)));
1419     return this;
1420   }
1421 
1422   /**
1423    * Check if the table has an attached co-processor represented by the name className
1424    *
1425    * @param classNameToMatch - Class name of the co-processor
1426    * @return true if the table has a co-processor with the given className
1427    */
1428   public boolean hasCoprocessor(String classNameToMatch) {
1429     Matcher keyMatcher;
1431     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1432         this.values.entrySet()) {
1433       keyMatcher =
1434           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1435               Bytes.toString(e.getKey().get()));
1436       if (!keyMatcher.matches()) {
1437         continue;
1438       }
1439       String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
1440       if (className == null) continue;
1441       if (className.equals(classNameToMatch.trim())) {
1442         return true;
1443       }
1444     }
1445     return false;
1446   }
1447 
1448   /**
1449    * Return the list of attached co-processors represented by their name className
1450    *
1451    * @return The list of co-processor classNames
1452    */
1453   public List<String> getCoprocessors() {
1454     List<String> result = new ArrayList<String>();
1455     Matcher keyMatcher;
1457     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values.entrySet()) {
1458       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1459       if (!keyMatcher.matches()) {
1460         continue;
1461       }
1462       String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
1463       if (className == null) continue;
1464       result.add(className); // classname is the 2nd field
1465     }
1466     return result;
1467   }
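
       /*
        * Usage sketch (illustrative):
        *
        *   for (String cpClass : htd.getCoprocessors()) {
        *     LOG.info("attached coprocessor: " + cpClass);
        *   }
        */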
1468 
1469   /**
1470    * @param spec String formatted as per {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1471    * @return Class name parsed from the passed-in <code>spec</code>, or null if the spec
        * is null or does not match the pattern
1472    */
1473   private static String getCoprocessorClassNameFromSpecStr(final String spec) {
         if (spec == null) return null; // guard: Pattern#matcher throws NPE on a null input
1474     Matcher matcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
1475     // Classname is the 2nd capture group; matcher() never returns null, so only matches() is checked
1476     return matcher.matches()? matcher.group(2).trim(): null;
1477   }
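
       /*
        * Parsing sketch (illustrative; the spec below is a hypothetical placeholder): for
        *   "hdfs:///cp/demo.jar|com.example.DemoRegionObserver|1001|arg1=1"
        * group(2) is the class name, so this helper returns "com.example.DemoRegionObserver".
        */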
1478 
1479   /**
1480    * Remove a coprocessor from those set on the table
1481    * @param className Class name of the co-processor
1482    */
1483   public void removeCoprocessor(String className) {
1484     ImmutableBytesWritable match = null;
1485     Matcher keyMatcher;
1486     Matcher valueMatcher;
1487     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1488         this.values.entrySet()) {
1489       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1490           Bytes.toString(e.getKey().get()));
1491       if (!keyMatcher.matches()) {
1492         continue;
1493       }
1494       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(
1495           Bytes.toString(e.getValue().get()));
1496       if (!valueMatcher.matches()) {
1497         continue;
1498       }
1499       // get className and compare
1500       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1501       // remove the CP if it is present
1502       if (clazz.equals(className.trim())) {
1503         match = e.getKey();
1504         break;
1505       }
1506     }
1507     // if we found a match, remove it
1508     if (match != null) {
1509       remove(match);
         }
1510   }
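
       /*
        * Usage sketch (illustrative; pairs with the add methods above):
        *
        *   htd.removeCoprocessor("com.example.DemoRegionObserver");
        *   assert !htd.hasCoprocessor("com.example.DemoRegionObserver");
        */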
1511 
1512   /**
1513    * Returns the {@link Path} object representing the table directory under
1514    * path rootdir.
1515    *
1516    * @param rootdir qualified path of HBase root directory
1517    * @param tableName name of table
1518    * @return {@link Path} for table
1519    *
1520    * @deprecated Use FSUtils.getTableDir() instead.
1521    */
1522   @Deprecated
1523   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1524     // Unfortunately this mirrors code from FSUtils.getTableDir, because there is
1525     // no module dependency between hbase-client and hbase-server.
1526     TableName name = TableName.valueOf(tableName);
1527     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1528               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1529   }
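
       /*
        * Layout sketch (illustrative; assumes the standard "data" base namespace directory
        * and a hypothetical root and table name):
        *
        *   getTableDir(new Path("hdfs://nn:8020/hbase"), Bytes.toBytes("ns1:tbl1"))
        *     => hdfs://nn:8020/hbase/data/ns1/tbl1
        */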
1530 
1531   /**
1532    * Table descriptor for <code>hbase:meta</code> catalog table
1533    * @deprecated Use TableDescriptors#get(TableName.META_TABLE_NAME) or
1534    * HBaseAdmin#getTableDescriptor(TableName.META_TABLE_NAME) instead.
1535    */
1536   @Deprecated
1537   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
1538       TableName.META_TABLE_NAME,
1539       new HColumnDescriptor[] {
1540           new HColumnDescriptor(HConstants.CATALOG_FAMILY)
1541               // Ten is an arbitrary number.  Keep versions to help debugging.
1542               .setMaxVersions(HConstants.DEFAULT_HBASE_META_VERSIONS)
1543               .setInMemory(true)
1544               .setBlocksize(HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)
1545               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1546               // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
1547               .setBloomFilterType(BloomType.NONE)
1548               // Enable cache of data blocks in L1 if more than one caching tier deployed:
1549               // e.g. if using CombinedBlockCache (BucketCache).
1550               .setCacheDataInL1(true)
1551       });
1552 
1553   static {
1554     try {
1555       META_TABLEDESC.addCoprocessor(
1556           "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
1557           null, Coprocessor.PRIORITY_SYSTEM, null);
1558     } catch (IOException ex) {
1560       throw new RuntimeException(ex);
1561     }
1562   }
1563 
1564   public final static String NAMESPACE_FAMILY_INFO = "info";
1565   public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
1566   public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1567 
1568   /** Table descriptor for namespace table */
1569   public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
1570       TableName.NAMESPACE_TABLE_NAME,
1571       new HColumnDescriptor[] {
1572           new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
1573               // Ten is an arbitrary number.  Keep versions to help debugging.
1574               .setMaxVersions(10)
1575               .setInMemory(true)
1576               .setBlocksize(8 * 1024)
1577               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1578               // Enable cache of data blocks in L1 if more than one caching tier deployed:
1579               // e.g. if using CombinedBlockCache (BucketCache).
1580               .setCacheDataInL1(true)
1581       });
1582 
1583   @Deprecated
1584   public HTableDescriptor setOwner(User owner) {
1585     return setOwnerString(owner != null ? owner.getShortName() : null);
1586   }
1587 
1588   // used by admin.rb:alter(table_name,*args) to update owner.
1589   @Deprecated
1590   public HTableDescriptor setOwnerString(String ownerString) {
1591     if (ownerString != null) {
1592       setValue(OWNER_KEY, ownerString);
1593     } else {
1594       remove(OWNER_KEY);
1595     }
1596     return this;
1597   }
1598 
1599   @Deprecated
1600   public String getOwnerString() {
1601     if (getValue(OWNER_KEY) != null) {
1602       return Bytes.toString(getValue(OWNER_KEY));
1603     }
1604     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1605     // hbase:meta and -ROOT- should return system user as owner, not null (see
1606     // MasterFileSystem.java:bootstrap()).
1607     return null;
1608   }
1609 
1610   /**
1611    * @return This instance serialized as a pb byte array, with the pb magic prefix prepended
1612    * @see #parseFrom(byte[])
1613    */
1614   public byte [] toByteArray() {
1615     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1616   }
1617 
1618   /**
1619    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1620    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1621    * @throws DeserializationException
1622    * @throws IOException
1623    * @see #toByteArray()
1624    */
1625   public static HTableDescriptor parseFrom(final byte [] bytes)
1626   throws DeserializationException, IOException {
1627     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1628       return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor());
1629     }
1630     int pblen = ProtobufUtil.lengthOfPBMagic();
1631     TableSchema.Builder builder = TableSchema.newBuilder();
1632     TableSchema ts;
1633     try {
1634       ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
1635       ts = builder.build();
1636     } catch (IOException e) {
1637       throw new DeserializationException(e);
1638     }
1639     return convert(ts);
1640   }
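
       /*
        * Round-trip sketch (illustrative):
        *
        *   byte [] bytes = htd.toByteArray();               // pb magic + TableSchema
        *   HTableDescriptor copy = HTableDescriptor.parseFrom(bytes);
        *   assert htd.equals(copy);
        */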
1641 
1642   /**
1643    * @return A pb TableSchema instance made from the current {@link HTableDescriptor}.
1644    */
1645   public TableSchema convert() {
1646     TableSchema.Builder builder = TableSchema.newBuilder();
1647     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1648     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1649       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1650       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1651       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1652       builder.addAttributes(aBuilder.build());
1653     }
1654     for (HColumnDescriptor hcd: getColumnFamilies()) {
1655       builder.addColumnFamilies(hcd.convert());
1656     }
1657     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1658       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1659       aBuilder.setName(e.getKey());
1660       aBuilder.setValue(e.getValue());
1661       builder.addConfiguration(aBuilder.build());
1662     }
1663     return builder.build();
1664   }
1665 
1666   /**
1667    * @param ts A pb TableSchema instance.
1668    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1669    */
1670   public static HTableDescriptor convert(final TableSchema ts) {
1671     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1672     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1673     int index = 0;
1674     for (ColumnFamilySchema cfs: list) {
1675       hcds[index++] = HColumnDescriptor.convert(cfs);
1676     }
1677     HTableDescriptor htd = new HTableDescriptor(
1678         ProtobufUtil.toTableName(ts.getTableName()),
1679         hcds);
1680     for (BytesBytesPair a: ts.getAttributesList()) {
1681       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1682     }
1683     for (NameStringPair a: ts.getConfigurationList()) {
1684       htd.setConfiguration(a.getName(), a.getValue());
1685     }
1686     return htd;
1687   }
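
       /*
        * Conversion sketch (illustrative): the two convert methods are inverses, so a
        * descriptor survives a trip through its pb form:
        *
        *   TableSchema ts = htd.convert();
        *   HTableDescriptor rebuilt = HTableDescriptor.convert(ts);
        */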
1688 
1689   /**
1690    * Getter for accessing the configuration value by key
1691    */
1692   public String getConfigurationValue(String key) {
1693     return configuration.get(key);
1694   }
1695 
1696   /**
1697    * Getter for fetching an unmodifiable {@link #configuration} map.
1698    */
1699   public Map<String, String> getConfiguration() {
1700     // unmodifiable view backed by the live map; not a copy
1701     return Collections.unmodifiableMap(configuration);
1702   }
1703 
1704   /**
1705    * Setter for storing a configuration setting in {@link #configuration} map.
1706    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1707    * @param value String value. If null, removes the setting.
1708    */
1709   public HTableDescriptor setConfiguration(String key, String value) {
1710     if (value == null) {
1711       removeConfiguration(key);
1712     } else {
1713       configuration.put(key, value);
1714     }
1715     return this;
1716   }
1717 
1718   /**
1719    * Remove a config setting represented by the key from the {@link #configuration} map
1720    */
1721   public void removeConfiguration(final String key) {
1722     configuration.remove(key);
1723   }
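
       /*
        * Configuration usage sketch (illustrative; the key is a hypothetical placeholder):
        *
        *   htd.setConfiguration("hbase.example.setting", "10");
        *   String v = htd.getConfigurationValue("hbase.example.setting"); // "10"
        *   htd.setConfiguration("hbase.example.setting", null);           // removes the entry
        */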
1724 
       /**
        * Build a descriptor for the <code>hbase:meta</code> catalog table, reading the
        * max versions and block size from the passed configuration.
        */
1725   public static HTableDescriptor metaTableDescriptor(final Configuration conf)
1726       throws IOException {
1727     HTableDescriptor metaDescriptor = new HTableDescriptor(
1728       TableName.META_TABLE_NAME,
1729       new HColumnDescriptor[] {
1730         new HColumnDescriptor(HConstants.CATALOG_FAMILY)
1731           .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
1732             HConstants.DEFAULT_HBASE_META_VERSIONS))
1733           .setInMemory(true)
1734           .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
1735             HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
1736           .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1737           // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
1738           .setBloomFilterType(BloomType.NONE)
1739           .setCacheDataInL1(true)
1740          });
1741     metaDescriptor.addCoprocessor(
1742       "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
1743       null, Coprocessor.PRIORITY_SYSTEM, null);
1744     return metaDescriptor;
1745   }
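
       /*
        * Usage sketch (illustrative; HBaseConfiguration.create() is the usual way to obtain
        * a Configuration):
        *
        *   HTableDescriptor meta = HTableDescriptor.metaTableDescriptor(HBaseConfiguration.create());
        */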
1746 
1747 }