1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.IOException;
22  import java.util.ArrayList;
23  import java.util.Collection;
24  import java.util.Collections;
25  import java.util.HashMap;
26  import java.util.HashSet;
27  import java.util.Iterator;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.Set;
31  import java.util.TreeMap;
32  import java.util.TreeSet;
33  import java.util.regex.Matcher;
34  
35  import org.apache.commons.logging.Log;
36  import org.apache.commons.logging.LogFactory;
37  import org.apache.hadoop.fs.Path;
38  import org.apache.hadoop.hbase.classification.InterfaceAudience;
39  import org.apache.hadoop.hbase.classification.InterfaceStability;
40  import org.apache.hadoop.hbase.client.Durability;
41  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
42  import org.apache.hadoop.hbase.exceptions.DeserializationException;
43  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
44  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
45  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
46  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
47  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
48  import org.apache.hadoop.hbase.regionserver.BloomType;
49  import org.apache.hadoop.hbase.security.User;
50  import org.apache.hadoop.hbase.util.ByteStringer;
51  import org.apache.hadoop.hbase.util.Bytes;
52  
53  import com.google.protobuf.InvalidProtocolBufferException;
54  
55  /**
56   * HTableDescriptor contains the details about an HBase table, such as the descriptors of
57   * all the column families, whether the table is a catalog table (<code>-ROOT-</code> or
58   * <code>hbase:meta</code>), whether the table is read only, the maximum size of the
59   * memstore, when a region split should occur, the coprocessors associated with it, etc.
60   */
61  @InterfaceAudience.Public
62  @InterfaceStability.Evolving
63  public class HTableDescriptor implements Comparable<HTableDescriptor> {
64  
65    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
66  
67    private TableName name = null;
68  
69    /**
70     * A map which holds the metadata information of the table. This metadata
71     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
72     * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
73     */
74    private final Map<Bytes, Bytes> values =
75        new HashMap<Bytes, Bytes>();
76  
77    /**
78     * A map which holds the configuration specific to the table.
79     * The keys of the map have the same names as config keys and override the defaults with
80     * table-specific settings. Example usage may be for compactions, etc.
81     */
82    private final Map<String, String> configuration = new HashMap<String, String>();
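
  // Example of the distinction, as a minimal sketch ('htd' is a hypothetical
  // descriptor): values holds table metadata attributes, while configuration
  // overrides cluster configuration keys for this table only.
  //
  //   htd.setValue("MAX_FILESIZE", "107374182400");
  //   htd.setConfiguration("hbase.hstore.blockingStoreFiles", "20");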
83  
84    public static final String SPLIT_POLICY = "SPLIT_POLICY";
85  
86    /**
87     * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
88     * attribute which denotes the maximum size of the store file after which
89     * a region split occurs
90     *
91     * @see #getMaxFileSize()
92     */
93    public static final String MAX_FILESIZE = "MAX_FILESIZE";
94    private static final Bytes MAX_FILESIZE_KEY =
95        new Bytes(Bytes.toBytes(MAX_FILESIZE));
96  
97    public static final String OWNER = "OWNER";
98    public static final Bytes OWNER_KEY =
99        new Bytes(Bytes.toBytes(OWNER));
100 
101   /**
102    * <em>INTERNAL</em> Used by rest interface to access this metadata
103    * attribute which denotes if the table is Read Only
104    *
105    * @see #isReadOnly()
106    */
107   public static final String READONLY = "READONLY";
108   private static final Bytes READONLY_KEY =
109       new Bytes(Bytes.toBytes(READONLY));
110 
111   /**
112    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
113    * attribute which denotes if the table is compaction enabled
114    *
115    * @see #isCompactionEnabled()
116    */
117   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
118   private static final Bytes COMPACTION_ENABLED_KEY =
119       new Bytes(Bytes.toBytes(COMPACTION_ENABLED));
120 
121   /**
122    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
123    * attribute which represents the maximum size of the memstore after which
124    * its contents are flushed onto the disk
125    *
126    * @see #getMemStoreFlushSize()
127    */
128   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
129   private static final Bytes MEMSTORE_FLUSHSIZE_KEY =
130       new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
131 
132   public static final String FLUSH_POLICY = "FLUSH_POLICY";
133 
134   /**
135    * <em>INTERNAL</em> Used by rest interface to access this metadata
136    * attribute which denotes if the table is a -ROOT- region or not
137    *
138    * @see #isRootRegion()
139    */
140   public static final String IS_ROOT = "IS_ROOT";
141   private static final Bytes IS_ROOT_KEY =
142       new Bytes(Bytes.toBytes(IS_ROOT));
143 
144   /**
145    * <em>INTERNAL</em> Used by rest interface to access this metadata
146    * attribute which denotes if it is a catalog table, either
147    * <code> hbase:meta </code> or <code> -ROOT- </code>
148    *
149    * @see #isMetaRegion()
150    */
151   public static final String IS_META = "IS_META";
152   private static final Bytes IS_META_KEY =
153       new Bytes(Bytes.toBytes(IS_META));
154 
155   /**
156    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
157    * attribute which denotes if the deferred log flush option is enabled.
158    * @deprecated Use {@link #DURABILITY} instead.
159    */
160   @Deprecated
161   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
162   @Deprecated
163   private static final Bytes DEFERRED_LOG_FLUSH_KEY =
164       new Bytes(Bytes.toBytes(DEFERRED_LOG_FLUSH));
165 
166   /**
167    * <em>INTERNAL</em> {@link Durability} setting for the table.
168    */
169   public static final String DURABILITY = "DURABILITY";
170   private static final Bytes DURABILITY_KEY =
171       new Bytes(Bytes.toBytes("DURABILITY"));
172 
173   /**
174    * <em>INTERNAL</em> number of region replicas for the table.
175    */
176   public static final String REGION_REPLICATION = "REGION_REPLICATION";
177   private static final Bytes REGION_REPLICATION_KEY =
178       new Bytes(Bytes.toBytes(REGION_REPLICATION));
179 
180   /**
181    * <em>INTERNAL</em> flag to indicate whether or not the memstore should be replicated
182    * for read-replicas (CONSISTENCY =&gt; TIMELINE).
183    */
184   public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
185   private static final Bytes REGION_MEMSTORE_REPLICATION_KEY =
186       new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));
187 
188   /**
189    * <em>INTERNAL</em> Used by shell/rest interface to access this metadata
190    * attribute which denotes if the table should be processed by the region normalizer.
191    *
192    * @see #isNormalizationEnabled()
193    */
194   public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
195   private static final Bytes NORMALIZATION_ENABLED_KEY =
196     new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));
197 
198   /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
199   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
200 
201   /*
202    *  The below are ugly but better than creating them each time till we
203    *  replace booleans being saved as Strings with plain booleans.  Need a
204    *  migration script to do this.  TODO.
205    */
206   private static final Bytes FALSE =
207       new Bytes(Bytes.toBytes(Boolean.FALSE.toString()));
208 
209   private static final Bytes TRUE =
210       new Bytes(Bytes.toBytes(Boolean.TRUE.toString()));
211 
212   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
213 
214   /**
215    * Constant that denotes whether the table is READONLY by default. Default: false.
216    */
217   public static final boolean DEFAULT_READONLY = false;
218 
219   /**
220    * Constant that denotes whether the table is compaction enabled by default
221    */
222   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
223 
224   /**
225    * Constant that denotes whether the table is normalized by default.
226    */
227   public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;
228 
229   /**
230    * Constant that denotes the default maximum size of the memstore after which
231    * the contents are flushed to the store files
232    */
233   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
234 
235   public static final int DEFAULT_REGION_REPLICATION = 1;
236 
237   public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;
238 
239   private final static Map<String, String> DEFAULT_VALUES
240     = new HashMap<String, String>();
241   private final static Set<Bytes> RESERVED_KEYWORDS
242       = new HashSet<Bytes>();
243 
244   static {
245     DEFAULT_VALUES.put(MAX_FILESIZE,
246         String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
247     DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
248     DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
249         String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
250     DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
251         String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
252     DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
253     DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
254     for (String s : DEFAULT_VALUES.keySet()) {
255       RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
256     }
257     RESERVED_KEYWORDS.add(IS_ROOT_KEY);
258     RESERVED_KEYWORDS.add(IS_META_KEY);
259   }
260 
261   /**
262    * Cache of whether this is a meta table or not.
263    */
264   private volatile Boolean meta = null;
265   /**
266    * Cache of whether this is root table or not.
267    */
268   private volatile Boolean root = null;
269 
270   /**
271    * Durability setting for the table
272    */
273   private Durability durability = null;
274 
275   /**
276    * Maps column family name to the respective HColumnDescriptors
277    */
278   private final Map<byte [], HColumnDescriptor> families =
279     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
280 
281   /**
282    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
283    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
284    */
285   @InterfaceAudience.Private
286   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
287     setName(name);
288     for(HColumnDescriptor descriptor : families) {
289       this.families.put(descriptor.getName(), descriptor);
290     }
291   }
292 
293   /**
294    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
295    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
296    */
297   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
298       Map<Bytes, Bytes> values) {
299     setName(name);
300     for(HColumnDescriptor descriptor : families) {
301       this.families.put(descriptor.getName(), descriptor);
302     }
303     for (Map.Entry<Bytes, Bytes> entry :
304         values.entrySet()) {
305       setValue(entry.getKey(), entry.getValue());
306     }
307   }
308 
309   /**
310    * Default constructor which constructs an empty object.
311    * For deserializing an HTableDescriptor instance only.
312    * @deprecated As of release 0.96 (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
313    *             This was made protected in 2.0.0 and will be removed in HBase 3.0.0.
314    *             Used by Writables and Writables are going away.
315    */
316   @Deprecated
317   protected HTableDescriptor() {
318     super();
319   }
320 
321   /**
322    * Construct a table descriptor specifying a TableName object
323    * @param name Table name.
324    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
325    */
326   public HTableDescriptor(final TableName name) {
327     super();
328     setName(name);
329   }
330 
331   /**
332    * Construct a table descriptor specifying a byte array table name
333    * @param name Table name.
334    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
335    */
336   @Deprecated
337   public HTableDescriptor(final byte[] name) {
338     this(TableName.valueOf(name));
339   }
340 
341   /**
342    * Construct a table descriptor specifying a String table name
343    * @param name Table name.
344    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
345    */
346   @Deprecated
347   public HTableDescriptor(final String name) {
348     this(TableName.valueOf(name));
349   }
350 
351   /**
352    * Construct a table descriptor by cloning the descriptor passed as a parameter.
353    * <p>
354    * Makes a deep copy of the supplied descriptor.
355    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
356    * @param desc The descriptor.
357    */
358   public HTableDescriptor(final HTableDescriptor desc) {
359     super();
360     setName(desc.name);
361     setMetaFlags(this.name);
362     for (HColumnDescriptor c: desc.families.values()) {
363       this.families.put(c.getName(), new HColumnDescriptor(c));
364     }
365     for (Map.Entry<Bytes, Bytes> e :
366         desc.values.entrySet()) {
367       setValue(e.getKey(), e.getValue());
368     }
369     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
370       this.configuration.put(e.getKey(), e.getValue());
371     }
372   }
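
  // A minimal construction sketch (the table and family names here are
  // hypothetical):
  //
  //   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
  //   htd.addFamily(new HColumnDescriptor("cf"));
  //   HTableDescriptor copy = new HTableDescriptor(htd);   // deep copy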
373 
374   /*
375    * Set meta flags on this table.
376    * IS_ROOT_KEY is set if it is a -ROOT- table.
377    * IS_META_KEY is set if it is either a -ROOT- or a hbase:meta table.
378    * Called by constructors.
379    * @param name
380    */
381   private void setMetaFlags(final TableName name) {
382     setMetaRegion(isRootRegion() ||
383         name.equals(TableName.META_TABLE_NAME));
384   }
385 
386   /**
387    * Check if the descriptor represents a <code> -ROOT- </code> region.
388    *
389    * @return true if this is a <code> -ROOT- </code> region
390    */
391   public boolean isRootRegion() {
392     if (this.root == null) {
393       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
394     }
395     return this.root.booleanValue();
396   }
397 
398   /**
399    * <em> INTERNAL </em> Used to denote if the current table represents
400    * the <code> -ROOT- </code> region. This is used internally by the
401    * HTableDescriptor constructors
402    *
403    * @param isRoot true if this is the <code> -ROOT- </code> region
404    */
405   protected void setRootRegion(boolean isRoot) {
406     // TODO: Make the value a boolean rather than String of boolean.
407     setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
408   }
409 
410   /**
411    * Checks if this table is <code> hbase:meta </code>
412    * region.
413    *
414    * @return true if this table is <code> hbase:meta </code>
415    * region
416    */
417   public boolean isMetaRegion() {
418     if (this.meta == null) {
419       this.meta = calculateIsMetaRegion();
420     }
421     return this.meta.booleanValue();
422   }
423 
424   private synchronized Boolean calculateIsMetaRegion() {
425     byte [] value = getValue(IS_META_KEY);
426     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
427   }
428 
429   private boolean isSomething(final Bytes key,
430       final boolean valueIfNull) {
431     byte [] value = getValue(key);
432     if (value != null) {
433       return Boolean.valueOf(Bytes.toString(value));
434     }
435     return valueIfNull;
436   }
437 
438   /**
439    * <em> INTERNAL </em> Used to denote if the current table represents
440    * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
441    * internally by the HTableDescriptor constructors
442    *
443    * @param isMeta true if it is either <code> -ROOT- </code> or
444    * <code> hbase:meta </code> region
445    */
446   protected void setMetaRegion(boolean isMeta) {
447     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
448   }
449 
450   /**
451    * Checks if the table is a <code>hbase:meta</code> table
452    *
453    * @return true if table is <code> hbase:meta </code> region.
454    */
455   public boolean isMetaTable() {
456     return isMetaRegion() && !isRootRegion();
457   }
458 
459   /**
460    * Getter for accessing the metadata associated with the key
461    *
462    * @param key The key.
463    * @return The value.
464    * @see #values
465    */
466   public byte[] getValue(byte[] key) {
467     return getValue(new Bytes(key));
468   }
469 
470   private byte[] getValue(final Bytes key) {
471     Bytes ibw = values.get(key);
472     if (ibw == null)
473       return null;
474     return ibw.get();
475   }
476 
477   /**
478    * Getter for accessing the metadata associated with the key
479    *
480    * @param key The key.
481    * @return The value.
482    * @see #values
483    */
484   public String getValue(String key) {
485     byte[] value = getValue(Bytes.toBytes(key));
486     if (value == null)
487       return null;
488     return Bytes.toString(value);
489   }
490 
491   /**
492    * Getter for fetching an unmodifiable {@link #values} map.
493    *
494    * @return unmodifiable map {@link #values}.
495    * @see #values
496    */
497   public Map<Bytes, Bytes> getValues() {
498     // shallow pointer copy
499     return Collections.unmodifiableMap(values);
500   }
501 
502   /**
503    * Setter for storing metadata as a (key, value) pair in {@link #values} map
504    *
505    * @param key The key.
506    * @param value The value.
507    * @see #values
508    */
509   public HTableDescriptor setValue(byte[] key, byte[] value) {
510     setValue(new Bytes(key), new Bytes(value));
511     return this;
512   }
513 
514   /*
515    * @param key The key.
516    * @param value The value.
517    */
518   private HTableDescriptor setValue(final Bytes key,
519       final String value) {
520     setValue(key, new Bytes(Bytes.toBytes(value)));
521     return this;
522   }
523 
524   /**
525    * Setter for storing metadata as a (key, value) pair in {@link #values} map
526    *
527    * @param key The key.
528    * @param value The value.
529    */
530   public HTableDescriptor setValue(final Bytes key,
531       final Bytes value) {
532     if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
533       boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
534       LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
535           "use " + DURABILITY + " instead");
536       setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
537       return this;
538     }
539     values.put(key, value);
540     return this;
541   }
542 
543   /**
544    * Setter for storing metadata as a (key, value) pair in {@link #values} map
545    *
546    * @param key The key.
547    * @param value The value.
548    * @see #values
549    */
550   public HTableDescriptor setValue(String key, String value) {
551     if (value == null) {
552       remove(key);
553     } else {
554       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
555     }
556     return this;
557   }
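
  // Round-trip sketch for the string-keyed accessors ("MY_ATTRIBUTE" is a
  // hypothetical key); a null value removes the entry:
  //
  //   htd.setValue("MY_ATTRIBUTE", "somevalue");
  //   String v = htd.getValue("MY_ATTRIBUTE");   // "somevalue"
  //   htd.setValue("MY_ATTRIBUTE", null);        // same as remove("MY_ATTRIBUTE")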
558 
559   /**
560    * Remove metadata represented by the key from the {@link #values} map
561    *
562    * @param key Key of the key-value pair to remove from the HTableDescriptor
563    * parameters.
564    */
565   public void remove(final String key) {
566     remove(new Bytes(Bytes.toBytes(key)));
567   }
568 
569   /**
570    * Remove metadata represented by the key from the {@link #values} map
571    *
572    * @param key Key of the key-value pair to remove from the HTableDescriptor
573    * parameters.
574    */
575   public void remove(Bytes key) {
576     values.remove(key);
577   }
578 
579   /**
580    * Remove metadata represented by the key from the {@link #values} map
581    *
582    * @param key Key of the key-value pair to remove from the HTableDescriptor
583    * parameters.
584    */
585   public void remove(final byte [] key) {
586     remove(new Bytes(key));
587   }
588 
589   /**
590    * Check if the readOnly flag of the table is set. If the readOnly flag is
591    * set, then the contents of the table can only be read, not modified.
592    *
593    * @return true if all columns in the table should be read only
594    */
595   public boolean isReadOnly() {
596     return isSomething(READONLY_KEY, DEFAULT_READONLY);
597   }
598 
599   /**
600    * Setting the table as read only sets all the columns in the table as read
601    * only. By default all tables are modifiable, but if the readOnly flag is
602    * set to true then the contents of the table can only be read but not modified.
603    *
604    * @param readOnly True if all of the columns in the table should be read
605    * only.
606    */
607   public HTableDescriptor setReadOnly(final boolean readOnly) {
608     return setValue(READONLY_KEY, readOnly? TRUE: FALSE);
609   }
610 
611   /**
612    * Check if the compaction enable flag of the table is true. If the flag is
613    * false, no minor/major compactions will be performed.
614    *
615    * @return true if table compaction enabled
616    */
617   public boolean isCompactionEnabled() {
618     return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
619   }
620 
621   /**
622    * Setting the table compaction enable flag.
623    *
624    * @param isEnable True to enable compaction.
625    */
626   public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
627     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
628     return this;
629   }
630 
631   /**
632    * Check if the normalization enable flag of the table is true. If the flag is
633    * false, the region normalizer will not attempt to normalize this table.
634    *
635    * @return true if region normalization is enabled for this table
636    */
637   public boolean isNormalizationEnabled() {
638     return isSomething(NORMALIZATION_ENABLED_KEY, DEFAULT_NORMALIZATION_ENABLED);
639   }
640 
641   /**
642    * Setting the table normalization enable flag.
643    *
644    * @param isEnable True to enable normalization.
645    */
646   public HTableDescriptor setNormalizationEnabled(final boolean isEnable) {
647     setValue(NORMALIZATION_ENABLED_KEY, isEnable ? TRUE : FALSE);
648     return this;
649   }
650 
651   /**
652    * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
653    * @param durability enum value
654    */
655   public HTableDescriptor setDurability(Durability durability) {
656     this.durability = durability;
657     setValue(DURABILITY_KEY, durability.name());
658     return this;
659   }
660 
661   /**
662    * Returns the durability setting for the table.
663    * @return durability setting for the table.
664    */
665   public Durability getDurability() {
666     if (this.durability == null) {
667       byte[] durabilityValue = getValue(DURABILITY_KEY);
668       if (durabilityValue == null) {
669         this.durability = DEFAULT_DURABLITY;
670       } else {
671         try {
672           this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
673         } catch (IllegalArgumentException ex) {
674           LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
675             + " is not known. Durability:" + Bytes.toString(durabilityValue));
676           this.durability = DEFAULT_DURABLITY;
677         }
678       }
679     }
680     return this.durability;
681   }
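
  // Durability sketch: SKIP_WAL trades durability for write throughput, while
  // USE_DEFAULT (the default) defers to the cluster-wide setting:
  //
  //   htd.setDurability(Durability.SKIP_WAL);
  //   assert htd.getDurability() == Durability.SKIP_WAL;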
682 
683   /**
684    * Get the name of the table
685    *
686    * @return TableName
687    */
688   public TableName getTableName() {
689     return name;
690   }
691 
692   /**
693    * Get the name of the table as a byte array.
694    *
695    * @return name of table
696    * @deprecated Use {@link #getTableName()} instead
697    */
698   @Deprecated
699   public byte[] getName() {
700     return name.getName();
701   }
702 
703   /**
704    * Get the name of the table as a String
705    *
706    * @return name of table as a String
707    */
708   public String getNameAsString() {
709     return name.getNameAsString();
710   }
711 
712   /**
713    * This sets the class associated with the region split policy which
714    * determines when a region split should occur.  The class used by
715    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
716    * @param clazz the class name
717    */
718   public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
719     setValue(SPLIT_POLICY, clazz);
720     return this;
721   }
722 
723   /**
724    * This gets the class associated with the region split policy which
725    * determines when a region split should occur.  The class used by
726    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
727    *
728    * @return the class name of the region split policy for this table.
729    * If this returns null, the default split policy is used.
730    */
731   public String getRegionSplitPolicyClassName() {
732     return getValue(SPLIT_POLICY);
733   }
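
  // Split-policy sketch: the class is recorded by name only and resolved on the
  // region server when regions are opened. ConstantSizeRegionSplitPolicy ships
  // with HBase:
  //
  //   htd.setRegionSplitPolicyClassName(
  //       "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");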
734 
735   /**
736    * Set the name of the table.
737    *
738    * @param name name of table
739    */
740   @Deprecated
741   public HTableDescriptor setName(byte[] name) {
742     setName(TableName.valueOf(name));
743     return this;
744   }
745 
746   @Deprecated
747   public HTableDescriptor setName(TableName name) {
748     this.name = name;
749     setMetaFlags(this.name);
750     return this;
751   }
752 
753   /**
754    * Returns the maximum size up to which a region can grow, after which a region
755    * split is triggered. The region size is represented by the size of the biggest
756    * store file in that region.
757    *
758    * @return max hregion size for table, -1 if not set.
759    *
760    * @see #setMaxFileSize(long)
761    */
762   public long getMaxFileSize() {
763     byte [] value = getValue(MAX_FILESIZE_KEY);
764     if (value != null) {
765       return Long.parseLong(Bytes.toString(value));
766     }
767     return -1;
768   }
769 
770   /**
771    * Sets the maximum size up to which a region can grow, after which a region
772    * split is triggered. The region size is represented by the size of the biggest
773    * store file in that region, i.e. if the biggest store file grows beyond the
774    * maxFileSize, then the region split is triggered. This defaults to
775    * {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
776    * <p>
777    * This is not an absolute value and might vary. If a single row exceeds
778    * the maxFileSize, then the store file size will be greater than maxFileSize since
779    * a single row cannot be split across multiple regions.
780    * </p>
781    *
782    * @param maxFileSize The maximum file size that a store file can grow to
783    * before a split is triggered.
784    */
785   public HTableDescriptor setMaxFileSize(long maxFileSize) {
786     setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
787     return this;
788   }
789 
790   /**
791    * Returns the size of the memstore after which a flush to filesystem is triggered.
792    *
793    * @return memory cache flush size for each hregion, -1 if not set.
794    *
795    * @see #setMemStoreFlushSize(long)
796    */
797   public long getMemStoreFlushSize() {
798     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
799     if (value != null) {
800       return Long.parseLong(Bytes.toString(value));
801     }
802     return -1;
803   }
804 
805   /**
806    * Represents the maximum size of the memstore after which the contents of the
807    * memstore are flushed to the filesystem. This defaults to a size of 128 MB.
808    *
809    * @param memstoreFlushSize memory cache flush size for each hregion
810    */
811   public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
812     setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
813     return this;
814   }
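
  // Sizing sketch: both values are persisted as string-encoded longs, so plain
  // byte counts work (the numbers below are illustrative, not recommendations):
  //
  //   htd.setMaxFileSize(10L * 1024 * 1024 * 1024);   // split at ~10 GB
  //   htd.setMemStoreFlushSize(256L * 1024 * 1024);   // flush at 256 MB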
815 
816   /**
817    * This sets the class associated with the flush policy which determines which stores
818    * need to be flushed when flushing a region. The class used by default is defined in
819    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
820    * @param clazz the class name
821    */
822   public HTableDescriptor setFlushPolicyClassName(String clazz) {
823     setValue(FLUSH_POLICY, clazz);
824     return this;
825   }
826 
827   /**
828    * This gets the class associated with the flush policy which determines which stores need to be
829    * flushed when flushing a region. The class used by default is defined in
830    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
831    * @return the class name of the flush policy for this table. If this returns null, the default
832    *         flush policy is used.
833    */
834   public String getFlushPolicyClassName() {
835     return getValue(FLUSH_POLICY);
836   }
837 
838   /**
839    * Adds a column family.
840    * To update an existing family, use {@link #modifyFamily(HColumnDescriptor)} instead.
841    * @param family HColumnDescriptor of family to add.
842    */
843   public HTableDescriptor addFamily(final HColumnDescriptor family) {
844     if (family.getName() == null || family.getName().length <= 0) {
845       throw new IllegalArgumentException("Family name cannot be null or empty");
846     }
847     if (hasFamily(family.getName())) {
848       throw new IllegalArgumentException("Family '" +
849         family.getNameAsString() + "' already exists so cannot be added");
850     }
851     this.families.put(family.getName(), family);
852     return this;
853   }
854 
855   /**
856    * Modifies the existing column family.
857    * @param family HColumnDescriptor of family to update
858    * @return this (for chained invocation)
859    */
860   public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
861     if (family.getName() == null || family.getName().length <= 0) {
862       throw new IllegalArgumentException("Family name cannot be null or empty");
863     }
864     if (!hasFamily(family.getName())) {
865       throw new IllegalArgumentException("Column family '" + family.getNameAsString()
866         + "' does not exist");
867     }
868     this.families.put(family.getName(), family);
869     return this;
870   }
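
  // Family-management sketch ("cf" is a hypothetical family name): addFamily
  // rejects duplicates and modifyFamily rejects unknown families, so pick the
  // call that matches intent:
  //
  //   HColumnDescriptor cf = new HColumnDescriptor("cf");
  //   htd.addFamily(cf);        // throws if "cf" already exists
  //   cf.setMaxVersions(3);
  //   htd.modifyFamily(cf);     // throws if "cf" does not exist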
871 
872   /**
873    * Checks to see if this table contains the given column family
874    * @param familyName Family name or column name.
875    * @return true if the table contains the specified family name
876    */
877   public boolean hasFamily(final byte [] familyName) {
878     return families.containsKey(familyName);
879   }
880 
881   /**
882    * @return Name of this table and then a map of all of the column family
883    * descriptors.
884    * @see #getNameAsString()
885    */
886   @Override
887   public String toString() {
888     StringBuilder s = new StringBuilder();
889     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
890     s.append(getValues(true));
891     for (HColumnDescriptor f : families.values()) {
892       s.append(", ").append(f);
893     }
894     return s.toString();
895   }
896 
897   /**
898    * @return Name of this table and then a map of all of the column family
899    * descriptors (with only the non-default column family attributes)
900    */
901   public String toStringCustomizedValues() {
902     StringBuilder s = new StringBuilder();
903     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
904     s.append(getValues(false));
905     for(HColumnDescriptor hcd : families.values()) {
906       s.append(", ").append(hcd.toStringCustomizedValues());
907     }
908     return s.toString();
909   }
910 
911   /**
912    * @return map of all table attributes formatted into string.
913    */
914   public String toStringTableAttributes() {
915     return getValues(true).toString();
916   }
917 
918   private StringBuilder getValues(boolean printDefaults) {
919     StringBuilder s = new StringBuilder();
920 
921     // step 1: set partitioning and pruning
922     Set<Bytes> reservedKeys = new TreeSet<Bytes>();
923     Set<Bytes> userKeys = new TreeSet<Bytes>();
924     for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
925       if (entry.getKey() == null || entry.getKey().get() == null) continue;
926       String key = Bytes.toString(entry.getKey().get());
927       // in this section, print out reserved keywords + coprocessor info
928       if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
929         userKeys.add(entry.getKey());
930         continue;
931       }
932       // only print out IS_ROOT/IS_META if true
933       String value = Bytes.toString(entry.getValue().get());
934       if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
935         if (Boolean.valueOf(value) == false) continue;
936       }
937       // see if a reserved key is a default value. may not want to print it out
938       if (printDefaults
939           || !DEFAULT_VALUES.containsKey(key)
940           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
941         reservedKeys.add(entry.getKey());
942       }
943     }
944 
945     // early exit optimization
946     boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
947     if (!hasAttributes && configuration.isEmpty()) return s;
948 
949     s.append(", {");
950     // step 2: printing attributes
951     if (hasAttributes) {
952       s.append("TABLE_ATTRIBUTES => {");
953 
954       // print all reserved keys first
955       boolean printCommaForAttr = false;
956       for (Bytes k : reservedKeys) {
957         String key = Bytes.toString(k.get());
958         String value = Bytes.toStringBinary(values.get(k).get());
959         if (printCommaForAttr) s.append(", ");
960         printCommaForAttr = true;
961         s.append(key);
962         s.append(" => ");
963         s.append('\'').append(value).append('\'');
964       }
965 
966       if (!userKeys.isEmpty()) {
967         // print all non-reserved, advanced config keys as a separate subset
968         if (printCommaForAttr) s.append(", ");
969         printCommaForAttr = true;
970         s.append(HConstants.METADATA).append(" => ");
971         s.append("{");
972         boolean printCommaForCfg = false;
973         for (Bytes k : userKeys) {
974           String key = Bytes.toString(k.get());
975           String value = Bytes.toStringBinary(values.get(k).get());
976           if (printCommaForCfg) s.append(", ");
977           printCommaForCfg = true;
978           s.append('\'').append(key).append('\'');
979           s.append(" => ");
980           s.append('\'').append(value).append('\'');
981         }
982         s.append("}");
983       }
984     }
985 
986     // step 3: printing all configuration:
987     if (!configuration.isEmpty()) {
988       if (hasAttributes) {
989         s.append(", ");
990       }
991       s.append(HConstants.CONFIGURATION).append(" => ");
992       s.append('{');
993       boolean printCommaForConfig = false;
994       for (Map.Entry<String, String> e : configuration.entrySet()) {
995         if (printCommaForConfig) s.append(", ");
996         printCommaForConfig = true;
997         s.append('\'').append(e.getKey()).append('\'');
998         s.append(" => ");
999         s.append('\'').append(e.getValue()).append('\'');
1000       }
1001       s.append("}");
1002     }
1003     s.append("}"); // end METHOD
1004     return s;
1005   }
1006 
1007   /**
1008    * Compare the contents of the descriptor with another one passed as a parameter.
1009    * Checks if the obj passed is an instance of HTableDescriptor; if it is, the
1010    * contents of the descriptors are compared.
1011    *
1012    * @return true if the contents of the two descriptors exactly match
1013    *
1014    * @see java.lang.Object#equals(java.lang.Object)
1015    */
1016   @Override
1017   public boolean equals(Object obj) {
1018     if (this == obj) {
1019       return true;
1020     }
1021     if (obj == null) {
1022       return false;
1023     }
1024     if (!(obj instanceof HTableDescriptor)) {
1025       return false;
1026     }
1027     return compareTo((HTableDescriptor)obj) == 0;
1028   }
1029 
1030   /**
1031    * @see java.lang.Object#hashCode()
1032    */
1033   @Override
1034   public int hashCode() {
1035     int result = this.name.hashCode();
1036     if (this.families.size() > 0) {
1037       for (HColumnDescriptor e: this.families.values()) {
1038         result ^= e.hashCode();
1039       }
1040     }
1041     result ^= values.hashCode();
1042     result ^= configuration.hashCode();
1043     return result;
1044   }
1045 
1046   // Comparable
1047 
1048   /**
1049    * Compares the descriptor with another descriptor which is passed as a parameter.
1050    * This compares the content of the two descriptors and not the reference.
1051    *
1052    * @return 0 if the contents of the descriptors match exactly,
1053    *         a non-zero value otherwise
1054    */
1055   @Override
1056   public int compareTo(final HTableDescriptor other) {
1057     int result = this.name.compareTo(other.name);
1058     if (result == 0) {
1059       result = families.size() - other.families.size();
1060     }
1061     // No further size check is needed here: the subtraction above is 0 only
1062     // when the two family maps are the same size, so when result == 0 we fall
1063     // straight through to comparing the individual families below.
1064 
1065     if (result == 0) {
1066       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1067           it2 = other.families.values().iterator(); it.hasNext(); ) {
1068         result = it.next().compareTo(it2.next());
1069         if (result != 0) {
1070           break;
1071         }
1072       }
1073     }
1074     if (result == 0) {
1075       // punt on comparison for ordering, just calculate difference
1076       result = this.values.hashCode() - other.values.hashCode();
1077       if (result < 0)
1078         result = -1;
1079       else if (result > 0)
1080         result = 1;
1081     }
1082     if (result == 0) {
1083       result = this.configuration.hashCode() - other.configuration.hashCode();
1084       if (result < 0)
1085         result = -1;
1086       else if (result > 0)
1087         result = 1;
1088     }
1089     return result;
1090   }
1091 
1092   /**
1093    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1094    * of all the column families of the table.
1095    *
1096    * @return Immutable collection of {@link HColumnDescriptor} of all the
1097    * column families.
1098    */
1099   public Collection<HColumnDescriptor> getFamilies() {
1100     return Collections.unmodifiableCollection(this.families.values());
1101   }
1102 
1103   /**
1104    * Returns the configured replicas per region
1105    */
1106   public int getRegionReplication() {
1107     byte[] val = getValue(REGION_REPLICATION_KEY);
1108     if (val == null || val.length == 0) {
1109       return DEFAULT_REGION_REPLICATION;
1110     }
1111     return Integer.parseInt(Bytes.toString(val));
1112   }
1113 
1114   /**
1115    * Sets the number of replicas per region.
1116    * @param regionReplication the replication factor per region
1117    */
1118   public HTableDescriptor setRegionReplication(int regionReplication) {
1119     setValue(REGION_REPLICATION_KEY,
1120         new Bytes(Bytes.toBytes(Integer.toString(regionReplication))));
1121     return this;
1122   }
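
  // Read-replica sketch: with more than one replica per region, clients may opt
  // into possibly-stale reads by setting Consistency.TIMELINE on their Gets and
  // Scans:
  //
  //   htd.setRegionReplication(3);   // one primary plus two read replicas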
1123 
1124   /**
1125    * @return true if the read-replicas memstore replication is enabled.
1126    */
1127   public boolean hasRegionMemstoreReplication() {
1128     return isSomething(REGION_MEMSTORE_REPLICATION_KEY, DEFAULT_REGION_MEMSTORE_REPLICATION);
1129   }
1130 
1131   /**
1132    * Enable or disable the memstore replication from the primary region to the replicas.
1133    * The replication will be used only for meta operations (e.g. flush, compaction, ...)
1134    *
1135    * @param memstoreReplication true if the new data written to the primary region
1136    *                                 should be replicated.
1137    *                            false if the secondaries can tolerate having new
1138    *                                  data only when the primary flushes the memstore.
1139    */
1140   public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) {
1141     setValue(REGION_MEMSTORE_REPLICATION_KEY, memstoreReplication ? TRUE : FALSE);
1142     // If the memstore replication is setup, we do not have to wait for observing a flush event
1143     // from primary before starting to serve reads, because gaps from replication is not applicable
1144     setConfiguration(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
1145       Boolean.toString(memstoreReplication));
1146     return this;
1147   }
1148 
1149   /**
1150    * Returns all the column family names of the current table. The family map
1151    * of HTableDescriptor maps family names to HColumnDescriptors.
1152    * This returns all the keys of the family map, which represent the column
1153    * family names of the table.
1154    *
1155    * @return Immutable sorted set of the keys of the families.
1156    */
1157   public Set<byte[]> getFamiliesKeys() {
1158     return Collections.unmodifiableSet(this.families.keySet());
1159   }
1160 
1161   /**
1162    * Returns an array of all the {@link HColumnDescriptor}s of the column families
1163    * of the table.
1164    *
1165    * @return Array of all the HColumnDescriptors of the current table
1166    *
1167    * @see #getFamilies()
1168    */
1169   public HColumnDescriptor[] getColumnFamilies() {
1170     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1171     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1172   }
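
  // Enumeration sketch: iterate the families without being able to mutate the
  // underlying map (the collection returned by getFamilies() is unmodifiable):
  //
  //   for (HColumnDescriptor hcd : htd.getFamilies()) {
  //     System.out.println(hcd.getNameAsString());
  //   }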
1173 
1174 
1175   /**
1176    * Returns the HColumnDescriptor for a specific column family with name as
1177    * specified by the parameter column.
1178    *
1179    * @param column Column family name
1180    * @return Column descriptor for the passed family name, or null if no such
1181    * family exists.
1182    */
1183   public HColumnDescriptor getFamily(final byte [] column) {
1184     return this.families.get(column);
1185   }
1186 
1187 
1188   /**
1189    * Removes the HColumnDescriptor with name specified by the parameter column
1190    * from the table descriptor
1191    *
1192    * @param column Name of the column family to be removed.
1193    * @return Column descriptor of the removed family, or null if the family
1194    * was not present.
1195    */
1196   public HColumnDescriptor removeFamily(final byte [] column) {
1197     return this.families.remove(column);
1198   }
1199 
1200 
1201   /**
1202    * Add a table coprocessor to this table. The coprocessor
1203    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1204    * or Endpoint.
1205    * It won't check if the class can be loaded or not.
1206    * Whether a coprocessor is loadable or not will be determined when
1207    * a region is opened.
1208    * @param className Full class name.
1209    * @throws IOException
1210    */
1211   public HTableDescriptor addCoprocessor(String className) throws IOException {
1212     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1213     return this;
1214   }
1215 
1216 
1217   /**
1218    * Add a table coprocessor to this table. The coprocessor
1219    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1220    * or Endpoint.
1221    * It won't check if the class can be loaded or not.
1222    * Whether a coprocessor is loadable or not will be determined when
1223    * a region is opened.
1224    * @param className Full class name.
1225    * @param jarFilePath Path of the jar file. If it's null, the class will be
1226    * loaded from the default classloader.
1227    * @param priority Priority
1228    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1229    * @throws IOException
1230    */
1231   public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
1232                              int priority, final Map<String, String> kvs)
1233   throws IOException {
1234     if (hasCoprocessor(className)) {
1235       throw new IOException("Coprocessor " + className + " already exists.");
1236     }
1237     // validate parameter kvs
1238     StringBuilder kvString = new StringBuilder();
1239     if (kvs != null) {
1240       for (Map.Entry<String, String> e: kvs.entrySet()) {
1241         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1242           throw new IOException("Illegal parameter key = " + e.getKey());
1243         }
1244         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1245           throw new IOException("Illegal parameter (" + e.getKey() +
1246               ") value = " + e.getValue());
1247         }
1248         if (kvString.length() != 0) {
1249           kvString.append(',');
1250         }
1251         kvString.append(e.getKey());
1252         kvString.append('=');
1253         kvString.append(e.getValue());
1254       }
1255     }
1256 
1257     // generate a coprocessor key
1258     int maxCoprocessorNumber = 0;
1259     Matcher keyMatcher;
1260     for (Map.Entry<Bytes, Bytes> e :
1261         this.values.entrySet()) {
1262       keyMatcher =
1263           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1264               Bytes.toString(e.getKey().get()));
1265       if (!keyMatcher.matches()) {
1266         continue;
1267       }
1268       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)),
1269           maxCoprocessorNumber);
1270     }
1271     maxCoprocessorNumber++;
1272 
1273     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1274     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1275         "|" + className + "|" + Integer.toString(priority) + "|" +
1276         kvString.toString();
1277     setValue(key, value);
1278     return this;
1279   }
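
  // Coprocessor sketch (the class name, jar path and parameter are
  // hypothetical); the value is stored as "path|class|priority|key=value,..."
  // under a generated "coprocessor$N" key:
  //
  //   Map<String, String> args = new HashMap<String, String>();
  //   args.put("arg1", "1");
  //   htd.addCoprocessor("com.example.MyRegionObserver",
  //       new Path("hdfs:///cp/my-cp.jar"), Coprocessor.PRIORITY_USER, args);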
1280 
1281 
1282   /**
1283    * Check if the table has an attached co-processor represented by the name className
1284    *
1285    * @param className - Class name of the co-processor
1286    * @return true if the table has a co-processor with the given class name
1287    */
1288   public boolean hasCoprocessor(String className) {
1289     Matcher keyMatcher;
1290     Matcher valueMatcher;
1291     for (Map.Entry<Bytes, Bytes> e :
1292         this.values.entrySet()) {
1293       keyMatcher =
1294           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1295               Bytes.toString(e.getKey().get()));
1296       if (!keyMatcher.matches()) {
1297         continue;
1298       }
1299       valueMatcher =
1300         HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(
1301             Bytes.toString(e.getValue().get()));
1302       if (!valueMatcher.matches()) {
1303         continue;
1304       }
1305       // get className and compare
1306       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1307       if (clazz.equals(className.trim())) {
1308         return true;
1309       }
1310     }
1311     return false;
1312   }
1313 
1314   /**
1315    * Return the list of attached co-processors, represented by their class names
1316    *
1317    * @return The list of co-processor class names
1318    */
1319   public List<String> getCoprocessors() {
1320     List<String> result = new ArrayList<String>();
1321     Matcher keyMatcher;
1322     Matcher valueMatcher;
1323     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1324       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1325       if (!keyMatcher.matches()) {
1326         continue;
1327       }
1328       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1329           .toString(e.getValue().get()));
1330       if (!valueMatcher.matches()) {
1331         continue;
1332       }
1333       result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
1334     }
1335     return result;
1336   }
1337 
1338   /**
1339    * Remove a coprocessor from those set on the table
1340    * @param className Class name of the co-processor
1341    */
1342   public void removeCoprocessor(String className) {
1343     Bytes match = null;
1344     Matcher keyMatcher;
1345     Matcher valueMatcher;
1346     for (Map.Entry<Bytes, Bytes> e : this.values
1347         .entrySet()) {
1348       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1349           .getKey().get()));
1350       if (!keyMatcher.matches()) {
1351         continue;
1352       }
1353       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1354           .toString(e.getValue().get()));
1355       if (!valueMatcher.matches()) {
1356         continue;
1357       }
1358       // get className and compare
1359       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1360       // remove the CP if it is present
1361       if (clazz.equals(className.trim())) {
1362         match = e.getKey();
1363         break;
1364       }
1365     }
1366     // if we found a match, remove it
1367     if (match != null)
1368       remove(match);
1369   }
1370 
1371   /**
1372    * Returns the {@link Path} object representing the table directory under
1373    * path rootdir
1374    *
1375    * @deprecated Use FSUtils.getTableDir() instead.
1376    *
1377    * @param rootdir qualified path of HBase root directory
1378    * @param tableName name of table
1379    * @return {@link Path} for table
1380    */
1381   @Deprecated
1382   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1383     // This is unfortunate: we have to mirror code from FSUtils.getTableDir since
1384     // there is no module dependency between hbase-client and hbase-server
1385     TableName name = TableName.valueOf(tableName);
1386     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1387               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1388   }
1389 
1390   /** Table descriptor for the <code>hbase:meta</code> catalog table.
1391    * Deprecated; use TableDescriptors#get(TableName.META_TABLE) or
1392    * Admin#getTableDescriptor(TableName.META_TABLE) instead.
1393    */
1394   @Deprecated
1395   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
1396       TableName.META_TABLE_NAME,
1397       new HColumnDescriptor[] {
1398           new HColumnDescriptor(HConstants.CATALOG_FAMILY)
1399               // Ten is an arbitrary number.  Keep versions to help debugging.
1400               .setMaxVersions(10)
1401               .setInMemory(true)
1402               .setBlocksize(8 * 1024)
1403               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1404               // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
1405               .setBloomFilterType(BloomType.NONE)
1406               // Enable cache of data blocks in L1 if more than one caching tier deployed:
1407               // e.g. if using CombinedBlockCache (BucketCache).
1408               .setCacheDataInL1(true),
1409           new HColumnDescriptor(HConstants.TABLE_FAMILY)
1410               // Ten is an arbitrary number.  Keep versions to help debugging.
1411               .setMaxVersions(10)
1412               .setInMemory(true)
1413               .setBlocksize(8 * 1024)
1414               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1415                   // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
1416               .setBloomFilterType(BloomType.NONE)
1417                   // Enable cache of data blocks in L1 if more than one caching tier deployed:
1418                   // e.g. if using CombinedBlockCache (BucketCache).
1419               .setCacheDataInL1(true)
1420       });
1421 
1422   static {
1423     try {
1424       META_TABLEDESC.addCoprocessor(
1425           "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
1426           null, Coprocessor.PRIORITY_SYSTEM, null);
1427     } catch (IOException ex) {
1428       //LOG.warn("exception in loading coprocessor for the hbase:meta table");
1429       throw new RuntimeException(ex);
1430     }
1431   }
1432 
1433   public final static String NAMESPACE_FAMILY_INFO = "info";
1434   public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
1435   public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1436 
1437   /** Table descriptor for namespace table */
1438   public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
1439       TableName.NAMESPACE_TABLE_NAME,
1440       new HColumnDescriptor[] {
1441           new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
1442               // Ten is an arbitrary number.  Keep versions to help debugging.
1443               .setMaxVersions(10)
1444               .setInMemory(true)
1445               .setBlocksize(8 * 1024)
1446               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1447               // Enable cache of data blocks in L1 if more than one caching tier deployed:
1448               // e.g. if using CombinedBlockCache (BucketCache).
1449               .setCacheDataInL1(true)
1450       });
1451 
1452   @Deprecated
1453   public HTableDescriptor setOwner(User owner) {
1454     return setOwnerString(owner != null ? owner.getShortName() : null);
1455   }
1456 
1457   // used by admin.rb:alter(table_name,*args) to update owner.
1458   @Deprecated
1459   public HTableDescriptor setOwnerString(String ownerString) {
1460     if (ownerString != null) {
1461       setValue(OWNER_KEY, ownerString);
1462     } else {
1463       remove(OWNER_KEY);
1464     }
1465     return this;
1466   }
1467 
1468   @Deprecated
1469   public String getOwnerString() {
1470     if (getValue(OWNER_KEY) != null) {
1471       return Bytes.toString(getValue(OWNER_KEY));
1472     }
1473     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1474     // hbase:meta and -ROOT- should return system user as owner, not null (see
1475     // MasterFileSystem.java:bootstrap()).
1476     return null;
1477   }
1478 
1479   /**
1480    * @return This instance serialized with pb with pb magic prefix
1481    * @see #parseFrom(byte[])
1482    */
1483   public byte [] toByteArray() {
1484     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1485   }
1486 
1487   /**
1488    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1489    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1490    * @throws DeserializationException
1491    * @throws IOException
1492    * @see #toByteArray()
1493    */
1494   public static HTableDescriptor parseFrom(final byte [] bytes)
1495   throws DeserializationException, IOException {
1496     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1497       throw new DeserializationException("Expected PB encoded HTableDescriptor");
1498     }
1499     int pblen = ProtobufUtil.lengthOfPBMagic();
1500     TableSchema.Builder builder = TableSchema.newBuilder();
1501     TableSchema ts;
1502     try {
1503       ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1504     } catch (InvalidProtocolBufferException e) {
1505       throw new DeserializationException(e);
1506     }
1507     return convert(ts);
1508   }
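
  // Serialization round-trip sketch: toByteArray() and parseFrom() are inverses
  // of each other (parseFrom verifies the pb magic prefix first):
  //
  //   byte[] pb = htd.toByteArray();
  //   HTableDescriptor restored = HTableDescriptor.parseFrom(pb);
  //   assert htd.equals(restored);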
1509 
1510   /**
1511    * @return The current {@link HTableDescriptor} converted into a pb TableSchema instance.
1512    */
1513   public TableSchema convert() {
1514     TableSchema.Builder builder = TableSchema.newBuilder();
1515     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1516     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1517       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1518       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1519       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1520       builder.addAttributes(aBuilder.build());
1521     }
1522     for (HColumnDescriptor hcd: getColumnFamilies()) {
1523       builder.addColumnFamilies(hcd.convert());
1524     }
1525     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1526       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1527       aBuilder.setName(e.getKey());
1528       aBuilder.setValue(e.getValue());
1529       builder.addConfiguration(aBuilder.build());
1530     }
1531     return builder.build();
1532   }
1533 
1534   /**
1535    * @param ts A pb TableSchema instance.
1536    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1537    */
1538   public static HTableDescriptor convert(final TableSchema ts) {
1539     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1540     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1541     int index = 0;
1542     for (ColumnFamilySchema cfs: list) {
1543       hcds[index++] = HColumnDescriptor.convert(cfs);
1544     }
1545     HTableDescriptor htd = new HTableDescriptor(
1546         ProtobufUtil.toTableName(ts.getTableName()),
1547         hcds);
1548     for (BytesBytesPair a: ts.getAttributesList()) {
1549       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1550     }
1551     for (NameStringPair a: ts.getConfigurationList()) {
1552       htd.setConfiguration(a.getName(), a.getValue());
1553     }
1554     return htd;
1555   }
1556 
1557   /**
1558    * Getter for accessing the configuration value by key
1559    */
1560   public String getConfigurationValue(String key) {
1561     return configuration.get(key);
1562   }
1563 
1564   /**
1565    * Getter for fetching an unmodifiable {@link #configuration} map.
1566    */
1567   public Map<String, String> getConfiguration() {
1568     // shallow pointer copy
1569     return Collections.unmodifiableMap(configuration);
1570   }
1571 
1572   /**
1573    * Setter for storing a configuration setting in {@link #configuration} map.
1574    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1575    * @param value String value. If null, removes the setting.
1576    */
1577   public HTableDescriptor setConfiguration(String key, String value) {
1578     if (value == null) {
1579       removeConfiguration(key);
1580     } else {
1581       configuration.put(key, value);
1582     }
1583     return this;
1584   }
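
  // Per-table configuration sketch: keys mirror hbase-site.xml keys and
  // override them for this table only; a null value removes the override
  // (the value below is illustrative):
  //
  //   htd.setConfiguration("hbase.hregion.majorcompaction", "0");
  //   htd.setConfiguration("hbase.hregion.majorcompaction", null);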
1585 
1586   /**
1587    * Remove a config setting represented by the key from the {@link #configuration} map
1588    */
1589   public void removeConfiguration(final String key) {
1590     configuration.remove(key);
1591   }
1592 }