1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.IOException;
22  import java.util.ArrayList;
23  import java.util.Collection;
24  import java.util.Collections;
25  import java.util.HashMap;
26  import java.util.HashSet;
27  import java.util.Iterator;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.Set;
31  import java.util.TreeMap;
32  import java.util.TreeSet;
33  import java.util.regex.Matcher;
34  
35  import org.apache.commons.logging.Log;
36  import org.apache.commons.logging.LogFactory;
37  import org.apache.hadoop.fs.Path;
38  import org.apache.hadoop.hbase.classification.InterfaceAudience;
39  import org.apache.hadoop.hbase.classification.InterfaceStability;
40  import org.apache.hadoop.hbase.client.Durability;
41  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
42  import org.apache.hadoop.hbase.exceptions.DeserializationException;
43  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
44  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
45  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
46  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
47  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
48  import org.apache.hadoop.hbase.regionserver.BloomType;
49  import org.apache.hadoop.hbase.security.User;
50  import org.apache.hadoop.hbase.util.ByteStringer;
51  import org.apache.hadoop.hbase.util.Bytes;
52  
53  /**
54   * HTableDescriptor contains the details about an HBase table, such as the descriptors of
55   * all the column families, whether the table is a catalog table (<code> -ROOT- </code> or
56   * <code> hbase:meta </code>), whether the table is read only, the maximum size of the
57   * memstore, when the region split should occur, the coprocessors associated with it, etc.
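 *
 * <p>A minimal construction sketch; the table and family names below are illustrative:
 * <pre>{@code
 * HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
 * htd.addFamily(new HColumnDescriptor("cf"));
 * }</pre>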
58   */
59  @InterfaceAudience.Public
60  @InterfaceStability.Evolving
61  public class HTableDescriptor implements Comparable<HTableDescriptor> {
62  
63    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
64  
65    private TableName name = null;
66  
67    /**
68     * A map which holds the metadata information of the table. This metadata
69     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
70     * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE, etc.
71     */
72    private final Map<Bytes, Bytes> values =
73        new HashMap<Bytes, Bytes>();
74  
75    /**
76     * A map which holds the configuration specific to the table.
77     * The keys of the map have the same names as config keys and override the defaults with
78     * table-specific settings. Example usage may be for compactions, etc.
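   * <p>For example, a hypothetical per-table compaction override:
   * <pre>{@code
   * htd.setConfiguration("hbase.hstore.compaction.min", "5");
   * }</pre>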
79     */
80    private final Map<String, String> configuration = new HashMap<String, String>();
81  
82    public static final String SPLIT_POLICY = "SPLIT_POLICY";
83  
84    /**
85     * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
86     * attribute which denotes the maximum size of the store file after which
87     * a region split occurs
88     *
89     * @see #getMaxFileSize()
90     */
91    public static final String MAX_FILESIZE = "MAX_FILESIZE";
92    private static final Bytes MAX_FILESIZE_KEY =
93        new Bytes(Bytes.toBytes(MAX_FILESIZE));
94  
95    public static final String OWNER = "OWNER";
96    public static final Bytes OWNER_KEY =
97        new Bytes(Bytes.toBytes(OWNER));
98  
99    /**
100    * <em>INTERNAL</em> Used by the REST interface to access this metadata
101    * attribute which denotes if the table is read only
102    *
103    * @see #isReadOnly()
104    */
105   public static final String READONLY = "READONLY";
106   private static final Bytes READONLY_KEY =
107       new Bytes(Bytes.toBytes(READONLY));
108 
109   /**
110    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
111    * attribute which denotes if the table is compaction enabled
112    *
113    * @see #isCompactionEnabled()
114    */
115   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
116   private static final Bytes COMPACTION_ENABLED_KEY =
117       new Bytes(Bytes.toBytes(COMPACTION_ENABLED));
118 
119   /**
120    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
121    * attribute which represents the maximum size of the memstore after which
122    * its contents are flushed onto the disk
123    *
124    * @see #getMemStoreFlushSize()
125    */
126   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
127   private static final Bytes MEMSTORE_FLUSHSIZE_KEY =
128       new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
129 
130   public static final String FLUSH_POLICY = "FLUSH_POLICY";
131 
132   /**
133    * <em>INTERNAL</em> Used by the REST interface to access this metadata
134    * attribute which denotes if the table is a -ROOT- region or not
135    *
136    * @see #isRootRegion()
137    */
138   public static final String IS_ROOT = "IS_ROOT";
139   private static final Bytes IS_ROOT_KEY =
140       new Bytes(Bytes.toBytes(IS_ROOT));
141 
142   /**
143    * <em>INTERNAL</em> Used by the REST interface to access this metadata
144    * attribute which denotes if it is a catalog table, either
145    * <code> hbase:meta </code> or <code> -ROOT- </code>
146    *
147    * @see #isMetaRegion()
148    */
149   public static final String IS_META = "IS_META";
150   private static final Bytes IS_META_KEY =
151       new Bytes(Bytes.toBytes(IS_META));
152 
153   /**
154    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
155    * attribute which denotes if the deferred log flush option is enabled.
156    * @deprecated Use {@link #DURABILITY} instead.
157    */
158   @Deprecated
159   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
160   @Deprecated
161   private static final Bytes DEFERRED_LOG_FLUSH_KEY =
162       new Bytes(Bytes.toBytes(DEFERRED_LOG_FLUSH));
163 
164   /**
165    * <em>INTERNAL</em> {@link Durability} setting for the table.
166    */
167   public static final String DURABILITY = "DURABILITY";
168   private static final Bytes DURABILITY_KEY =
169       new Bytes(Bytes.toBytes("DURABILITY"));
170 
171   /**
172    * <em>INTERNAL</em> number of region replicas for the table.
173    */
174   public static final String REGION_REPLICATION = "REGION_REPLICATION";
175   private static final Bytes REGION_REPLICATION_KEY =
176       new Bytes(Bytes.toBytes(REGION_REPLICATION));
177 
178   /**
179    * <em>INTERNAL</em> flag to indicate whether or not the memstore should be replicated
180    * for read-replicas (CONSISTENCY =&gt; TIMELINE).
181    */
182   public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
183   private static final Bytes REGION_MEMSTORE_REPLICATION_KEY =
184       new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));
185 
186   /**
187    * <em>INTERNAL</em> Used by the shell/REST interface to access this metadata
188    * attribute which denotes if the table should be treated by the region normalizer.
189    *
190    * @see #isNormalizationEnabled()
191    */
192   public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
193   private static final Bytes NORMALIZATION_ENABLED_KEY =
194     new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));
195 
196   /** Default durability for HTD is USE_DEFAULT, which defaults to the HBase-global default value */
197   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
198 
199   /*
200    *  The below are ugly but better than creating them each time till we
201    *  replace booleans being saved as Strings with plain booleans.  Need a
202    *  migration script to do this.  TODO.
203    */
204   private static final Bytes FALSE =
205       new Bytes(Bytes.toBytes(Boolean.FALSE.toString()));
206 
207   private static final Bytes TRUE =
208       new Bytes(Bytes.toBytes(Boolean.TRUE.toString()));
209 
210   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
211 
212   /**
213    * Constant that denotes whether the table is READONLY by default; the default is false
214    */
215   public static final boolean DEFAULT_READONLY = false;
216 
217   /**
218    * Constant that denotes whether the table is compaction enabled by default
219    */
220   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
221 
222   /**
223    * Constant that denotes whether the table is normalized by default.
224    */
225   public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;
226 
227   /**
228    * Constant that denotes the default maximum size of the memstore, after which
229    * the contents are flushed to the store files
230    */
231   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
232 
233   public static final int DEFAULT_REGION_REPLICATION = 1;
234 
235   public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;
236 
237   private final static Map<String, String> DEFAULT_VALUES
238     = new HashMap<String, String>();
239   private final static Set<Bytes> RESERVED_KEYWORDS
240       = new HashSet<Bytes>();
241 
242   static {
243     DEFAULT_VALUES.put(MAX_FILESIZE,
244         String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
245     DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
246     DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
247         String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
248     DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
249         String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
250     DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
251     DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
252     DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
253     for (String s : DEFAULT_VALUES.keySet()) {
254       RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
255     }
256     RESERVED_KEYWORDS.add(IS_ROOT_KEY);
257     RESERVED_KEYWORDS.add(IS_META_KEY);
258   }
259 
260   /**
261    * Cache of whether this is a meta table or not.
262    */
263   private volatile Boolean meta = null;
264   /**
265    * Cache of whether this is a root table or not.
266    */
267   private volatile Boolean root = null;
268 
269   /**
270    * Durability setting for the table
271    */
272   private Durability durability = null;
273 
274   /**
275    * Maps column family name to the respective HColumnDescriptors
276    */
277   private final Map<byte [], HColumnDescriptor> families =
278     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
279 
280   /**
281    * <em> INTERNAL </em> Private constructor used internally to create table descriptors for
282    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
283    */
284   @InterfaceAudience.Private
285   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
286     setName(name);
287     for(HColumnDescriptor descriptor : families) {
288       this.families.put(descriptor.getName(), descriptor);
289     }
290   }
291 
292   /**
293    * <em> INTERNAL </em> Private constructor used internally to create table descriptors for
294    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
295    */
296   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
297       Map<Bytes, Bytes> values) {
298     setName(name);
299     for(HColumnDescriptor descriptor : families) {
300       this.families.put(descriptor.getName(), descriptor);
301     }
302     for (Map.Entry<Bytes, Bytes> entry :
303         values.entrySet()) {
304       setValue(entry.getKey(), entry.getValue());
305     }
306   }
307 
308   /**
309    * Default constructor which constructs an empty object.
310    * For deserializing an HTableDescriptor instance only.
311    * @deprecated As of release 0.96 (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
312    *             This was made protected in 2.0.0 and will be removed in HBase 3.0.0.
313    *             Used by Writables and Writables are going away.
314    */
315   @Deprecated
316   protected HTableDescriptor() {
317     super();
318   }
319 
320   /**
321    * Construct a table descriptor specifying a TableName object
322    * @param name Table name.
323    * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 HBASE: (HBASE-174) Un-openable tablename bug</a>
324    */
325   public HTableDescriptor(final TableName name) {
326     super();
327     setName(name);
328   }
329 
330   /**
331    * Construct a table descriptor specifying a byte array table name
332    * @param name Table name.
333    * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 (HBASE-174) HBASE: Un-openable tablename bug</a>
334    */
335   @Deprecated
336   public HTableDescriptor(final byte[] name) {
337     this(TableName.valueOf(name));
338   }
339 
340   /**
341    * Construct a table descriptor specifying a String table name
342    * @param name Table name.
343    * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 (HBASE-174) HBASE: Un-openable tablename bug</a>
344    */
345   @Deprecated
346   public HTableDescriptor(final String name) {
347     this(TableName.valueOf(name));
348   }
349 
350   /**
351    * Construct a table descriptor by cloning the descriptor passed as a parameter.
352    * <p>
353    * Makes a deep copy of the supplied descriptor.
354    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
355    * @param desc The descriptor.
356    */
357   public HTableDescriptor(final HTableDescriptor desc) {
358     this(desc.name, desc);
359   }
360 
361   /**
362    * Construct a table descriptor by cloning the descriptor passed as a parameter
363    * but using a different table name.
364    * <p>
365    * Makes a deep copy of the supplied descriptor.
366    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
367    * @param name Table name.
368    * @param desc The descriptor.
369    */
370   public HTableDescriptor(final TableName name, final HTableDescriptor desc) {
371     super();
372     setName(name);
373     setMetaFlags(this.name);
374     for (HColumnDescriptor c: desc.families.values()) {
375       this.families.put(c.getName(), new HColumnDescriptor(c));
376     }
377     for (Map.Entry<Bytes, Bytes> e :
378         desc.values.entrySet()) {
379       setValue(e.getKey(), e.getValue());
380     }
381     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
382       this.configuration.put(e.getKey(), e.getValue());
383     }
384   }
385 
386   /*
387    * Set meta flags on this table.
388    * IS_ROOT_KEY is set if it's a -ROOT- table
389    * IS_META_KEY is set if it's either a -ROOT- or a hbase:meta table
390    * Called by constructors.
391    * @param name
392    */
393   private void setMetaFlags(final TableName name) {
394     setMetaRegion(isRootRegion() ||
395         name.equals(TableName.META_TABLE_NAME));
396   }
397 
398   /**
399    * Check if the descriptor represents a <code> -ROOT- </code> region.
400    *
401    * @return true if this is a <code> -ROOT- </code> region
402    */
403   public boolean isRootRegion() {
404     if (this.root == null) {
405       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
406     }
407     return this.root.booleanValue();
408   }
409 
410   /**
411    * <em> INTERNAL </em> Used to denote if the current table represents
412    * <code> -ROOT- </code> region. This is used internally by the
413    * HTableDescriptor constructors
414    *
415    * @param isRoot true if this is the <code> -ROOT- </code> region
416    */
417   protected void setRootRegion(boolean isRoot) {
418     // TODO: Make the value a boolean rather than String of boolean.
419     setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
420   }
421 
422   /**
423    * Checks if this table is the <code> hbase:meta </code>
424    * region.
425    *
426    * @return true if this table is the <code> hbase:meta </code>
427    * region
428    */
429   public boolean isMetaRegion() {
430     if (this.meta == null) {
431       this.meta = calculateIsMetaRegion();
432     }
433     return this.meta.booleanValue();
434   }
435 
436   private synchronized Boolean calculateIsMetaRegion() {
437     byte [] value = getValue(IS_META_KEY);
438     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
439   }
440 
441   private boolean isSomething(final Bytes key,
442       final boolean valueIfNull) {
443     byte [] value = getValue(key);
444     if (value != null) {
445       return Boolean.valueOf(Bytes.toString(value));
446     }
447     return valueIfNull;
448   }
449 
450   /**
451    * <em> INTERNAL </em> Used to denote if the current table represents
452    * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
453    * internally by the HTableDescriptor constructors
454    *
455    * @param isMeta true if it's either <code> -ROOT- </code> or
456    * <code> hbase:meta </code> region
457    */
458   protected void setMetaRegion(boolean isMeta) {
459     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
460   }
461 
462   /**
463    * Checks if the table is a <code>hbase:meta</code> table
464    *
465    * @return true if table is <code> hbase:meta </code> region.
466    */
467   public boolean isMetaTable() {
468     return isMetaRegion() && !isRootRegion();
469   }
470 
471   /**
472    * Getter for accessing the metadata associated with the key
473    *
474    * @param key The key.
475    * @return The value.
476    * @see #values
477    */
478   public byte[] getValue(byte[] key) {
479     return getValue(new Bytes(key));
480   }
481 
482   private byte[] getValue(final Bytes key) {
483     Bytes ibw = values.get(key);
484     if (ibw == null)
485       return null;
486     return ibw.get();
487   }
488 
489   /**
490    * Getter for accessing the metadata associated with the key
491    *
492    * @param key The key.
493    * @return The value.
494    * @see #values
495    */
496   public String getValue(String key) {
497     byte[] value = getValue(Bytes.toBytes(key));
498     if (value == null)
499       return null;
500     return Bytes.toString(value);
501   }
502 
503   /**
504    * Getter for fetching an unmodifiable {@link #values} map.
505    *
506    * @return unmodifiable map {@link #values}.
507    * @see #values
508    */
509   public Map<Bytes, Bytes> getValues() {
510     // shallow pointer copy
511     return Collections.unmodifiableMap(values);
512   }
513 
514   /**
515    * Setter for storing metadata as a (key, value) pair in {@link #values} map
516    *
517    * @param key The key.
518    * @param value The value.
519    * @see #values
520    */
521   public HTableDescriptor setValue(byte[] key, byte[] value) {
522     setValue(new Bytes(key), new Bytes(value));
523     return this;
524   }
525 
526   /*
527    * @param key The key.
528    * @param value The value.
529    */
530   private HTableDescriptor setValue(final Bytes key,
531       final String value) {
532     setValue(key, new Bytes(Bytes.toBytes(value)));
533     return this;
534   }
535 
536   /*
537    * Setter for storing metadata as a (key, value) pair in {@link #values} map
538    *
539    * @param key The key.
540    * @param value The value.
541    */
542   public HTableDescriptor setValue(final Bytes key, final Bytes value) {
543     if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
544       boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
545       LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
546           "use " + DURABILITY + " instead");
547       setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
548       return this;
549     }
550     values.put(key, value);
551     return this;
552   }
553 
554   /**
555    * Setter for storing metadata as a (key, value) pair in {@link #values} map
556    *
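   * <p>For example, setting a reserved attribute by name (the value shown is illustrative):
   * <pre>{@code
   * htd.setValue("MAX_FILESIZE", "134217728"); // 128 MB
   * }</pre>
   *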
557    * @param key The key.
558    * @param value The value.
559    * @see #values
560    */
561   public HTableDescriptor setValue(String key, String value) {
562     if (value == null) {
563       remove(key);
564     } else {
565       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
566     }
567     return this;
568   }
569 
570   /**
571    * Remove metadata represented by the key from the {@link #values} map
572    *
573    * @param key Key of the entry to be removed from the HTableDescriptor
574    * parameters.
575    */
576   public void remove(final String key) {
577     remove(new Bytes(Bytes.toBytes(key)));
578   }
579 
580   /**
581    * Remove metadata represented by the key from the {@link #values} map
582    *
583    * @param key Key of the entry to be removed from the HTableDescriptor
584    * parameters.
585    */
586   public void remove(Bytes key) {
587     values.remove(key);
588   }
589 
590   /**
591    * Remove metadata represented by the key from the {@link #values} map
592    *
593    * @param key Key of the entry to be removed from the HTableDescriptor
594    * parameters.
595    */
596   public void remove(final byte [] key) {
597     remove(new Bytes(key));
598   }
599 
600   /**
601    * Check if the readOnly flag of the table is set. If the readOnly flag is
602    * set then the contents of the table can only be read, not modified.
603    *
604    * @return true if all columns in the table should be read only
605    */
606   public boolean isReadOnly() {
607     return isSomething(READONLY_KEY, DEFAULT_READONLY);
608   }
609 
610   /**
611    * Setting the table as read only sets all the columns in the table as read
612    * only. By default all tables are modifiable, but if the readOnly flag is
613    * set to true then the contents of the table can only be read but not modified.
614    *
615    * @param readOnly True if all of the columns in the table should be read
616    * only.
617    */
618   public HTableDescriptor setReadOnly(final boolean readOnly) {
619     return setValue(READONLY_KEY, readOnly? TRUE: FALSE);
620   }
621 
622   /**
623    * Check if the compaction enable flag of the table is true. If the flag is
624    * false then no minor/major compactions will be performed.
625    *
626    * @return true if table compaction enabled
627    */
628   public boolean isCompactionEnabled() {
629     return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
630   }
631 
632   /**
633    * Setting the table compaction enable flag.
634    *
635    * @param isEnable True to enable compaction.
636    */
637   public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
638     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
639     return this;
640   }
641 
642   /**
643    * Check if the normalization enable flag of the table is true. If the flag is
644    * false then the region normalizer won't attempt to normalize this table.
645    *
646    * @return true if region normalization is enabled for this table
647    */
648   public boolean isNormalizationEnabled() {
649     return isSomething(NORMALIZATION_ENABLED_KEY, DEFAULT_NORMALIZATION_ENABLED);
650   }
651 
652   /**
653    * Setting the table normalization enable flag.
654    *
655    * @param isEnable True to enable normalization.
656    */
657   public HTableDescriptor setNormalizationEnabled(final boolean isEnable) {
658     setValue(NORMALIZATION_ENABLED_KEY, isEnable ? TRUE : FALSE);
659     return this;
660   }
661 
662   /**
663    * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
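   * <p>For example, a sketch that trades some durability for lower write latency:
   * <pre>{@code
   * htd.setDurability(Durability.ASYNC_WAL);
   * }</pre>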
664    * @param durability enum value
665    */
666   public HTableDescriptor setDurability(Durability durability) {
667     this.durability = durability;
668     setValue(DURABILITY_KEY, durability.name());
669     return this;
670   }
671 
672   /**
673    * Returns the durability setting for the table.
674    * @return durability setting for the table.
675    */
676   public Durability getDurability() {
677     if (this.durability == null) {
678       byte[] durabilityValue = getValue(DURABILITY_KEY);
679       if (durabilityValue == null) {
680         this.durability = DEFAULT_DURABLITY;
681       } else {
682         try {
683           this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
684         } catch (IllegalArgumentException ex) {
685           LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
686             + " is not known. Durability:" + Bytes.toString(durabilityValue));
687           this.durability = DEFAULT_DURABLITY;
688         }
689       }
690     }
691     return this.durability;
692   }
693 
694   /**
695    * Get the name of the table
696    *
697    * @return TableName
698    */
699   public TableName getTableName() {
700     return name;
701   }
702 
703   /**
704    * Get the name of the table as a byte array.
705    *
706    * @return name of table
707    * @deprecated Use {@link #getTableName()} instead
708    */
709   @Deprecated
710   public byte[] getName() {
711     return name.getName();
712   }
713 
714   /**
715    * Get the name of the table as a String
716    *
717    * @return name of table as a String
718    */
719   public String getNameAsString() {
720     return name.getNameAsString();
721   }
722 
723   /**
724    * This sets the class associated with the region split policy which
725    * determines when a region split should occur.  The class used by
726    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
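   * <p>For example, to split regions purely on store file size:
   * <pre>{@code
   * htd.setRegionSplitPolicyClassName(
   *     "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");
   * }</pre>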
727    * @param clazz the class name
728    */
729   public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
730     setValue(SPLIT_POLICY, clazz);
731     return this;
732   }
733 
734   /**
735    * This gets the class associated with the region split policy which
736    * determines when a region split should occur.  The class used by
737    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
738    *
739    * @return the class name of the region split policy for this table.
740    * If this returns null, the default split policy is used.
741    */
742    public String getRegionSplitPolicyClassName() {
743     return getValue(SPLIT_POLICY);
744   }
745 
746   /**
747    * Set the name of the table.
748    *
749    * @param name name of table
750    */
751   @Deprecated
752   public HTableDescriptor setName(byte[] name) {
753     setName(TableName.valueOf(name));
754     return this;
755   }
756 
757   @Deprecated
758   public HTableDescriptor setName(TableName name) {
759     this.name = name;
760     setMetaFlags(this.name);
761     return this;
762   }
763 
764   /**
765    * Returns the maximum size up to which a region can grow, after which a region
766    * split is triggered. The region size is represented by the size of the biggest
767    * store file in that region.
768    *
769    * @return max hregion size for table, -1 if not set.
770    *
771    * @see #setMaxFileSize(long)
772    */
773   public long getMaxFileSize() {
774     byte [] value = getValue(MAX_FILESIZE_KEY);
775     if (value != null) {
776       return Long.parseLong(Bytes.toString(value));
777     }
778     return -1;
779   }
780 
781   /**
782    * Sets the maximum size up to which a region can grow, after which a region
783    * split is triggered. The region size is represented by the size of the biggest
784    * store file in that region, i.e. if the biggest store file grows beyond the
785    * maxFileSize, then a region split is triggered. This defaults to a value of
786    * 256 MB.
787    * <p>
788    * This is not an absolute value and might vary. If a single row exceeds
789    * the maxFileSize then the store file size will be greater than maxFileSize since
790    * a single row cannot be split across multiple regions
791    * </p>
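   * <p>For example (the threshold below is illustrative):
   * <pre>{@code
   * htd.setMaxFileSize(10L * 1024 * 1024 * 1024); // split at ~10 GB
   * }</pre>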
792    *
793    * @param maxFileSize The maximum file size that a store file can grow to
794    * before a split is triggered.
795    */
796   public HTableDescriptor setMaxFileSize(long maxFileSize) {
797     setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
798     return this;
799   }
800 
801   /**
802    * Returns the size of the memstore after which a flush to filesystem is triggered.
803    *
804    * @return memory cache flush size for each hregion, -1 if not set.
805    *
806    * @see #setMemStoreFlushSize(long)
807    */
808   public long getMemStoreFlushSize() {
809     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
810     if (value != null) {
811       return Long.parseLong(Bytes.toString(value));
812     }
813     return -1;
814   }
815 
816   /**
817    * Represents the maximum size of the memstore after which the contents of the
818    * memstore are flushed to the filesystem. This defaults to a size of 64 MB.
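   * <p>For example:
   * <pre>{@code
   * htd.setMemStoreFlushSize(128 * 1024 * 1024L); // flush at 128 MB
   * }</pre>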
819    *
820    * @param memstoreFlushSize memory cache flush size for each hregion
821    */
822   public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
823     setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
824     return this;
825   }
826 
827   /**
828    * This sets the class associated with the flush policy which determines which stores
829    * need to be flushed when flushing a region. The class used by default is defined in
830    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
831    * @param clazz the class name
832    */
833   public HTableDescriptor setFlushPolicyClassName(String clazz) {
834     setValue(FLUSH_POLICY, clazz);
835     return this;
836   }
837 
838   /**
839    * This gets the class associated with the flush policy which determines which stores need to be
840    * flushed when flushing a region. The class used by default is defined in
841    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
842    * @return the class name of the flush policy for this table. If this returns null, the default
843    *         flush policy is used.
844    */
845   public String getFlushPolicyClassName() {
846     return getValue(FLUSH_POLICY);
847   }
848 
849   /**
850    * Adds a column family.
851    * To update an existing family, use {@link #modifyFamily(HColumnDescriptor)} instead.
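   * <p>For example:
   * <pre>{@code
   * htd.addFamily(new HColumnDescriptor("cf").setMaxVersions(3));
   * }</pre>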
852    * @param family HColumnDescriptor of family to add.
853    */
854   public HTableDescriptor addFamily(final HColumnDescriptor family) {
855     if (family.getName() == null || family.getName().length <= 0) {
856       throw new IllegalArgumentException("Family name cannot be null or empty");
857     }
858     if (hasFamily(family.getName())) {
859       throw new IllegalArgumentException("Family '" +
860         family.getNameAsString() + "' already exists so cannot be added");
861     }
862     this.families.put(family.getName(), family);
863     return this;
864   }
865 
866   /**
867    * Modifies the existing column family.
868    * @param family HColumnDescriptor of family to update
869    * @return this (for chained invocation)
870    */
871   public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
872     if (family.getName() == null || family.getName().length <= 0) {
873       throw new IllegalArgumentException("Family name cannot be null or empty");
874     }
875     if (!hasFamily(family.getName())) {
876       throw new IllegalArgumentException("Column family '" + family.getNameAsString()
877         + "' does not exist");
878     }
879     this.families.put(family.getName(), family);
880     return this;
881   }
882 
883   /**
884    * Checks to see if this table contains the given column family
885    * @param familyName Family name or column name.
886    * @return true if the table contains the specified family name
887    */
888   public boolean hasFamily(final byte [] familyName) {
889     return families.containsKey(familyName);
890   }
891 
892   /**
893    * @return Name of this table and then a map of all of the column family
894    * descriptors.
895    * @see #getNameAsString()
896    */
897   @Override
898   public String toString() {
899     StringBuilder s = new StringBuilder();
900     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
901     s.append(getValues(true));
902     for (HColumnDescriptor f : families.values()) {
903       s.append(", ").append(f);
904     }
905     return s.toString();
906   }
907 
908   /**
909    * @return Name of this table and then a map of all of the column family
910    * descriptors (with only the non-default column family attributes)
911    */
912   public String toStringCustomizedValues() {
913     StringBuilder s = new StringBuilder();
914     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
915     s.append(getValues(false));
916     for(HColumnDescriptor hcd : families.values()) {
917       s.append(", ").append(hcd.toStringCustomizedValues());
918     }
919     return s.toString();
920   }
921 
922   /**
923    * @return map of all table attributes formatted into string.
924    */
925   public String toStringTableAttributes() {
926    return getValues(true).toString();
927   }
928 
929   private StringBuilder getValues(boolean printDefaults) {
930     StringBuilder s = new StringBuilder();
931 
932     // step 1: set partitioning and pruning
933     Set<Bytes> reservedKeys = new TreeSet<Bytes>();
934     Set<Bytes> userKeys = new TreeSet<Bytes>();
935     for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
936       if (entry.getKey() == null || entry.getKey().get() == null) continue;
937       String key = Bytes.toString(entry.getKey().get());
938       // in this section, print out reserved keywords + coprocessor info
939       if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
940         userKeys.add(entry.getKey());
941         continue;
942       }
943       // only print out IS_ROOT/IS_META if true
944       String value = Bytes.toString(entry.getValue().get());
945       if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
946         if (Boolean.valueOf(value) == false) continue;
947       }
948       // see if a reserved key is a default value. may not want to print it out
949       if (printDefaults
950           || !DEFAULT_VALUES.containsKey(key)
951           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
952         reservedKeys.add(entry.getKey());
953       }
954     }
955 
956     // early exit optimization
957     boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
958     if (!hasAttributes && configuration.isEmpty()) return s;
959 
960     s.append(", {");
961     // step 2: printing attributes
962     if (hasAttributes) {
963       s.append("TABLE_ATTRIBUTES => {");
964 
965       // print all reserved keys first
966       boolean printCommaForAttr = false;
967       for (Bytes k : reservedKeys) {
968         String key = Bytes.toString(k.get());
969         String value = Bytes.toStringBinary(values.get(k).get());
970         if (printCommaForAttr) s.append(", ");
971         printCommaForAttr = true;
972         s.append(key);
973         s.append(" => ");
974         s.append('\'').append(value).append('\'');
975       }
976 
977       if (!userKeys.isEmpty()) {
978         // print all non-reserved, advanced config keys as a separate subset
979         if (printCommaForAttr) s.append(", ");
980         printCommaForAttr = true;
981         s.append(HConstants.METADATA).append(" => ");
982         s.append("{");
983         boolean printCommaForCfg = false;
984         for (Bytes k : userKeys) {
985           String key = Bytes.toString(k.get());
986           String value = Bytes.toStringBinary(values.get(k).get());
987           if (printCommaForCfg) s.append(", ");
988           printCommaForCfg = true;
989           s.append('\'').append(key).append('\'');
990           s.append(" => ");
991           s.append('\'').append(value).append('\'');
992         }
993         s.append("}");
994       }
995     }
996 
997     // step 3: printing all configuration:
998     if (!configuration.isEmpty()) {
999       if (hasAttributes) {
1000         s.append(", ");
1001       }
1002       s.append(HConstants.CONFIGURATION).append(" => ");
1003       s.append('{');
1004       boolean printCommaForConfig = false;
1005       for (Map.Entry<String, String> e : configuration.entrySet()) {
1006         if (printCommaForConfig) s.append(", ");
1007         printCommaForConfig = true;
1008         s.append('\'').append(e.getKey()).append('\'');
1009         s.append(" => ");
1010         s.append('\'').append(e.getValue()).append('\'');
1011       }
1012       s.append("}");
1013     }
1014     s.append("}"); // end METHOD
1015     return s;
1016   }
1017 
1018   /**
1019    * Compare the contents of the descriptor with another one passed as a parameter.
1020    * Checks if the object passed is an instance of HTableDescriptor; if it is, the
1021    * contents of the descriptors are compared.
1022    *
1023    * @return true if the contents of the two descriptors exactly match
1024    *
1025    * @see java.lang.Object#equals(java.lang.Object)
1026    */
1027   @Override
1028   public boolean equals(Object obj) {
1029     if (this == obj) {
1030       return true;
1031     }
1032     if (obj == null) {
1033       return false;
1034     }
1035     if (!(obj instanceof HTableDescriptor)) {
1036       return false;
1037     }
1038     return compareTo((HTableDescriptor)obj) == 0;
1039   }
1040 
1041   /**
1042    * @see java.lang.Object#hashCode()
1043    */
1044   @Override
1045   public int hashCode() {
1046     int result = this.name.hashCode();
1047     if (this.families.size() > 0) {
1048       for (HColumnDescriptor e: this.families.values()) {
1049         result ^= e.hashCode();
1050       }
1051     }
1052     result ^= values.hashCode();
1053     result ^= configuration.hashCode();
1054     return result;
1055   }
1056 
1057   // Comparable
1058 
1059   /**
1060    * Compares the descriptor with another descriptor which is passed as a parameter.
1061    * This compares the content of the two descriptors and not the reference.
1062    *
1063    * @return 0 if the contents of the descriptors exactly match,
1064    *         a non-zero value if there is a mismatch in the contents
1065    */
1066   @Override
1067   public int compareTo(final HTableDescriptor other) {
1068     int result = this.name.compareTo(other.name);
1069     if (result == 0) {
1070       result = families.size() - other.families.size();
1071     }
1072     if (result == 0 && families.size() != other.families.size()) {
1073       result = Integer.valueOf(families.size()).compareTo(
1074           Integer.valueOf(other.families.size()));
1075     }
1076     if (result == 0) {
1077       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1078           it2 = other.families.values().iterator(); it.hasNext(); ) {
1079         result = it.next().compareTo(it2.next());
1080         if (result != 0) {
1081           break;
1082         }
1083       }
1084     }
1085     if (result == 0) {
1086       // punt on comparison for ordering, just calculate difference
1087       result = this.values.hashCode() - other.values.hashCode();
1088       if (result < 0)
1089         result = -1;
1090       else if (result > 0)
1091         result = 1;
1092     }
1093     if (result == 0) {
1094       result = this.configuration.hashCode() - other.configuration.hashCode();
1095       if (result < 0)
1096         result = -1;
1097       else if (result > 0)
1098         result = 1;
1099     }
1100     return result;
1101   }
1102 
1103   /**
1104    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1105    * of all the column families of the table.
1106    *
1107    * @return Immutable collection of {@link HColumnDescriptor} of all the
1108    * column families.
1109    */
1110   public Collection<HColumnDescriptor> getFamilies() {
1111     return Collections.unmodifiableCollection(this.families.values());
1112   }
1113 
1114   /**
1115    * Returns the configured replicas per region
1116    */
1117   public int getRegionReplication() {
1118     byte[] val = getValue(REGION_REPLICATION_KEY);
1119     if (val == null || val.length == 0) {
1120       return DEFAULT_REGION_REPLICATION;
1121     }
1122     return Integer.parseInt(Bytes.toString(val));
1123   }
1124 
1125   /**
1126    * Sets the number of replicas per region.
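   * <p>For example, one primary plus two read replicas per region:
   * <pre>{@code
   * htd.setRegionReplication(3);
   * }</pre>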
1127    * @param regionReplication the replication factor per region
1128    */
1129   public HTableDescriptor setRegionReplication(int regionReplication) {
1130     setValue(REGION_REPLICATION_KEY,
1131         new Bytes(Bytes.toBytes(Integer.toString(regionReplication))));
1132     return this;
1133   }
1134 
1135   /**
1136    * @return true if the read-replicas memstore replication is enabled.
1137    */
1138   public boolean hasRegionMemstoreReplication() {
1139     return isSomething(REGION_MEMSTORE_REPLICATION_KEY, DEFAULT_REGION_MEMSTORE_REPLICATION);
1140   }
1141 
1142   /**
1143    * Enable or disable the memstore replication from the primary region to the replicas.
1144    * The replication will be used only for meta operations (e.g. flush, compaction, ...)
1145    *
1146    * @param memstoreReplication true if the new data written to the primary region
1147    *                                 should be replicated.
1148    *                            false if the secondaries can tolerate having new
1149    *                                  data only when the primary flushes the memstore.
1150    */
1151   public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) {
1152     setValue(REGION_MEMSTORE_REPLICATION_KEY, memstoreReplication ? TRUE : FALSE);
1153     // If the memstore replication is setup, we do not have to wait for observing a flush event
1154     // from primary before starting to serve reads, because gaps from replication is not applicable
1155     setConfiguration(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
1156       Boolean.toString(memstoreReplication));
1157     return this;
1158   }
1159 
1160   /**
1161    * Returns all the column family names of the current table. The map of
1162    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1163    * This returns all the keys of the family map which represents the column
1164    * family names of the table.
1165    *
1166    * @return Immutable sorted set of the keys of the families.
1167    */
1168   public Set<byte[]> getFamiliesKeys() {
1169     return Collections.unmodifiableSet(this.families.keySet());
1170   }
1171 
1172   /**
1173    * Returns an array of all the {@link HColumnDescriptor}s of the column families
1174    * of the table.
1175    *
1176    * @return Array of all the HColumnDescriptors of the current table
1177    *
1178    * @see #getFamilies()
1179    */
1180   public HColumnDescriptor[] getColumnFamilies() {
1181     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1182     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1183   }
1184 
1185 
1186   /**
1187    * Returns the HColumnDescriptor for a specific column family with name as
1188    * specified by the parameter column.
1189    *
1190    * @param column Column family name
1191    * @return Column descriptor for the passed family name, or null if no such
1192    * family exists.
1193    */
1194   public HColumnDescriptor getFamily(final byte [] column) {
1195     return this.families.get(column);
1196   }
1197 
1198 
1199   /**
1200    * Removes the HColumnDescriptor with name specified by the parameter column
1201    * from the table descriptor
1202    *
1203    * @param column Name of the column family to be removed.
1204    * @return Column descriptor of the removed family, or null if the family was
1205    * not present.
1206    */
1207   public HColumnDescriptor removeFamily(final byte [] column) {
1208     return this.families.remove(column);
1209   }
1210 
1211   /**
1212    * Add a table coprocessor to this table. The coprocessor
1213    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1214    * or Endpoint.
1215    * It won't check if the class can be loaded or not.
1216    * Whether a coprocessor is loadable or not will be determined when
1217    * a region is opened.
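   * <p>For example (the coprocessor class name below is illustrative):
   * <pre>{@code
   * htd.addCoprocessor("com.example.MyRegionObserver");
   * }</pre>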
1218    * @param className Full class name.
1219    * @throws IOException
1220    */
1221   public HTableDescriptor addCoprocessor(String className) throws IOException {
1222     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1223     return this;
1224   }
1225 
1226   /**
1227    * Add a table coprocessor to this table. The coprocessor
1228    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1229    * or Endpoint.
1230    * It won't check if the class can be loaded or not.
1231    * Whether a coprocessor is loadable or not will be determined when
1232    * a region is opened.
1233    * @param jarFilePath Path of the jar file. If it's null, the class will be
1234    * loaded from default classloader.
1235    * @param className Full class name.
1236    * @param priority Priority
1237    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1238    * @throws IOException
1239    */
1240   public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
1241                              int priority, final Map<String, String> kvs)
1242   throws IOException {
1243     checkHasCoprocessor(className);
1244 
1245     // Validate parameter kvs and then add key/values to kvString.
1246     StringBuilder kvString = new StringBuilder();
1247     if (kvs != null) {
1248       for (Map.Entry<String, String> e: kvs.entrySet()) {
1249         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1250           throw new IOException("Illegal parameter key = " + e.getKey());
1251         }
1252         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1253           throw new IOException("Illegal parameter (" + e.getKey() +
1254               ") value = " + e.getValue());
1255         }
1256         if (kvString.length() != 0) {
1257           kvString.append(',');
1258         }
1259         kvString.append(e.getKey());
1260         kvString.append('=');
1261         kvString.append(e.getValue());
1262       }
1263     }
1264 
1265     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1266         "|" + className + "|" + Integer.toString(priority) + "|" +
1267         kvString.toString();
1268     return addCoprocessorToMap(value);
1269   }
1270 
1271   /**
1272    * Add a table coprocessor to this table. The coprocessor
1273    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1274    * or Endpoint.
1275    * It won't check if the class can be loaded or not.
1276    * Whether a coprocessor is loadable or not will be determined when
1277    * a region is opened.
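   * <p>A spec string takes the form below; the jar path may be empty, in which case the
   * class is loaded from the default classloader (the values shown are illustrative):
   * <pre>{@code
   * hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2
   * }</pre>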
1278    * @param specStr The coprocessor specification, all in one String, formatted to match
1279    * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1280    * @throws IOException
1281    */
1282   public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
1283     String className = getCoprocessorClassNameFromSpecStr(specStr);
1284     if (className == null) {
1285       throw new IllegalArgumentException("Format does not match " +
1286         HConstants.CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr);
1287     }
1288     checkHasCoprocessor(className);
1289     return addCoprocessorToMap(specStr);
1290   }
1291 
1292   private void checkHasCoprocessor(final String className) throws IOException {
1293     if (hasCoprocessor(className)) {
1294       throw new IOException("Coprocessor " + className + " already exists.");
1295     }
1296   }
1297 
1298   /**
1299    * Add coprocessor to values Map
1300    * @param specStr The coprocessor specification, all in one String, formatted to match
1301    * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1302    * @return Returns <code>this</code>
1303    */
1304   private HTableDescriptor addCoprocessorToMap(final String specStr) {
1305     if (specStr == null) return this;
1306     // generate a coprocessor key
1307     int maxCoprocessorNumber = 0;
1308     Matcher keyMatcher;
1309     for (Map.Entry<Bytes, Bytes> e: this.values.entrySet()) {
1310       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1311       if (!keyMatcher.matches()) {
1312         continue;
1313       }
1314       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
1315     }
1316     maxCoprocessorNumber++;
1317     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1318     this.values.put(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
1319     return this;
1320   }
1321 
1322   /**
1323    * Check if the table has an attached co-processor represented by the name className
1324    *
1325    * @param classNameToMatch - Class name of the co-processor
1326    * @return true if the table has a co-processor className
1327    */
1328   public boolean hasCoprocessor(String classNameToMatch) {
1329     Matcher keyMatcher;
1330     for (Map.Entry<Bytes, Bytes> e :
1331         this.values.entrySet()) {
1332       keyMatcher =
1333           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1334               Bytes.toString(e.getKey().get()));
1335       if (!keyMatcher.matches()) {
1336         continue;
1337       }
1338       String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
1339       if (className == null) continue;
1340       if (className.equals(classNameToMatch.trim())) {
1341         return true;
1342       }
1343     }
1344     return false;
1345   }
1346 
1347   /**
1348    * Return the list of attached co-processors represented by their class names
1349    *
1350    * @return The list of co-processor class names
1351    */
1352   public List<String> getCoprocessors() {
1353     List<String> result = new ArrayList<String>();
1354     Matcher keyMatcher;
1355     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1356       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1357       if (!keyMatcher.matches()) {
1358         continue;
1359       }
1360       String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
1361       if (className == null) continue;
1362       result.add(className); // classname is the 2nd field
1363     }
1364     return result;
1365   }
1366 
1367   /**
1368    * @param spec String formatted as per {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1369    * @return Class name parsed from the passed in <code>spec</code>, or null if there is no match
1370    */
1371   private static String getCoprocessorClassNameFromSpecStr(final String spec) {
1372     Matcher matcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
1373     // Classname is the 2nd field
1374     return matcher != null && matcher.matches()? matcher.group(2).trim(): null;
1375   }
1376 
1377   /**
1378    * Remove a coprocessor from those set on the table
1379    * @param className Class name of the co-processor
1380    */
1381   public void removeCoprocessor(String className) {
1382     Bytes match = null;
1383     Matcher keyMatcher;
1384     Matcher valueMatcher;
1385     for (Map.Entry<Bytes, Bytes> e : this.values
1386         .entrySet()) {
1387       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1388           .getKey().get()));
1389       if (!keyMatcher.matches()) {
1390         continue;
1391       }
1392       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1393           .toString(e.getValue().get()));
1394       if (!valueMatcher.matches()) {
1395         continue;
1396       }
1397       // get className and compare
1398       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1399       // remove the CP if it is present
1400       if (clazz.equals(className.trim())) {
1401         match = e.getKey();
1402         break;
1403       }
1404     }
1405     // if we found a match, remove it
1406     if (match != null)
1407       remove(match);
1408   }
1409 
1410   /**
1411    * Returns the {@link Path} object representing the table directory under
1412    * path rootdir
1413    *
1414    * Deprecated; use FSUtils.getTableDir() instead.
1415    *
1416    * @param rootdir qualified path of HBase root directory
1417    * @param tableName name of table
1418    * @return {@link Path} for table
1419    */
1420   @Deprecated
1421   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1422     //This is bad: I had to mirror code from FSUtils.getTableDir since
1423     //there is no module dependency between hbase-client and hbase-server
1424     TableName name = TableName.valueOf(tableName);
1425     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1426               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1427   }
1428 
1429   /** Table descriptor for the <code>hbase:meta</code> catalog table.
1430    * Deprecated, use TableDescriptors#get(TableName.META_TABLE) or
1431    * Admin#getTableDescriptor(TableName.META_TABLE) instead.
1432    */
1433   @Deprecated
1434   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
1435       TableName.META_TABLE_NAME,
1436       new HColumnDescriptor[] {
1437           new HColumnDescriptor(HConstants.CATALOG_FAMILY)
1438               // Ten is an arbitrary number.  Keep versions to help debugging.
1439               .setMaxVersions(10)
1440               .setInMemory(true)
1441               .setBlocksize(8 * 1024)
1442               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1443               // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
1444               .setBloomFilterType(BloomType.NONE)
1445               // Enable cache of data blocks in L1 if more than one caching tier deployed:
1446               // e.g. if using CombinedBlockCache (BucketCache).
1447               .setCacheDataInL1(true),
1448           new HColumnDescriptor(HConstants.TABLE_FAMILY)
1449               // Ten is an arbitrary number.  Keep versions to help debugging.
1450               .setMaxVersions(10)
1451               .setInMemory(true)
1452               .setBlocksize(8 * 1024)
1453               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1454                   // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
1455               .setBloomFilterType(BloomType.NONE)
1456                   // Enable cache of data blocks in L1 if more than one caching tier deployed:
1457                   // e.g. if using CombinedBlockCache (BucketCache).
1458               .setCacheDataInL1(true)
1459       });
1460 
1461   static {
1462     try {
1463       META_TABLEDESC.addCoprocessor(
1464           "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
1465           null, Coprocessor.PRIORITY_SYSTEM, null);
1466     } catch (IOException ex) {
1467       //LOG.warn("exception in loading coprocessor for the hbase:meta table");
1468       throw new RuntimeException(ex);
1469     }
1470   }
1471 
1472   public final static String NAMESPACE_FAMILY_INFO = "info";
1473   public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
1474   public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1475 
1476   /** Table descriptor for namespace table */
1477   public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
1478       TableName.NAMESPACE_TABLE_NAME,
1479       new HColumnDescriptor[] {
1480           new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
1481               // Ten is an arbitrary number.  Keep versions to help debugging.
1482               .setMaxVersions(10)
1483               .setInMemory(true)
1484               .setBlocksize(8 * 1024)
1485               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1486               // Enable cache of data blocks in L1 if more than one caching tier deployed:
1487               // e.g. if using CombinedBlockCache (BucketCache).
1488               .setCacheDataInL1(true)
1489       });
1490 
1491   @Deprecated
1492   public HTableDescriptor setOwner(User owner) {
1493     return setOwnerString(owner != null ? owner.getShortName() : null);
1494   }
1495 
1496   // used by admin.rb:alter(table_name,*args) to update owner.
1497   @Deprecated
1498   public HTableDescriptor setOwnerString(String ownerString) {
1499     if (ownerString != null) {
1500       setValue(OWNER_KEY, ownerString);
1501     } else {
1502       remove(OWNER_KEY);
1503     }
1504     return this;
1505   }
1506 
1507   @Deprecated
1508   public String getOwnerString() {
1509     if (getValue(OWNER_KEY) != null) {
1510       return Bytes.toString(getValue(OWNER_KEY));
1511     }
1512     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1513     // hbase:meta and -ROOT- should return system user as owner, not null (see
1514     // MasterFileSystem.java:bootstrap()).
1515     return null;
1516   }
1517 
1518   /**
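   * A round-trip sketch:
   * <pre>{@code
   * byte[] bytes = htd.toByteArray();
   * HTableDescriptor copy = HTableDescriptor.parseFrom(bytes);
   * assert htd.equals(copy);
   * }</pre>
   *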
1519    * @return This instance serialized with pb, with the pb magic prefix
1520    * @see #parseFrom(byte[])
1521    */
1522   public byte [] toByteArray() {
1523     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1524   }
1525 
1526   /**
1527    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1528    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1529    * @throws DeserializationException
1530    * @throws IOException
1531    * @see #toByteArray()
1532    */
1533   public static HTableDescriptor parseFrom(final byte [] bytes)
1534   throws DeserializationException, IOException {
1535     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1536       throw new DeserializationException("Expected PB encoded HTableDescriptor");
1537     }
1538     int pblen = ProtobufUtil.lengthOfPBMagic();
1539     TableSchema.Builder builder = TableSchema.newBuilder();
1540     TableSchema ts;
1541     try {
1542       ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
1543       ts = builder.build();
1544     } catch (IOException e) {
1545       throw new DeserializationException(e);
1546     }
1547     return convert(ts);
1548   }
1549 
1550   /**
1551    * @return The current {@link HTableDescriptor} converted into a pb TableSchema instance.
1552    */
1553   public TableSchema convert() {
1554     TableSchema.Builder builder = TableSchema.newBuilder();
1555     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1556     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1557       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1558       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1559       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1560       builder.addAttributes(aBuilder.build());
1561     }
1562     for (HColumnDescriptor hcd: getColumnFamilies()) {
1563       builder.addColumnFamilies(hcd.convert());
1564     }
1565     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1566       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1567       aBuilder.setName(e.getKey());
1568       aBuilder.setValue(e.getValue());
1569       builder.addConfiguration(aBuilder.build());
1570     }
1571     return builder.build();
1572   }
1573 
1574   /**
1575    * @param ts A pb TableSchema instance.
1576    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1577    */
1578   public static HTableDescriptor convert(final TableSchema ts) {
1579     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1580     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1581     int index = 0;
1582     for (ColumnFamilySchema cfs: list) {
1583       hcds[index++] = HColumnDescriptor.convert(cfs);
1584     }
1585     HTableDescriptor htd = new HTableDescriptor(
1586         ProtobufUtil.toTableName(ts.getTableName()),
1587         hcds);
1588     for (BytesBytesPair a: ts.getAttributesList()) {
1589       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1590     }
1591     for (NameStringPair a: ts.getConfigurationList()) {
1592       htd.setConfiguration(a.getName(), a.getValue());
1593     }
1594     return htd;
1595   }
1596 
1597   /**
1598    * Getter for accessing the configuration value by key
1599    */
1600   public String getConfigurationValue(String key) {
1601     return configuration.get(key);
1602   }
1603 
1604   /**
1605    * Getter for fetching an unmodifiable {@link #configuration} map.
1606    */
1607   public Map<String, String> getConfiguration() {
1608     // shallow pointer copy
1609     return Collections.unmodifiableMap(configuration);
1610   }
1611 
1612   /**
1613    * Setter for storing a configuration setting in {@link #configuration} map.
1614    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1615    * @param value String value. If null, removes the setting.
1616    */
1617   public HTableDescriptor setConfiguration(String key, String value) {
1618     if (value == null) {
1619       removeConfiguration(key);
1620     } else {
1621       configuration.put(key, value);
1622     }
1623     return this;
1624   }
1625 
1626   /**
1627    * Remove a config setting represented by the key from the {@link #configuration} map
1628    */
1629   public void removeConfiguration(final String key) {
1630     configuration.remove(key);
1631   }
1632 }