1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.IOException;
22  import java.util.ArrayList;
23  import java.util.Collection;
24  import java.util.Collections;
25  import java.util.HashMap;
26  import java.util.HashSet;
27  import java.util.Iterator;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.Set;
31  import java.util.TreeMap;
32  import java.util.TreeSet;
33  import java.util.regex.Matcher;
34  
35  import org.apache.commons.logging.Log;
36  import org.apache.commons.logging.LogFactory;
37  import org.apache.hadoop.fs.Path;
38  import org.apache.hadoop.hbase.classification.InterfaceAudience;
39  import org.apache.hadoop.hbase.classification.InterfaceStability;
40  import org.apache.hadoop.hbase.client.Durability;
41  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
42  import org.apache.hadoop.hbase.exceptions.DeserializationException;
43  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
44  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
45  import org.apache.hadoop.hbase.security.User;
46  import org.apache.hadoop.hbase.util.Bytes;
47
48  /**
49   * HTableDescriptor contains the details about an HBase table such as the descriptors of
50   * all the column families, whether the table is a catalog table (<code> -ROOT- </code> or
51   * <code> hbase:meta </code>), whether the table is read only, the maximum size of the
52   * memstore, when the region split should occur, the coprocessors associated with it, etc.
53   */
54  @InterfaceAudience.Public
55  @InterfaceStability.Evolving
56  public class HTableDescriptor implements Comparable<HTableDescriptor> {
57
58    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
59
60    private TableName name = null;
61
62    /**
63     * A map which holds the metadata information of the table. This metadata
64     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
65     * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
66     */
67    private final Map<Bytes, Bytes> values =
68        new HashMap<Bytes, Bytes>();
69
70    /**
71     * A map which holds the configuration specific to the table.
72     * The keys of the map have the same names as config keys and override the defaults with
73     * table-specific settings. Example usage may be for compactions, etc.
74     */
75    private final Map<String, String> configuration = new HashMap<String, String>();
76
77    public static final String SPLIT_POLICY = "SPLIT_POLICY";
78
79    /**
80     * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
81     * attribute which denotes the maximum size of the store file after which
82     * a region split occurs
83     *
84     * @see #getMaxFileSize()
85     */
86    public static final String MAX_FILESIZE = "MAX_FILESIZE";
87    private static final Bytes MAX_FILESIZE_KEY =
88        new Bytes(Bytes.toBytes(MAX_FILESIZE));
89
90    public static final String OWNER = "OWNER";
91    public static final Bytes OWNER_KEY =
92        new Bytes(Bytes.toBytes(OWNER));
93
94    /**
95     * <em>INTERNAL</em> Used by rest interface to access this metadata
96     * attribute which denotes if the table is Read Only
97     *
98     * @see #isReadOnly()
99     */
100   public static final String READONLY = "READONLY";
101   private static final Bytes READONLY_KEY =
102       new Bytes(Bytes.toBytes(READONLY));
103
104   /**
105    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
106    * attribute which denotes whether compaction is enabled on the table
107    *
108    * @see #isCompactionEnabled()
109    */
110   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
111   private static final Bytes COMPACTION_ENABLED_KEY =
112       new Bytes(Bytes.toBytes(COMPACTION_ENABLED));
113
114   /**
115    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
116    * attribute which represents the maximum size of the memstore after which
117    * its contents are flushed onto the disk
118    *
119    * @see #getMemStoreFlushSize()
120    */
121   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
122   private static final Bytes MEMSTORE_FLUSHSIZE_KEY =
123       new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
124
125   public static final String FLUSH_POLICY = "FLUSH_POLICY";
126
127   /**
128    * <em>INTERNAL</em> Used by rest interface to access this metadata
129    * attribute which denotes if the table is a -ROOT- region or not
130    *
131    * @see #isRootRegion()
132    */
133   public static final String IS_ROOT = "IS_ROOT";
134   private static final Bytes IS_ROOT_KEY =
135       new Bytes(Bytes.toBytes(IS_ROOT));
136
137   /**
138    * <em>INTERNAL</em> Used by rest interface to access this metadata
139    * attribute which denotes if it is a catalog table, either
140    * <code> hbase:meta </code> or <code> -ROOT- </code>
141    *
142    * @see #isMetaRegion()
143    */
144   public static final String IS_META = "IS_META";
145   private static final Bytes IS_META_KEY =
146       new Bytes(Bytes.toBytes(IS_META));
147
148   /**
149    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
150    * attribute which denotes if the deferred log flush option is enabled.
151    * @deprecated Use {@link #DURABILITY} instead.
152    */
153   @Deprecated
154   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
155   @Deprecated
156   private static final Bytes DEFERRED_LOG_FLUSH_KEY =
157       new Bytes(Bytes.toBytes(DEFERRED_LOG_FLUSH));
158
159   /**
160    * <em>INTERNAL</em> {@link Durability} setting for the table.
161    */
162   public static final String DURABILITY = "DURABILITY";
163   private static final Bytes DURABILITY_KEY =
164       new Bytes(Bytes.toBytes("DURABILITY"));
165
166   /**
167    * <em>INTERNAL</em> number of region replicas for the table.
168    */
169   public static final String REGION_REPLICATION = "REGION_REPLICATION";
170   private static final Bytes REGION_REPLICATION_KEY =
171       new Bytes(Bytes.toBytes(REGION_REPLICATION));
172
173   /**
174    * <em>INTERNAL</em> flag to indicate whether or not the memstore should be replicated
175    * for read-replicas (CONSISTENCY =&gt; TIMELINE).
176    */
177   public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
178   private static final Bytes REGION_MEMSTORE_REPLICATION_KEY =
179       new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));
180
181   /**
182    * <em>INTERNAL</em> Used by shell/rest interface to access this metadata
183    * attribute which denotes whether the table should be handled by the region normalizer.
184    *
185    * @see #isNormalizationEnabled()
186    */
187   public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
188   private static final Bytes NORMALIZATION_ENABLED_KEY =
189     new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));
190
191   /** Default durability for HTD is USE_DEFAULT, which defers to the HBase-global default value */
192   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
193
194   public static final String PRIORITY = "PRIORITY";
195   private static final Bytes PRIORITY_KEY =
196     new Bytes(Bytes.toBytes(PRIORITY));
197
198   /** Relative priority of the table used for rpc scheduling */
199   private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS;
200
201   /*
202    *  The below are ugly but better than creating them each time till we
203    *  replace booleans being saved as Strings with plain booleans.  Need a
204    *  migration script to do this.  TODO.
205    */
206   private static final Bytes FALSE =
207       new Bytes(Bytes.toBytes(Boolean.FALSE.toString()));
208
209   private static final Bytes TRUE =
210       new Bytes(Bytes.toBytes(Boolean.TRUE.toString()));
211
212   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
213
214   /**
215    * Constant that denotes the default readonly setting of a table, which is false
216    */
217   public static final boolean DEFAULT_READONLY = false;
218
219   /**
220    * Constant that denotes whether the table is compaction enabled by default
221    */
222   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
223
224   /**
225    * Constant that denotes whether the table is normalized by default.
226    */
227   public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;
228
229   /**
230    * Constant that denotes the default maximum size of the memstore, after which
231    * its contents are flushed to the store files
232    */
233   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
234
235   public static final int DEFAULT_REGION_REPLICATION = 1;
236
237   public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;
238
239   private final static Map<String, String> DEFAULT_VALUES
240     = new HashMap<String, String>();
241   private final static Set<Bytes> RESERVED_KEYWORDS
242       = new HashSet<Bytes>();
243
244   static {
245     DEFAULT_VALUES.put(MAX_FILESIZE,
246         String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
247     DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
248     DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
249         String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
250     DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
251         String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
252     DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
253     DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
254     DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
255     DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
256     for (String s : DEFAULT_VALUES.keySet()) {
257       RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
258     }
259     RESERVED_KEYWORDS.add(IS_ROOT_KEY);
260     RESERVED_KEYWORDS.add(IS_META_KEY);
261   }
262
263   /**
264    * Cache of whether this is a meta table or not.
265    */
266   private volatile Boolean meta = null;
267   /**
268    * Cache of whether this is root table or not.
269    */
270   private volatile Boolean root = null;
271
272   /**
273    * Durability setting for the table
274    */
275   private Durability durability = null;
276
277   /**
278    * Maps column family name to the respective HColumnDescriptors
279    */
280   private final Map<byte [], HColumnDescriptor> families =
281     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
282
283   /**
284    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
285    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
286    */
287   @InterfaceAudience.Private
288   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
289     setName(name);
290     for(HColumnDescriptor descriptor : families) {
291       this.families.put(descriptor.getName(), descriptor);
292     }
293   }
294
295   /**
296    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
297    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
298    */
299   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
300       Map<Bytes, Bytes> values) {
301     setName(name);
302     for(HColumnDescriptor descriptor : families) {
303       this.families.put(descriptor.getName(), descriptor);
304     }
305     for (Map.Entry<Bytes, Bytes> entry :
306         values.entrySet()) {
307       setValue(entry.getKey(), entry.getValue());
308     }
309   }
310
311   /**
312    * Default constructor which constructs an empty object.
313    * For deserializing an HTableDescriptor instance only.
314    * @deprecated As of release 0.96 (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
315    *             This was made protected in 2.0.0 and will be removed in HBase 3.0.0.
316    *             Used by Writables and Writables are going away.
317    */
318   @Deprecated
319   protected HTableDescriptor() {
320     super();
321   }
322
323   /**
324    * Construct a table descriptor specifying a TableName object
325    * @param name Table name.
326    * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 HBASE: (HBASE-174) Un-openable tablename bug</a>
327    */
328   public HTableDescriptor(final TableName name) {
329     super();
330     setName(name);
331   }
332
333   /**
334    * Construct a table descriptor specifying a byte array table name
335    * @param name Table name.
336    * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 (HBASE-174) HBASE: Un-openable tablename bug</a>
337    */
338   @Deprecated
339   public HTableDescriptor(final byte[] name) {
340     this(TableName.valueOf(name));
341   }
342
343   /**
344    * Construct a table descriptor specifying a String table name
345    * @param name Table name.
346    * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 (HBASE-174) HBASE: Un-openable tablename bug</a>
347    */
348   @Deprecated
349   public HTableDescriptor(final String name) {
350     this(TableName.valueOf(name));
351   }
352
353   /**
354    * Construct a table descriptor by cloning the descriptor passed as a parameter.
355    * <p>
356    * Makes a deep copy of the supplied descriptor.
357    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
358    * @param desc The descriptor.
359    */
360   public HTableDescriptor(final HTableDescriptor desc) {
361     this(desc.name, desc);
362   }
363
364   /**
365    * Construct a table descriptor by cloning the descriptor passed as a parameter
366    * but using a different table name.
367    * <p>
368    * Makes a deep copy of the supplied descriptor.
369    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
370    * @param name Table name.
371    * @param desc The descriptor.
372    */
373   public HTableDescriptor(final TableName name, final HTableDescriptor desc) {
374     super();
375     setName(name);
376     setMetaFlags(this.name);
377     for (HColumnDescriptor c: desc.families.values()) {
378       this.families.put(c.getName(), new HColumnDescriptor(c));
379     }
380     for (Map.Entry<Bytes, Bytes> e :
381         desc.values.entrySet()) {
382       setValue(e.getKey(), e.getValue());
383     }
384     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
385       this.configuration.put(e.getKey(), e.getValue());
386     }
387   }
388
389   /*
390    * Set meta flags on this table.
391    * IS_ROOT_KEY is set if it's a -ROOT- table
392    * IS_META_KEY is set if it's either a -ROOT- or a hbase:meta table
393    * Called by constructors.
394    * @param name
395    */
396   private void setMetaFlags(final TableName name) {
397     setMetaRegion(isRootRegion() ||
398         name.equals(TableName.META_TABLE_NAME));
399   }
400
401   /**
402    * Check if the descriptor represents a <code> -ROOT- </code> region.
403    *
404    * @return true if this is a <code> -ROOT- </code> region
405    */
406   public boolean isRootRegion() {
407     if (this.root == null) {
408       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
409     }
410     return this.root.booleanValue();
411   }
412
413   /**
414    * <em> INTERNAL </em> Used to denote if the current table represents
415    * <code> -ROOT- </code> region. This is used internally by the
416    * HTableDescriptor constructors
417    *
418    * @param isRoot true if this is the <code> -ROOT- </code> region
419    */
420   protected void setRootRegion(boolean isRoot) {
421     // TODO: Make the value a boolean rather than String of boolean.
422     setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
423   }
424
425   /**
426    * Checks if this table is <code> hbase:meta </code>
427    * region.
428    *
429    * @return true if this table is <code> hbase:meta </code>
430    * region
431    */
432   public boolean isMetaRegion() {
433     if (this.meta == null) {
434       this.meta = calculateIsMetaRegion();
435     }
436     return this.meta.booleanValue();
437   }
438
439   private synchronized Boolean calculateIsMetaRegion() {
440     byte [] value = getValue(IS_META_KEY);
441     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
442   }
443
444   private boolean isSomething(final Bytes key,
445       final boolean valueIfNull) {
446     byte [] value = getValue(key);
447     if (value != null) {
448       return Boolean.valueOf(Bytes.toString(value));
449     }
450     return valueIfNull;
451   }
452
453   /**
454    * <em> INTERNAL </em> Used to denote if the current table represents
455    * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
456    * internally by the HTableDescriptor constructors
457    *
458    * @param isMeta true if it's either <code> -ROOT- </code> or
459    * <code> hbase:meta </code> region
460    */
461   protected void setMetaRegion(boolean isMeta) {
462     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
463   }
464
465   /**
466    * Checks if the table is a <code>hbase:meta</code> table
467    *
468    * @return true if table is <code> hbase:meta </code> region.
469    */
470   public boolean isMetaTable() {
471     return isMetaRegion() && !isRootRegion();
472   }
473
474   /**
475    * Getter for accessing the metadata associated with the key
476    *
477    * @param key The key.
478    * @return The value.
479    * @see #values
480    */
481   public byte[] getValue(byte[] key) {
482     return getValue(new Bytes(key));
483   }
484
485   private byte[] getValue(final Bytes key) {
486     Bytes ibw = values.get(key);
487     if (ibw == null)
488       return null;
489     return ibw.get();
490   }
491
492   /**
493    * Getter for accessing the metadata associated with the key
494    *
495    * @param key The key.
496    * @return The value.
497    * @see #values
498    */
499   public String getValue(String key) {
500     byte[] value = getValue(Bytes.toBytes(key));
501     if (value == null)
502       return null;
503     return Bytes.toString(value);
504   }
505
506   /**
507    * Getter for fetching an unmodifiable {@link #values} map.
508    *
509    * @return unmodifiable map {@link #values}.
510    * @see #values
511    */
512   public Map<Bytes, Bytes> getValues() {
513     // shallow pointer copy
514     return Collections.unmodifiableMap(values);
515   }
516
517   /**
518    * Setter for storing metadata as a (key, value) pair in {@link #values} map
519    *
520    * @param key The key.
521    * @param value The value.
522    * @see #values
523    */
524   public HTableDescriptor setValue(byte[] key, byte[] value) {
525     setValue(new Bytes(key), new Bytes(value));
526     return this;
527   }
528
529   /*
530    * @param key The key.
531    * @param value The value.
532    */
533   private HTableDescriptor setValue(final Bytes key,
534       final String value) {
535     setValue(key, new Bytes(Bytes.toBytes(value)));
536     return this;
537   }
538
539   /*
540    * Setter for storing metadata as a (key, value) pair in {@link #values} map
541    *
542    * @param key The key.
543    * @param value The value.
544    */
545   public HTableDescriptor setValue(final Bytes key, final Bytes value) {
546     if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
547       boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
548       LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
549           "use " + DURABILITY + " instead");
550       setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
551       return this;
552     }
553     values.put(key, value);
554     return this;
555   }
556
557   /**
558    * Setter for storing metadata as a (key, value) pair in {@link #values} map
559    *
560    * @param key The key.
561    * @param value The value.
562    * @see #values
563    */
564   public HTableDescriptor setValue(String key, String value) {
565     if (value == null) {
566       remove(key);
567     } else {
568       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
569     }
570     return this;
571   }
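
  // Editor-added usage sketch (illustrative, not part of the original class):
  // a metadata round trip through the String overloads. The key
  // "MY_ATTRIBUTE" and table name "t" are hypothetical.
  //
  //   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
  //   htd.setValue("MY_ATTRIBUTE", "42");
  //   String v = htd.getValue("MY_ATTRIBUTE"); // "42"
  //   htd.setValue("MY_ATTRIBUTE", null);      // a null value removes the key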
572
573   /**
574    * Remove metadata represented by the key from the {@link #values} map
575    *
576    * @param key Key whose key and value we're to remove from HTableDescriptor
577    * parameters.
578    */
579   public void remove(final String key) {
580     remove(new Bytes(Bytes.toBytes(key)));
581   }
582
583   /**
584    * Remove metadata represented by the key from the {@link #values} map
585    *
586    * @param key Key whose key and value we're to remove from HTableDescriptor
587    * parameters.
588    */
589   public void remove(Bytes key) {
590     values.remove(key);
591   }
592
593   /**
594    * Remove metadata represented by the key from the {@link #values} map
595    *
596    * @param key Key whose key and value we're to remove from HTableDescriptor
597    * parameters.
598    */
599   public void remove(final byte [] key) {
600     remove(new Bytes(key));
601   }
602
603   /**
604    * Check if the readOnly flag of the table is set. If the readOnly flag is
605    * set then the contents of the table can only be read, not modified.
606    *
607    * @return true if all columns in the table should be read only
608    */
609   public boolean isReadOnly() {
610     return isSomething(READONLY_KEY, DEFAULT_READONLY);
611   }
612
613   /**
614    * Setting the table as read only sets all the columns in the table as read
615    * only. By default all tables are modifiable, but if the readOnly flag is
616    * set to true then the contents of the table can only be read but not modified.
617    *
618    * @param readOnly True if all of the columns in the table should be read
619    * only.
620    */
621   public HTableDescriptor setReadOnly(final boolean readOnly) {
622     return setValue(READONLY_KEY, readOnly? TRUE: FALSE);
623   }
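
  // Editor-added usage sketch (illustrative): READONLY is stored as a
  // String-encoded boolean in the values map and read back via isSomething().
  //
  //   htd.setReadOnly(true);
  //   boolean ro = htd.isReadOnly(); // false by default when the key is absent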
624
625   /**
626    * Check if the compaction enable flag of the table is true. If the flag is
627    * false then no minor/major compactions will be performed.
628    *
629    * @return true if table compaction enabled
630    */
631   public boolean isCompactionEnabled() {
632     return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
633   }
634
635   /**
636    * Set the table compaction enable flag.
637    *
638    * @param isEnable True to enable compaction.
639    */
640   public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
641     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
642     return this;
643   }
644
645   /**
646    * Check if the normalization enable flag of the table is true. If the flag
647    * is false then the region normalizer won't attempt to normalize this table.
648    *
649    * @return true if region normalization is enabled for this table
650    */
651   public boolean isNormalizationEnabled() {
652     return isSomething(NORMALIZATION_ENABLED_KEY, DEFAULT_NORMALIZATION_ENABLED);
653   }
654
655   /**
656    * Set the table normalization enable flag.
657    *
658    * @param isEnable True to enable normalization.
659    */
660   public HTableDescriptor setNormalizationEnabled(final boolean isEnable) {
661     setValue(NORMALIZATION_ENABLED_KEY, isEnable ? TRUE : FALSE);
662     return this;
663   }
664
665   /**
666    * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
667    * @param durability enum value
668    */
669   public HTableDescriptor setDurability(Durability durability) {
670     this.durability = durability;
671     setValue(DURABILITY_KEY, durability.name());
672     return this;
673   }
674
675   /**
676    * Returns the durability setting for the table.
677    * @return durability setting for the table.
678    */
679   public Durability getDurability() {
680     if (this.durability == null) {
681       byte[] durabilityValue = getValue(DURABILITY_KEY);
682       if (durabilityValue == null) {
683         this.durability = DEFAULT_DURABLITY;
684       } else {
685         try {
686           this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
687         } catch (IllegalArgumentException ex) {
688           LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
689             + " is not known. Durability:" + Bytes.toString(durabilityValue));
690           this.durability = DEFAULT_DURABLITY;
691         }
692       }
693     }
694     return this.durability;
695   }
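
  // Editor-added usage sketch (illustrative): selecting a weaker durability
  // for a write-heavy table. ASYNC_WAL is the replacement for the deprecated
  // DEFERRED_LOG_FLUSH flag handled in setValue(Bytes, Bytes) above.
  //
  //   htd.setDurability(Durability.ASYNC_WAL);
  //   Durability d = htd.getDurability(); // ASYNC_WAL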
696
697   /**
698    * Get the name of the table
699    *
700    * @return TableName
701    */
702   public TableName getTableName() {
703     return name;
704   }
705
706   /**
707    * Get the name of the table as a byte array.
708    *
709    * @return name of table
710    * @deprecated Use {@link #getTableName()} instead
711    */
712   @Deprecated
713   public byte[] getName() {
714     return name.getName();
715   }
716
717   /**
718    * Get the name of the table as a String
719    *
720    * @return name of table as a String
721    */
722   public String getNameAsString() {
723     return name.getNameAsString();
724   }
725
726   /**
727    * This sets the class associated with the region split policy which
728    * determines when a region split should occur.  The class used by
729    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
730    * @param clazz the class name
731    */
732   public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
733     setValue(SPLIT_POLICY, clazz);
734     return this;
735   }
736
737   /**
738    * This gets the class associated with the region split policy which
739    * determines when a region split should occur.  The class used by
740    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
741    *
742    * @return the class name of the region split policy for this table.
743    * If this returns null, the default split policy is used.
744    */
745   public String getRegionSplitPolicyClassName() {
746     return getValue(SPLIT_POLICY);
747   }
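
  // Editor-added usage sketch (illustrative): naming a split policy class.
  // ConstantSizeRegionSplitPolicy ships with HBase; any subclass of
  // RegionSplitPolicy on the region server classpath can be named here.
  //
  //   htd.setRegionSplitPolicyClassName(
  //       "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");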
748
749   /**
750    * Set the name of the table.
751    *
752    * @param name name of table
753    */
754   @Deprecated
755   public HTableDescriptor setName(byte[] name) {
756     setName(TableName.valueOf(name));
757     return this;
758   }
759
760   @Deprecated
761   public HTableDescriptor setName(TableName name) {
762     this.name = name;
763     setMetaFlags(this.name);
764     return this;
765   }
766
767   /**
768    * Returns the maximum size up to which a region can grow, after which a region
769    * split is triggered. The region size is represented by the size of the biggest
770    * store file in that region.
771    *
772    * @return max hregion size for table, -1 if not set.
773    *
774    * @see #setMaxFileSize(long)
775    */
776   public long getMaxFileSize() {
777     byte [] value = getValue(MAX_FILESIZE_KEY);
778     if (value != null) {
779       return Long.parseLong(Bytes.toString(value));
780     }
781     return -1;
782   }
783
784   /**
785    * Sets the maximum size up to which a region can grow, after which a region
786    * split is triggered. The region size is represented by the size of the biggest
787    * store file in that region, i.e. if the biggest store file grows beyond the
788    * maxFileSize, then the region split is triggered. This defaults to a value of
789    * 256 MB.
790    * <p>
791    * This is not an absolute value and might vary. If a single row exceeds
792    * the maxFileSize then the store file size will be greater than maxFileSize since
793    * a single row cannot be split across multiple regions.
794    * </p>
795    *
796    * @param maxFileSize The maximum file size that a store file can grow to
797    * before a split is triggered.
798    */
799   public HTableDescriptor setMaxFileSize(long maxFileSize) {
800     setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
801     return this;
802   }
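
  // Editor-added usage sketch (illustrative): raising the split threshold
  // to roughly 10 GB.
  //
  //   htd.setMaxFileSize(10L * 1024 * 1024 * 1024);
  //   long max = htd.getMaxFileSize(); // -1 means "not set"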
803
804   /**
805    * Returns the size of the memstore after which a flush to filesystem is triggered.
806    *
807    * @return memory cache flush size for each hregion, -1 if not set.
808    *
809    * @see #setMemStoreFlushSize(long)
810    */
811   public long getMemStoreFlushSize() {
812     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
813     if (value != null) {
814       return Long.parseLong(Bytes.toString(value));
815     }
816     return -1;
817   }
818
819   /**
820    * Represents the maximum size of the memstore after which the contents of the
821    * memstore are flushed to the filesystem. This defaults to 128 MB, per
822    * {@link #DEFAULT_MEMSTORE_FLUSH_SIZE}.
822    *
823    * @param memstoreFlushSize memory cache flush size for each hregion
824    */
825   public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
826     setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
827     return this;
828   }
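
  // Editor-added usage sketch (illustrative): flushing memstores at 256 MB
  // instead of the 128 MB DEFAULT_MEMSTORE_FLUSH_SIZE.
  //
  //   htd.setMemStoreFlushSize(256L * 1024 * 1024);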
829
830   /**
831    * This sets the class associated with the flush policy which determines the stores
832    * that need to be flushed when flushing a region. The class used by default is defined in
833    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
834    * @param clazz the class name
835    */
836   public HTableDescriptor setFlushPolicyClassName(String clazz) {
837     setValue(FLUSH_POLICY, clazz);
838     return this;
839   }
840
841   /**
842    * This gets the class associated with the flush policy which determines the stores that need to be
843    * flushed when flushing a region. The class used by default is defined in
844    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
845    * @return the class name of the flush policy for this table. If this returns null, the default
846    *         flush policy is used.
847    */
848   public String getFlushPolicyClassName() {
849     return getValue(FLUSH_POLICY);
850   }
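
  // Editor-added usage sketch (illustrative): FlushAllStoresPolicy is assumed
  // here as an example subclass of FlushPolicy; check the regionserver
  // package of your HBase version for the available policies.
  //
  //   htd.setFlushPolicyClassName(
  //       "org.apache.hadoop.hbase.regionserver.FlushAllStoresPolicy");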
851
852   /**
853    * Adds a column family.
854    * To update an existing family, use {@link #modifyFamily(HColumnDescriptor)} instead.
855    * @param family HColumnDescriptor of family to add.
856    */
857   public HTableDescriptor addFamily(final HColumnDescriptor family) {
858     if (family.getName() == null || family.getName().length <= 0) {
859       throw new IllegalArgumentException("Family name cannot be null or empty");
860     }
861     if (hasFamily(family.getName())) {
862       throw new IllegalArgumentException("Family '" +
863         family.getNameAsString() + "' already exists so cannot be added");
864     }
865     this.families.put(family.getName(), family);
866     return this;
867   }
868
869   /**
870    * Modifies the existing column family.
871    * @param family HColumnDescriptor of family to update
872    * @return this (for chained invocation)
873    */
874   public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
875     if (family.getName() == null || family.getName().length <= 0) {
876       throw new IllegalArgumentException("Family name cannot be null or empty");
877     }
878     if (!hasFamily(family.getName())) {
879       throw new IllegalArgumentException("Column family '" + family.getNameAsString()
880         + "' does not exist");
881     }
882     this.families.put(family.getName(), family);
883     return this;
884   }
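
  // Editor-added usage sketch (illustrative): addFamily() rejects duplicates,
  // so an existing family must be updated through modifyFamily(). Family
  // name "cf" is hypothetical.
  //
  //   htd.addFamily(new HColumnDescriptor("cf"));
  //   htd.modifyFamily(new HColumnDescriptor("cf").setMaxVersions(3));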
885
886   /**
887    * Checks to see if this table contains the given column family
888    * @param familyName Family name or column name.
889    * @return true if the table contains the specified family name
890    */
891   public boolean hasFamily(final byte [] familyName) {
892     return families.containsKey(familyName);
893   }
894
895   /**
896    * @return Name of this table and then a map of all of the column family
897    * descriptors.
898    * @see #getNameAsString()
899    */
900   @Override
901   public String toString() {
902     StringBuilder s = new StringBuilder();
903     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
904     s.append(getValues(true));
905     for (HColumnDescriptor f : families.values()) {
906       s.append(", ").append(f);
907     }
908     return s.toString();
909   }
910
911   /**
912    * @return Name of this table and then a map of all of the column family
913    * descriptors (with only the non-default column family attributes)
914    */
915   public String toStringCustomizedValues() {
916     StringBuilder s = new StringBuilder();
917     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
918     s.append(getValues(false));
919     for(HColumnDescriptor hcd : families.values()) {
920       s.append(", ").append(hcd.toStringCustomizedValues());
921     }
922     return s.toString();
923   }
924
925   /**
926    * @return map of all table attributes formatted into string.
927    */
928   public String toStringTableAttributes() {
929    return getValues(true).toString();
930   }
931
932   private StringBuilder getValues(boolean printDefaults) {
933     StringBuilder s = new StringBuilder();
934
935     // step 1: partition keys into reserved and user sets, pruning defaults
936     Set<Bytes> reservedKeys = new TreeSet<Bytes>();
937     Set<Bytes> userKeys = new TreeSet<Bytes>();
938     for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
939       if (entry.getKey() == null || entry.getKey().get() == null) continue;
940       String key = Bytes.toString(entry.getKey().get());
941       // in this section, print out reserved keywords + coprocessor info
942       if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
943         userKeys.add(entry.getKey());
944         continue;
945       }
946       // only print out IS_ROOT/IS_META if true
947       String value = Bytes.toString(entry.getValue().get());
948       if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
949         if (Boolean.valueOf(value) == false) continue;
950       }
951       // see if a reserved key is a default value. may not want to print it out
952       if (printDefaults
953           || !DEFAULT_VALUES.containsKey(key)
954           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
955         reservedKeys.add(entry.getKey());
956       }
957     }
958
959     // early exit optimization
960     boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
961     if (!hasAttributes && configuration.isEmpty()) return s;
962
963     s.append(", {");
964     // step 2: printing attributes
965     if (hasAttributes) {
966       s.append("TABLE_ATTRIBUTES => {");
967
968       // print all reserved keys first
969       boolean printCommaForAttr = false;
970       for (Bytes k : reservedKeys) {
971         String key = Bytes.toString(k.get());
972         String value = Bytes.toStringBinary(values.get(k).get());
973         if (printCommaForAttr) s.append(", ");
974         printCommaForAttr = true;
975         s.append(key);
976         s.append(" => ");
977         s.append('\'').append(value).append('\'');
978       }
979
980       if (!userKeys.isEmpty()) {
981         // print all non-reserved, advanced config keys as a separate subset
982         if (printCommaForAttr) s.append(", ");
983         printCommaForAttr = true;
984         s.append(HConstants.METADATA).append(" => ");
985         s.append("{");
986         boolean printCommaForCfg = false;
987         for (Bytes k : userKeys) {
988           String key = Bytes.toString(k.get());
989           String value = Bytes.toStringBinary(values.get(k).get());
990           if (printCommaForCfg) s.append(", ");
991           printCommaForCfg = true;
992           s.append('\'').append(key).append('\'');
993           s.append(" => ");
994           s.append('\'').append(value).append('\'');
995         }
996         s.append("}");
997       }
998     }
999
1000     // step 3: printing all configuration:
1001     if (!configuration.isEmpty()) {
1002       if (hasAttributes) {
1003         s.append(", ");
1004       }
1005       s.append(HConstants.CONFIGURATION).append(" => ");
1006       s.append('{');
1007       boolean printCommaForConfig = false;
1008       for (Map.Entry<String, String> e : configuration.entrySet()) {
1009         if (printCommaForConfig) s.append(", ");
1010         printCommaForConfig = true;
1011         s.append('\'').append(e.getKey()).append('\'');
1012         s.append(" => ");
1013         s.append('\'').append(e.getValue()).append('\'');
1014       }
1015       s.append("}");
1016     }
1017     s.append("}"); // end METHOD
1018     return s;
1019   }
1020
1021   /**
1022    * Compare the contents of the descriptor with another one passed as a parameter.
1023    * Checks if the obj passed is an instance of HTableDescriptor; if so, the
1024    * contents of the descriptors are compared.
1025    *
1026    * @return true if the contents of the two descriptors exactly match
1027    *
1028    * @see java.lang.Object#equals(java.lang.Object)
1029    */
1030   @Override
1031   public boolean equals(Object obj) {
1032     if (this == obj) {
1033       return true;
1034     }
1035     if (obj == null) {
1036       return false;
1037     }
1038     if (!(obj instanceof HTableDescriptor)) {
1039       return false;
1040     }
1041     return compareTo((HTableDescriptor)obj) == 0;
1042   }
1043
1044   /**
1045    * @see java.lang.Object#hashCode()
1046    */
1047   @Override
1048   public int hashCode() {
1049     int result = this.name.hashCode();
1050     if (this.families.size() > 0) {
1051       for (HColumnDescriptor e: this.families.values()) {
1052         result ^= e.hashCode();
1053       }
1054     }
1055     result ^= values.hashCode();
1056     result ^= configuration.hashCode();
1057     return result;
1058   }
1059
1060   // Comparable
1061
1062   /**
1063    * Compares the descriptor with another descriptor which is passed as a parameter.
1064    * This compares the content of the two descriptors and not the reference.
1065    *
1066    * @return 0 if the contents of the descriptors exactly match,
1067    *         a non-zero value if there is a mismatch in the contents
1068    */
1069   @Override
1070   public int compareTo(final HTableDescriptor other) {
1071     int result = this.name.compareTo(other.name);
1072     if (result == 0) {
1073       result = families.size() - other.families.size();
1074     }
1079     if (result == 0) {
1080       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1081           it2 = other.families.values().iterator(); it.hasNext(); ) {
1082         result = it.next().compareTo(it2.next());
1083         if (result != 0) {
1084           break;
1085         }
1086       }
1087     }
1088     if (result == 0) {
1089       // punt on comparison for ordering, just calculate difference
1090       result = this.values.hashCode() - other.values.hashCode();
1091       if (result < 0)
1092         result = -1;
1093       else if (result > 0)
1094         result = 1;
1095     }
1096     if (result == 0) {
1097       result = this.configuration.hashCode() - other.configuration.hashCode();
1098       if (result < 0)
1099         result = -1;
1100       else if (result > 0)
1101         result = 1;
1102     }
1103     return result;
1104   }
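
  // Editor-added usage sketch (illustrative): equals() delegates to
  // compareTo(), so a deep copy compares equal to its source even though the
  // instances differ.
  //
  //   HTableDescriptor a = new HTableDescriptor(TableName.valueOf("t"));
  //   HTableDescriptor b = new HTableDescriptor(a); // deep copy
  //   assert a.equals(b) && a.compareTo(b) == 0;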
1105
1106   /**
1107    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1108    * of all the column families of the table.
1109    *
1110    * @return Immutable collection of {@link HColumnDescriptor} of all the
1111    * column families.
1112    */
1113   public Collection<HColumnDescriptor> getFamilies() {
1114     return Collections.unmodifiableCollection(this.families.values());
1115   }
1116
1117   /**
1118    * Return true if there is at least one column family whose replication scope is serial.
1119    */
1120   public boolean hasSerialReplicationScope() {
1121     for (HColumnDescriptor column: getFamilies()){
1122       if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL){
1123         return true;
1124       }
1125     }
1126     return false;
1127   }
1128
1129   /**
1130    * Returns the configured replicas per region
1131    */
1132   public int getRegionReplication() {
1133     return getIntValue(REGION_REPLICATION_KEY, DEFAULT_REGION_REPLICATION);
1134   }
1135
1136   private int getIntValue(Bytes key, int defaultVal) {
1137     byte[] val = getValue(key);
1138     if (val == null || val.length == 0) {
1139       return defaultVal;
1140     }
1141     return Integer.parseInt(Bytes.toString(val));
1142   }
1143
1144   /**
1145    * Sets the number of replicas per region.
1146    * @param regionReplication the replication factor per region
1147    */
1148   public HTableDescriptor setRegionReplication(int regionReplication) {
1149     setValue(REGION_REPLICATION_KEY,
1150         new Bytes(Bytes.toBytes(Integer.toString(regionReplication))));
1151     return this;
1152   }
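
  // Editor-added usage sketch (illustrative): three replicas per region for
  // timeline-consistent reads (CONSISTENCY => TIMELINE).
  //
  //   htd.setRegionReplication(3);
  //   int replicas = htd.getRegionReplication(); // 3 (default is 1)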
1153
1154   /**
1155    * @return true if the read-replicas memstore replication is enabled.
1156    */
1157   public boolean hasRegionMemstoreReplication() {
1158     return isSomething(REGION_MEMSTORE_REPLICATION_KEY, DEFAULT_REGION_MEMSTORE_REPLICATION);
1159   }
1160
1161   /**
1162    * Enable or disable the memstore replication from the primary region to the replicas.
1163    * The replication will be used only for meta operations (e.g. flush, compaction, ...)
1164    *
1165    * @param memstoreReplication true if the new data written to the primary region
1166    *                                 should be replicated.
1167    *                            false if the secondaries can tolerate having new
1168    *                                  data only when the primary flushes the memstore.
1169    */
1170   public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) {
1171     setValue(REGION_MEMSTORE_REPLICATION_KEY, memstoreReplication ? TRUE : FALSE);
1172     // If the memstore replication is setup, we do not have to wait for observing a flush event
1173     // from primary before starting to serve reads, because gaps from replication are not applicable
1174     setConfiguration(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
1175       Boolean.toString(memstoreReplication));
1176     return this;
1177   }
1178
1179   public HTableDescriptor setPriority(int priority) {
1180     setValue(PRIORITY_KEY, Integer.toString(priority));
1181     return this;
1182   }
1183
1184   public int getPriority() {
1185     return getIntValue(PRIORITY_KEY, DEFAULT_PRIORITY);
1186   }
1187
1188   /**
1189    * Returns all the column family names of the current table. The map of
1190    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1191    * This returns all the keys of the family map which represents the column
1192    * family names of the table.
1193    *
1194    * @return Immutable sorted set of the keys of the families.
1195    */
1196   public Set<byte[]> getFamiliesKeys() {
1197     return Collections.unmodifiableSet(this.families.keySet());
1198   }
1199
1200   /**
1201    * Returns an array all the {@link HColumnDescriptor} of the column families
1202    * of the table.
1203    *
1204    * @return Array of all the HColumnDescriptors of the current table
1205    *
1206    * @see #getFamilies()
1207    */
1208   public HColumnDescriptor[] getColumnFamilies() {
1209     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1210     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1211   }
1212
1213
1214   /**
1215    * Returns the HColumnDescriptor for a specific column family with name as
1216    * specified by the parameter column.
1217    *
1218    * @param column Column family name
1219    * @return Column descriptor for the passed family name, or null if
1220    * no such family exists.
1221    */
1222   public HColumnDescriptor getFamily(final byte [] column) {
1223     return this.families.get(column);
1224   }
1225
1226
1227   /**
1228    * Removes the HColumnDescriptor with name specified by the parameter column
1229    * from the table descriptor
1230    *
1231    * @param column Name of the column family to be removed.
1232    * @return Column descriptor of the removed family, or null if the
1233    * family was not present.
1234    */
1235   public HColumnDescriptor removeFamily(final byte [] column) {
1236     return this.families.remove(column);
1237   }
1238
1239   /**
1240    * Add a table coprocessor to this table. The coprocessor
1241    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1242    * or Endpoint.
1243    * It won't check if the class can be loaded or not.
1244    * Whether a coprocessor is loadable or not will be determined when
1245    * a region is opened.
1246    * @param className Full class name.
1247    * @throws IOException
1248    */
1249   public HTableDescriptor addCoprocessor(String className) throws IOException {
1250     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1251     return this;
1252   }
1253
1254   /**
1255    * Add a table coprocessor to this table. The coprocessor
1256    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1257    * or Endpoint.
1258    * It won't check if the class can be loaded or not.
1259    * Whether a coprocessor is loadable or not will be determined when
1260    * a region is opened.
1261    * @param jarFilePath Path of the jar file. If it's null, the class will be
1262    * loaded from default classloader.
1263    * @param className Full class name.
1264    * @param priority Priority
1265    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1266    * @throws IOException
1267    */
1268   public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
1269                              int priority, final Map<String, String> kvs)
1270   throws IOException {
1271     checkHasCoprocessor(className);
1272
1273     // Validate parameter kvs and then add key/values to kvString.
1274     StringBuilder kvString = new StringBuilder();
1275     if (kvs != null) {
1276       for (Map.Entry<String, String> e: kvs.entrySet()) {
1277         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1278           throw new IOException("Illegal parameter key = " + e.getKey());
1279         }
1280         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1281           throw new IOException("Illegal parameter (" + e.getKey() +
1282               ") value = " + e.getValue());
1283         }
1284         if (kvString.length() != 0) {
1285           kvString.append(',');
1286         }
1287         kvString.append(e.getKey());
1288         kvString.append('=');
1289         kvString.append(e.getValue());
1290       }
1291     }
1292
1293     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1294         "|" + className + "|" + Integer.toString(priority) + "|" +
1295         kvString.toString();
1296     return addCoprocessorToMap(value);
1297   }
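
  // Editor-added usage sketch (illustrative): attaching a coprocessor with an
  // explicit jar, priority and one key/value argument. The class name, jar
  // path and "arg" key are hypothetical.
  //
  //   htd.addCoprocessor("com.example.MyRegionObserver",
  //       new Path("hdfs:///coprocessors/my-observer.jar"),
  //       Coprocessor.PRIORITY_USER,
  //       Collections.singletonMap("arg", "value"));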
1298
1299   /**
1300    * Add a table coprocessor to this table. The coprocessor
1301    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1302    * or Endpoint.
1303    * It won't check if the class can be loaded or not.
1304    * Whether a coprocessor is loadable or not will be determined when
1305    * a region is opened.
1306    * @param specStr The coprocessor specification, all in one String, formatted to match
1307    * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1308    * @throws IOException
1309    */
1310   public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
1311     String className = getCoprocessorClassNameFromSpecStr(specStr);
1312     if (className == null) {
1313       throw new IllegalArgumentException("Format does not match " +
1314         HConstants.CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr);
1315     }
1316     checkHasCoprocessor(className);
1317     return addCoprocessorToMap(specStr);
1318   }
1319
1320   private void checkHasCoprocessor(final String className) throws IOException {
1321     if (hasCoprocessor(className)) {
1322       throw new IOException("Coprocessor " + className + " already exists.");
1323     }
1324   }
1325
1326   /**
1327    * Add coprocessor to values Map
1328    * @param specStr The coprocessor specification, all in one String, formatted to match
1329    * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1330    * @return Returns <code>this</code>
1331    */
1332   private HTableDescriptor addCoprocessorToMap(final String specStr) {
1333     if (specStr == null) return this;
1334     // generate a coprocessor key
1335     int maxCoprocessorNumber = 0;
1336     Matcher keyMatcher;
1337     for (Map.Entry<Bytes, Bytes> e: this.values.entrySet()) {
1338       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1339       if (!keyMatcher.matches()) {
1340         continue;
1341       }
1342       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
1343     }
1344     maxCoprocessorNumber++;
1345     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1346     this.values.put(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
1347     return this;
1348   }
1349
1350   /**
1351    * Check if the table has an attached co-processor represented by the name className
1352    *
1353    * @param classNameToMatch - Class name of the co-processor
1354    * @return true if the table has a co-processor matching className
1355    */
1356   public boolean hasCoprocessor(String classNameToMatch) {
1357     Matcher keyMatcher;
1358     for (Map.Entry<Bytes, Bytes> e :
1359         this.values.entrySet()) {
1360       keyMatcher =
1361           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1362               Bytes.toString(e.getKey().get()));
1363       if (!keyMatcher.matches()) {
1364         continue;
1365       }
1366       String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
1367       if (className == null) continue;
1368       if (className.equals(classNameToMatch.trim())) {
1369         return true;
1370       }
1371     }
1372     return false;
1373   }
1374
1375   /**
1376    * Return the list of attached co-processors represented by their class names
1377    *
1378    * @return The list of co-processor class names
1379    */
1380   public List<String> getCoprocessors() {
1381     List<String> result = new ArrayList<String>();
1382     Matcher keyMatcher;
1383     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1384       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1385       if (!keyMatcher.matches()) {
1386         continue;
1387       }
1388       String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
1389       if (className == null) continue;
1390       result.add(className); // classname is the 2nd field
1391     }
1392     return result;
1393   }
1394
1395   /**
1396    * @param spec String formatted as per {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1397    * @return Class name parsed from the passed in <code>spec</code>, or null if no match is found
1398    */
1399   private static String getCoprocessorClassNameFromSpecStr(final String spec) {
1400     Matcher matcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
1401     // Classname is the 2nd field
1402     return matcher != null && matcher.matches()? matcher.group(2).trim(): null;
1403   }
1404
1405   /**
1406    * Remove a coprocessor from those set on the table
1407    * @param className Class name of the co-processor
1408    */
1409   public void removeCoprocessor(String className) {
1410     Bytes match = null;
1411     Matcher keyMatcher;
1412     Matcher valueMatcher;
1413     for (Map.Entry<Bytes, Bytes> e : this.values
1414         .entrySet()) {
1415       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1416           .getKey().get()));
1417       if (!keyMatcher.matches()) {
1418         continue;
1419       }
1420       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1421           .toString(e.getValue().get()));
1422       if (!valueMatcher.matches()) {
1423         continue;
1424       }
1425       // get className and compare
1426       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1427       // remove the CP if it is present
1428       if (clazz.equals(className.trim())) {
1429         match = e.getKey();
1430         break;
1431       }
1432     }
1433     // if we found a match, remove it
1434     if (match != null)
1435       remove(match);
1436   }
1437
1438   /**
1439    * Returns the {@link Path} object representing the table directory under
1440    * path rootdir
1441    *
1442    * Deprecated: use FSUtils.getTableDir() instead.
1443    *
1444    * @param rootdir qualified path of HBase root directory
1445    * @param tableName name of table
1446    * @return {@link Path} for table
1447    */
1448   @Deprecated
1449   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1450     // This is bad; I had to mirror code from FSUtils.getTableDir since
1451     // there is no module dependency between hbase-client and hbase-server
1452     TableName name = TableName.valueOf(tableName);
1453     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1454               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1455   }
1456
1457   public final static String NAMESPACE_FAMILY_INFO = "info";
1458   public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
1459   public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1460
1461   /** Table descriptor for namespace table */
1462   public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
1463       TableName.NAMESPACE_TABLE_NAME,
1464       new HColumnDescriptor[] {
1465           new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
1466               // Ten is an arbitrary number.  Keep versions to help debugging.
1467               .setMaxVersions(10)
1468               .setInMemory(true)
1469               .setBlocksize(8 * 1024)
1470               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1471               // Enable cache of data blocks in L1 if more than one caching tier deployed:
1472               // e.g. if using CombinedBlockCache (BucketCache).
1473               .setCacheDataInL1(true)
1474       });
1475
1476   @Deprecated
1477   public HTableDescriptor setOwner(User owner) {
1478     return setOwnerString(owner != null ? owner.getShortName() : null);
1479   }
1480
1481   // used by admin.rb:alter(table_name,*args) to update owner.
1482   @Deprecated
1483   public HTableDescriptor setOwnerString(String ownerString) {
1484     if (ownerString != null) {
1485       setValue(OWNER_KEY, ownerString);
1486     } else {
1487       remove(OWNER_KEY);
1488     }
1489     return this;
1490   }
1491
1492   @Deprecated
1493   public String getOwnerString() {
1494     if (getValue(OWNER_KEY) != null) {
1495       return Bytes.toString(getValue(OWNER_KEY));
1496     }
1497     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1498     // hbase:meta and -ROOT- should return system user as owner, not null (see
1499     // MasterFileSystem.java:bootstrap()).
1500     return null;
1501   }
1502
1503   /**
1504    * @return This instance serialized with pb with pb magic prefix
1505    * @see #parseFrom(byte[])
1506    */
1507   public byte[] toByteArray() {
1508     return ProtobufUtil.prependPBMagic(ProtobufUtil.convertToTableSchema(this).toByteArray());
1509   }
1510
1511   /**
1512    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1513    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1514    * @throws DeserializationException
1515    * @throws IOException
1516    * @see #toByteArray()
1517    */
1518   public static HTableDescriptor parseFrom(final byte [] bytes)
1519   throws DeserializationException, IOException {
1520     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1521       throw new DeserializationException("Expected PB encoded HTableDescriptor");
1522     }
1523     int pblen = ProtobufUtil.lengthOfPBMagic();
1524     TableSchema.Builder builder = TableSchema.newBuilder();
1525     TableSchema ts;
1526     try {
1527       ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
1528       ts = builder.build();
1529     } catch (IOException e) {
1530       throw new DeserializationException(e);
1531     }
1532     return ProtobufUtil.convertToHTableDesc(ts);
1533   }
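
  // Editor-added usage sketch (illustrative): round-tripping a descriptor
  // through its protobuf serialization.
  //
  //   byte[] pb = htd.toByteArray();
  //   HTableDescriptor copy = HTableDescriptor.parseFrom(pb);
  //   assert htd.equals(copy);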
1534
1535   /**
1536    * Getter for accessing the configuration value by key
1537    */
1538   public String getConfigurationValue(String key) {
1539     return configuration.get(key);
1540   }
1541
1542   /**
1543    * Getter for fetching an unmodifiable {@link #configuration} map.
1544    */
1545   public Map<String, String> getConfiguration() {
1546     // shallow pointer copy
1547     return Collections.unmodifiableMap(configuration);
1548   }
1549
1550   /**
1551    * Setter for storing a configuration setting in {@link #configuration} map.
1552    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1553    * @param value String value. If null, removes the setting.
1554    */
1555   public HTableDescriptor setConfiguration(String key, String value) {
1556     if (value == null) {
1557       removeConfiguration(key);
1558     } else {
1559       configuration.put(key, value);
1560     }
1561     return this;
1562   }
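
  // Editor-added usage sketch (illustrative): overriding a server-side
  // setting for this table only; the key matches the XML config key.
  //
  //   htd.setConfiguration("hbase.hstore.compaction.min", "5");
  //   String v = htd.getConfigurationValue("hbase.hstore.compaction.min");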
1563
1564   /**
1565    * Remove a config setting represented by the key from the {@link #configuration} map
1566    */
1567   public void removeConfiguration(final String key) {
1568     configuration.remove(key);
1569   }
1570 }