1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.IOException;
22  import java.util.ArrayList;
23  import java.util.Collection;
24  import java.util.Collections;
25  import java.util.HashMap;
26  import java.util.HashSet;
27  import java.util.Iterator;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.Set;
31  import java.util.TreeMap;
32  import java.util.TreeSet;
33  import java.util.regex.Matcher;
34  
35  import org.apache.commons.logging.Log;
36  import org.apache.commons.logging.LogFactory;
37  import org.apache.hadoop.fs.Path;
38  import org.apache.hadoop.hbase.classification.InterfaceAudience;
39  import org.apache.hadoop.hbase.classification.InterfaceStability;
40  import org.apache.hadoop.hbase.client.Durability;
41  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
42  import org.apache.hadoop.hbase.exceptions.DeserializationException;
43  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
44  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
45  import org.apache.hadoop.hbase.security.User;
46  import org.apache.hadoop.hbase.util.Bytes;
47
48  /**
49   * HTableDescriptor contains the details about an HBase table, such as the descriptors of
50   * all the column families, whether the table is a catalog table (<code> -ROOT- </code> or
51   * <code> hbase:meta </code>), whether the table is read only, the maximum size of the
52   * memstore, when a region split should occur, the coprocessors associated with it, etc.
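     * <p>A minimal construction sketch (the table and family names below are hypothetical):
     * <pre>
     * HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
     * htd.addFamily(new HColumnDescriptor("cf"));
     * </pre>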
53   */
54  @InterfaceAudience.Public
55  @InterfaceStability.Evolving
56  public class HTableDescriptor implements Comparable<HTableDescriptor> {
57
58    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
59
60    private TableName name = null;
61
62    /**
63     * A map which holds the metadata information of the table. This metadata
64     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
65     * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
66     */
67    private final Map<Bytes, Bytes> values =
68        new HashMap<Bytes, Bytes>();
69
70    /**
71     * A map which holds the configuration specific to the table.
72     * The keys of the map have the same names as config keys and override the defaults with
73     * table-specific settings. Example usage may be for compactions, etc.
74     */
75    private final Map<String, String> configuration = new HashMap<String, String>();
76
77    public static final String SPLIT_POLICY = "SPLIT_POLICY";
78
79    /**
80     * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
81     * attribute which denotes the maximum size of the store file after which
82     * a region split occurs
83     *
84     * @see #getMaxFileSize()
85     */
86    public static final String MAX_FILESIZE = "MAX_FILESIZE";
87    private static final Bytes MAX_FILESIZE_KEY =
88        new Bytes(Bytes.toBytes(MAX_FILESIZE));
89
90    public static final String OWNER = "OWNER";
91    public static final Bytes OWNER_KEY =
92        new Bytes(Bytes.toBytes(OWNER));
93
94    /**
95    * <em>INTERNAL</em> Used by the REST interface to access this metadata
96    * attribute which denotes if the table is read only
97     *
98     * @see #isReadOnly()
99     */
100   public static final String READONLY = "READONLY";
101   private static final Bytes READONLY_KEY =
102       new Bytes(Bytes.toBytes(READONLY));
103
104   /**
105    * <em>INTERNAL</em> Used by the HBase Shell interface to access this metadata
106    * attribute which denotes if compaction is enabled on the table
107    *
108    * @see #isCompactionEnabled()
109    */
110   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
111   private static final Bytes COMPACTION_ENABLED_KEY =
112       new Bytes(Bytes.toBytes(COMPACTION_ENABLED));
113
114   /**
115    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
116    * attribute which represents the maximum size of the memstore after which
117    * its contents are flushed onto the disk
118    *
119    * @see #getMemStoreFlushSize()
120    */
121   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
122   private static final Bytes MEMSTORE_FLUSHSIZE_KEY =
123       new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
124
125   public static final String FLUSH_POLICY = "FLUSH_POLICY";
126
127   /**
128    * <em>INTERNAL</em> Used by the REST interface to access this metadata
129    * attribute which denotes if the table is a -ROOT- region or not
130    *
131    * @see #isRootRegion()
132    */
133   public static final String IS_ROOT = "IS_ROOT";
134   private static final Bytes IS_ROOT_KEY =
135       new Bytes(Bytes.toBytes(IS_ROOT));
136
137   /**
138    * <em>INTERNAL</em> Used by the REST interface to access this metadata
139    * attribute which denotes if it is a catalog table, either
140    * <code> hbase:meta </code> or <code> -ROOT- </code>
141    *
142    * @see #isMetaRegion()
143    */
144   public static final String IS_META = "IS_META";
145   private static final Bytes IS_META_KEY =
146       new Bytes(Bytes.toBytes(IS_META));
147
148   /**
149    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
150    * attribute which denotes if the deferred log flush option is enabled.
151    * @deprecated Use {@link #DURABILITY} instead.
152    */
153   @Deprecated
154   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
155   @Deprecated
156   private static final Bytes DEFERRED_LOG_FLUSH_KEY =
157       new Bytes(Bytes.toBytes(DEFERRED_LOG_FLUSH));
158
159   /**
160    * <em>INTERNAL</em> {@link Durability} setting for the table.
161    */
162   public static final String DURABILITY = "DURABILITY";
163   private static final Bytes DURABILITY_KEY =
164       new Bytes(Bytes.toBytes("DURABILITY"));
165
166   /**
167    * <em>INTERNAL</em> number of region replicas for the table.
168    */
169   public static final String REGION_REPLICATION = "REGION_REPLICATION";
170   private static final Bytes REGION_REPLICATION_KEY =
171       new Bytes(Bytes.toBytes(REGION_REPLICATION));
172
173   /**
174    * <em>INTERNAL</em> flag to indicate whether or not the memstore should be replicated
175    * for read-replicas (CONSISTENCY =&gt; TIMELINE).
176    */
177   public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
178   private static final Bytes REGION_MEMSTORE_REPLICATION_KEY =
179       new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));
180
181   /**
182    * <em>INTERNAL</em> Used by the shell/REST interface to access this metadata
183    * attribute which denotes if the table should be treated by the region normalizer.
184    *
185    * @see #isNormalizationEnabled()
186    */
187   public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
188   private static final Bytes NORMALIZATION_ENABLED_KEY =
189     new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));
190
191   /** Default durability for HTD is USE_DEFAULT, which defaults to the HBase-global default value */
192   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
193
194   public static final String PRIORITY = "PRIORITY";
195   private static final Bytes PRIORITY_KEY =
196     new Bytes(Bytes.toBytes(PRIORITY));
197
198   /** Relative priority of the table used for rpc scheduling */
199   private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS;
200
201   /*
202    *  The below are ugly but better than creating them each time till we
203    *  replace booleans being saved as Strings with plain booleans.  Need a
204    *  migration script to do this.  TODO.
205    */
206   private static final Bytes FALSE =
207       new Bytes(Bytes.toBytes(Boolean.FALSE.toString()));
208
209   private static final Bytes TRUE =
210       new Bytes(Bytes.toBytes(Boolean.TRUE.toString()));
211
212   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
213
214   /**
215    * Constant that denotes whether the table is read only by default; the default is false
216    */
217   public static final boolean DEFAULT_READONLY = false;
218
219   /**
220    * Constant that denotes whether the table is compaction enabled by default
221    */
222   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
223
224   /**
225    * Constant that denotes whether the table is normalized by default.
226    */
227   public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;
228
229   /**
230    * Constant that denotes the default maximum size of the memstore, after which
231    * the contents are flushed to the store files
232    */
233   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
234
235   public static final int DEFAULT_REGION_REPLICATION = 1;
236
237   public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;
238
239   private final static Map<String, String> DEFAULT_VALUES
240     = new HashMap<String, String>();
241   private final static Set<Bytes> RESERVED_KEYWORDS
242       = new HashSet<Bytes>();
243
244   static {
245     DEFAULT_VALUES.put(MAX_FILESIZE,
246         String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
247     DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
248     DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
249         String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
250     DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
251         String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
252     DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
253     DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
254     DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
255     DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
256     for (String s : DEFAULT_VALUES.keySet()) {
257       RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
258     }
259     RESERVED_KEYWORDS.add(IS_ROOT_KEY);
260     RESERVED_KEYWORDS.add(IS_META_KEY);
261   }
262
263   /**
264    * Cache of whether this is a meta table or not.
265    */
266   private volatile Boolean meta = null;
267   /**
268    * Cache of whether this is root table or not.
269    */
270   private volatile Boolean root = null;
271
272   /**
273    * Durability setting for the table
274    */
275   private Durability durability = null;
276
277   /**
278    * Maps column family name to the respective HColumnDescriptors
279    */
280   private final Map<byte [], HColumnDescriptor> families =
281     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
282
283   /**
284    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
285    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
286    */
287   @InterfaceAudience.Private
288   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
289     setName(name);
290     for(HColumnDescriptor descriptor : families) {
291       this.families.put(descriptor.getName(), descriptor);
292     }
293   }
294
295   /**
296    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
297    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
298    */
299   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
300       Map<Bytes, Bytes> values) {
301     setName(name);
302     for(HColumnDescriptor descriptor : families) {
303       this.families.put(descriptor.getName(), descriptor);
304     }
305     for (Map.Entry<Bytes, Bytes> entry :
306         values.entrySet()) {
307       setValue(entry.getKey(), entry.getValue());
308     }
309   }
310
311   /**
312    * Default constructor which constructs an empty object.
313    * For deserializing an HTableDescriptor instance only.
314    * @deprecated As of release 0.96 (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
315    *             This was made protected in 2.0.0 and will be removed in HBase 3.0.0.
316    *             Used by Writables and Writables are going away.
317    */
318   @Deprecated
319   protected HTableDescriptor() {
320     super();
321   }
322
323   /**
324    * Construct a table descriptor specifying a TableName object
325    * @param name Table name.
326    * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 HBASE: (HBASE-174) Un-openable tablename bug</a>
327    */
328   public HTableDescriptor(final TableName name) {
329     super();
330     setName(name);
331   }
332
333   /**
334    * Construct a table descriptor specifying a byte array table name
335    * @param name Table name.
336    * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 (HBASE-174) HBASE: Un-openable tablename bug</a>
337    */
338   @Deprecated
339   public HTableDescriptor(final byte[] name) {
340     this(TableName.valueOf(name));
341   }
342
343   /**
344    * Construct a table descriptor specifying a String table name
345    * @param name Table name.
346    * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 (HBASE-174) HBASE: Un-openable tablename bug</a>
347    */
348   @Deprecated
349   public HTableDescriptor(final String name) {
350     this(TableName.valueOf(name));
351   }
352
353   /**
354    * Construct a table descriptor by cloning the descriptor passed as a parameter.
355    * <p>
356    * Makes a deep copy of the supplied descriptor.
357    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
358    * @param desc The descriptor.
359    */
360   public HTableDescriptor(final HTableDescriptor desc) {
361     this(desc.name, desc);
362   }
363
364   /**
365    * Construct a table descriptor by cloning the descriptor passed as a parameter
366    * but using a different table name.
367    * <p>
368    * Makes a deep copy of the supplied descriptor.
369    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
370    * @param name Table name.
371    * @param desc The descriptor.
372    */
373   public HTableDescriptor(final TableName name, final HTableDescriptor desc) {
374     super();
375     setName(name);
376     setMetaFlags(this.name);
377     for (HColumnDescriptor c: desc.families.values()) {
378       this.families.put(c.getName(), new HColumnDescriptor(c));
379     }
380     for (Map.Entry<Bytes, Bytes> e :
381         desc.values.entrySet()) {
382       setValue(e.getKey(), e.getValue());
383     }
384     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
385       this.configuration.put(e.getKey(), e.getValue());
386     }
387   }
388
389   /*
390    * Set meta flags on this table.
391    * IS_ROOT_KEY is set if it's a -ROOT- table.
392    * IS_META_KEY is set if it's either a -ROOT- or an hbase:meta table.
393    * Called by constructors.
394    * @param name
395    */
396   private void setMetaFlags(final TableName name) {
397     setMetaRegion(isRootRegion() ||
398         name.equals(TableName.META_TABLE_NAME));
399   }
400
401   /**
402    * Check if the descriptor represents a <code> -ROOT- </code> region.
403    *
404    * @return true if this is a <code> -ROOT- </code> region
405    */
406   public boolean isRootRegion() {
407     if (this.root == null) {
408       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
409     }
410     return this.root.booleanValue();
411   }
412
413   /**
414    * <em> INTERNAL </em> Used to denote if the current table represents the
415    * <code> -ROOT- </code> region. This is used internally by the
416    * HTableDescriptor constructors
417    *
418    * @param isRoot true if this is the <code> -ROOT- </code> region
419    */
420   protected void setRootRegion(boolean isRoot) {
421     // TODO: Make the value a boolean rather than String of boolean.
422     setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
423   }
424
425   /**
426    * Checks if this table is the <code> hbase:meta </code> region.
427    *
428    * @return true if this table is the <code> hbase:meta </code> region
431    */
432   public boolean isMetaRegion() {
433     if (this.meta == null) {
434       this.meta = calculateIsMetaRegion();
435     }
436     return this.meta.booleanValue();
437   }
438
439   private synchronized Boolean calculateIsMetaRegion() {
440     byte [] value = getValue(IS_META_KEY);
441     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
442   }
443
444   private boolean isSomething(final Bytes key,
445       final boolean valueIfNull) {
446     byte [] value = getValue(key);
447     if (value != null) {
448       return Boolean.valueOf(Bytes.toString(value));
449     }
450     return valueIfNull;
451   }
452
453   /**
454    * <em> INTERNAL </em> Used to denote if the current table represents
455    * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
456    * internally by the HTableDescriptor constructors
457    *
458    * @param isMeta true if it's either <code> -ROOT- </code> or
459    * <code> hbase:meta </code> region
460    */
461   protected void setMetaRegion(boolean isMeta) {
462     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
463   }
464
465   /**
466    * Checks if the table is a <code>hbase:meta</code> table
467    *
468    * @return true if table is <code> hbase:meta </code> region.
469    */
470   public boolean isMetaTable() {
471     return isMetaRegion() && !isRootRegion();
472   }
473
474   /**
475    * Getter for accessing the metadata associated with the key
476    *
477    * @param key The key.
478    * @return The value.
479    * @see #values
480    */
481   public byte[] getValue(byte[] key) {
482     return getValue(new Bytes(key));
483   }
484
485   private byte[] getValue(final Bytes key) {
486     Bytes ibw = values.get(key);
487     if (ibw == null)
488       return null;
489     return ibw.get();
490   }
491
492   /**
493    * Getter for accessing the metadata associated with the key
494    *
495    * @param key The key.
496    * @return The value.
497    * @see #values
498    */
499   public String getValue(String key) {
500     byte[] value = getValue(Bytes.toBytes(key));
501     if (value == null)
502       return null;
503     return Bytes.toString(value);
504   }
505
506   /**
507    * Getter for fetching an unmodifiable {@link #values} map.
508    *
509    * @return unmodifiable map {@link #values}.
510    * @see #values
511    */
512   public Map<Bytes, Bytes> getValues() {
513     // shallow pointer copy
514     return Collections.unmodifiableMap(values);
515   }
516
517   /**
518    * Setter for storing metadata as a (key, value) pair in {@link #values} map
519    *
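     * <p>A hedged usage sketch (the key name below is arbitrary, not a reserved attribute):
     * <pre>
     * htd.setValue(Bytes.toBytes("MY_ATTRIBUTE"), Bytes.toBytes("my value"));
     * byte[] stored = htd.getValue(Bytes.toBytes("MY_ATTRIBUTE"));
     * </pre>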
520    * @param key The key.
521    * @param value The value.
522    * @see #values
523    */
524   public HTableDescriptor setValue(byte[] key, byte[] value) {
525     setValue(new Bytes(key), new Bytes(value));
526     return this;
527   }
528
529   /*
530    * @param key The key.
531    * @param value The value.
532    */
533   private HTableDescriptor setValue(final Bytes key,
534       final String value) {
535     setValue(key, new Bytes(Bytes.toBytes(value)));
536     return this;
537   }
538
539   /*
540    * Setter for storing metadata as a (key, value) pair in {@link #values} map
541    *
542    * @param key The key.
543    * @param value The value.
544    */
545   public HTableDescriptor setValue(final Bytes key, final Bytes value) {
546     if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
547       boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
548       LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
549           "use " + DURABILITY + " instead");
550       setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
551       return this;
552     }
553     values.put(key, value);
554     return this;
555   }
556
557   /**
558    * Setter for storing metadata as a (key, value) pair in {@link #values} map
559    *
560    * @param key The key.
561    * @param value The value.
562    * @see #values
563    */
564   public HTableDescriptor setValue(String key, String value) {
565     if (value == null) {
566       remove(key);
567     } else {
568       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
569     }
570     return this;
571   }
572
573   /**
574    * Remove metadata represented by the key from the {@link #values} map
575    *
576    * @param key Key whose key and value we're to remove from HTableDescriptor
577    * parameters.
578    */
579   public void remove(final String key) {
580     remove(new Bytes(Bytes.toBytes(key)));
581   }
582
583   /**
584    * Remove metadata represented by the key from the {@link #values} map
585    *
586    * @param key Key whose key and value we're to remove from HTableDescriptor
587    * parameters.
588    */
589   public void remove(Bytes key) {
590     values.remove(key);
591   }
592
593   /**
594    * Remove metadata represented by the key from the {@link #values} map
595    *
596    * @param key Key whose key and value we're to remove from HTableDescriptor
597    * parameters.
598    */
599   public void remove(final byte [] key) {
600     remove(new Bytes(key));
601   }
602
603   /**
604    * Check if the readOnly flag of the table is set. If the readOnly flag is
605    * set, then the contents of the table can only be read, not modified.
606    *
607    * @return true if all columns in the table should be read only
608    */
609   public boolean isReadOnly() {
610     return isSomething(READONLY_KEY, DEFAULT_READONLY);
611   }
612
613   /**
614    * Setting the table as read only sets all the columns in the table as read
615    * only. By default all tables are modifiable, but if the readOnly flag is
616    * set to true then the contents of the table can only be read but not modified.
617    *
618    * @param readOnly True if all of the columns in the table should be read
619    * only.
620    */
621   public HTableDescriptor setReadOnly(final boolean readOnly) {
622     return setValue(READONLY_KEY, readOnly? TRUE: FALSE);
623   }
624
625   /**
626    * Check if the compaction enable flag of the table is true. If the flag is
627    * false, no minor/major compactions will actually be performed.
628    *
629    * @return true if table compaction enabled
630    */
631   public boolean isCompactionEnabled() {
632     return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
633   }
634
635   /**
636    * Setting the table compaction enable flag.
637    *
638    * @param isEnable True to enable compaction.
639    */
640   public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
641     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
642     return this;
643   }
644
645   /**
646    * Check if the normalization enable flag of the table is true. If the flag is
647    * false, the region normalizer won't attempt to normalize this table.
648    *
649    * @return true if region normalization is enabled for this table
650    */
651   public boolean isNormalizationEnabled() {
652     return isSomething(NORMALIZATION_ENABLED_KEY, DEFAULT_NORMALIZATION_ENABLED);
653   }
654
655   /**
656    * Setting the table normalization enable flag.
657    *
658    * @param isEnable True to enable normalization.
659    */
660   public HTableDescriptor setNormalizationEnabled(final boolean isEnable) {
661     setValue(NORMALIZATION_ENABLED_KEY, isEnable ? TRUE : FALSE);
662     return this;
663   }
664
665   /**
666    * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
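     * <p>For example:
     * <pre>
     * htd.setDurability(Durability.ASYNC_WAL); // trade a little durability for lower write latency
     * </pre>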
667    * @param durability enum value
668    */
669   public HTableDescriptor setDurability(Durability durability) {
670     this.durability = durability;
671     setValue(DURABILITY_KEY, durability.name());
672     return this;
673   }
674
675   /**
676    * Returns the durability setting for the table.
677    * @return durability setting for the table.
678    */
679   public Durability getDurability() {
680     if (this.durability == null) {
681       byte[] durabilityValue = getValue(DURABILITY_KEY);
682       if (durabilityValue == null) {
683         this.durability = DEFAULT_DURABLITY;
684       } else {
685         try {
686           this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
687         } catch (IllegalArgumentException ex) {
688           LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
689             + " is not known. Durability:" + Bytes.toString(durabilityValue));
690           this.durability = DEFAULT_DURABLITY;
691         }
692       }
693     }
694     return this.durability;
695   }
696
697   /**
698    * Get the name of the table
699    *
700    * @return TableName
701    */
702   public TableName getTableName() {
703     return name;
704   }
705
706   /**
707    * Get the name of the table as a byte array.
708    *
709    * @return name of table
710    * @deprecated Use {@link #getTableName()} instead
711    */
712   @Deprecated
713   public byte[] getName() {
714     return name.getName();
715   }
716
717   /**
718    * Get the name of the table as a String
719    *
720    * @return name of table as a String
721    */
722   public String getNameAsString() {
723     return name.getNameAsString();
724   }
725
726   /**
727    * This sets the class associated with the region split policy which
728    * determines when a region split should occur.  The class used by
729    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
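     * <p>For instance, a sketch naming a split policy implementation that ships with HBase:
     * <pre>
     * htd.setRegionSplitPolicyClassName(
     *     "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");
     * </pre>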
730    * @param clazz the class name
731    */
732   public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
733     setValue(SPLIT_POLICY, clazz);
734     return this;
735   }
736
737   /**
738    * This gets the class associated with the region split policy which
739    * determines when a region split should occur.  The class used by
740    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
741    *
742    * @return the class name of the region split policy for this table.
743    * If this returns null, the default split policy is used.
744    */
745   public String getRegionSplitPolicyClassName() {
746     return getValue(SPLIT_POLICY);
747   }
748
749   /**
750    * Set the name of the table.
751    *
752    * @param name name of table
753    */
754   @Deprecated
755   public HTableDescriptor setName(byte[] name) {
756     setName(TableName.valueOf(name));
757     return this;
758   }
759
760   @Deprecated
761   public HTableDescriptor setName(TableName name) {
762     this.name = name;
763     setMetaFlags(this.name);
764     return this;
765   }
766
767   /**
768    * Returns the maximum size up to which a region can grow, after which a region
769    * split is triggered. The region size is represented by the size of the biggest
770    * store file in that region.
771    *
772    * @return max hregion size for table, -1 if not set.
773    *
774    * @see #setMaxFileSize(long)
775    */
776   public long getMaxFileSize() {
777     byte [] value = getValue(MAX_FILESIZE_KEY);
778     if (value != null) {
779       return Long.parseLong(Bytes.toString(value));
780     }
781     return -1;
782   }
783
784   /**
785    * Sets the maximum size up to which a region can grow, after which a region
786    * split is triggered. The region size is represented by the size of the biggest
787    * store file in that region, i.e. if the biggest store file grows beyond
788    * maxFileSize, then a region split is triggered. This defaults to
789    * {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
790    * <p>
791    * This is not an absolute value and might vary. If a single row exceeds
792    * maxFileSize, then the store file size will be greater than maxFileSize, since
793    * a single row cannot be split across multiple regions.
794    * </p>
795    *
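     * <p>For example (the size here is chosen purely for illustration):
     * <pre>
     * htd.setMaxFileSize(20L * 1024 * 1024 * 1024); // split once the biggest store file passes ~20 GB
     * </pre>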
796    * @param maxFileSize The maximum file size that a store file can grow to
797    * before a split is triggered.
798    */
799   public HTableDescriptor setMaxFileSize(long maxFileSize) {
800     setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
801     return this;
802   }
803
804   /**
805    * Returns the size of the memstore after which a flush to filesystem is triggered.
806    *
807    * @return memory cache flush size for each hregion, -1 if not set.
808    *
809    * @see #setMemStoreFlushSize(long)
810    */
811   public long getMemStoreFlushSize() {
812     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
813     if (value != null) {
814       return Long.parseLong(Bytes.toString(value));
815     }
816     return -1;
817   }
818
819   /**
820    * Sets the maximum size of the memstore after which the contents of the
821    * memstore are flushed to the filesystem. This defaults to 128 MB ({@link #DEFAULT_MEMSTORE_FLUSH_SIZE}).
822    *
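     * <p>For example (the value is illustrative only):
     * <pre>
     * htd.setMemStoreFlushSize(256L * 1024 * 1024); // flush at 256 MB instead of the 128 MB default
     * </pre>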
823    * @param memstoreFlushSize memory cache flush size for each hregion
824    */
825   public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
826     setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
827     return this;
828   }
829
830   /**
831    * This sets the class associated with the flush policy, which determines which stores
832    * need to be flushed when flushing a region. The class used by default is defined in
833    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
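     * <p>A sketch, assuming the named policy class is present in this HBase version:
     * <pre>
     * htd.setFlushPolicyClassName("org.apache.hadoop.hbase.regionserver.FlushAllStoresPolicy");
     * </pre>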
834    * @param clazz the class name
835    */
836   public HTableDescriptor setFlushPolicyClassName(String clazz) {
837     setValue(FLUSH_POLICY, clazz);
838     return this;
839   }
840
841   /**
842    * This gets the class associated with the flush policy, which determines which stores need to be
843    * flushed when flushing a region. The class used by default is defined in
844    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
845    * @return the class name of the flush policy for this table. If this returns null, the default
846    *         flush policy is used.
847    */
848   public String getFlushPolicyClassName() {
849     return getValue(FLUSH_POLICY);
850   }
851
852   /**
853    * Adds a column family.
854    * For updating an existing family, please use {@link #modifyFamily(HColumnDescriptor)} instead.
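     * <p>A short sketch (the family name is arbitrary):
     * <pre>
     * htd.addFamily(new HColumnDescriptor("cf").setMaxVersions(3));
     * </pre>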
855    * @param family HColumnDescriptor of family to add.
856    */
857   public HTableDescriptor addFamily(final HColumnDescriptor family) {
858     if (family.getName() == null || family.getName().length <= 0) {
859       throw new IllegalArgumentException("Family name cannot be null or empty");
860     }
861     if (hasFamily(family.getName())) {
862       throw new IllegalArgumentException("Family '" +
863         family.getNameAsString() + "' already exists so cannot be added");
864     }
865     this.families.put(family.getName(), family);
866     return this;
867   }
868
869   /**
870    * Modifies the existing column family.
871    * @param family HColumnDescriptor of family to update
872    * @return this (for chained invocation)
873    */
874   public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
875     if (family.getName() == null || family.getName().length <= 0) {
876       throw new IllegalArgumentException("Family name cannot be null or empty");
877     }
878     if (!hasFamily(family.getName())) {
879       throw new IllegalArgumentException("Column family '" + family.getNameAsString()
880         + "' does not exist");
881     }
882     this.families.put(family.getName(), family);
883     return this;
884   }
885
886   /**
887    * Checks to see if this table contains the given column family
888    * @param familyName Family name or column name.
889    * @return true if the table contains the specified family name
890    */
891   public boolean hasFamily(final byte [] familyName) {
892     return families.containsKey(familyName);
893   }
894
895   /**
896    * @return Name of this table and then a map of all of the column family
897    * descriptors.
898    * @see #getNameAsString()
899    */
900   @Override
901   public String toString() {
902     StringBuilder s = new StringBuilder();
903     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
904     s.append(getValues(true));
905     for (HColumnDescriptor f : families.values()) {
906       s.append(", ").append(f);
907     }
908     return s.toString();
909   }
910
911   /**
912    * @return Name of this table and then a map of all of the column family
913    * descriptors (with only the non-default column family attributes)
914    */
915   public String toStringCustomizedValues() {
916     StringBuilder s = new StringBuilder();
917     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
918     s.append(getValues(false));
919     for(HColumnDescriptor hcd : families.values()) {
920       s.append(", ").append(hcd.toStringCustomizedValues());
921     }
922     return s.toString();
923   }
924
925   /**
926    * @return map of all table attributes formatted into string.
927    */
928   public String toStringTableAttributes() {
929    return getValues(true).toString();
930   }
931
932   private StringBuilder getValues(boolean printDefaults) {
933     StringBuilder s = new StringBuilder();
934
935     // step 1: set partitioning and pruning
936     Set<Bytes> reservedKeys = new TreeSet<Bytes>();
937     Set<Bytes> userKeys = new TreeSet<Bytes>();
938     for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
939       if (entry.getKey() == null || entry.getKey().get() == null) continue;
940       String key = Bytes.toString(entry.getKey().get());
941       // in this section, print out reserved keywords + coprocessor info
942       if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
943         userKeys.add(entry.getKey());
944         continue;
945       }
946       // only print out IS_ROOT/IS_META if true
947       String value = Bytes.toString(entry.getValue().get());
948       if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
949         if (!Boolean.parseBoolean(value)) continue;
950       }
951       // see if a reserved key is a default value. may not want to print it out
952       if (printDefaults
953           || !DEFAULT_VALUES.containsKey(key)
954           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
955         reservedKeys.add(entry.getKey());
956       }
957     }
958
959     // early exit optimization
960     boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
961     if (!hasAttributes && configuration.isEmpty()) return s;
962
963     s.append(", {");
964     // step 2: printing attributes
965     if (hasAttributes) {
966       s.append("TABLE_ATTRIBUTES => {");
967
968       // print all reserved keys first
969       boolean printCommaForAttr = false;
970       for (Bytes k : reservedKeys) {
971         String key = Bytes.toString(k.get());
972         String value = Bytes.toStringBinary(values.get(k).get());
973         if (printCommaForAttr) s.append(", ");
974         printCommaForAttr = true;
975         s.append(key);
976         s.append(" => ");
977         s.append('\'').append(value).append('\'');
978       }
979
980       if (!userKeys.isEmpty()) {
981         // print all non-reserved, advanced config keys as a separate subset
982         if (printCommaForAttr) s.append(", ");
983         printCommaForAttr = true;
984         s.append(HConstants.METADATA).append(" => ");
985         s.append("{");
986         boolean printCommaForCfg = false;
987         for (Bytes k : userKeys) {
988           String key = Bytes.toString(k.get());
989           String value = Bytes.toStringBinary(values.get(k).get());
990           if (printCommaForCfg) s.append(", ");
991           printCommaForCfg = true;
992           s.append('\'').append(key).append('\'');
993           s.append(" => ");
994           s.append('\'').append(value).append('\'');
995         }
996         s.append("}");
997       }
998     }
999
1000     // step 3: printing all configuration:
1001     if (!configuration.isEmpty()) {
1002       if (hasAttributes) {
1003         s.append(", ");
1004       }
1005       s.append(HConstants.CONFIGURATION).append(" => ");
1006       s.append('{');
1007       boolean printCommaForConfig = false;
1008       for (Map.Entry<String, String> e : configuration.entrySet()) {
1009         if (printCommaForConfig) s.append(", ");
1010         printCommaForConfig = true;
1011         s.append('\'').append(e.getKey()).append('\'');
1012         s.append(" => ");
1013         s.append('\'').append(e.getValue()).append('\'');
1014       }
1015       s.append("}");
1016     }
1017     s.append("}"); // end METHOD
1018     return s;
1019   }
1020
1021   /**
1022    * Compare the contents of the descriptor with another one passed as a parameter.
1023    * Checks if the obj passed is an instance of HTableDescriptor; if so, the
1024    * contents of the descriptors are compared.
1025    *
1026    * @return true if the contents of the two descriptors exactly match
1027    *
1028    * @see java.lang.Object#equals(java.lang.Object)
1029    */
1030   @Override
1031   public boolean equals(Object obj) {
1032     if (this == obj) {
1033       return true;
1034     }
1035     if (obj == null) {
1036       return false;
1037     }
1038     if (!(obj instanceof HTableDescriptor)) {
1039       return false;
1040     }
1041     return compareTo((HTableDescriptor)obj) == 0;
1042   }
1043
1044   /**
1045    * @see java.lang.Object#hashCode()
1046    */
1047   @Override
1048   public int hashCode() {
1049     int result = this.name.hashCode();
1050     if (this.families.size() > 0) {
1051       for (HColumnDescriptor e: this.families.values()) {
1052         result ^= e.hashCode();
1053       }
1054     }
1055     result ^= values.hashCode();
1056     result ^= configuration.hashCode();
1057     return result;
1058   }
1059
1060   // Comparable
1061
1062   /**
1063    * Compares the descriptor with another descriptor which is passed as a parameter.
1064    * This compares the content of the two descriptors and not the reference.
1065    *
1066    * @return 0 if the contents of the descriptors exactly match,
1067    *         a non-zero value (negative or positive) if they differ
1068    */
1069   @Override
1070   public int compareTo(final HTableDescriptor other) {
1071     int result = this.name.compareTo(other.name);
1072     if (result == 0) {
1073       result = families.size() - other.families.size();
1074     }
1079     if (result == 0) {
1080       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1081           it2 = other.families.values().iterator(); it.hasNext(); ) {
1082         result = it.next().compareTo(it2.next());
1083         if (result != 0) {
1084           break;
1085         }
1086       }
1087     }
1088     if (result == 0) {
1089       // punt on comparison for ordering, just calculate difference
1090       result = this.values.hashCode() - other.values.hashCode();
1091       if (result < 0)
1092         result = -1;
1093       else if (result > 0)
1094         result = 1;
1095     }
1096     if (result == 0) {
1097       result = this.configuration.hashCode() - other.configuration.hashCode();
1098       if (result < 0)
1099         result = -1;
1100       else if (result > 0)
1101         result = 1;
1102     }
1103     return result;
1104   }
1105
1106   /**
1107    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1108    * of all the column families of the table.
1109    *
1110    * @return Immutable collection of {@link HColumnDescriptor} of all the
1111    * column families.
1112    */
1113   public Collection<HColumnDescriptor> getFamilies() {
1114     return Collections.unmodifiableCollection(this.families.values());
1115   }
1116
1117   /**
1118    * Returns the configured replicas per region
1119    */
1120   public int getRegionReplication() {
1121     return getIntValue(REGION_REPLICATION_KEY, DEFAULT_REGION_REPLICATION);
1122   }
1123
1124   private int getIntValue(Bytes key, int defaultVal) {
1125     byte[] val = getValue(key);
1126     if (val == null || val.length == 0) {
1127       return defaultVal;
1128     }
1129     return Integer.parseInt(Bytes.toString(val));
1130   }
1131
1132   /**
1133    * Sets the number of replicas per region.
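     * <p>For example, to keep one primary and two read replicas per region:
     * <pre>
     * htd.setRegionReplication(3);
     * </pre>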
1134    * @param regionReplication the replication factor per region
1135    */
1136   public HTableDescriptor setRegionReplication(int regionReplication) {
1137     setValue(REGION_REPLICATION_KEY,
1138         new Bytes(Bytes.toBytes(Integer.toString(regionReplication))));
1139     return this;
1140   }
1141
1142   /**
1143    * @return true if the read-replicas memstore replication is enabled.
1144    */
1145   public boolean hasRegionMemstoreReplication() {
1146     return isSomething(REGION_MEMSTORE_REPLICATION_KEY, DEFAULT_REGION_MEMSTORE_REPLICATION);
1147   }
1148
1149   /**
1150    * Enable or disable the memstore replication from the primary region to the replicas.
1151    * The replication will be used only for meta operations (e.g. flush, compaction, ...)
1152    *
1153    * @param memstoreReplication true if the new data written to the primary region
1154    *                                 should be replicated.
1155    *                            false if the secondaries can tolerate having new
1156    *                                  data only when the primary flushes the memstore.
1157    */
1158   public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) {
1159     setValue(REGION_MEMSTORE_REPLICATION_KEY, memstoreReplication ? TRUE : FALSE);
1160     // If the memstore replication is set up, we do not have to wait for observing a flush event
1161     // from the primary before starting to serve reads, because gaps from replication are not applicable
1162     setConfiguration(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
1163       Boolean.toString(memstoreReplication));
1164     return this;
1165   }
1166
1167   public HTableDescriptor setPriority(int priority) {
1168     setValue(PRIORITY_KEY, Integer.toString(priority));
1169     return this;
1170   }
1171
1172   public int getPriority() {
1173     return getIntValue(PRIORITY_KEY, DEFAULT_PRIORITY);
1174   }
1175
1176   /**
1177    * Returns all the column family names of the current table. The map of
1178    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1179    * This returns all the keys of the family map which represents the column
1180    * family names of the table.
1181    *
1182    * @return Immutable sorted set of the keys of the families.
1183    */
1184   public Set<byte[]> getFamiliesKeys() {
1185     return Collections.unmodifiableSet(this.families.keySet());
1186   }
1187
1188   /**
1189    * Returns an array of all the {@link HColumnDescriptor}s of the column families
1190    * of the table.
1191    *
1192    * @return Array of all the HColumnDescriptors of the current table
1193    *
1194    * @see #getFamilies()
1195    */
1196   public HColumnDescriptor[] getColumnFamilies() {
1197     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1198     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1199   }
1200
1201
1202   /**
1203    * Returns the HColumnDescriptor for a specific column family with name as
1204    * specified by the parameter column.
1205    *
1206    * @param column Column family name
1207    * @return Column descriptor for the passed family name, or null if
1208    * no such family exists.
1209    */
1210   public HColumnDescriptor getFamily(final byte [] column) {
1211     return this.families.get(column);
1212   }
1213
1214
1215   /**
1216    * Removes the HColumnDescriptor with name specified by the parameter column
1217    * from the table descriptor
1218    *
1219    * @param column Name of the column family to be removed.
1220    * @return The HColumnDescriptor of the family that was removed, or null if
1221    * the family was not present.
1222    */
1223   public HColumnDescriptor removeFamily(final byte [] column) {
1224     return this.families.remove(column);
1225   }
1226
1227   /**
1228    * Add a table coprocessor to this table. The coprocessor
1229    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1230    * or Endpoint.
1231    * It won't check if the class can be loaded or not.
1232    * Whether a coprocessor is loadable or not will be determined when
1233    * a region is opened.
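     * <p>A hedged example; the observer class named below is hypothetical and must be on the
     * region servers' classpath:
     * <pre>
     * htd.addCoprocessor("com.example.MyRegionObserver");
     * </pre>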
1234    * @param className Full class name.
1235    * @throws IOException
1236    */
1237   public HTableDescriptor addCoprocessor(String className) throws IOException {
1238     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1239     return this;
1240   }
1241
1242   /**
1243    * Add a table coprocessor to this table. The coprocessor
1244    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1245    * or Endpoint.
1246    * It won't check if the class can be loaded or not.
1247    * Whether a coprocessor is loadable or not will be determined when
1248    * a region is opened.
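     * <p>A sketch; the jar path, class name, and parameter below are hypothetical:
     * <pre>
     * Map&lt;String, String&gt; args = new HashMap&lt;String, String&gt;();
     * args.put("arg", "value");
     * htd.addCoprocessor("com.example.MyRegionObserver",
     *     new Path("hdfs:///hbase/coprocessors/my-observer.jar"),
     *     Coprocessor.PRIORITY_USER, args);
     * </pre>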
1249    * @param jarFilePath Path of the jar file. If it's null, the class will be
1250    * loaded from default classloader.
1251    * @param className Full class name.
1252    * @param priority Priority
1253    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1254    * @throws IOException
1255    */
1256   public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
1257                              int priority, final Map<String, String> kvs)
1258   throws IOException {
1259     checkHasCoprocessor(className);
1260
1261     // Validate parameter kvs and then add key/values to kvString.
1262     StringBuilder kvString = new StringBuilder();
1263     if (kvs != null) {
1264       for (Map.Entry<String, String> e: kvs.entrySet()) {
1265         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1266           throw new IOException("Illegal parameter key = " + e.getKey());
1267         }
1268         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1269           throw new IOException("Illegal parameter (" + e.getKey() +
1270               ") value = " + e.getValue());
1271         }
1272         if (kvString.length() != 0) {
1273           kvString.append(',');
1274         }
1275         kvString.append(e.getKey());
1276         kvString.append('=');
1277         kvString.append(e.getValue());
1278       }
1279     }
1280
1281     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1282         "|" + className + "|" + Integer.toString(priority) + "|" +
1283         kvString.toString();
1284     return addCoprocessorToMap(value);
1285   }
1286
1287   /**
1288    * Add a table coprocessor to this table. The coprocessor
1289    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1290    * or Endpoint.
1291    * It won't check if the class can be loaded or not.
1292    * Whether a coprocessor is loadable or not will be determined when
1293    * a region is opened.
1294    * @param specStr The coprocessor specification, all in one String, formatted to match
1295    * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1296    * @throws IOException
1297    */
1298   public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
1299     String className = getCoprocessorClassNameFromSpecStr(specStr);
1300     if (className == null) {
1301       throw new IllegalArgumentException("Format does not match " +
1302         HConstants.CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr);
1303     }
1304     checkHasCoprocessor(className);
1305     return addCoprocessorToMap(specStr);
1306   }
1307
1308   private void checkHasCoprocessor(final String className) throws IOException {
1309     if (hasCoprocessor(className)) {
1310       throw new IOException("Coprocessor " + className + " already exists.");
1311     }
1312   }
1313
1314   /**
1315    * Add coprocessor to values Map
1316    * @param specStr The coprocessor specification, all in one String, formatted to match
1317    * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1318    * @return Returns <code>this</code>
1319    */
1320   private HTableDescriptor addCoprocessorToMap(final String specStr) {
1321     if (specStr == null) return this;
1322     // generate a coprocessor key
1323     int maxCoprocessorNumber = 0;
1324     Matcher keyMatcher;
1325     for (Map.Entry<Bytes, Bytes> e: this.values.entrySet()) {
1326       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1327       if (!keyMatcher.matches()) {
1328         continue;
1329       }
1330       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
1331     }
1332     maxCoprocessorNumber++;
1333     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1334     this.values.put(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
1335     return this;
1336   }
1337
1338   /**
1339    * Check if the table has an attached co-processor with the given class name
1340    *
1341    * @param classNameToMatch - Class name of the co-processor
1342    * @return true if the table has a co-processor with the given class name
1343    */
1344   public boolean hasCoprocessor(String classNameToMatch) {
1345     Matcher keyMatcher;
1346     for (Map.Entry<Bytes, Bytes> e :
1347         this.values.entrySet()) {
1348       keyMatcher =
1349           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1350               Bytes.toString(e.getKey().get()));
1351       if (!keyMatcher.matches()) {
1352         continue;
1353       }
1354       String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
1355       if (className == null) continue;
1356       if (className.equals(classNameToMatch.trim())) {
1357         return true;
1358       }
1359     }
1360     return false;
1361   }
1362
1363   /**
1364    * Return the list of attached co-processors, represented by their class names
1365    *
1366    * @return The list of co-processor class names
1367    */
1368   public List<String> getCoprocessors() {
1369     List<String> result = new ArrayList<String>();
1370     Matcher keyMatcher;
1371     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1372       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1373       if (!keyMatcher.matches()) {
1374         continue;
1375       }
1376       String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
1377       if (className == null) continue;
1378       result.add(className); // classname is the 2nd field
1379     }
1380     return result;
1381   }
1382
1383   /**
1384    * @param spec String formatted as per {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1385    * @return Class name parsed from the passed in <code>spec</code>, or null if no match is found
1386    */
1387   private static String getCoprocessorClassNameFromSpecStr(final String spec) {
1388     Matcher matcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
1389     // Classname is the 2nd field
1390     return matcher != null && matcher.matches()? matcher.group(2).trim(): null;
1391   }
1392
1393   /**
1394    * Remove a coprocessor from those set on the table
1395    * @param className Class name of the co-processor
1396    */
1397   public void removeCoprocessor(String className) {
1398     Bytes match = null;
1399     Matcher keyMatcher;
1400     Matcher valueMatcher;
1401     for (Map.Entry<Bytes, Bytes> e : this.values
1402         .entrySet()) {
1403       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1404           .getKey().get()));
1405       if (!keyMatcher.matches()) {
1406         continue;
1407       }
1408       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1409           .toString(e.getValue().get()));
1410       if (!valueMatcher.matches()) {
1411         continue;
1412       }
1413       // get className and compare
1414       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1415       // remove the CP if it is present
1416       if (clazz.equals(className.trim())) {
1417         match = e.getKey();
1418         break;
1419       }
1420     }
1421     // if we found a match, remove it
1422     if (match != null)
1423       remove(match);
1424   }
1425
1426   /**
1427    * Returns the {@link Path} object representing the table directory under
1428    * path rootdir
1429    *
1430    * Deprecated: use FSUtils.getTableDir() instead.
1431    *
1432    * @param rootdir qualified path of HBase root directory
1433    * @param tableName name of table
1434    * @return {@link Path} for table
1435    */
1436   @Deprecated
1437   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1438     //This is bad; I had to mirror code from FSUtils.getTableDir since
1439     //there is no module dependency between hbase-client and hbase-server
1440     TableName name = TableName.valueOf(tableName);
1441     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1442               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1443   }
1444
1445   public final static String NAMESPACE_FAMILY_INFO = "info";
1446   public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
1447   public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1448
1449   /** Table descriptor for namespace table */
1450   public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
1451       TableName.NAMESPACE_TABLE_NAME,
1452       new HColumnDescriptor[] {
1453           new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
1454               // Ten is an arbitrary number; keep versions to help debugging.
1455               .setMaxVersions(10)
1456               .setInMemory(true)
1457               .setBlocksize(8 * 1024)
1458               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1459               // Enable cache of data blocks in L1 if more than one caching tier deployed:
1460               // e.g. if using CombinedBlockCache (BucketCache).
1461               .setCacheDataInL1(true)
1462       });
1463
1464   @Deprecated
1465   public HTableDescriptor setOwner(User owner) {
1466     return setOwnerString(owner != null ? owner.getShortName() : null);
1467   }
1468
1469   // used by admin.rb:alter(table_name,*args) to update owner.
1470   @Deprecated
1471   public HTableDescriptor setOwnerString(String ownerString) {
1472     if (ownerString != null) {
1473       setValue(OWNER_KEY, ownerString);
1474     } else {
1475       remove(OWNER_KEY);
1476     }
1477     return this;
1478   }
1479
1480   @Deprecated
1481   public String getOwnerString() {
1482     if (getValue(OWNER_KEY) != null) {
1483       return Bytes.toString(getValue(OWNER_KEY));
1484     }
1485     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1486     // hbase:meta and -ROOT- should return system user as owner, not null (see
1487     // MasterFileSystem.java:bootstrap()).
1488     return null;
1489   }
1490
1491   /**
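     * <p>A round-trip sketch (parseFrom may throw DeserializationException on bad input):
     * <pre>
     * byte[] bytes = htd.toByteArray();
     * HTableDescriptor copy = HTableDescriptor.parseFrom(bytes);
     * </pre>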
1492    * @return This instance serialized with pb, with the pb magic prefix
1493    * @see #parseFrom(byte[])
1494    */
1495   public byte[] toByteArray() {
1496     return ProtobufUtil.prependPBMagic(ProtobufUtil.convertToTableSchema(this).toByteArray());
1497   }
1498
1499   /**
1500    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1501    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1502    * @throws DeserializationException
1503    * @throws IOException
1504    * @see #toByteArray()
1505    */
1506   public static HTableDescriptor parseFrom(final byte [] bytes)
1507   throws DeserializationException, IOException {
1508     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1509       throw new DeserializationException("Expected PB encoded HTableDescriptor");
1510     }
1511     int pblen = ProtobufUtil.lengthOfPBMagic();
1512     TableSchema.Builder builder = TableSchema.newBuilder();
1513     TableSchema ts;
1514     try {
1515       ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
1516       ts = builder.build();
1517     } catch (IOException e) {
1518       throw new DeserializationException(e);
1519     }
1520     return ProtobufUtil.convertToHTableDesc(ts);
1521   }
1522
1523   /**
1524    * Getter for accessing the configuration value by key
1525    */
1526   public String getConfigurationValue(String key) {
1527     return configuration.get(key);
1528   }
1529
1530   /**
1531    * Getter for fetching an unmodifiable {@link #configuration} map.
1532    */
1533   public Map<String, String> getConfiguration() {
1534     // shallow pointer copy
1535     return Collections.unmodifiableMap(configuration);
1536   }
1537
1538   /**
1539    * Setter for storing a configuration setting in {@link #configuration} map.
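     * <p>For example, a per-table override of an HBase configuration key (the key shown is illustrative):
     * <pre>
     * htd.setConfiguration("hbase.hstore.compaction.min", "5");
     * </pre>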
1540    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1541    * @param value String value. If null, removes the setting.
1542    */
1543   public HTableDescriptor setConfiguration(String key, String value) {
1544     if (value == null) {
1545       removeConfiguration(key);
1546     } else {
1547       configuration.put(key, value);
1548     }
1549     return this;
1550   }
1551
1552   /**
1553    * Remove a config setting represented by the key from the {@link #configuration} map
1554    */
1555   public void removeConfiguration(final String key) {
1556     configuration.remove(key);
1557   }
1558 }