View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import javax.annotation.Nonnull;
22  import java.io.IOException;
23  import java.util.ArrayList;
24  import java.util.Collection;
25  import java.util.Collections;
26  import java.util.HashMap;
27  import java.util.HashSet;
28  import java.util.Iterator;
29  import java.util.List;
30  import java.util.Map;
31  import java.util.Set;
32  import java.util.TreeMap;
33  import java.util.TreeSet;
34  import java.util.regex.Matcher;
35  
36  import com.google.protobuf.InvalidProtocolBufferException;
37  import org.apache.commons.logging.Log;
38  import org.apache.commons.logging.LogFactory;
39  import org.apache.hadoop.classification.InterfaceAudience;
40  import org.apache.hadoop.classification.InterfaceStability;
41  import org.apache.hadoop.fs.Path;
42  import org.apache.hadoop.hbase.client.Durability;
43  import org.apache.hadoop.hbase.exceptions.DeserializationException;
44  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
45  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
46  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
47  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
48  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
49  import org.apache.hadoop.hbase.regionserver.BloomType;
50  import org.apache.hadoop.hbase.security.User;
51  import org.apache.hadoop.hbase.util.ByteStringer;
52  import org.apache.hadoop.hbase.util.Bytes;
53  
54  /**
55   * HTableDescriptor contains the details about an HBase table  such as the descriptors of
56   * all the column families, is the table a catalog table, <code> -ROOT- </code> or
57   * <code> hbase:meta </code>, if the table is read only, the maximum size of the memstore,
58   * when the region split should occur, coprocessors associated with it etc...
59   */
60  @InterfaceAudience.Public
61  @InterfaceStability.Evolving
62  public class HTableDescriptor implements Comparable<HTableDescriptor> {
63  
64    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
65  
66    private TableName name = null;
67  
68    /**
69     * A map which holds the metadata information of the table. This metadata
70     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
71     * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
72     */
73    private final Map<Bytes, Bytes> values =
74        new HashMap<Bytes, Bytes>();
75  
76    /**
77     * A map which holds the configuration specific to the table.
78     * The keys of the map have the same names as config keys and override the defaults with
79     * table-specific settings. Example usage may be for compactions, etc.
80     */
81    private final Map<String, String> configuration = new HashMap<String, String>();
82  
83    public static final String SPLIT_POLICY = "SPLIT_POLICY";
84  
85    /**
86     * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
87     * attribute which denotes the maximum size of the store file after which
88     * a region split occurs
89     *
90     * @see #getMaxFileSize()
91     */
92    public static final String MAX_FILESIZE = "MAX_FILESIZE";
93    private static final Bytes MAX_FILESIZE_KEY =
94        new Bytes(Bytes.toBytes(MAX_FILESIZE));
95  
96    public static final String OWNER = "OWNER";
97    public static final Bytes OWNER_KEY =
98        new Bytes(Bytes.toBytes(OWNER));
99  
100   /**
101    * <em>INTERNAL</em> Used by rest interface to access this metadata
102    * attribute which denotes if the table is Read Only
103    *
104    * @see #isReadOnly()
105    */
106   public static final String READONLY = "READONLY";
107   private static final Bytes READONLY_KEY =
108       new Bytes(Bytes.toBytes(READONLY));
109 
110   /**
111    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
112    * attribute which denotes if the table is compaction enabled
113    *
114    * @see #isCompactionEnabled()
115    */
116   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
117   private static final Bytes COMPACTION_ENABLED_KEY =
118       new Bytes(Bytes.toBytes(COMPACTION_ENABLED));
119 
120   /**
121    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
122    * attribute which represents the maximum size of the memstore after which
123    * its contents are flushed onto the disk
124    *
125    * @see #getMemStoreFlushSize()
126    */
127   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
128   private static final Bytes MEMSTORE_FLUSHSIZE_KEY =
129       new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
130 
131   /**
132    * <em>INTERNAL</em> Used by rest interface to access this metadata
133    * attribute which denotes if the table is a -ROOT- region or not
134    *
135    * @see #isRootRegion()
136    */
137   public static final String IS_ROOT = "IS_ROOT";
138   private static final Bytes IS_ROOT_KEY =
139       new Bytes(Bytes.toBytes(IS_ROOT));
140 
141   /**
142    * <em>INTERNAL</em> Used by rest interface to access this metadata
143    * attribute which denotes if it is a catalog table, either
144    * <code> hbase:meta </code> or <code> -ROOT- </code>
145    *
146    * @see #isMetaRegion()
147    */
148   public static final String IS_META = "IS_META";
149   private static final Bytes IS_META_KEY =
150       new Bytes(Bytes.toBytes(IS_META));
151 
152   /**
153    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
154    * attribute which denotes if the deferred log flush option is enabled.
155    * @deprecated Use {@link #DURABILITY} instead.
156    */
157   @Deprecated
158   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
159   @Deprecated
160   private static final Bytes DEFERRED_LOG_FLUSH_KEY =
161       new Bytes(Bytes.toBytes(DEFERRED_LOG_FLUSH));
162 
163   /**
164    * <em>INTERNAL</em> {@link Durability} setting for the table.
165    */
166   public static final String DURABILITY = "DURABILITY";
167   private static final Bytes DURABILITY_KEY =
168       new Bytes(Bytes.toBytes("DURABILITY"));
169 
170   /**
171    * <em>INTERNAL</em> number of region replicas for the table.
172    */
173   public static final String REGION_REPLICATION = "REGION_REPLICATION";
174   private static final Bytes REGION_REPLICATION_KEY =
175       new Bytes(Bytes.toBytes(REGION_REPLICATION));
176 
177   /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
178   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
179 
180   /*
181    *  The below are ugly but better than creating them each time till we
182    *  replace booleans being saved as Strings with plain booleans.  Need a
183    *  migration script to do this.  TODO.
184    */
185   private static final Bytes FALSE =
186       new Bytes(Bytes.toBytes(Boolean.FALSE.toString()));
187 
188   private static final Bytes TRUE =
189       new Bytes(Bytes.toBytes(Boolean.TRUE.toString()));
190 
191   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
192 
193   /**
194    * Constant that denotes whether the table is READONLY by default and is false
195    */
196   public static final boolean DEFAULT_READONLY = false;
197 
198   /**
199    * Constant that denotes whether the table is compaction enabled by default
200    */
201   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
202 
203   /**
204    * Constant that denotes the maximum default size of the memstore after which
205    * the contents are flushed to the store files
206    */
207   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
208 
209   public static final int DEFAULT_REGION_REPLICATION = 1;
210 
211   private final static Map<String, String> DEFAULT_VALUES
212     = new HashMap<String, String>();
213   private final static Set<Bytes> RESERVED_KEYWORDS
214       = new HashSet<Bytes>();
215 
  static {
    // Seed DEFAULT_VALUES so shell/toString output can suppress attributes
    // that still hold their defaults (see getValues(boolean)).
    DEFAULT_VALUES.put(MAX_FILESIZE,
        String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
        String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
        String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    // Every key with a default is reserved; IS_ROOT/IS_META are reserved too
    // even though they carry no entry in DEFAULT_VALUES.
    for (String s : DEFAULT_VALUES.keySet()) {
      RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
    }
    RESERVED_KEYWORDS.add(IS_ROOT_KEY);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }
232 
233   /**
234    * Cache of whether this is a meta table or not.
235    */
236   private volatile Boolean meta = null;
237   /**
238    * Cache of whether this is root table or not.
239    */
240   private volatile Boolean root = null;
241 
242   /**
243    * Durability setting for the table
244    */
245   private Durability durability = null;
246 
247   /**
248    * Maps column family name to the respective HColumnDescriptors
249    */
250   private final Map<byte [], HColumnDescriptor> families =
251     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
252 
253   /**
254    * <em> INTERNAL </em> Private constructor used internally creating table descriptors for
255    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
256    */
257   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
258     setName(name);
259     for(HColumnDescriptor descriptor : families) {
260       this.families.put(descriptor.getName(), descriptor);
261     }
262   }
263 
264   /**
265    * <em> INTERNAL </em>Private constructor used internally creating table descriptors for
266    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
267    */
268   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
269       Map<Bytes, Bytes> values) {
270     setName(name);
271     for(HColumnDescriptor descriptor : families) {
272       this.families.put(descriptor.getName(), descriptor);
273     }
274     for (Map.Entry<Bytes, Bytes> entry :
275         values.entrySet()) {
276       setValue(entry.getKey(), entry.getValue());
277     }
278   }
279 
  /**
   * Default constructor which constructs an empty object.
   * For deserializing an HTableDescriptor instance only.
   * Leaves {@link #name} null until a name is set.
   * @deprecated Used by Writables and Writables are going away.
   */
  @Deprecated
  public HTableDescriptor() {
    super();
  }
289 
  /**
   * Construct a table descriptor specifying a TableName object
   * @param name Table name.
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  public HTableDescriptor(final TableName name) {
    super();
    // setName() also computes the IS_ROOT/IS_META flags via setMetaFlags().
    setName(name);
  }
299 
  /**
   * Construct a table descriptor specifying a byte array table name
   * @param name Table name.
   * @deprecated Use {@link #HTableDescriptor(TableName)} instead.
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final byte[] name) {
    this(TableName.valueOf(name));
  }
309 
  /**
   * Construct a table descriptor specifying a String table name
   * @param name Table name.
   * @deprecated Use {@link #HTableDescriptor(TableName)} instead.
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final String name) {
    this(TableName.valueOf(name));
  }
319 
320   /**
321    * Construct a table descriptor by cloning the descriptor passed as a parameter.
322    * <p>
323    * Makes a deep copy of the supplied descriptor.
324    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
325    * @param desc The descriptor.
326    */
327   public HTableDescriptor(final HTableDescriptor desc) {
328     super();
329     setName(desc.name);
330     setMetaFlags(this.name);
331     for (HColumnDescriptor c: desc.families.values()) {
332       this.families.put(c.getName(), new HColumnDescriptor(c));
333     }
334     for (Map.Entry<Bytes, Bytes> e :
335         desc.values.entrySet()) {
336       setValue(e.getKey(), e.getValue());
337     }
338     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
339       this.configuration.put(e.getKey(), e.getValue());
340     }
341   }
342 
  /*
   * Set meta flags on this table.
   * IS_ROOT_KEY is set if its a -ROOT- table
   * IS_META_KEY is set either if its a -ROOT- or a hbase:meta table
   * Called by constructors.
   * @param name table name used to decide whether this is hbase:meta
   */
  private void setMetaFlags(final TableName name) {
    // A -ROOT- table is also a meta region, hence the OR.
    setMetaRegion(isRootRegion() ||
        name.equals(TableName.META_TABLE_NAME));
  }
354 
355   /**
356    * Check if the descriptor represents a <code> -ROOT- </code> region.
357    *
358    * @return true if this is a <code> -ROOT- </code> region
359    */
360   public boolean isRootRegion() {
361     if (this.root == null) {
362       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
363     }
364     return this.root.booleanValue();
365   }
366 
367   /**
368    * <em> INTERNAL </em> Used to denote if the current table represents
369    * <code> -ROOT- </code> region. This is used internally by the
370    * HTableDescriptor constructors
371    *
372    * @param isRoot true if this is the <code> -ROOT- </code> region
373    */
374   protected void setRootRegion(boolean isRoot) {
375     // TODO: Make the value a boolean rather than String of boolean.
376     setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
377   }
378 
379   /**
380    * Checks if this table is <code> hbase:meta </code>
381    * region.
382    *
383    * @return true if this table is <code> hbase:meta </code>
384    * region
385    */
386   public boolean isMetaRegion() {
387     if (this.meta == null) {
388       this.meta = calculateIsMetaRegion();
389     }
390     return this.meta.booleanValue();
391   }
392 
393   private synchronized Boolean calculateIsMetaRegion() {
394     byte [] value = getValue(IS_META_KEY);
395     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
396   }
397 
398   private boolean isSomething(final Bytes key,
399       final boolean valueIfNull) {
400     byte [] value = getValue(key);
401     if (value != null) {
402       return Boolean.valueOf(Bytes.toString(value));
403     }
404     return valueIfNull;
405   }
406 
407   /**
408    * <em> INTERNAL </em> Used to denote if the current table represents
409    * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
410    * internally by the HTableDescriptor constructors
411    *
412    * @param isMeta true if its either <code> -ROOT- </code> or
413    * <code> hbase:meta </code> region
414    */
415   protected void setMetaRegion(boolean isMeta) {
416     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
417   }
418 
  /**
   * Checks if the table is a <code>hbase:meta</code> table.
   * Excludes -ROOT-, which also reports itself as a meta region.
   *
   * @return true if table is <code> hbase:meta </code> region.
   */
  public boolean isMetaTable() {
    return isMetaRegion() && !isRootRegion();
  }
427 
428   /**
429    * Getter for accessing the metadata associated with the key
430    *
431    * @param key The key.
432    * @return The value.
433    * @see #values
434    */
435   public byte[] getValue(byte[] key) {
436     return getValue(new Bytes(key));
437   }
438 
439   private byte[] getValue(final Bytes key) {
440     Bytes ibw = values.get(key);
441     if (ibw == null)
442       return null;
443     return ibw.get();
444   }
445 
446   /**
447    * Getter for accessing the metadata associated with the key
448    *
449    * @param key The key.
450    * @return The value.
451    * @see #values
452    */
453   public String getValue(String key) {
454     byte[] value = getValue(Bytes.toBytes(key));
455     if (value == null)
456       return null;
457     return Bytes.toString(value);
458   }
459 
  /**
   * Getter for fetching an unmodifiable {@link #values} map.
   *
   * @return unmodifiable map {@link #values}. The returned map is a live
   * read-only view, so subsequent changes to this descriptor are visible
   * through it.
   * @see #values
   */
  public Map<Bytes, Bytes> getValues() {
    // shallow pointer copy: unmodifiableMap wraps the live map, no copy made
    return Collections.unmodifiableMap(values);
  }
470 
471   /**
472    * Setter for storing metadata as a (key, value) pair in {@link #values} map
473    *
474    * @param key The key.
475    * @param value The value.
476    * @see #values
477    */
478   public void setValue(byte[] key, byte[] value) {
479     setValue(new Bytes(key), new Bytes(value));
480   }
481 
482   /*
483    * @param key The key.
484    * @param value The value.
485    */
486   private void setValue(final Bytes key,
487       final String value) {
488     setValue(key, new Bytes(Bytes.toBytes(value)));
489   }
490 
491   /*
492    * Setter for storing metadata as a (key, value) pair in {@link #values} map
493    *
494    * @param key The key.
495    * @param value The value.
496    */
497   public void setValue(final Bytes key,
498       final Bytes value) {
499     if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
500       boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
501       LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
502           "use " + DURABILITY + " instead");
503       setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
504       return;
505     }
506     values.put(key, value);
507   }
508 
509   /**
510    * Setter for storing metadata as a (key, value) pair in {@link #values} map
511    *
512    * @param key The key.
513    * @param value The value.
514    * @see #values
515    */
516   public void setValue(String key, String value) {
517     if (value == null) {
518       remove(key);
519     } else {
520       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
521     }
522   }
523 
524   /**
525    * Remove metadata represented by the key from the {@link #values} map
526    *
527    * @param key Key whose key and value we're to remove from HTableDescriptor
528    * parameters.
529    */
530   public void remove(final String key) {
531     remove(new Bytes(Bytes.toBytes(key)));
532   }
533 
  /**
   * Remove metadata represented by the key from the {@link #values} map.
   * No-op when the key is absent.
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters.
   */
  public void remove(Bytes key) {
    values.remove(key);
  }
543 
544   /**
545    * Remove metadata represented by the key from the {@link #values} map
546    *
547    * @param key Key whose key and value we're to remove from HTableDescriptor
548    * parameters.
549    */
550   public void remove(final byte [] key) {
551     remove(new Bytes(key));
552   }
553 
  /**
   * Check if the readOnly flag of the table is set. If the readOnly flag is
   * set then the contents of the table can only be read from but not modified.
   *
   * @return true if all columns in the table should be read only;
   * defaults to {@link #DEFAULT_READONLY} when the flag is unset.
   */
  public boolean isReadOnly() {
    return isSomething(READONLY_KEY, DEFAULT_READONLY);
  }
563 
564   /**
565    * Setting the table as read only sets all the columns in the table as read
566    * only. By default all tables are modifiable, but if the readOnly flag is
567    * set to true then the contents of the table can only be read but not modified.
568    *
569    * @param readOnly True if all of the columns in the table should be read
570    * only.
571    */
572   public void setReadOnly(final boolean readOnly) {
573     setValue(READONLY_KEY, readOnly? TRUE: FALSE);
574   }
575 
  /**
   * Check if the compaction enable flag of the table is true. If flag is
   * false then no minor/major compactions will be done in real.
   *
   * @return true if table compaction enabled; defaults to
   * {@link #DEFAULT_COMPACTION_ENABLED} when the flag is unset.
   */
  public boolean isCompactionEnabled() {
    return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
  }
585 
586   /**
587    * Setting the table compaction enable flag.
588    *
589    * @param isEnable True if enable compaction.
590    */
591   public void setCompactionEnabled(final boolean isEnable) {
592     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
593   }
594 
  /**
   * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
   * Updates both the cached field and the persisted value under
   * {@link #DURABILITY_KEY} so the two stay in sync.
   * @param durability enum value
   */
  public void setDurability(Durability durability) {
    this.durability = durability;
    setValue(DURABILITY_KEY, durability.name());
  }
603 
  /**
   * Returns the durability setting for the table, lazily decoding and caching
   * the value stored under {@link #DURABILITY_KEY}. An absent or unparseable
   * value falls back to {@link #DEFAULT_DURABLITY} (unparseable values also
   * log a warning).
   *
   * NOTE(review): the {@code durability} cache field is not volatile, so
   * concurrent first calls may each decode the value independently — harmless
   * here since every decode yields the same result, but confirm if the field
   * ever becomes mutable from multiple threads.
   *
   * @return durability setting for the table.
   */
  public Durability getDurability() {
    if (this.durability == null) {
      byte[] durabilityValue = getValue(DURABILITY_KEY);
      if (durabilityValue == null) {
        this.durability = DEFAULT_DURABLITY;
      } else {
        try {
          this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
        } catch (IllegalArgumentException ex) {
          // Unknown enum name: warn and fall back rather than propagate.
          LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
            + " is not known. Durability:" + Bytes.toString(durabilityValue));
          this.durability = DEFAULT_DURABLITY;
        }
      }
    }
    return this.durability;
  }
625 
  /**
   * Get the name of the table
   *
   * @return TableName for this descriptor (may be null if the deprecated
   * no-arg constructor was used and no name was ever set)
   */
  public TableName getTableName() {
    return name;
  }
634 
  /**
   * Get the name of the table as a byte array.
   *
   * @return name of table
   */
  public byte[] getName() {
    return name.getName();
  }
643 
  /**
   * Get the name of the table as a String
   *
   * @return name of table as a String
   */
  public String getNameAsString() {
    return name.getNameAsString();
  }
652 
  /**
   * This sets the class associated with the region split policy which
   * determines when a region split should occur.  The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
   * @param clazz the class name
   */
  public void setRegionSplitPolicyClassName(String clazz) {
    // Stored as an ordinary string attribute under the SPLIT_POLICY key.
    setValue(SPLIT_POLICY, clazz);
  }
662 
663   /**
664    * This gets the class associated with the region split policy which
665    * determines when a region split should occur.  The class used by
666    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
667    *
668    * @return the class name of the region split policy for this table.
669    * If this returns null, the default split policy is used.
670    */
671    public String getRegionSplitPolicyClassName() {
672     return getValue(SPLIT_POLICY);
673   }
674 
  /**
   * Set the name of the table.
   *
   * @param name name of table
   * @deprecated Prefer supplying the name at construction time via
   * {@link #HTableDescriptor(TableName)}.
   */
  @Deprecated
  public void setName(byte[] name) {
    setName(TableName.valueOf(name));
  }
684 
  /**
   * Set the name of the table and refresh the cached IS_ROOT/IS_META flags.
   *
   * @param name name of table
   * @deprecated Prefer supplying the name at construction time via
   * {@link #HTableDescriptor(TableName)}.
   */
  @Deprecated
  public void setName(TableName name) {
    this.name = name;
    // Meta flags depend on the name, so recompute them on every rename.
    setMetaFlags(this.name);
  }
690 
691   /**
692    * Returns the maximum size upto which a region can grow to after which a region
693    * split is triggered. The region size is represented by the size of the biggest
694    * store file in that region.
695    *
696    * @return max hregion size for table, -1 if not set.
697    *
698    * @see #setMaxFileSize(long)
699    */
700   public long getMaxFileSize() {
701     byte [] value = getValue(MAX_FILESIZE_KEY);
702     if (value != null) {
703       return Long.parseLong(Bytes.toString(value));
704     }
705     return -1;
706   }
707 
  /**
   * Sets the maximum size upto which a region can grow to after which a region
   * split is triggered. The region size is represented by the size of the biggest
   * store file in that region, i.e. If the biggest store file grows beyond the
   * maxFileSize, then the region split is triggered. This defaults to a value of
   * 256 MB.
   * <p>
   * This is not an absolute value and might vary. Assume that a single row exceeds
   * the maxFileSize then the storeFileSize will be greater than maxFileSize since
   * a single row cannot be split across multiple regions
   * </p>
   *
   * @param maxFileSize The maximum file size that a store file can grow to
   * before a split is triggered.
   */
  public void setMaxFileSize(long maxFileSize) {
    // Persisted as a decimal string; getMaxFileSize() parses it back.
    setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
  }
726 
727   /**
728    * Returns the size of the memstore after which a flush to filesystem is triggered.
729    *
730    * @return memory cache flush size for each hregion, -1 if not set.
731    *
732    * @see #setMemStoreFlushSize(long)
733    */
734   public long getMemStoreFlushSize() {
735     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
736     if (value != null) {
737       return Long.parseLong(Bytes.toString(value));
738     }
739     return -1;
740   }
741 
  /**
   * Represents the maximum size of the memstore after which the contents of the
   * memstore are flushed to the filesystem. This defaults to a size of 64 MB.
   *
   * @param memstoreFlushSize memory cache flush size for each hregion
   */
  public void setMemStoreFlushSize(long memstoreFlushSize) {
    // Persisted as a decimal string; getMemStoreFlushSize() parses it back.
    setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
  }
751 
  /**
   * Adds a column family, replacing any existing family with the same name.
   * @param family HColumnDescriptor of family to add.
   * @throws NullPointerException if the family name is null or empty
   * (kept as NPE rather than IllegalArgumentException for compatibility
   * with existing callers).
   */
  public void addFamily(final HColumnDescriptor family) {
    if (family.getName() == null || family.getName().length <= 0) {
      throw new NullPointerException("Family name cannot be null or empty");
    }
    this.families.put(family.getName(), family);
  }
762 
  /**
   * Checks to see if this table contains the given column family.
   * The families map compares keys byte-wise (Bytes.BYTES_RAWCOMPARATOR),
   * so the lookup works for any byte[] with matching contents.
   * @param familyName Family name or column name.
   * @return true if the table contains the specified family name
   */
  public boolean hasFamily(final byte [] familyName) {
    return families.containsKey(familyName);
  }
771 
772   /**
773    * @return Name of this table and then a map of all of the column family
774    * descriptors.
775    * @see #getNameAsString()
776    */
777   @Override
778   public String toString() {
779     StringBuilder s = new StringBuilder();
780     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
781     s.append(getValues(true));
782     for (HColumnDescriptor f : families.values()) {
783       s.append(", ").append(f);
784     }
785     return s.toString();
786   }
787 
788   /**
789    * @return Name of this table and then a map of all of the column family
790    * descriptors (with only the non-default column family attributes)
791    */
792   public String toStringCustomizedValues() {
793     StringBuilder s = new StringBuilder();
794     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
795     s.append(getValues(false));
796     for(HColumnDescriptor hcd : families.values()) {
797       s.append(", ").append(hcd.toStringCustomizedValues());
798     }
799     return s.toString();
800   }
801 
802   private StringBuilder getValues(boolean printDefaults) {
803     StringBuilder s = new StringBuilder();
804 
805     // step 1: set partitioning and pruning
806     Set<Bytes> reservedKeys = new TreeSet<Bytes>();
807     Set<Bytes> userKeys = new TreeSet<Bytes>();
808     for (Bytes k : values.keySet()) {
809       if (k == null || k.get() == null) continue;
810       String key = Bytes.toString(k.get());
811       // in this section, print out reserved keywords + coprocessor info
812       if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
813         userKeys.add(k);
814         continue;
815       }
816       // only print out IS_ROOT/IS_META if true
817       String value = Bytes.toString(values.get(k).get());
818       if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
819         if (Boolean.valueOf(value) == false) continue;
820       }
821       // see if a reserved key is a default value. may not want to print it out
822       if (printDefaults
823           || !DEFAULT_VALUES.containsKey(key)
824           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
825         reservedKeys.add(k);
826       }
827     }
828 
829     // early exit optimization
830     boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
831     if (!hasAttributes && configuration.isEmpty()) return s;
832 
833     s.append(", {");
834     // step 2: printing attributes
835     if (hasAttributes) {
836       s.append("TABLE_ATTRIBUTES => {");
837 
838       // print all reserved keys first
839       boolean printCommaForAttr = false;
840       for (Bytes k : reservedKeys) {
841         String key = Bytes.toString(k.get());
842         String value = Bytes.toStringBinary(values.get(k).get());
843         if (printCommaForAttr) s.append(", ");
844         printCommaForAttr = true;
845         s.append(key);
846         s.append(" => ");
847         s.append('\'').append(value).append('\'');
848       }
849 
850       if (!userKeys.isEmpty()) {
851         // print all non-reserved, advanced config keys as a separate subset
852         if (printCommaForAttr) s.append(", ");
853         printCommaForAttr = true;
854         s.append(HConstants.METADATA).append(" => ");
855         s.append("{");
856         boolean printCommaForCfg = false;
857         for (Bytes k : userKeys) {
858           String key = Bytes.toString(k.get());
859           String value = Bytes.toStringBinary(values.get(k).get());
860           if (printCommaForCfg) s.append(", ");
861           printCommaForCfg = true;
862           s.append('\'').append(key).append('\'');
863           s.append(" => ");
864           s.append('\'').append(value).append('\'');
865         }
866         s.append("}");
867       }
868     }
869 
870     // step 3: printing all configuration:
871     if (!configuration.isEmpty()) {
872       if (hasAttributes) {
873         s.append(", ");
874       }
875       s.append(HConstants.CONFIGURATION).append(" => ");
876       s.append('{');
877       boolean printCommaForConfig = false;
878       for (Map.Entry<String, String> e : configuration.entrySet()) {
879         if (printCommaForConfig) s.append(", ");
880         printCommaForConfig = true;
881         s.append('\'').append(e.getKey()).append('\'');
882         s.append(" => ");
883         s.append('\'').append(e.getValue()).append('\'');
884       }
885       s.append("}");
886     }
887     s.append("}"); // end METHOD
888     return s;
889   }
890 
891   /**
892    * Compare the contents of the descriptor with another one passed as a parameter.
893    * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
894    * contents of the descriptors are compared.
895    *
896    * @return true if the contents of the the two descriptors exactly match
897    *
898    * @see java.lang.Object#equals(java.lang.Object)
899    */
900   @Override
901   public boolean equals(Object obj) {
902     if (this == obj) {
903       return true;
904     }
905     if (obj == null) {
906       return false;
907     }
908     if (!(obj instanceof HTableDescriptor)) {
909       return false;
910     }
911     return compareTo((HTableDescriptor)obj) == 0;
912   }
913 
914   /**
915    * @see java.lang.Object#hashCode()
916    */
917   @Override
918   public int hashCode() {
919     int result = this.name.hashCode();
920     if (this.families.size() > 0) {
921       for (HColumnDescriptor e: this.families.values()) {
922         result ^= e.hashCode();
923       }
924     }
925     result ^= values.hashCode();
926     result ^= configuration.hashCode();
927     return result;
928   }
929 
930   // Comparable
931 
932   /**
933    * Compares the descriptor with another descriptor which is passed as a parameter.
934    * This compares the content of the two descriptors and not the reference.
935    *
936    * @return 0 if the contents of the descriptors are exactly matching,
937    * 		 1 if there is a mismatch in the contents
938    */
939   @Override
940   public int compareTo(@Nonnull final HTableDescriptor other) {
941     int result = this.name.compareTo(other.name);
942     if (result == 0) {
943       result = families.size() - other.families.size();
944     }
945     if (result == 0 && families.size() != other.families.size()) {
946       result = Integer.valueOf(families.size()).compareTo(
947           Integer.valueOf(other.families.size()));
948     }
949     if (result == 0) {
950       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
951           it2 = other.families.values().iterator(); it.hasNext(); ) {
952         result = it.next().compareTo(it2.next());
953         if (result != 0) {
954           break;
955         }
956       }
957     }
958     if (result == 0) {
959       // punt on comparison for ordering, just calculate difference
960       result = this.values.hashCode() - other.values.hashCode();
961       if (result < 0)
962         result = -1;
963       else if (result > 0)
964         result = 1;
965     }
966     if (result == 0) {
967       result = this.configuration.hashCode() - other.configuration.hashCode();
968       if (result < 0)
969         result = -1;
970       else if (result > 0)
971         result = 1;
972     }
973     return result;
974   }
975 
976   /**
977    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
978    * of all the column families of the table.
979    *
980    * @return Immutable collection of {@link HColumnDescriptor} of all the
981    * column families.
982    */
983   public Collection<HColumnDescriptor> getFamilies() {
984     return Collections.unmodifiableCollection(this.families.values());
985   }
986 
987   /**
988    * Returns the configured replicas per region
989    */
990   public int getRegionReplication() {
991     byte[] val = getValue(REGION_REPLICATION_KEY);
992     if (val == null || val.length == 0) {
993       return DEFAULT_REGION_REPLICATION;
994     }
995     return Integer.parseInt(Bytes.toString(val));
996   }
997 
998   /**
999    * Sets the number of replicas per region.
1000    * @param regionReplication the replication factor per region
1001    */
1002   public void setRegionReplication(int regionReplication) {
1003     setValue(REGION_REPLICATION_KEY,
1004         new Bytes(Bytes.toBytes(Integer.toString(regionReplication))));
1005   }
1006 
1007   /**
1008    * Returns all the column family names of the current table. The map of
1009    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1010    * This returns all the keys of the family map which represents the column
1011    * family names of the table.
1012    *
1013    * @return Immutable sorted set of the keys of the families.
1014    */
1015   public Set<byte[]> getFamiliesKeys() {
1016     return Collections.unmodifiableSet(this.families.keySet());
1017   }
1018 
1019   /**
1020    * Returns an array all the {@link HColumnDescriptor} of the column families
1021    * of the table.
1022    *
1023    * @return Array of all the HColumnDescriptors of the current table
1024    *
1025    * @see #getFamilies()
1026    */
1027   public HColumnDescriptor[] getColumnFamilies() {
1028     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1029     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1030   }
1031 
1032 
1033   /**
1034    * Returns the HColumnDescriptor for a specific column family with name as
1035    * specified by the parameter column.
1036    *
1037    * @param column Column family name
1038    * @return Column descriptor for the passed family name or the family on
1039    * passed in column.
1040    */
1041   public HColumnDescriptor getFamily(final byte [] column) {
1042     return this.families.get(column);
1043   }
1044 
1045 
1046   /**
1047    * Removes the HColumnDescriptor with name specified by the parameter column
1048    * from the table descriptor
1049    *
1050    * @param column Name of the column family to be removed.
1051    * @return Column descriptor for the passed family name or the family on
1052    * passed in column.
1053    */
1054   public HColumnDescriptor removeFamily(final byte [] column) {
1055     return this.families.remove(column);
1056   }
1057 
1058 
1059   /**
1060    * Add a table coprocessor to this table. The coprocessor
1061    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1062    * or Endpoint.
1063    * It won't check if the class can be loaded or not.
1064    * Whether a coprocessor is loadable or not will be determined when
1065    * a region is opened.
1066    * @param className Full class name.
1067    * @throws IOException
1068    */
1069   public void addCoprocessor(String className) throws IOException {
1070     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1071   }
1072 
1073 
1074   /**
1075    * Add a table coprocessor to this table. The coprocessor
1076    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1077    * or Endpoint.
1078    * It won't check if the class can be loaded or not.
1079    * Whether a coprocessor is loadable or not will be determined when
1080    * a region is opened.
1081    * @param jarFilePath Path of the jar file. If it's null, the class will be
1082    * loaded from default classloader.
1083    * @param className Full class name.
1084    * @param priority Priority
1085    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1086    * @throws IOException
1087    */
1088   public void addCoprocessor(String className, Path jarFilePath,
1089                              int priority, final Map<String, String> kvs)
1090   throws IOException {
1091     if (hasCoprocessor(className)) {
1092       throw new IOException("Coprocessor " + className + " already exists.");
1093     }
1094     // validate parameter kvs
1095     StringBuilder kvString = new StringBuilder();
1096     if (kvs != null) {
1097       for (Map.Entry<String, String> e: kvs.entrySet()) {
1098         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1099           throw new IOException("Illegal parameter key = " + e.getKey());
1100         }
1101         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1102           throw new IOException("Illegal parameter (" + e.getKey() +
1103               ") value = " + e.getValue());
1104         }
1105         if (kvString.length() != 0) {
1106           kvString.append(',');
1107         }
1108         kvString.append(e.getKey());
1109         kvString.append('=');
1110         kvString.append(e.getValue());
1111       }
1112     }
1113 
1114     // generate a coprocessor key
1115     int maxCoprocessorNumber = 0;
1116     Matcher keyMatcher;
1117     for (Map.Entry<Bytes, Bytes> e :
1118         this.values.entrySet()) {
1119       keyMatcher =
1120           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1121               Bytes.toString(e.getKey().get()));
1122       if (!keyMatcher.matches()) {
1123         continue;
1124       }
1125       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)),
1126           maxCoprocessorNumber);
1127     }
1128     maxCoprocessorNumber++;
1129 
1130     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1131     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1132         "|" + className + "|" + Integer.toString(priority) + "|" +
1133         kvString.toString();
1134     setValue(key, value);
1135   }
1136 
1137 
1138   /**
1139    * Check if the table has an attached co-processor represented by the name className
1140    *
1141    * @param className - Class name of the co-processor
1142    * @return true of the table has a co-processor className
1143    */
1144   public boolean hasCoprocessor(String className) {
1145     Matcher keyMatcher;
1146     Matcher valueMatcher;
1147     for (Map.Entry<Bytes, Bytes> e :
1148         this.values.entrySet()) {
1149       keyMatcher =
1150           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1151               Bytes.toString(e.getKey().get()));
1152       if (!keyMatcher.matches()) {
1153         continue;
1154       }
1155       valueMatcher =
1156         HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(
1157             Bytes.toString(e.getValue().get()));
1158       if (!valueMatcher.matches()) {
1159         continue;
1160       }
1161       // get className and compare
1162       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1163       if (clazz.equals(className.trim())) {
1164         return true;
1165       }
1166     }
1167     return false;
1168   }
1169 
1170   /**
1171    * Return the list of attached co-processor represented by their name className
1172    *
1173    * @return The list of co-processors classNames
1174    */
1175   public List<String> getCoprocessors() {
1176     List<String> result = new ArrayList<String>();
1177     Matcher keyMatcher;
1178     Matcher valueMatcher;
1179     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1180       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1181       if (!keyMatcher.matches()) {
1182         continue;
1183       }
1184       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1185           .toString(e.getValue().get()));
1186       if (!valueMatcher.matches()) {
1187         continue;
1188       }
1189       result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
1190     }
1191     return result;
1192   }
1193 
1194   /**
1195    * Remove a coprocessor from those set on the table
1196    * @param className Class name of the co-processor
1197    */
1198   public void removeCoprocessor(String className) {
1199     Bytes match = null;
1200     Matcher keyMatcher;
1201     Matcher valueMatcher;
1202     for (Map.Entry<Bytes, Bytes> e : this.values
1203         .entrySet()) {
1204       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1205           .getKey().get()));
1206       if (!keyMatcher.matches()) {
1207         continue;
1208       }
1209       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1210           .toString(e.getValue().get()));
1211       if (!valueMatcher.matches()) {
1212         continue;
1213       }
1214       // get className and compare
1215       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1216       // remove the CP if it is present
1217       if (clazz.equals(className.trim())) {
1218         match = e.getKey();
1219         break;
1220       }
1221     }
1222     // if we found a match, remove it
1223     if (match != null)
1224       remove(match);
1225   }
1226 
1227   /**
1228    * Returns the {@link Path} object representing the table directory under
1229    * path rootdir
1230    *
1231    * Deprecated use FSUtils.getTableDir() instead.
1232    *
1233    * @param rootdir qualified path of HBase root directory
1234    * @param tableName name of table
1235    * @return {@link Path} for table
1236    */
1237   @Deprecated
1238   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1239     //This is bad I had to mirror code from FSUTils.getTableDir since
1240     //there is no module dependency between hbase-client and hbase-server
1241     TableName name = TableName.valueOf(tableName);
1242     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1243               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1244   }
1245 
  /** Table descriptor for <code>hbase:meta</code> catalog table */
  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
      TableName.META_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });

  // Attach the multi-row-mutation endpoint to hbase:meta at class-load time.
  static {
    try {
      META_TABLEDESC.addCoprocessor(
          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
          null, Coprocessor.PRIORITY_SYSTEM, null);
    } catch (IOException ex) {
      // Fail class initialization loudly: a meta descriptor without this
      // coprocessor would be silently broken.
      throw new RuntimeException(ex);
    }
  }
1273 
  /** Name of the single column family in the namespace table. */
  public final static String NAMESPACE_FAMILY_INFO = "info";
  /** {@link #NAMESPACE_FAMILY_INFO} as bytes. */
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  /** Qualifier under which the serialized namespace descriptor is stored. */
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1277 
  /** Table descriptor for namespace table */
  public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
      TableName.NAMESPACE_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });
1292 
1293   @Deprecated
1294   public void setOwner(User owner) {
1295     setOwnerString(owner != null ? owner.getShortName() : null);
1296   }
1297 
1298   // used by admin.rb:alter(table_name,*args) to update owner.
1299   @Deprecated
1300   public void setOwnerString(String ownerString) {
1301     if (ownerString != null) {
1302       setValue(OWNER_KEY, ownerString);
1303     } else {
1304       remove(OWNER_KEY);
1305     }
1306   }
1307 
1308   @Deprecated
1309   public String getOwnerString() {
1310     if (getValue(OWNER_KEY) != null) {
1311       return Bytes.toString(getValue(OWNER_KEY));
1312     }
1313     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1314     // hbase:meta and -ROOT- should return system user as owner, not null (see
1315     // MasterFileSystem.java:bootstrap()).
1316     return null;
1317   }
1318 
1319   /**
1320    * @return This instance serialized with pb with pb magic prefix
1321    * @see #parseFrom(byte[])
1322    */
1323   public byte [] toByteArray() {
1324     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1325   }
1326 
1327   /**
1328    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1329    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1330    * @throws DeserializationException
1331    * @throws IOException
1332    * @see #toByteArray()
1333    */
1334   public static HTableDescriptor parseFrom(final byte [] bytes)
1335   throws DeserializationException, IOException {
1336     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1337       throw new DeserializationException("Expected PB encoded HTableDescriptor");
1338     }
1339     int pblen = ProtobufUtil.lengthOfPBMagic();
1340     TableSchema.Builder builder = TableSchema.newBuilder();
1341     TableSchema ts;
1342     try {
1343       ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1344     } catch (InvalidProtocolBufferException e) {
1345       throw new DeserializationException(e);
1346     }
1347     return convert(ts);
1348   }
1349 
1350   /**
1351    * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
1352    */
1353   public TableSchema convert() {
1354     TableSchema.Builder builder = TableSchema.newBuilder();
1355     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1356     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1357       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1358       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1359       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1360       builder.addAttributes(aBuilder.build());
1361     }
1362     for (HColumnDescriptor hcd: getColumnFamilies()) {
1363       builder.addColumnFamilies(hcd.convert());
1364     }
1365     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1366       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1367       aBuilder.setName(e.getKey());
1368       aBuilder.setValue(e.getValue());
1369       builder.addConfiguration(aBuilder.build());
1370     }
1371     return builder.build();
1372   }
1373 
1374   /**
1375    * @param ts A pb TableSchema instance.
1376    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1377    */
1378   public static HTableDescriptor convert(final TableSchema ts) {
1379     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1380     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1381     int index = 0;
1382     for (ColumnFamilySchema cfs: list) {
1383       hcds[index++] = HColumnDescriptor.convert(cfs);
1384     }
1385     HTableDescriptor htd = new HTableDescriptor(
1386         ProtobufUtil.toTableName(ts.getTableName()),
1387         hcds);
1388     for (BytesBytesPair a: ts.getAttributesList()) {
1389       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1390     }
1391     for (NameStringPair a: ts.getConfigurationList()) {
1392       htd.setConfiguration(a.getName(), a.getValue());
1393     }
1394     return htd;
1395   }
1396 
1397   /**
1398    * Getter for accessing the configuration value by key
1399    */
1400   public String getConfigurationValue(String key) {
1401     return configuration.get(key);
1402   }
1403 
1404   /**
1405    * Getter for fetching an unmodifiable {@link #configuration} map.
1406    */
1407   public Map<String, String> getConfiguration() {
1408     // shallow pointer copy
1409     return Collections.unmodifiableMap(configuration);
1410   }
1411 
1412   /**
1413    * Setter for storing a configuration setting in {@link #configuration} map.
1414    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1415    * @param value String value. If null, removes the setting.
1416    */
1417   public void setConfiguration(String key, String value) {
1418     if (value == null) {
1419       removeConfiguration(key);
1420     } else {
1421       configuration.put(key, value);
1422     }
1423   }
1424 
1425   /**
1426    * Remove a config setting represented by the key from the {@link #configuration} map
1427    */
1428   public void removeConfiguration(final String key) {
1429     configuration.remove(key);
1430   }
1431 }