View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.IOException;
22  import java.util.ArrayList;
23  import java.util.Collection;
24  import java.util.Collections;
25  import java.util.HashMap;
26  import java.util.HashSet;
27  import java.util.Iterator;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.Set;
31  import java.util.TreeMap;
32  import java.util.TreeSet;
33  import java.util.regex.Matcher;
34  
35  import javax.annotation.Nonnull;
36  
37  import org.apache.commons.logging.Log;
38  import org.apache.commons.logging.LogFactory;
39  import org.apache.hadoop.fs.Path;
40  import org.apache.hadoop.hbase.classification.InterfaceAudience;
41  import org.apache.hadoop.hbase.classification.InterfaceStability;
42  import org.apache.hadoop.hbase.client.Durability;
43  import org.apache.hadoop.hbase.exceptions.DeserializationException;
44  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
45  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
46  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
47  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
48  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
49  import org.apache.hadoop.hbase.regionserver.BloomType;
50  import org.apache.hadoop.hbase.security.User;
51  import org.apache.hadoop.hbase.util.ByteStringer;
52  import org.apache.hadoop.hbase.util.Bytes;
53  
54  import com.google.protobuf.InvalidProtocolBufferException;
55  
56  /**
57   * HTableDescriptor contains the details about an HBase table  such as the descriptors of
58   * all the column families, is the table a catalog table, <code> -ROOT- </code> or
59   * <code> hbase:meta </code>, if the table is read only, the maximum size of the memstore,
60   * when the region split should occur, coprocessors associated with it etc...
61   */
62  @InterfaceAudience.Public
63  @InterfaceStability.Evolving
64  public class HTableDescriptor implements Comparable<HTableDescriptor> {
65  
66    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
67  
68    private TableName name = null;
69  
70    /**
71     * A map which holds the metadata information of the table. This metadata
72     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
73     * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
74     */
75    private final Map<Bytes, Bytes> values =
76        new HashMap<Bytes, Bytes>();
77  
78    /**
79     * A map which holds the configuration specific to the table.
80     * The keys of the map have the same names as config keys and override the defaults with
81     * table-specific settings. Example usage may be for compactions, etc.
82     */
83    private final Map<String, String> configuration = new HashMap<String, String>();
84  
85    public static final String SPLIT_POLICY = "SPLIT_POLICY";
86  
87    /**
88     * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
89     * attribute which denotes the maximum size of the store file after which
90     * a region split occurs
91     *
92     * @see #getMaxFileSize()
93     */
94    public static final String MAX_FILESIZE = "MAX_FILESIZE";
95    private static final Bytes MAX_FILESIZE_KEY =
96        new Bytes(Bytes.toBytes(MAX_FILESIZE));
97  
98    public static final String OWNER = "OWNER";
99    public static final Bytes OWNER_KEY =
100       new Bytes(Bytes.toBytes(OWNER));
101 
102   /**
103    * <em>INTERNAL</em> Used by rest interface to access this metadata
104    * attribute which denotes if the table is Read Only
105    *
106    * @see #isReadOnly()
107    */
108   public static final String READONLY = "READONLY";
109   private static final Bytes READONLY_KEY =
110       new Bytes(Bytes.toBytes(READONLY));
111 
112   /**
113    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
114    * attribute which denotes if the table is compaction enabled
115    *
116    * @see #isCompactionEnabled()
117    */
118   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
119   private static final Bytes COMPACTION_ENABLED_KEY =
120       new Bytes(Bytes.toBytes(COMPACTION_ENABLED));
121 
122   /**
123    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
124    * attribute which represents the maximum size of the memstore after which
125    * its contents are flushed onto the disk
126    *
127    * @see #getMemStoreFlushSize()
128    */
129   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
130   private static final Bytes MEMSTORE_FLUSHSIZE_KEY =
131       new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
132 
133   /**
134    * <em>INTERNAL</em> Used by rest interface to access this metadata
135    * attribute which denotes if the table is a -ROOT- region or not
136    *
137    * @see #isRootRegion()
138    */
139   public static final String IS_ROOT = "IS_ROOT";
140   private static final Bytes IS_ROOT_KEY =
141       new Bytes(Bytes.toBytes(IS_ROOT));
142 
143   /**
144    * <em>INTERNAL</em> Used by rest interface to access this metadata
145    * attribute which denotes if it is a catalog table, either
146    * <code> hbase:meta </code> or <code> -ROOT- </code>
147    *
148    * @see #isMetaRegion()
149    */
150   public static final String IS_META = "IS_META";
151   private static final Bytes IS_META_KEY =
152       new Bytes(Bytes.toBytes(IS_META));
153 
154   /**
155    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
156    * attribute which denotes if the deferred log flush option is enabled.
157    * @deprecated Use {@link #DURABILITY} instead.
158    */
159   @Deprecated
160   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
161   @Deprecated
162   private static final Bytes DEFERRED_LOG_FLUSH_KEY =
163       new Bytes(Bytes.toBytes(DEFERRED_LOG_FLUSH));
164 
165   /**
166    * <em>INTERNAL</em> {@link Durability} setting for the table.
167    */
168   public static final String DURABILITY = "DURABILITY";
169   private static final Bytes DURABILITY_KEY =
170       new Bytes(Bytes.toBytes("DURABILITY"));
171 
172   /**
173    * <em>INTERNAL</em> number of region replicas for the table.
174    */
175   public static final String REGION_REPLICATION = "REGION_REPLICATION";
176   private static final Bytes REGION_REPLICATION_KEY =
177       new Bytes(Bytes.toBytes(REGION_REPLICATION));
178 
179   /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
180   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
181 
182   /*
183    *  The below are ugly but better than creating them each time till we
184    *  replace booleans being saved as Strings with plain booleans.  Need a
185    *  migration script to do this.  TODO.
186    */
187   private static final Bytes FALSE =
188       new Bytes(Bytes.toBytes(Boolean.FALSE.toString()));
189 
190   private static final Bytes TRUE =
191       new Bytes(Bytes.toBytes(Boolean.TRUE.toString()));
192 
193   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
194 
195   /**
196    * Constant that denotes whether the table is READONLY by default and is false
197    */
198   public static final boolean DEFAULT_READONLY = false;
199 
200   /**
201    * Constant that denotes whether the table is compaction enabled by default
202    */
203   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
204 
205   /**
206    * Constant that denotes the maximum default size of the memstore after which
207    * the contents are flushed to the store files
208    */
209   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
210 
211   public static final int DEFAULT_REGION_REPLICATION = 1;
212 
213   private final static Map<String, String> DEFAULT_VALUES
214     = new HashMap<String, String>();
215   private final static Set<Bytes> RESERVED_KEYWORDS
216       = new HashSet<Bytes>();
217 
218   static {
219     DEFAULT_VALUES.put(MAX_FILESIZE,
220         String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
221     DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
222     DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
223         String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
224     DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
225         String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
226     DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
227     DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
228     for (String s : DEFAULT_VALUES.keySet()) {
229       RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
230     }
231     RESERVED_KEYWORDS.add(IS_ROOT_KEY);
232     RESERVED_KEYWORDS.add(IS_META_KEY);
233   }
234 
235   /**
236    * Cache of whether this is a meta table or not.
237    */
238   private volatile Boolean meta = null;
239   /**
240    * Cache of whether this is root table or not.
241    */
242   private volatile Boolean root = null;
243 
244   /**
245    * Durability setting for the table
246    */
247   private Durability durability = null;
248 
249   /**
250    * Maps column family name to the respective HColumnDescriptors
251    */
252   private final Map<byte [], HColumnDescriptor> families =
253     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
254 
  /**
   * <em> INTERNAL </em> Private constructor used internally creating table descriptors for
   * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
   *
   * @param name table name; setName() also derives the meta flags from it
   * @param families column family descriptors to register, keyed by family name
   */
  @InterfaceAudience.Private
  protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
    setName(name);
    for(HColumnDescriptor descriptor : families) {
      this.families.put(descriptor.getName(), descriptor);
    }
  }

  /**
   * <em> INTERNAL </em>Private constructor used internally creating table descriptors for
   * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
   *
   * @param name table name
   * @param families column family descriptors to register
   * @param values metadata entries; routed through setValue() so deprecated
   * keys (e.g. DEFERRED_LOG_FLUSH) are translated rather than stored verbatim
   */
  protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
      Map<Bytes, Bytes> values) {
    setName(name);
    for(HColumnDescriptor descriptor : families) {
      this.families.put(descriptor.getName(), descriptor);
    }
    for (Map.Entry<Bytes, Bytes> entry :
        values.entrySet()) {
      setValue(entry.getKey(), entry.getValue());
    }
  }

  /**
   * Default constructor which constructs an empty object.
   * For deserializing an HTableDescriptor instance only.
   * Note: leaves {@link #name} null until it is filled in.
   * @deprecated Used by Writables and Writables are going away.
   */
  @Deprecated
  public HTableDescriptor() {
    super();
  }

  /**
   * Construct a table descriptor specifying a TableName object
   * @param name Table name.
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  public HTableDescriptor(final TableName name) {
    super();
    setName(name);
  }

  /**
   * Construct a table descriptor specifying a byte array table name
   * @param name Table name.
   * @deprecated use {@link #HTableDescriptor(TableName)} instead
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final byte[] name) {
    this(TableName.valueOf(name));
  }

  /**
   * Construct a table descriptor specifying a String table name
   * @param name Table name.
   * @deprecated use {@link #HTableDescriptor(TableName)} instead
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final String name) {
    this(TableName.valueOf(name));
  }
322 
323   /**
324    * Construct a table descriptor by cloning the descriptor passed as a parameter.
325    * <p>
326    * Makes a deep copy of the supplied descriptor.
327    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
328    * @param desc The descriptor.
329    */
330   public HTableDescriptor(final HTableDescriptor desc) {
331     super();
332     setName(desc.name);
333     setMetaFlags(this.name);
334     for (HColumnDescriptor c: desc.families.values()) {
335       this.families.put(c.getName(), new HColumnDescriptor(c));
336     }
337     for (Map.Entry<Bytes, Bytes> e :
338         desc.values.entrySet()) {
339       setValue(e.getKey(), e.getValue());
340     }
341     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
342       this.configuration.put(e.getKey(), e.getValue());
343     }
344   }
345 
  /*
   * Set meta flags on this table.
   * IS_ROOT_KEY is set if its a -ROOT- table
   * IS_META_KEY is set either if its a -ROOT- or a hbase:meta table
   * Called by constructors.
   * @param name table name used to decide whether this is hbase:meta
   */
  private void setMetaFlags(final TableName name) {
    // A -ROOT- descriptor is treated as a meta region as well.
    setMetaRegion(isRootRegion() ||
        name.equals(TableName.META_TABLE_NAME));
  }

  /**
   * Check if the descriptor represents a <code> -ROOT- </code> region.
   *
   * @return true if this is a <code> -ROOT- </code> region
   */
  public boolean isRootRegion() {
    // Lazily derived from the IS_ROOT metadata entry and cached in a
    // volatile field; a missing entry means "not root".
    if (this.root == null) {
      this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
    }
    return this.root.booleanValue();
  }

  /**
   * <em> INTERNAL </em> Used to denote if the current table represents
   * <code> -ROOT- </code> region. This is used internally by the
   * HTableDescriptor constructors
   *
   * @param isRoot true if this is the <code> -ROOT- </code> region
   */
  protected void setRootRegion(boolean isRoot) {
    // TODO: Make the value a boolean rather than String of boolean.
    setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
  }
381 
382   /**
383    * Checks if this table is <code> hbase:meta </code>
384    * region.
385    *
386    * @return true if this table is <code> hbase:meta </code>
387    * region
388    */
389   public boolean isMetaRegion() {
390     if (this.meta == null) {
391       this.meta = calculateIsMetaRegion();
392     }
393     return this.meta.booleanValue();
394   }
395 
396   private synchronized Boolean calculateIsMetaRegion() {
397     byte [] value = getValue(IS_META_KEY);
398     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
399   }
400 
401   private boolean isSomething(final Bytes key,
402       final boolean valueIfNull) {
403     byte [] value = getValue(key);
404     if (value != null) {
405       return Boolean.valueOf(Bytes.toString(value));
406     }
407     return valueIfNull;
408   }
409 
410   /**
411    * <em> INTERNAL </em> Used to denote if the current table represents
412    * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
413    * internally by the HTableDescriptor constructors
414    *
415    * @param isMeta true if its either <code> -ROOT- </code> or
416    * <code> hbase:meta </code> region
417    */
418   protected void setMetaRegion(boolean isMeta) {
419     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
420   }
421 
422   /**
423    * Checks if the table is a <code>hbase:meta</code> table
424    *
425    * @return true if table is <code> hbase:meta </code> region.
426    */
427   public boolean isMetaTable() {
428     return isMetaRegion() && !isRootRegion();
429   }
430 
431   /**
432    * Getter for accessing the metadata associated with the key
433    *
434    * @param key The key.
435    * @return The value.
436    * @see #values
437    */
438   public byte[] getValue(byte[] key) {
439     return getValue(new Bytes(key));
440   }
441 
442   private byte[] getValue(final Bytes key) {
443     Bytes ibw = values.get(key);
444     if (ibw == null)
445       return null;
446     return ibw.get();
447   }
448 
449   /**
450    * Getter for accessing the metadata associated with the key
451    *
452    * @param key The key.
453    * @return The value.
454    * @see #values
455    */
456   public String getValue(String key) {
457     byte[] value = getValue(Bytes.toBytes(key));
458     if (value == null)
459       return null;
460     return Bytes.toString(value);
461   }
462 
  /**
   * Getter for fetching an unmodifiable {@link #values} map.
   *
   * @return unmodifiable map {@link #values}.
   * @see #values
   */
  public Map<Bytes, Bytes> getValues() {
    // shallow pointer copy: the view shares the underlying Bytes objects
    // and reflects later mutations of this descriptor
    return Collections.unmodifiableMap(values);
  }
473 
  /**
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * @param key The key.
   * @param value The value.
   * @see #values
   */
  public HTableDescriptor setValue(byte[] key, byte[] value) {
    setValue(new Bytes(key), new Bytes(value));
    return this;
  }

  /*
   * Stores the value under the given key as UTF-8 bytes.
   * @param key The key.
   * @param value The value.
   */
  private HTableDescriptor setValue(final Bytes key,
      final String value) {
    setValue(key, new Bytes(Bytes.toBytes(value)));
    return this;
  }

  /*
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * All other setValue overloads funnel into this one.
   * @param key The key.
   * @param value The value.
   */
  public HTableDescriptor setValue(final Bytes key,
      final Bytes value) {
    // Writes to the deprecated DEFERRED_LOG_FLUSH key are translated into the
    // equivalent DURABILITY setting instead of being stored verbatim.
    if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
      boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
      LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
          "use " + DURABILITY + " instead");
      setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
      return this;
    }
    values.put(key, value);
    return this;
  }

  /**
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * @param key The key.
   * @param value The value. A null value removes the key instead of storing it.
   * @see #values
   */
  public HTableDescriptor setValue(String key, String value) {
    if (value == null) {
      remove(key);
    } else {
      setValue(Bytes.toBytes(key), Bytes.toBytes(value));
    }
    return this;
  }
530 
531   /**
532    * Remove metadata represented by the key from the {@link #values} map
533    *
534    * @param key Key whose key and value we're to remove from HTableDescriptor
535    * parameters.
536    */
537   public void remove(final String key) {
538     remove(new Bytes(Bytes.toBytes(key)));
539   }
540 
541   /**
542    * Remove metadata represented by the key from the {@link #values} map
543    *
544    * @param key Key whose key and value we're to remove from HTableDescriptor
545    * parameters.
546    */
547   public void remove(Bytes key) {
548     values.remove(key);
549   }
550 
551   /**
552    * Remove metadata represented by the key from the {@link #values} map
553    *
554    * @param key Key whose key and value we're to remove from HTableDescriptor
555    * parameters.
556    */
557   public void remove(final byte [] key) {
558     remove(new Bytes(key));
559   }
560 
561   /**
562    * Check if the readOnly flag of the table is set. If the readOnly flag is
563    * set then the contents of the table can only be read from but not modified.
564    *
565    * @return true if all columns in the table should be read only
566    */
567   public boolean isReadOnly() {
568     return isSomething(READONLY_KEY, DEFAULT_READONLY);
569   }
570 
571   /**
572    * Setting the table as read only sets all the columns in the table as read
573    * only. By default all tables are modifiable, but if the readOnly flag is
574    * set to true then the contents of the table can only be read but not modified.
575    *
576    * @param readOnly True if all of the columns in the table should be read
577    * only.
578    */
579   public HTableDescriptor setReadOnly(final boolean readOnly) {
580     return setValue(READONLY_KEY, readOnly? TRUE: FALSE);
581   }
582 
583   /**
584    * Check if the compaction enable flag of the table is true. If flag is
585    * false then no minor/major compactions will be done in real.
586    *
587    * @return true if table compaction enabled
588    */
589   public boolean isCompactionEnabled() {
590     return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
591   }
592 
593   /**
594    * Setting the table compaction enable flag.
595    *
596    * @param isEnable True if enable compaction.
597    */
598   public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
599     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
600     return this;
601   }
602 
  /**
   * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
   * Updates both the cached field and the persisted DURABILITY metadata entry.
   * @param durability enum value
   */
  public HTableDescriptor setDurability(Durability durability) {
    this.durability = durability;
    setValue(DURABILITY_KEY, durability.name());
    return this;
  }

  /**
   * Returns the durability setting for the table.
   * <p>
   * Lazily parses the stored DURABILITY value on first call and caches it;
   * an unknown stored value is logged and falls back to the default.
   * NOTE(review): the {@link #durability} cache field is not volatile, so this
   * lazy init is not strictly thread-safe — confirm callers are single-threaded
   * or tolerate recomputation.
   * @return durability setting for the table.
   */
  public Durability getDurability() {
    if (this.durability == null) {
      byte[] durabilityValue = getValue(DURABILITY_KEY);
      if (durabilityValue == null) {
        this.durability = DEFAULT_DURABLITY;
      } else {
        try {
          this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
        } catch (IllegalArgumentException ex) {
          LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
            + " is not known. Durability:" + Bytes.toString(durabilityValue));
          this.durability = DEFAULT_DURABLITY;
        }
      }
    }
    return this.durability;
  }
634 
  /**
   * Get the name of the table
   * May be null if this descriptor was created with the deprecated no-arg
   * constructor and setName() has not been called yet.
   *
   * @return TableName
   */
  public TableName getTableName() {
    return name;
  }

  /**
   * Get the name of the table as a byte array.
   *
   * @return name of table
   * @deprecated Use {@link #getTableName()} instead
   */
  @Deprecated
  public byte[] getName() {
    return name.getName();
  }

  /**
   * Get the name of the table as a String
   *
   * @return name of table as a String
   */
  public String getNameAsString() {
    return name.getNameAsString();
  }
663 
664   /**
665    * This sets the class associated with the region split policy which
666    * determines when a region split should occur.  The class used by
667    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
668    * @param clazz the class name
669    */
670   public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
671     setValue(SPLIT_POLICY, clazz);
672     return this;
673   }
674 
675   /**
676    * This gets the class associated with the region split policy which
677    * determines when a region split should occur.  The class used by
678    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
679    *
680    * @return the class name of the region split policy for this table.
681    * If this returns null, the default split policy is used.
682    */
683    public String getRegionSplitPolicyClassName() {
684     return getValue(SPLIT_POLICY);
685   }
686 
  /**
   * Set the name of the table.
   *
   * @param name name of table
   * @deprecated use the {@link TableName} overload
   */
  @Deprecated
  public HTableDescriptor setName(byte[] name) {
    setName(TableName.valueOf(name));
    return this;
  }

  /**
   * Set the name of the table and recompute the IS_META flag for the new name.
   *
   * @param name name of table
   */
  @Deprecated
  public HTableDescriptor setName(TableName name) {
    this.name = name;
    // Meta flags depend on the name, so refresh them on every rename.
    setMetaFlags(this.name);
    return this;
  }
704 
705   /**
706    * Returns the maximum size upto which a region can grow to after which a region
707    * split is triggered. The region size is represented by the size of the biggest
708    * store file in that region.
709    *
710    * @return max hregion size for table, -1 if not set.
711    *
712    * @see #setMaxFileSize(long)
713    */
714   public long getMaxFileSize() {
715     byte [] value = getValue(MAX_FILESIZE_KEY);
716     if (value != null) {
717       return Long.parseLong(Bytes.toString(value));
718     }
719     return -1;
720   }
721 
722   /**
723    * Sets the maximum size upto which a region can grow to after which a region
724    * split is triggered. The region size is represented by the size of the biggest
725    * store file in that region, i.e. If the biggest store file grows beyond the
726    * maxFileSize, then the region split is triggered. This defaults to a value of
727    * 256 MB.
728    * <p>
729    * This is not an absolute value and might vary. Assume that a single row exceeds
730    * the maxFileSize then the storeFileSize will be greater than maxFileSize since
731    * a single row cannot be split across multiple regions
732    * </p>
733    *
734    * @param maxFileSize The maximum file size that a store file can grow to
735    * before a split is triggered.
736    */
737   public HTableDescriptor setMaxFileSize(long maxFileSize) {
738     setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
739     return this;
740   }
741 
742   /**
743    * Returns the size of the memstore after which a flush to filesystem is triggered.
744    *
745    * @return memory cache flush size for each hregion, -1 if not set.
746    *
747    * @see #setMemStoreFlushSize(long)
748    */
749   public long getMemStoreFlushSize() {
750     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
751     if (value != null) {
752       return Long.parseLong(Bytes.toString(value));
753     }
754     return -1;
755   }
756 
757   /**
758    * Represents the maximum size of the memstore after which the contents of the
759    * memstore are flushed to the filesystem. This defaults to a size of 64 MB.
760    *
761    * @param memstoreFlushSize memory cache flush size for each hregion
762    */
763   public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
764     setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
765     return this;
766   }
767 
768   /**
769    * Adds a column family.
770    * @param family HColumnDescriptor of family to add.
771    */
772   public HTableDescriptor addFamily(final HColumnDescriptor family) {
773     if (family.getName() == null || family.getName().length <= 0) {
774       throw new NullPointerException("Family name cannot be null or empty");
775     }
776     this.families.put(family.getName(), family);
777     return this;
778   }
779 
780   /**
781    * Checks to see if this table contains the given column family
782    * @param familyName Family name or column name.
783    * @return true if the table contains the specified family name
784    */
785   public boolean hasFamily(final byte [] familyName) {
786     return families.containsKey(familyName);
787   }
788 
789   /**
790    * @return Name of this table and then a map of all of the column family
791    * descriptors.
792    * @see #getNameAsString()
793    */
794   @Override
795   public String toString() {
796     StringBuilder s = new StringBuilder();
797     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
798     s.append(getValues(true));
799     for (HColumnDescriptor f : families.values()) {
800       s.append(", ").append(f);
801     }
802     return s.toString();
803   }
804 
805   /**
806    * @return Name of this table and then a map of all of the column family
807    * descriptors (with only the non-default column family attributes)
808    */
809   public String toStringCustomizedValues() {
810     StringBuilder s = new StringBuilder();
811     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
812     s.append(getValues(false));
813     for(HColumnDescriptor hcd : families.values()) {
814       s.append(", ").append(hcd.toStringCustomizedValues());
815     }
816     return s.toString();
817   }
818 
  /*
   * Renders the {@link #values} metadata and the {@link #configuration} map as
   * the ", {TABLE_ATTRIBUTES => {...}, CONFIGURATION => {...}}" fragment used
   * by toString()/toStringCustomizedValues(). Returns an empty builder when
   * there is nothing to print.
   *
   * @param printDefaults if true, reserved keys whose values equal the
   * defaults are printed as well; if false they are suppressed.
   */
  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    // step 1: set partitioning and pruning
    Set<Bytes> reservedKeys = new TreeSet<Bytes>();
    Set<Bytes> userKeys = new TreeSet<Bytes>();
    for (Bytes k : values.keySet()) {
      if (k == null || k.get() == null) continue;
      String key = Bytes.toString(k.get());
      // in this section, print out reserved keywords + coprocessor info
      if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
        userKeys.add(k);
        continue;
      }
      // only print out IS_ROOT/IS_META if true
      String value = Bytes.toString(values.get(k).get());
      if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
        if (Boolean.valueOf(value) == false) continue;
      }
      // see if a reserved key is a default value. may not want to print it out
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        reservedKeys.add(k);
      }
    }

    // early exit optimization
    boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
    if (!hasAttributes && configuration.isEmpty()) return s;

    s.append(", {");
    // step 2: printing attributes
    if (hasAttributes) {
      s.append("TABLE_ATTRIBUTES => {");

      // print all reserved keys first
      boolean printCommaForAttr = false;
      for (Bytes k : reservedKeys) {
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(key);
        s.append(" => ");
        s.append('\'').append(value).append('\'');
      }

      if (!userKeys.isEmpty()) {
        // print all non-reserved, advanced config keys as a separate subset
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(HConstants.METADATA).append(" => ");
        s.append("{");
        boolean printCommaForCfg = false;
        for (Bytes k : userKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForCfg) s.append(", ");
          printCommaForCfg = true;
          s.append('\'').append(key).append('\'');
          s.append(" => ");
          s.append('\'').append(value).append('\'');
        }
        s.append("}");
      }
    }

    // step 3: printing all configuration:
    if (!configuration.isEmpty()) {
      if (hasAttributes) {
        s.append(", ");
      }
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfig = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfig) s.append(", ");
        printCommaForConfig = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(e.getValue()).append('\'');
      }
      s.append("}");
    }
    s.append("}"); // end METHOD
    return s;
  }
907 
908   /**
909    * Compare the contents of the descriptor with another one passed as a parameter.
910    * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
911    * contents of the descriptors are compared.
912    *
913    * @return true if the contents of the the two descriptors exactly match
914    *
915    * @see java.lang.Object#equals(java.lang.Object)
916    */
917   @Override
918   public boolean equals(Object obj) {
919     if (this == obj) {
920       return true;
921     }
922     if (obj == null) {
923       return false;
924     }
925     if (!(obj instanceof HTableDescriptor)) {
926       return false;
927     }
928     return compareTo((HTableDescriptor)obj) == 0;
929   }
930 
931   /**
932    * @see java.lang.Object#hashCode()
933    */
934   @Override
935   public int hashCode() {
936     int result = this.name.hashCode();
937     if (this.families.size() > 0) {
938       for (HColumnDescriptor e: this.families.values()) {
939         result ^= e.hashCode();
940       }
941     }
942     result ^= values.hashCode();
943     result ^= configuration.hashCode();
944     return result;
945   }
946 
947   // Comparable
948 
949   /**
950    * Compares the descriptor with another descriptor which is passed as a parameter.
951    * This compares the content of the two descriptors and not the reference.
952    *
953    * @return 0 if the contents of the descriptors are exactly matching,
954    * 		 1 if there is a mismatch in the contents
955    */
956   @Override
957   public int compareTo(@Nonnull final HTableDescriptor other) {
958     int result = this.name.compareTo(other.name);
959     if (result == 0) {
960       result = families.size() - other.families.size();
961     }
962     if (result == 0 && families.size() != other.families.size()) {
963       result = Integer.valueOf(families.size()).compareTo(
964           Integer.valueOf(other.families.size()));
965     }
966     if (result == 0) {
967       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
968           it2 = other.families.values().iterator(); it.hasNext(); ) {
969         result = it.next().compareTo(it2.next());
970         if (result != 0) {
971           break;
972         }
973       }
974     }
975     if (result == 0) {
976       // punt on comparison for ordering, just calculate difference
977       result = this.values.hashCode() - other.values.hashCode();
978       if (result < 0)
979         result = -1;
980       else if (result > 0)
981         result = 1;
982     }
983     if (result == 0) {
984       result = this.configuration.hashCode() - other.configuration.hashCode();
985       if (result < 0)
986         result = -1;
987       else if (result > 0)
988         result = 1;
989     }
990     return result;
991   }
992 
993   /**
994    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
995    * of all the column families of the table.
996    *
997    * @return Immutable collection of {@link HColumnDescriptor} of all the
998    * column families.
999    */
1000   public Collection<HColumnDescriptor> getFamilies() {
1001     return Collections.unmodifiableCollection(this.families.values());
1002   }
1003 
1004   /**
1005    * Returns the configured replicas per region
1006    */
1007   public int getRegionReplication() {
1008     byte[] val = getValue(REGION_REPLICATION_KEY);
1009     if (val == null || val.length == 0) {
1010       return DEFAULT_REGION_REPLICATION;
1011     }
1012     return Integer.parseInt(Bytes.toString(val));
1013   }
1014 
1015   /**
1016    * Sets the number of replicas per region.
1017    * @param regionReplication the replication factor per region
1018    */
1019   public HTableDescriptor setRegionReplication(int regionReplication) {
1020     setValue(REGION_REPLICATION_KEY,
1021         new Bytes(Bytes.toBytes(Integer.toString(regionReplication))));
1022     return this;
1023   }
1024 
1025   /**
1026    * Returns all the column family names of the current table. The map of
1027    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1028    * This returns all the keys of the family map which represents the column
1029    * family names of the table.
1030    *
1031    * @return Immutable sorted set of the keys of the families.
1032    */
1033   public Set<byte[]> getFamiliesKeys() {
1034     return Collections.unmodifiableSet(this.families.keySet());
1035   }
1036 
1037   /**
1038    * Returns an array all the {@link HColumnDescriptor} of the column families
1039    * of the table.
1040    *
1041    * @return Array of all the HColumnDescriptors of the current table
1042    *
1043    * @see #getFamilies()
1044    */
1045   public HColumnDescriptor[] getColumnFamilies() {
1046     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1047     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1048   }
1049 
1050 
1051   /**
1052    * Returns the HColumnDescriptor for a specific column family with name as
1053    * specified by the parameter column.
1054    *
1055    * @param column Column family name
1056    * @return Column descriptor for the passed family name or the family on
1057    * passed in column.
1058    */
1059   public HColumnDescriptor getFamily(final byte [] column) {
1060     return this.families.get(column);
1061   }
1062 
1063 
1064   /**
1065    * Removes the HColumnDescriptor with name specified by the parameter column
1066    * from the table descriptor
1067    *
1068    * @param column Name of the column family to be removed.
1069    * @return Column descriptor for the passed family name or the family on
1070    * passed in column.
1071    */
1072   public HColumnDescriptor removeFamily(final byte [] column) {
1073     return this.families.remove(column);
1074   }
1075 
1076 
1077   /**
1078    * Add a table coprocessor to this table. The coprocessor
1079    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1080    * or Endpoint.
1081    * It won't check if the class can be loaded or not.
1082    * Whether a coprocessor is loadable or not will be determined when
1083    * a region is opened.
1084    * @param className Full class name.
1085    * @throws IOException
1086    */
1087   public HTableDescriptor addCoprocessor(String className) throws IOException {
1088     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1089     return this;
1090   }
1091 
1092 
1093   /**
1094    * Add a table coprocessor to this table. The coprocessor
1095    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1096    * or Endpoint.
1097    * It won't check if the class can be loaded or not.
1098    * Whether a coprocessor is loadable or not will be determined when
1099    * a region is opened.
1100    * @param jarFilePath Path of the jar file. If it's null, the class will be
1101    * loaded from default classloader.
1102    * @param className Full class name.
1103    * @param priority Priority
1104    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1105    * @throws IOException
1106    */
1107   public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
1108                              int priority, final Map<String, String> kvs)
1109   throws IOException {
1110     if (hasCoprocessor(className)) {
1111       throw new IOException("Coprocessor " + className + " already exists.");
1112     }
1113     // validate parameter kvs
1114     StringBuilder kvString = new StringBuilder();
1115     if (kvs != null) {
1116       for (Map.Entry<String, String> e: kvs.entrySet()) {
1117         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1118           throw new IOException("Illegal parameter key = " + e.getKey());
1119         }
1120         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1121           throw new IOException("Illegal parameter (" + e.getKey() +
1122               ") value = " + e.getValue());
1123         }
1124         if (kvString.length() != 0) {
1125           kvString.append(',');
1126         }
1127         kvString.append(e.getKey());
1128         kvString.append('=');
1129         kvString.append(e.getValue());
1130       }
1131     }
1132 
1133     // generate a coprocessor key
1134     int maxCoprocessorNumber = 0;
1135     Matcher keyMatcher;
1136     for (Map.Entry<Bytes, Bytes> e :
1137         this.values.entrySet()) {
1138       keyMatcher =
1139           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1140               Bytes.toString(e.getKey().get()));
1141       if (!keyMatcher.matches()) {
1142         continue;
1143       }
1144       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)),
1145           maxCoprocessorNumber);
1146     }
1147     maxCoprocessorNumber++;
1148 
1149     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1150     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1151         "|" + className + "|" + Integer.toString(priority) + "|" +
1152         kvString.toString();
1153     setValue(key, value);
1154     return this;
1155   }
1156 
1157 
1158   /**
1159    * Check if the table has an attached co-processor represented by the name className
1160    *
1161    * @param className - Class name of the co-processor
1162    * @return true of the table has a co-processor className
1163    */
1164   public boolean hasCoprocessor(String className) {
1165     Matcher keyMatcher;
1166     Matcher valueMatcher;
1167     for (Map.Entry<Bytes, Bytes> e :
1168         this.values.entrySet()) {
1169       keyMatcher =
1170           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1171               Bytes.toString(e.getKey().get()));
1172       if (!keyMatcher.matches()) {
1173         continue;
1174       }
1175       valueMatcher =
1176         HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(
1177             Bytes.toString(e.getValue().get()));
1178       if (!valueMatcher.matches()) {
1179         continue;
1180       }
1181       // get className and compare
1182       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1183       if (clazz.equals(className.trim())) {
1184         return true;
1185       }
1186     }
1187     return false;
1188   }
1189 
1190   /**
1191    * Return the list of attached co-processor represented by their name className
1192    *
1193    * @return The list of co-processors classNames
1194    */
1195   public List<String> getCoprocessors() {
1196     List<String> result = new ArrayList<String>();
1197     Matcher keyMatcher;
1198     Matcher valueMatcher;
1199     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1200       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1201       if (!keyMatcher.matches()) {
1202         continue;
1203       }
1204       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1205           .toString(e.getValue().get()));
1206       if (!valueMatcher.matches()) {
1207         continue;
1208       }
1209       result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
1210     }
1211     return result;
1212   }
1213 
1214   /**
1215    * Remove a coprocessor from those set on the table
1216    * @param className Class name of the co-processor
1217    */
1218   public void removeCoprocessor(String className) {
1219     Bytes match = null;
1220     Matcher keyMatcher;
1221     Matcher valueMatcher;
1222     for (Map.Entry<Bytes, Bytes> e : this.values
1223         .entrySet()) {
1224       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1225           .getKey().get()));
1226       if (!keyMatcher.matches()) {
1227         continue;
1228       }
1229       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1230           .toString(e.getValue().get()));
1231       if (!valueMatcher.matches()) {
1232         continue;
1233       }
1234       // get className and compare
1235       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1236       // remove the CP if it is present
1237       if (clazz.equals(className.trim())) {
1238         match = e.getKey();
1239         break;
1240       }
1241     }
1242     // if we found a match, remove it
1243     if (match != null)
1244       remove(match);
1245   }
1246 
1247   /**
1248    * Returns the {@link Path} object representing the table directory under
1249    * path rootdir
1250    *
1251    * Deprecated use FSUtils.getTableDir() instead.
1252    *
1253    * @param rootdir qualified path of HBase root directory
1254    * @param tableName name of table
1255    * @return {@link Path} for table
1256    */
1257   @Deprecated
1258   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1259     //This is bad I had to mirror code from FSUTils.getTableDir since
1260     //there is no module dependency between hbase-client and hbase-server
1261     TableName name = TableName.valueOf(tableName);
1262     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1263               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1264   }
1265 
  /** Table descriptor for <code>hbase:meta</code> catalog table
   * Deprecated, use TableDescriptors#get(TableName.META_TABLE) or
   * Admin#getTableDescriptor(TableName.META_TABLE) instead.
   */
  @Deprecated
  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
      TableName.META_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              // Local scope: meta is not replicated to peer clusters.
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });
1286 
  // Attach the multi-row-mutation endpoint to the static meta descriptor.
  static {
    try {
      META_TABLEDESC.addCoprocessor(
          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
          null, Coprocessor.PRIORITY_SYSTEM, null);
    } catch (IOException ex) {
      // A failure here is a programming error in this class (duplicate or
      // invalid coprocessor spec), so fail class initialization loudly.
      throw new RuntimeException(ex);
    }
  }
1297 
  /** Name of the column family used by the namespace table. */
  public final static String NAMESPACE_FAMILY_INFO = "info";
  /** Byte form of {@link #NAMESPACE_FAMILY_INFO}. */
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  // Column qualifier "d" within the info family; presumably holds the
  // serialized namespace descriptor — confirm against callers.
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /** Table descriptor for namespace table */
  public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
      TableName.NAMESPACE_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              // Local scope: the namespace table is not replicated to peers.
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });
1316 
1317   @Deprecated
1318   public HTableDescriptor setOwner(User owner) {
1319     return setOwnerString(owner != null ? owner.getShortName() : null);
1320   }
1321 
1322   // used by admin.rb:alter(table_name,*args) to update owner.
1323   @Deprecated
1324   public HTableDescriptor setOwnerString(String ownerString) {
1325     if (ownerString != null) {
1326       setValue(OWNER_KEY, ownerString);
1327     } else {
1328       remove(OWNER_KEY);
1329     }
1330     return this;
1331   }
1332 
1333   @Deprecated
1334   public String getOwnerString() {
1335     if (getValue(OWNER_KEY) != null) {
1336       return Bytes.toString(getValue(OWNER_KEY));
1337     }
1338     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1339     // hbase:meta and -ROOT- should return system user as owner, not null (see
1340     // MasterFileSystem.java:bootstrap()).
1341     return null;
1342   }
1343 
1344   /**
1345    * @return This instance serialized with pb with pb magic prefix
1346    * @see #parseFrom(byte[])
1347    */
1348   public byte [] toByteArray() {
1349     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1350   }
1351 
1352   /**
1353    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1354    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1355    * @throws DeserializationException
1356    * @throws IOException
1357    * @see #toByteArray()
1358    */
1359   public static HTableDescriptor parseFrom(final byte [] bytes)
1360   throws DeserializationException, IOException {
1361     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1362       throw new DeserializationException("Expected PB encoded HTableDescriptor");
1363     }
1364     int pblen = ProtobufUtil.lengthOfPBMagic();
1365     TableSchema.Builder builder = TableSchema.newBuilder();
1366     TableSchema ts;
1367     try {
1368       ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1369     } catch (InvalidProtocolBufferException e) {
1370       throw new DeserializationException(e);
1371     }
1372     return convert(ts);
1373   }
1374 
1375   /**
1376    * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
1377    */
1378   public TableSchema convert() {
1379     TableSchema.Builder builder = TableSchema.newBuilder();
1380     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1381     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1382       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1383       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1384       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1385       builder.addAttributes(aBuilder.build());
1386     }
1387     for (HColumnDescriptor hcd: getColumnFamilies()) {
1388       builder.addColumnFamilies(hcd.convert());
1389     }
1390     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1391       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1392       aBuilder.setName(e.getKey());
1393       aBuilder.setValue(e.getValue());
1394       builder.addConfiguration(aBuilder.build());
1395     }
1396     return builder.build();
1397   }
1398 
1399   /**
1400    * @param ts A pb TableSchema instance.
1401    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1402    */
1403   public static HTableDescriptor convert(final TableSchema ts) {
1404     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1405     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1406     int index = 0;
1407     for (ColumnFamilySchema cfs: list) {
1408       hcds[index++] = HColumnDescriptor.convert(cfs);
1409     }
1410     HTableDescriptor htd = new HTableDescriptor(
1411         ProtobufUtil.toTableName(ts.getTableName()),
1412         hcds);
1413     for (BytesBytesPair a: ts.getAttributesList()) {
1414       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1415     }
1416     for (NameStringPair a: ts.getConfigurationList()) {
1417       htd.setConfiguration(a.getName(), a.getValue());
1418     }
1419     return htd;
1420   }
1421 
1422   /**
1423    * Getter for accessing the configuration value by key
1424    */
1425   public String getConfigurationValue(String key) {
1426     return configuration.get(key);
1427   }
1428 
1429   /**
1430    * Getter for fetching an unmodifiable {@link #configuration} map.
1431    */
1432   public Map<String, String> getConfiguration() {
1433     // shallow pointer copy
1434     return Collections.unmodifiableMap(configuration);
1435   }
1436 
1437   /**
1438    * Setter for storing a configuration setting in {@link #configuration} map.
1439    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1440    * @param value String value. If null, removes the setting.
1441    */
1442   public HTableDescriptor setConfiguration(String key, String value) {
1443     if (value == null) {
1444       removeConfiguration(key);
1445     } else {
1446       configuration.put(key, value);
1447     }
1448     return this;
1449   }
1450 
1451   /**
1452    * Remove a config setting represented by the key from the {@link #configuration} map
1453    */
1454   public void removeConfiguration(final String key) {
1455     configuration.remove(key);
1456   }
1457 }