View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.IOException;
22  import java.util.ArrayList;
23  import java.util.Collection;
24  import java.util.Collections;
25  import java.util.HashMap;
26  import java.util.HashSet;
27  import java.util.Iterator;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.Set;
31  import java.util.TreeMap;
32  import java.util.TreeSet;
33  import java.util.regex.Matcher;
34  
35  import org.apache.commons.logging.Log;
36  import org.apache.commons.logging.LogFactory;
37  import org.apache.hadoop.fs.Path;
38  import org.apache.hadoop.hbase.classification.InterfaceAudience;
39  import org.apache.hadoop.hbase.classification.InterfaceStability;
40  import org.apache.hadoop.hbase.client.Durability;
41  import org.apache.hadoop.hbase.exceptions.DeserializationException;
42  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
43  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
44  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
45  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
46  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
47  import org.apache.hadoop.hbase.regionserver.BloomType;
48  import org.apache.hadoop.hbase.security.User;
49  import org.apache.hadoop.hbase.util.ByteStringer;
50  import org.apache.hadoop.hbase.util.Bytes;
51  
52  import com.google.protobuf.InvalidProtocolBufferException;
53  
54  /**
 * HTableDescriptor contains the details about an HBase table, such as the descriptors of
 * all the column families, whether the table is a catalog table (<code> -ROOT- </code> or
 * <code> hbase:meta </code>), whether the table is read only, the maximum size of the memstore,
 * when the region split should occur, the coprocessors associated with it, etc.
59   */
60  @InterfaceAudience.Public
61  @InterfaceStability.Evolving
62  public class HTableDescriptor implements Comparable<HTableDescriptor> {
63  
64    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
65  
66    private TableName name = null;
67  
68    /**
69     * A map which holds the metadata information of the table. This metadata
70     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
71     * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
72     */
73    private final Map<Bytes, Bytes> values =
74        new HashMap<Bytes, Bytes>();
75  
76    /**
77     * A map which holds the configuration specific to the table.
78     * The keys of the map have the same names as config keys and override the defaults with
79     * table-specific settings. Example usage may be for compactions, etc.
80     */
81    private final Map<String, String> configuration = new HashMap<String, String>();
82  
83    public static final String SPLIT_POLICY = "SPLIT_POLICY";
84  
85    /**
86     * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
87     * attribute which denotes the maximum size of the store file after which
88     * a region split occurs
89     *
90     * @see #getMaxFileSize()
91     */
92    public static final String MAX_FILESIZE = "MAX_FILESIZE";
93    private static final Bytes MAX_FILESIZE_KEY =
94        new Bytes(Bytes.toBytes(MAX_FILESIZE));
95  
96    public static final String OWNER = "OWNER";
97    public static final Bytes OWNER_KEY =
98        new Bytes(Bytes.toBytes(OWNER));
99  
100   /**
101    * <em>INTERNAL</em> Used by rest interface to access this metadata
102    * attribute which denotes if the table is Read Only
103    *
104    * @see #isReadOnly()
105    */
106   public static final String READONLY = "READONLY";
107   private static final Bytes READONLY_KEY =
108       new Bytes(Bytes.toBytes(READONLY));
109 
110   /**
111    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
112    * attribute which denotes if the table is compaction enabled
113    *
114    * @see #isCompactionEnabled()
115    */
116   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
117   private static final Bytes COMPACTION_ENABLED_KEY =
118       new Bytes(Bytes.toBytes(COMPACTION_ENABLED));
119 
120   /**
121    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
122    * attribute which represents the maximum size of the memstore after which
123    * its contents are flushed onto the disk
124    *
125    * @see #getMemStoreFlushSize()
126    */
127   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
128   private static final Bytes MEMSTORE_FLUSHSIZE_KEY =
129       new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
130 
131   public static final String FLUSH_POLICY = "FLUSH_POLICY";
132 
133   /**
134    * <em>INTERNAL</em> Used by rest interface to access this metadata
135    * attribute which denotes if the table is a -ROOT- region or not
136    *
137    * @see #isRootRegion()
138    */
139   public static final String IS_ROOT = "IS_ROOT";
140   private static final Bytes IS_ROOT_KEY =
141       new Bytes(Bytes.toBytes(IS_ROOT));
142 
143   /**
144    * <em>INTERNAL</em> Used by rest interface to access this metadata
145    * attribute which denotes if it is a catalog table, either
146    * <code> hbase:meta </code> or <code> -ROOT- </code>
147    *
148    * @see #isMetaRegion()
149    */
150   public static final String IS_META = "IS_META";
151   private static final Bytes IS_META_KEY =
152       new Bytes(Bytes.toBytes(IS_META));
153 
154   /**
155    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
156    * attribute which denotes if the deferred log flush option is enabled.
157    * @deprecated Use {@link #DURABILITY} instead.
158    */
159   @Deprecated
160   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
161   @Deprecated
162   private static final Bytes DEFERRED_LOG_FLUSH_KEY =
163       new Bytes(Bytes.toBytes(DEFERRED_LOG_FLUSH));
164 
165   /**
166    * <em>INTERNAL</em> {@link Durability} setting for the table.
167    */
168   public static final String DURABILITY = "DURABILITY";
169   private static final Bytes DURABILITY_KEY =
170       new Bytes(Bytes.toBytes("DURABILITY"));
171 
172   /**
173    * <em>INTERNAL</em> number of region replicas for the table.
174    */
175   public static final String REGION_REPLICATION = "REGION_REPLICATION";
176   private static final Bytes REGION_REPLICATION_KEY =
177       new Bytes(Bytes.toBytes(REGION_REPLICATION));
178 
179   /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
180   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
181 
182   /*
183    *  The below are ugly but better than creating them each time till we
184    *  replace booleans being saved as Strings with plain booleans.  Need a
185    *  migration script to do this.  TODO.
186    */
187   private static final Bytes FALSE =
188       new Bytes(Bytes.toBytes(Boolean.FALSE.toString()));
189 
190   private static final Bytes TRUE =
191       new Bytes(Bytes.toBytes(Boolean.TRUE.toString()));
192 
193   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
194 
195   /**
196    * Constant that denotes whether the table is READONLY by default and is false
197    */
198   public static final boolean DEFAULT_READONLY = false;
199 
200   /**
201    * Constant that denotes whether the table is compaction enabled by default
202    */
203   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
204 
205   /**
206    * Constant that denotes the maximum default size of the memstore after which
207    * the contents are flushed to the store files
208    */
209   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
210 
211   public static final int DEFAULT_REGION_REPLICATION = 1;
212 
213   private final static Map<String, String> DEFAULT_VALUES
214     = new HashMap<String, String>();
215   private final static Set<Bytes> RESERVED_KEYWORDS
216       = new HashSet<Bytes>();
217 
218   static {
219     DEFAULT_VALUES.put(MAX_FILESIZE,
220         String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
221     DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
222     DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
223         String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
224     DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
225         String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
226     DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
227     DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
228     for (String s : DEFAULT_VALUES.keySet()) {
229       RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
230     }
231     RESERVED_KEYWORDS.add(IS_ROOT_KEY);
232     RESERVED_KEYWORDS.add(IS_META_KEY);
233   }
234 
235   /**
236    * Cache of whether this is a meta table or not.
237    */
238   private volatile Boolean meta = null;
239   /**
240    * Cache of whether this is root table or not.
241    */
242   private volatile Boolean root = null;
243 
244   /**
245    * Durability setting for the table
246    */
247   private Durability durability = null;
248 
249   /**
250    * Maps column family name to the respective HColumnDescriptors
251    */
252   private final Map<byte [], HColumnDescriptor> families =
253     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
254 
255   /**
256    * <em> INTERNAL </em> Private constructor used internally creating table descriptors for
257    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
258    */
259   @InterfaceAudience.Private
260   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
261     setName(name);
262     for(HColumnDescriptor descriptor : families) {
263       this.families.put(descriptor.getName(), descriptor);
264     }
265   }
266 
267   /**
268    * <em> INTERNAL </em>Private constructor used internally creating table descriptors for
269    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
270    */
271   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
272       Map<Bytes, Bytes> values) {
273     setName(name);
274     for(HColumnDescriptor descriptor : families) {
275       this.families.put(descriptor.getName(), descriptor);
276     }
277     for (Map.Entry<Bytes, Bytes> entry :
278         values.entrySet()) {
279       setValue(entry.getKey(), entry.getValue());
280     }
281   }
282 
  /**
   * Default constructor which constructs an empty object.
   * For deserializing an HTableDescriptor instance only.
   * Note: leaves {@link #name} null until {@link #setName(TableName)} is called.
   * @deprecated Used by Writables and Writables are going away.
   */
  @Deprecated
  public HTableDescriptor() {
    super();
  }

  /**
   * Construct a table descriptor specifying a TableName object
   * @param name Table name.
   * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  public HTableDescriptor(final TableName name) {
    super();
    setName(name);
  }

  /**
   * Construct a table descriptor specifying a byte array table name
   * @param name Table name.
   * @deprecated use {@link #HTableDescriptor(TableName)} instead
   * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final byte[] name) {
    this(TableName.valueOf(name));
  }

  /**
   * Construct a table descriptor specifying a String table name
   * @param name Table name.
   * @deprecated use {@link #HTableDescriptor(TableName)} instead
   * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final String name) {
    this(TableName.valueOf(name));
  }
322 
323   /**
324    * Construct a table descriptor by cloning the descriptor passed as a parameter.
325    * <p>
326    * Makes a deep copy of the supplied descriptor.
327    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
328    * @param desc The descriptor.
329    */
330   public HTableDescriptor(final HTableDescriptor desc) {
331     super();
332     setName(desc.name);
333     setMetaFlags(this.name);
334     for (HColumnDescriptor c: desc.families.values()) {
335       this.families.put(c.getName(), new HColumnDescriptor(c));
336     }
337     for (Map.Entry<Bytes, Bytes> e :
338         desc.values.entrySet()) {
339       setValue(e.getKey(), e.getValue());
340     }
341     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
342       this.configuration.put(e.getKey(), e.getValue());
343     }
344   }
345 
  /*
   * Set meta flags on this table.
   * IS_ROOT_KEY is set if its a -ROOT- table
   * IS_META_KEY is set either if its a -ROOT- or a hbase:meta table
   * Called by constructors.
   * Note: isRootRegion() is evaluated first and caches its result; the
   * short-circuit order here is relied upon, so the expression is left as-is.
   * @param name
   */
  private void setMetaFlags(final TableName name) {
    setMetaRegion(isRootRegion() ||
        name.equals(TableName.META_TABLE_NAME));
  }
357 
358   /**
359    * Check if the descriptor represents a <code> -ROOT- </code> region.
360    *
361    * @return true if this is a <code> -ROOT- </code> region
362    */
363   public boolean isRootRegion() {
364     if (this.root == null) {
365       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
366     }
367     return this.root.booleanValue();
368   }
369 
370   /**
371    * <em> INTERNAL </em> Used to denote if the current table represents
372    * <code> -ROOT- </code> region. This is used internally by the
373    * HTableDescriptor constructors
374    *
375    * @param isRoot true if this is the <code> -ROOT- </code> region
376    */
377   protected void setRootRegion(boolean isRoot) {
378     // TODO: Make the value a boolean rather than String of boolean.
379     setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
380   }
381 
382   /**
383    * Checks if this table is <code> hbase:meta </code>
384    * region.
385    *
386    * @return true if this table is <code> hbase:meta </code>
387    * region
388    */
389   public boolean isMetaRegion() {
390     if (this.meta == null) {
391       this.meta = calculateIsMetaRegion();
392     }
393     return this.meta.booleanValue();
394   }
395 
396   private synchronized Boolean calculateIsMetaRegion() {
397     byte [] value = getValue(IS_META_KEY);
398     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
399   }
400 
401   private boolean isSomething(final Bytes key,
402       final boolean valueIfNull) {
403     byte [] value = getValue(key);
404     if (value != null) {
405       return Boolean.valueOf(Bytes.toString(value));
406     }
407     return valueIfNull;
408   }
409 
410   /**
411    * <em> INTERNAL </em> Used to denote if the current table represents
412    * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
413    * internally by the HTableDescriptor constructors
414    *
415    * @param isMeta true if its either <code> -ROOT- </code> or
416    * <code> hbase:meta </code> region
417    */
418   protected void setMetaRegion(boolean isMeta) {
419     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
420   }
421 
422   /**
423    * Checks if the table is a <code>hbase:meta</code> table
424    *
425    * @return true if table is <code> hbase:meta </code> region.
426    */
427   public boolean isMetaTable() {
428     return isMetaRegion() && !isRootRegion();
429   }
430 
431   /**
432    * Getter for accessing the metadata associated with the key
433    *
434    * @param key The key.
435    * @return The value.
436    * @see #values
437    */
438   public byte[] getValue(byte[] key) {
439     return getValue(new Bytes(key));
440   }
441 
442   private byte[] getValue(final Bytes key) {
443     Bytes ibw = values.get(key);
444     if (ibw == null)
445       return null;
446     return ibw.get();
447   }
448 
449   /**
450    * Getter for accessing the metadata associated with the key
451    *
452    * @param key The key.
453    * @return The value.
454    * @see #values
455    */
456   public String getValue(String key) {
457     byte[] value = getValue(Bytes.toBytes(key));
458     if (value == null)
459       return null;
460     return Bytes.toString(value);
461   }
462 
463   /**
464    * Getter for fetching an unmodifiable {@link #values} map.
465    *
466    * @return unmodifiable map {@link #values}.
467    * @see #values
468    */
469   public Map<Bytes, Bytes> getValues() {
470     // shallow pointer copy
471     return Collections.unmodifiableMap(values);
472   }
473 
474   /**
475    * Setter for storing metadata as a (key, value) pair in {@link #values} map
476    *
477    * @param key The key.
478    * @param value The value.
479    * @see #values
480    */
481   public HTableDescriptor setValue(byte[] key, byte[] value) {
482     setValue(new Bytes(key), new Bytes(value));
483     return this;
484   }
485 
486   /*
487    * @param key The key.
488    * @param value The value.
489    */
490   private HTableDescriptor setValue(final Bytes key,
491       final String value) {
492     setValue(key, new Bytes(Bytes.toBytes(value)));
493     return this;
494   }
495 
  /*
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * The deprecated DEFERRED_LOG_FLUSH key is intercepted here and translated
   * into the equivalent DURABILITY setting rather than stored directly, so
   * old descriptors keep working.
   *
   * @param key The key.
   * @param value The value.
   */
  public HTableDescriptor setValue(final Bytes key,
      final Bytes value) {
    if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
      boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
      LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
          "use " + DURABILITY + " instead");
      setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
      return this;
    }
    values.put(key, value);
    return this;
  }
514 
515   /**
516    * Setter for storing metadata as a (key, value) pair in {@link #values} map
517    *
518    * @param key The key.
519    * @param value The value.
520    * @see #values
521    */
522   public HTableDescriptor setValue(String key, String value) {
523     if (value == null) {
524       remove(key);
525     } else {
526       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
527     }
528     return this;
529   }
530 
531   /**
532    * Remove metadata represented by the key from the {@link #values} map
533    *
534    * @param key Key whose key and value we're to remove from HTableDescriptor
535    * parameters.
536    */
537   public void remove(final String key) {
538     remove(new Bytes(Bytes.toBytes(key)));
539   }
540 
541   /**
542    * Remove metadata represented by the key from the {@link #values} map
543    *
544    * @param key Key whose key and value we're to remove from HTableDescriptor
545    * parameters.
546    */
547   public void remove(Bytes key) {
548     values.remove(key);
549   }
550 
551   /**
552    * Remove metadata represented by the key from the {@link #values} map
553    *
554    * @param key Key whose key and value we're to remove from HTableDescriptor
555    * parameters.
556    */
557   public void remove(final byte [] key) {
558     remove(new Bytes(key));
559   }
560 
561   /**
562    * Check if the readOnly flag of the table is set. If the readOnly flag is
563    * set then the contents of the table can only be read from but not modified.
564    *
565    * @return true if all columns in the table should be read only
566    */
567   public boolean isReadOnly() {
568     return isSomething(READONLY_KEY, DEFAULT_READONLY);
569   }
570 
571   /**
572    * Setting the table as read only sets all the columns in the table as read
573    * only. By default all tables are modifiable, but if the readOnly flag is
574    * set to true then the contents of the table can only be read but not modified.
575    *
576    * @param readOnly True if all of the columns in the table should be read
577    * only.
578    */
579   public HTableDescriptor setReadOnly(final boolean readOnly) {
580     return setValue(READONLY_KEY, readOnly? TRUE: FALSE);
581   }
582 
583   /**
584    * Check if the compaction enable flag of the table is true. If flag is
585    * false then no minor/major compactions will be done in real.
586    *
587    * @return true if table compaction enabled
588    */
589   public boolean isCompactionEnabled() {
590     return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
591   }
592 
593   /**
594    * Setting the table compaction enable flag.
595    *
596    * @param isEnable True if enable compaction.
597    */
598   public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
599     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
600     return this;
601   }
602 
  /**
   * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
   * Updates both the cached field and the serialized {@link #values} entry.
   * @param durability enum value
   * @return this (for chained invocation)
   */
  public HTableDescriptor setDurability(Durability durability) {
    this.durability = durability;
    setValue(DURABILITY_KEY, durability.name());
    return this;
  }

  /**
   * Returns the durability setting for the table.
   * Lazily parses and caches the value from {@link #values}; an unknown or
   * absent value falls back to {@link Durability#USE_DEFAULT}.
   * NOTE(review): the lazy cache is neither volatile nor synchronized —
   * presumably benign because recomputation is idempotent; confirm if this
   * descriptor is shared across threads.
   * @return durability setting for the table.
   */
  public Durability getDurability() {
    if (this.durability == null) {
      byte[] durabilityValue = getValue(DURABILITY_KEY);
      if (durabilityValue == null) {
        this.durability = DEFAULT_DURABLITY;
      } else {
        try {
          this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
        } catch (IllegalArgumentException ex) {
          LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
            + " is not known. Durability:" + Bytes.toString(durabilityValue));
          this.durability = DEFAULT_DURABLITY;
        }
      }
    }
    return this.durability;
  }
634 
635   /**
636    * Get the name of the table
637    *
638    * @return TableName
639    */
640   public TableName getTableName() {
641     return name;
642   }
643 
644   /**
645    * Get the name of the table as a byte array.
646    *
647    * @return name of table
648    * @deprecated Use {@link #getTableName()} instead
649    */
650   @Deprecated
651   public byte[] getName() {
652     return name.getName();
653   }
654 
655   /**
656    * Get the name of the table as a String
657    *
658    * @return name of table as a String
659    */
660   public String getNameAsString() {
661     return name.getNameAsString();
662   }
663 
664   /**
665    * This sets the class associated with the region split policy which
666    * determines when a region split should occur.  The class used by
667    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
668    * @param clazz the class name
669    */
670   public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
671     setValue(SPLIT_POLICY, clazz);
672     return this;
673   }
674 
675   /**
676    * This gets the class associated with the region split policy which
677    * determines when a region split should occur.  The class used by
678    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
679    *
680    * @return the class name of the region split policy for this table.
681    * If this returns null, the default split policy is used.
682    */
683    public String getRegionSplitPolicyClassName() {
684     return getValue(SPLIT_POLICY);
685   }
686 
687   /**
688    * Set the name of the table.
689    *
690    * @param name name of table
691    */
692   @Deprecated
693   public HTableDescriptor setName(byte[] name) {
694     setName(TableName.valueOf(name));
695     return this;
696   }
697 
698   @Deprecated
699   public HTableDescriptor setName(TableName name) {
700     this.name = name;
701     setMetaFlags(this.name);
702     return this;
703   }
704 
705   /**
706    * Returns the maximum size upto which a region can grow to after which a region
707    * split is triggered. The region size is represented by the size of the biggest
708    * store file in that region.
709    *
710    * @return max hregion size for table, -1 if not set.
711    *
712    * @see #setMaxFileSize(long)
713    */
714   public long getMaxFileSize() {
715     byte [] value = getValue(MAX_FILESIZE_KEY);
716     if (value != null) {
717       return Long.parseLong(Bytes.toString(value));
718     }
719     return -1;
720   }
721 
722   /**
723    * Sets the maximum size upto which a region can grow to after which a region
724    * split is triggered. The region size is represented by the size of the biggest
725    * store file in that region, i.e. If the biggest store file grows beyond the
726    * maxFileSize, then the region split is triggered. This defaults to a value of
727    * 256 MB.
728    * <p>
729    * This is not an absolute value and might vary. Assume that a single row exceeds
730    * the maxFileSize then the storeFileSize will be greater than maxFileSize since
731    * a single row cannot be split across multiple regions
732    * </p>
733    *
734    * @param maxFileSize The maximum file size that a store file can grow to
735    * before a split is triggered.
736    */
737   public HTableDescriptor setMaxFileSize(long maxFileSize) {
738     setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
739     return this;
740   }
741 
742   /**
743    * Returns the size of the memstore after which a flush to filesystem is triggered.
744    *
745    * @return memory cache flush size for each hregion, -1 if not set.
746    *
747    * @see #setMemStoreFlushSize(long)
748    */
749   public long getMemStoreFlushSize() {
750     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
751     if (value != null) {
752       return Long.parseLong(Bytes.toString(value));
753     }
754     return -1;
755   }
756 
757   /**
758    * Represents the maximum size of the memstore after which the contents of the
759    * memstore are flushed to the filesystem. This defaults to a size of 64 MB.
760    *
761    * @param memstoreFlushSize memory cache flush size for each hregion
762    */
763   public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
764     setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
765     return this;
766   }
767 
768   /**
769    * This sets the class associated with the flush policy which determines determines the stores
770    * need to be flushed when flushing a region. The class used by default is defined in
771    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
772    * @param clazz the class name
773    */
774   public HTableDescriptor setFlushPolicyClassName(String clazz) {
775     setValue(FLUSH_POLICY, clazz);
776     return this;
777   }
778 
779   /**
780    * This gets the class associated with the flush policy which determines the stores need to be
781    * flushed when flushing a region. The class used by default is defined in
782    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
783    * @return the class name of the flush policy for this table. If this returns null, the default
784    *         flush policy is used.
785    */
786   public String getFlushPolicyClassName() {
787     return getValue(FLUSH_POLICY);
788   }
789 
790   /**
791    * Adds a column family.
792    * For the updating purpose please use {@link #modifyFamily(HColumnDescriptor)} instead.
793    * @param family HColumnDescriptor of family to add.
794    */
795   public HTableDescriptor addFamily(final HColumnDescriptor family) {
796     if (family.getName() == null || family.getName().length <= 0) {
797       throw new IllegalArgumentException("Family name cannot be null or empty");
798     }
799     if (hasFamily(family.getName())) {
800       throw new IllegalArgumentException("Family '" +
801         family.getNameAsString() + "' already exists so cannot be added");
802     }
803     this.families.put(family.getName(), family);
804     return this;
805   }
806 
807   /**
808    * Modifies the existing column family.
809    * @param family HColumnDescriptor of family to update
810    * @return this (for chained invocation)
811    */
812   public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
813     if (family.getName() == null || family.getName().length <= 0) {
814       throw new IllegalArgumentException("Family name cannot be null or empty");
815     }
816     if (!hasFamily(family.getName())) {
817       throw new IllegalArgumentException("Column family '" + family.getNameAsString()
818         + "' does not exist");
819     }
820     this.families.put(family.getName(), family);
821     return this;
822   }
823 
824   /**
825    * Checks to see if this table contains the given column family
826    * @param familyName Family name or column name.
827    * @return true if the table contains the specified family name
828    */
829   public boolean hasFamily(final byte [] familyName) {
830     return families.containsKey(familyName);
831   }
832 
833   /**
834    * @return Name of this table and then a map of all of the column family
835    * descriptors.
836    * @see #getNameAsString()
837    */
838   @Override
839   public String toString() {
840     StringBuilder s = new StringBuilder();
841     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
842     s.append(getValues(true));
843     for (HColumnDescriptor f : families.values()) {
844       s.append(", ").append(f);
845     }
846     return s.toString();
847   }
848 
849   /**
850    * @return Name of this table and then a map of all of the column family
851    * descriptors (with only the non-default column family attributes)
852    */
853   public String toStringCustomizedValues() {
854     StringBuilder s = new StringBuilder();
855     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
856     s.append(getValues(false));
857     for(HColumnDescriptor hcd : families.values()) {
858       s.append(", ").append(hcd.toStringCustomizedValues());
859     }
860     return s.toString();
861   }
862 
863   /**
864    * @return map of all table attributes formatted into string.
865    */
866   public String toStringTableAttributes() {
867    return getValues(true).toString();
868   }
869 
870   private StringBuilder getValues(boolean printDefaults) {
871     StringBuilder s = new StringBuilder();
872 
873     // step 1: set partitioning and pruning
874     Set<Bytes> reservedKeys = new TreeSet<Bytes>();
875     Set<Bytes> userKeys = new TreeSet<Bytes>();
876     for (Bytes k : values.keySet()) {
877       if (k == null || k.get() == null) continue;
878       String key = Bytes.toString(k.get());
879       // in this section, print out reserved keywords + coprocessor info
880       if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
881         userKeys.add(k);
882         continue;
883       }
884       // only print out IS_ROOT/IS_META if true
885       String value = Bytes.toString(values.get(k).get());
886       if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
887         if (Boolean.valueOf(value) == false) continue;
888       }
889       // see if a reserved key is a default value. may not want to print it out
890       if (printDefaults
891           || !DEFAULT_VALUES.containsKey(key)
892           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
893         reservedKeys.add(k);
894       }
895     }
896 
897     // early exit optimization
898     boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
899     if (!hasAttributes && configuration.isEmpty()) return s;
900 
901     s.append(", {");
902     // step 2: printing attributes
903     if (hasAttributes) {
904       s.append("TABLE_ATTRIBUTES => {");
905 
906       // print all reserved keys first
907       boolean printCommaForAttr = false;
908       for (Bytes k : reservedKeys) {
909         String key = Bytes.toString(k.get());
910         String value = Bytes.toStringBinary(values.get(k).get());
911         if (printCommaForAttr) s.append(", ");
912         printCommaForAttr = true;
913         s.append(key);
914         s.append(" => ");
915         s.append('\'').append(value).append('\'');
916       }
917 
918       if (!userKeys.isEmpty()) {
919         // print all non-reserved, advanced config keys as a separate subset
920         if (printCommaForAttr) s.append(", ");
921         printCommaForAttr = true;
922         s.append(HConstants.METADATA).append(" => ");
923         s.append("{");
924         boolean printCommaForCfg = false;
925         for (Bytes k : userKeys) {
926           String key = Bytes.toString(k.get());
927           String value = Bytes.toStringBinary(values.get(k).get());
928           if (printCommaForCfg) s.append(", ");
929           printCommaForCfg = true;
930           s.append('\'').append(key).append('\'');
931           s.append(" => ");
932           s.append('\'').append(value).append('\'');
933         }
934         s.append("}");
935       }
936     }
937 
938     // step 3: printing all configuration:
939     if (!configuration.isEmpty()) {
940       if (hasAttributes) {
941         s.append(", ");
942       }
943       s.append(HConstants.CONFIGURATION).append(" => ");
944       s.append('{');
945       boolean printCommaForConfig = false;
946       for (Map.Entry<String, String> e : configuration.entrySet()) {
947         if (printCommaForConfig) s.append(", ");
948         printCommaForConfig = true;
949         s.append('\'').append(e.getKey()).append('\'');
950         s.append(" => ");
951         s.append('\'').append(e.getValue()).append('\'');
952       }
953       s.append("}");
954     }
955     s.append("}"); // end METHOD
956     return s;
957   }
958 
959   /**
960    * Compare the contents of the descriptor with another one passed as a parameter.
961    * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
962    * contents of the descriptors are compared.
963    *
964    * @return true if the contents of the the two descriptors exactly match
965    *
966    * @see java.lang.Object#equals(java.lang.Object)
967    */
968   @Override
969   public boolean equals(Object obj) {
970     if (this == obj) {
971       return true;
972     }
973     if (obj == null) {
974       return false;
975     }
976     if (!(obj instanceof HTableDescriptor)) {
977       return false;
978     }
979     return compareTo((HTableDescriptor)obj) == 0;
980   }
981 
982   /**
983    * @see java.lang.Object#hashCode()
984    */
985   @Override
986   public int hashCode() {
987     int result = this.name.hashCode();
988     if (this.families.size() > 0) {
989       for (HColumnDescriptor e: this.families.values()) {
990         result ^= e.hashCode();
991       }
992     }
993     result ^= values.hashCode();
994     result ^= configuration.hashCode();
995     return result;
996   }
997 
998   // Comparable
999 
1000   /**
1001    * Compares the descriptor with another descriptor which is passed as a parameter.
1002    * This compares the content of the two descriptors and not the reference.
1003    *
1004    * @return 0 if the contents of the descriptors are exactly matching,
1005    *         1 if there is a mismatch in the contents
1006    */
1007   @Override
1008   public int compareTo(final HTableDescriptor other) {
1009     int result = this.name.compareTo(other.name);
1010     if (result == 0) {
1011       result = families.size() - other.families.size();
1012     }
1013     if (result == 0 && families.size() != other.families.size()) {
1014       result = Integer.valueOf(families.size()).compareTo(
1015           Integer.valueOf(other.families.size()));
1016     }
1017     if (result == 0) {
1018       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1019           it2 = other.families.values().iterator(); it.hasNext(); ) {
1020         result = it.next().compareTo(it2.next());
1021         if (result != 0) {
1022           break;
1023         }
1024       }
1025     }
1026     if (result == 0) {
1027       // punt on comparison for ordering, just calculate difference
1028       result = this.values.hashCode() - other.values.hashCode();
1029       if (result < 0)
1030         result = -1;
1031       else if (result > 0)
1032         result = 1;
1033     }
1034     if (result == 0) {
1035       result = this.configuration.hashCode() - other.configuration.hashCode();
1036       if (result < 0)
1037         result = -1;
1038       else if (result > 0)
1039         result = 1;
1040     }
1041     return result;
1042   }
1043 
1044   /**
1045    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1046    * of all the column families of the table.
1047    *
1048    * @return Immutable collection of {@link HColumnDescriptor} of all the
1049    * column families.
1050    */
1051   public Collection<HColumnDescriptor> getFamilies() {
1052     return Collections.unmodifiableCollection(this.families.values());
1053   }
1054 
1055   /**
1056    * Returns the configured replicas per region
1057    */
1058   public int getRegionReplication() {
1059     byte[] val = getValue(REGION_REPLICATION_KEY);
1060     if (val == null || val.length == 0) {
1061       return DEFAULT_REGION_REPLICATION;
1062     }
1063     return Integer.parseInt(Bytes.toString(val));
1064   }
1065 
1066   /**
1067    * Sets the number of replicas per region.
1068    * @param regionReplication the replication factor per region
1069    */
1070   public HTableDescriptor setRegionReplication(int regionReplication) {
1071     setValue(REGION_REPLICATION_KEY,
1072         new Bytes(Bytes.toBytes(Integer.toString(regionReplication))));
1073     return this;
1074   }
1075 
1076   /**
1077    * Returns all the column family names of the current table. The map of
1078    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1079    * This returns all the keys of the family map which represents the column
1080    * family names of the table.
1081    *
1082    * @return Immutable sorted set of the keys of the families.
1083    */
1084   public Set<byte[]> getFamiliesKeys() {
1085     return Collections.unmodifiableSet(this.families.keySet());
1086   }
1087 
1088   /**
1089    * Returns an array all the {@link HColumnDescriptor} of the column families
1090    * of the table.
1091    *
1092    * @return Array of all the HColumnDescriptors of the current table
1093    *
1094    * @see #getFamilies()
1095    */
1096   public HColumnDescriptor[] getColumnFamilies() {
1097     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1098     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1099   }
1100 
1101 
1102   /**
1103    * Returns the HColumnDescriptor for a specific column family with name as
1104    * specified by the parameter column.
1105    *
1106    * @param column Column family name
1107    * @return Column descriptor for the passed family name or the family on
1108    * passed in column.
1109    */
1110   public HColumnDescriptor getFamily(final byte [] column) {
1111     return this.families.get(column);
1112   }
1113 
1114 
1115   /**
1116    * Removes the HColumnDescriptor with name specified by the parameter column
1117    * from the table descriptor
1118    *
1119    * @param column Name of the column family to be removed.
1120    * @return Column descriptor for the passed family name or the family on
1121    * passed in column.
1122    */
1123   public HColumnDescriptor removeFamily(final byte [] column) {
1124     return this.families.remove(column);
1125   }
1126 
1127 
1128   /**
1129    * Add a table coprocessor to this table. The coprocessor
1130    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1131    * or Endpoint.
1132    * It won't check if the class can be loaded or not.
1133    * Whether a coprocessor is loadable or not will be determined when
1134    * a region is opened.
1135    * @param className Full class name.
1136    * @throws IOException
1137    */
1138   public HTableDescriptor addCoprocessor(String className) throws IOException {
1139     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1140     return this;
1141   }
1142 
1143 
  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
   * @param jarFilePath Path of the jar file. If it's null, the class will be
   * loaded from default classloader.
   * @param className Full class name.
   * @param priority Priority
   * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
   * @return this, to allow chained invocation
   * @throws IOException if the coprocessor is already attached or a kvs entry
   * fails pattern validation
   */
  public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
                             int priority, final Map<String, String> kvs)
  throws IOException {
    if (hasCoprocessor(className)) {
      throw new IOException("Coprocessor " + className + " already exists.");
    }
    // validate parameter kvs and serialize them as "k1=v1,k2=v2"
    StringBuilder kvString = new StringBuilder();
    if (kvs != null) {
      for (Map.Entry<String, String> e: kvs.entrySet()) {
        if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
          throw new IOException("Illegal parameter key = " + e.getKey());
        }
        if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
          throw new IOException("Illegal parameter (" + e.getKey() +
              ") value = " + e.getValue());
        }
        if (kvString.length() != 0) {
          kvString.append(',');
        }
        kvString.append(e.getKey());
        kvString.append('=');
        kvString.append(e.getValue());
      }
    }

    // generate a coprocessor key "coprocessor$N": scan existing attribute
    // keys for the highest coprocessor sequence number and use the next one
    int maxCoprocessorNumber = 0;
    Matcher keyMatcher;
    for (Map.Entry<Bytes, Bytes> e :
        this.values.entrySet()) {
      keyMatcher =
          HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
              Bytes.toString(e.getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)),
          maxCoprocessorNumber);
    }
    maxCoprocessorNumber++;

    // attribute value layout: "<jar path>|<class name>|<priority>|<kvs>"
    String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
    String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
        "|" + className + "|" + Integer.toString(priority) + "|" +
        kvString.toString();
    setValue(key, value);
    return this;
  }
1207 
1208 
1209   /**
1210    * Check if the table has an attached co-processor represented by the name className
1211    *
1212    * @param className - Class name of the co-processor
1213    * @return true of the table has a co-processor className
1214    */
1215   public boolean hasCoprocessor(String className) {
1216     Matcher keyMatcher;
1217     Matcher valueMatcher;
1218     for (Map.Entry<Bytes, Bytes> e :
1219         this.values.entrySet()) {
1220       keyMatcher =
1221           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1222               Bytes.toString(e.getKey().get()));
1223       if (!keyMatcher.matches()) {
1224         continue;
1225       }
1226       valueMatcher =
1227         HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(
1228             Bytes.toString(e.getValue().get()));
1229       if (!valueMatcher.matches()) {
1230         continue;
1231       }
1232       // get className and compare
1233       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1234       if (clazz.equals(className.trim())) {
1235         return true;
1236       }
1237     }
1238     return false;
1239   }
1240 
1241   /**
1242    * Return the list of attached co-processor represented by their name className
1243    *
1244    * @return The list of co-processors classNames
1245    */
1246   public List<String> getCoprocessors() {
1247     List<String> result = new ArrayList<String>();
1248     Matcher keyMatcher;
1249     Matcher valueMatcher;
1250     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1251       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1252       if (!keyMatcher.matches()) {
1253         continue;
1254       }
1255       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1256           .toString(e.getValue().get()));
1257       if (!valueMatcher.matches()) {
1258         continue;
1259       }
1260       result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
1261     }
1262     return result;
1263   }
1264 
1265   /**
1266    * Remove a coprocessor from those set on the table
1267    * @param className Class name of the co-processor
1268    */
1269   public void removeCoprocessor(String className) {
1270     Bytes match = null;
1271     Matcher keyMatcher;
1272     Matcher valueMatcher;
1273     for (Map.Entry<Bytes, Bytes> e : this.values
1274         .entrySet()) {
1275       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1276           .getKey().get()));
1277       if (!keyMatcher.matches()) {
1278         continue;
1279       }
1280       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1281           .toString(e.getValue().get()));
1282       if (!valueMatcher.matches()) {
1283         continue;
1284       }
1285       // get className and compare
1286       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1287       // remove the CP if it is present
1288       if (clazz.equals(className.trim())) {
1289         match = e.getKey();
1290         break;
1291       }
1292     }
1293     // if we found a match, remove it
1294     if (match != null)
1295       remove(match);
1296   }
1297 
1298   /**
1299    * Returns the {@link Path} object representing the table directory under
1300    * path rootdir
1301    *
1302    * Deprecated use FSUtils.getTableDir() instead.
1303    *
1304    * @param rootdir qualified path of HBase root directory
1305    * @param tableName name of table
1306    * @return {@link Path} for table
1307    */
1308   @Deprecated
1309   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1310     //This is bad I had to mirror code from FSUTils.getTableDir since
1311     //there is no module dependency between hbase-client and hbase-server
1312     TableName name = TableName.valueOf(tableName);
1313     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1314               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1315   }
1316 
  /** Table descriptor for <code>hbase:meta</code> catalog table
   * Deprecated, use TableDescriptors#get(TableName.META_TABLE) or
   * Admin#getTableDescriptor(TableName.META_TABLE) instead.
   */
  @Deprecated
  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
      TableName.META_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true),
          new HColumnDescriptor(HConstants.TABLE_FAMILY)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                  // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
                  // Enable cache of data blocks in L1 if more than one caching tier deployed:
                  // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });

  // Attach the multi-row-mutation endpoint to the meta descriptor at class
  // load time so atomic multi-row operations on hbase:meta are available.
  static {
    try {
      META_TABLEDESC.addCoprocessor(
          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
          null, Coprocessor.PRIORITY_SYSTEM, null);
    } catch (IOException ex) {
      // A failure here means the meta descriptor is unusable; fail class
      // initialization rather than continue with a broken descriptor.
      throw new RuntimeException(ex);
    }
  }
1359 
  // Column family of the namespace table holding namespace metadata.
  public final static String NAMESPACE_FAMILY_INFO = "info";
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  // Qualifier under which the serialized namespace descriptor is stored.
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1363 
  /** Table descriptor for namespace table */
  public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
      TableName.NAMESPACE_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });
1378 
1379   @Deprecated
1380   public HTableDescriptor setOwner(User owner) {
1381     return setOwnerString(owner != null ? owner.getShortName() : null);
1382   }
1383 
1384   // used by admin.rb:alter(table_name,*args) to update owner.
1385   @Deprecated
1386   public HTableDescriptor setOwnerString(String ownerString) {
1387     if (ownerString != null) {
1388       setValue(OWNER_KEY, ownerString);
1389     } else {
1390       remove(OWNER_KEY);
1391     }
1392     return this;
1393   }
1394 
1395   @Deprecated
1396   public String getOwnerString() {
1397     if (getValue(OWNER_KEY) != null) {
1398       return Bytes.toString(getValue(OWNER_KEY));
1399     }
1400     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1401     // hbase:meta and -ROOT- should return system user as owner, not null (see
1402     // MasterFileSystem.java:bootstrap()).
1403     return null;
1404   }
1405 
1406   /**
1407    * @return This instance serialized with pb with pb magic prefix
1408    * @see #parseFrom(byte[])
1409    */
1410   public byte [] toByteArray() {
1411     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1412   }
1413 
1414   /**
1415    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1416    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1417    * @throws DeserializationException
1418    * @throws IOException
1419    * @see #toByteArray()
1420    */
1421   public static HTableDescriptor parseFrom(final byte [] bytes)
1422   throws DeserializationException, IOException {
1423     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1424       throw new DeserializationException("Expected PB encoded HTableDescriptor");
1425     }
1426     int pblen = ProtobufUtil.lengthOfPBMagic();
1427     TableSchema.Builder builder = TableSchema.newBuilder();
1428     TableSchema ts;
1429     try {
1430       ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1431     } catch (InvalidProtocolBufferException e) {
1432       throw new DeserializationException(e);
1433     }
1434     return convert(ts);
1435   }
1436 
1437   /**
1438    * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
1439    */
1440   public TableSchema convert() {
1441     TableSchema.Builder builder = TableSchema.newBuilder();
1442     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1443     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1444       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1445       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1446       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1447       builder.addAttributes(aBuilder.build());
1448     }
1449     for (HColumnDescriptor hcd: getColumnFamilies()) {
1450       builder.addColumnFamilies(hcd.convert());
1451     }
1452     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1453       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1454       aBuilder.setName(e.getKey());
1455       aBuilder.setValue(e.getValue());
1456       builder.addConfiguration(aBuilder.build());
1457     }
1458     return builder.build();
1459   }
1460 
1461   /**
1462    * @param ts A pb TableSchema instance.
1463    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1464    */
1465   public static HTableDescriptor convert(final TableSchema ts) {
1466     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1467     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1468     int index = 0;
1469     for (ColumnFamilySchema cfs: list) {
1470       hcds[index++] = HColumnDescriptor.convert(cfs);
1471     }
1472     HTableDescriptor htd = new HTableDescriptor(
1473         ProtobufUtil.toTableName(ts.getTableName()),
1474         hcds);
1475     for (BytesBytesPair a: ts.getAttributesList()) {
1476       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1477     }
1478     for (NameStringPair a: ts.getConfigurationList()) {
1479       htd.setConfiguration(a.getName(), a.getValue());
1480     }
1481     return htd;
1482   }
1483 
1484   /**
1485    * Getter for accessing the configuration value by key
1486    */
1487   public String getConfigurationValue(String key) {
1488     return configuration.get(key);
1489   }
1490 
1491   /**
1492    * Getter for fetching an unmodifiable {@link #configuration} map.
1493    */
1494   public Map<String, String> getConfiguration() {
1495     // shallow pointer copy
1496     return Collections.unmodifiableMap(configuration);
1497   }
1498 
1499   /**
1500    * Setter for storing a configuration setting in {@link #configuration} map.
1501    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1502    * @param value String value. If null, removes the setting.
1503    */
1504   public HTableDescriptor setConfiguration(String key, String value) {
1505     if (value == null) {
1506       removeConfiguration(key);
1507     } else {
1508       configuration.put(key, value);
1509     }
1510     return this;
1511   }
1512 
1513   /**
1514    * Remove a config setting represented by the key from the {@link #configuration} map
1515    */
1516   public void removeConfiguration(final String key) {
1517     configuration.remove(key);
1518   }
1519 }