// "View Javadoc" — residue from the Javadoc HTML export this file was extracted from.

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.DataInput;
22  import java.io.DataOutput;
23  import java.io.IOException;
24  import java.util.ArrayList;
25  import java.util.Collection;
26  import java.util.Collections;
27  import java.util.HashMap;
28  import java.util.HashSet;
29  import java.util.Iterator;
30  import java.util.List;
31  import java.util.Map;
32  import java.util.Set;
33  import java.util.TreeMap;
34  import java.util.TreeSet;
35  import java.util.regex.Matcher;
36  
37  import org.apache.hadoop.hbase.util.ByteStringer;
38  import org.apache.commons.logging.Log;
39  import org.apache.commons.logging.LogFactory;
40  import org.apache.hadoop.classification.InterfaceAudience;
41  import org.apache.hadoop.classification.InterfaceStability;
42  import org.apache.hadoop.fs.Path;
43  import org.apache.hadoop.hbase.client.Durability;
44  import org.apache.hadoop.hbase.exceptions.DeserializationException;
45  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
46  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
47  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
48  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
49  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
50  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
51  import org.apache.hadoop.hbase.regionserver.BloomType;
52  import org.apache.hadoop.hbase.security.User;
53  import org.apache.hadoop.hbase.util.Bytes;
54  import org.apache.hadoop.hbase.util.Writables;
55  import org.apache.hadoop.io.WritableComparable;
56  
57  import com.google.protobuf.InvalidProtocolBufferException;
58  
59  /**
60   * HTableDescriptor contains the details about an HBase table  such as the descriptors of
61   * all the column families, is the table a catalog table, <code> -ROOT- </code> or
62   * <code> hbase:meta </code>, if the table is read only, the maximum size of the memstore,
63   * when the region split should occur, coprocessors associated with it etc...
64   */
65  @InterfaceAudience.Public
66  @InterfaceStability.Evolving
67  public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
68  
  private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);

  /**
   *  Changes prior to version 3 were not recorded here.
   *  Version 3 adds metadata as a map where keys and values are byte[].
   *  Version 4 adds indexes
   *  Version 5 removed transactional pollution -- e.g. indexes
   *  Version 6 changed metadata to BytesBytesPair in PB
   *  Version 7 adds table-level configuration
   */
  private static final byte TABLE_DESCRIPTOR_VERSION = 7;

  // Fully-qualified table name; null until setName(...) is called by a constructor.
  private TableName name = null;

  /**
   * A map which holds the metadata information of the table. This metadata
   * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
   * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
   */
  private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
    new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();

  /**
   * A map which holds the configuration specific to the table.
   * The keys of the map have the same names as config keys and override the defaults with
   * table-specific settings. Example usage may be for compactions, etc.
   */
  private final Map<String, String> configuration = new HashMap<String, String>();

  // Attribute key naming the region-split-policy class; see setRegionSplitPolicyClassName().
  public static final String SPLIT_POLICY = "SPLIT_POLICY";
99  
  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes the maximum size of the store file after which
   * a region split occurs
   *
   * @see #getMaxFileSize()
   */
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  // Pre-built byte-wrapped key for lookups in the values map.
  private static final ImmutableBytesWritable MAX_FILESIZE_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));

  // Records the user that owns the table (used by security code elsewhere).
  public static final String OWNER = "OWNER";
  public static final ImmutableBytesWritable OWNER_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(OWNER));

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if the table is Read Only
   *
   * @see #isReadOnly()
   */
  public static final String READONLY = "READONLY";
  private static final ImmutableBytesWritable READONLY_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(READONLY));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is compaction enabled
   *
   * @see #isCompactionEnabled()
   */
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final ImmutableBytesWritable COMPACTION_ENABLED_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which represents the maximum size of the memstore after which
   * its contents are flushed onto the disk
   *
   * @see #getMemStoreFlushSize()
   */
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final ImmutableBytesWritable MEMSTORE_FLUSHSIZE_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if the table is a -ROOT- region or not
   *
   * @see #isRootRegion()
   */
  public static final String IS_ROOT = "IS_ROOT";
  private static final ImmutableBytesWritable IS_ROOT_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT));

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if it is a catalog table, either
   * <code> hbase:meta </code> or <code> -ROOT- </code>
   *
   * @see #isMetaRegion()
   */
  public static final String IS_META = "IS_META";
  private static final ImmutableBytesWritable IS_META_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(IS_META));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes if the deferred log flush option is enabled.
   * @deprecated Use {@link #DURABILITY} instead.
   */
  @Deprecated
  public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
  // Writes to this key are intercepted by setValue(...) and translated to a
  // Durability setting; see setValue(ImmutableBytesWritable, ImmutableBytesWritable).
  @Deprecated
  private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(DEFERRED_LOG_FLUSH));
177 
178   /**
179    * <em>INTERNAL</em> {@link Durability} setting for the table.
180    */
181   public static final String DURABILITY = "DURABILITY";
182   private static final ImmutableBytesWritable DURABILITY_KEY =
183       new ImmutableBytesWritable(Bytes.toBytes("DURABILITY"));
184 
185   /**
186    * <em>INTERNAL</em> number of region replicas for the table.
187    */
188   public static final String REGION_REPLICATION = "REGION_REPLICATION";
189   private static final ImmutableBytesWritable REGION_REPLICATION_KEY =
190       new ImmutableBytesWritable(Bytes.toBytes(REGION_REPLICATION));
191 
  /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
  private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;

  /*
   *  The below are ugly but better than creating them each time till we
   *  replace booleans being saved as Strings with plain booleans.  Need a
   *  migration script to do this.  TODO.
   */
  private static final ImmutableBytesWritable FALSE =
    new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString()));

  private static final ImmutableBytesWritable TRUE =
    new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));

  // Deferred log flush (deprecated flag) is off unless explicitly requested.
  private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;

  /**
   * Constant that denotes whether the table is READONLY by default and is false
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes the maximum default size of the memstore after which
   * the contents are flushed to the store files
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  // Default values of the reserved attribute keys; getValues(boolean) uses this
  // to suppress attributes still at their defaults when printDefaults is false.
  private final static Map<String, String> DEFAULT_VALUES
    = new HashMap<String, String>();
  // Keys treated as reserved when rendering attributes (all keys with defaults
  // plus IS_ROOT/IS_META).
  private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
    = new HashSet<ImmutableBytesWritable>();
  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
        String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
        String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
        String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    for (String s : DEFAULT_VALUES.keySet()) {
      RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
    }
    RESERVED_KEYWORDS.add(IS_ROOT_KEY);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }
246 
  /**
   * Cache of whether this is a meta table or not.
   * Lazily populated by {@link #isMetaRegion()}; null means "not yet computed".
   */
  private volatile Boolean meta = null;
  /**
   * Cache of whether this is root table or not.
   * Lazily populated by {@link #isRootRegion()}; null means "not yet computed".
   */
  private volatile Boolean root = null;

  /**
   * Durability setting for the table.
   * NOTE(review): unlike meta/root above this field is not volatile; confirm
   * getDurability()/setDurability() are only used single-threaded.
   */
  private Durability durability = null;

  /**
   * Maps column family name to the respective HColumnDescriptors
   */
  private final Map<byte [], HColumnDescriptor> families =
    new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
266 
267   /**
268    * <em> INTERNAL </em> Private constructor used internally creating table descriptors for
269    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
270    */
271   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
272     setName(name);
273     for(HColumnDescriptor descriptor : families) {
274       this.families.put(descriptor.getName(), descriptor);
275     }
276   }
277 
278   /**
279    * <em> INTERNAL </em>Private constructor used internally creating table descriptors for
280    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
281    */
282   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
283       Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
284     setName(name);
285     for(HColumnDescriptor descriptor : families) {
286       this.families.put(descriptor.getName(), descriptor);
287     }
288     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
289         values.entrySet()) {
290       setValue(entry.getKey(), entry.getValue());
291     }
292   }
293 
294   /**
295    * Default constructor which constructs an empty object.
296    * For deserializing an HTableDescriptor instance only.
297    * @deprecated Used by Writables and Writables are going away.
298    */
  @Deprecated
  public HTableDescriptor() {
    super();
  }

  /**
   * Construct a table descriptor specifying a TableName object
   * @param name Table name.
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  public HTableDescriptor(final TableName name) {
    super();
    setName(name);
  }

  /**
   * Construct a table descriptor specifying a byte array table name
   * @param name Table name.
   * @deprecated use {@link #HTableDescriptor(TableName)} instead.
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final byte[] name) {
    this(TableName.valueOf(name));
  }

  /**
   * Construct a table descriptor specifying a String table name
   * @param name Table name.
   * @deprecated use {@link #HTableDescriptor(TableName)} instead.
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final String name) {
    this(TableName.valueOf(name));
  }
333 
334   /**
335    * Construct a table descriptor by cloning the descriptor passed as a parameter.
336    * <p>
337    * Makes a deep copy of the supplied descriptor.
338    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
339    * @param desc The descriptor.
340    */
341   public HTableDescriptor(final HTableDescriptor desc) {
342     super();
343     setName(desc.name);
344     setMetaFlags(this.name);
345     for (HColumnDescriptor c: desc.families.values()) {
346       this.families.put(c.getName(), new HColumnDescriptor(c));
347     }
348     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
349         desc.values.entrySet()) {
350       setValue(e.getKey(), e.getValue());
351     }
352     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
353       this.configuration.put(e.getKey(), e.getValue());
354     }
355   }
356 
357   /*
358    * Set meta flags on this table.
359    * IS_ROOT_KEY is set if its a -ROOT- table
360    * IS_META_KEY is set either if its a -ROOT- or a hbase:meta table
361    * Called by constructors.
362    * @param name
363    */
  /*
   * Set meta flags on this table.
   * IS_ROOT_KEY is set if its a -ROOT- table
   * IS_META_KEY is set either if its a -ROOT- or a hbase:meta table
   * Called by constructors.
   * @param name table name used to decide whether this is hbase:meta
   */
  private void setMetaFlags(final TableName name) {
    setMetaRegion(isRootRegion() ||
        name.equals(TableName.META_TABLE_NAME));
  }
368 
369   /**
370    * Check if the descriptor represents a <code> -ROOT- </code> region.
371    *
372    * @return true if this is a <code> -ROOT- </code> region
373    */
374   public boolean isRootRegion() {
375     if (this.root == null) {
376       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
377     }
378     return this.root.booleanValue();
379   }
380 
381   /**
382    * <em> INTERNAL </em> Used to denote if the current table represents
383    * <code> -ROOT- </code> region. This is used internally by the
384    * HTableDescriptor constructors
385    *
386    * @param isRoot true if this is the <code> -ROOT- </code> region
387    */
388   protected void setRootRegion(boolean isRoot) {
389     // TODO: Make the value a boolean rather than String of boolean.
390     setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
391   }
392 
393   /**
394    * Checks if this table is <code> hbase:meta </code>
395    * region.
396    *
397    * @return true if this table is <code> hbase:meta </code>
398    * region
399    */
  /**
   * Checks if this table is <code> hbase:meta </code>
   * region.
   *
   * @return true if this table is <code> hbase:meta </code>
   * region
   */
  public boolean isMetaRegion() {
    // Lazy, racy-but-idempotent cache: the field is volatile and the computed
    // value is deterministic, so a duplicate computation is harmless.
    if (this.meta == null) {
      this.meta = calculateIsMetaRegion();
    }
    return this.meta.booleanValue();
  }

  // Reads the stored IS_META flag; absent means false.
  // NOTE(review): synchronized here but not in the callers above — presumably
  // only guards the map read; confirm intended locking discipline.
  private synchronized Boolean calculateIsMetaRegion() {
    byte [] value = getValue(IS_META_KEY);
    return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
  }

  /*
   * Returns the stored boolean attribute under key, or valueIfNull when the
   * key is absent from the values map.
   */
  private boolean isSomething(final ImmutableBytesWritable key,
      final boolean valueIfNull) {
    byte [] value = getValue(key);
    if (value != null) {
      return Boolean.valueOf(Bytes.toString(value));
    }
    return valueIfNull;
  }
420 
421   /**
422    * <em> INTERNAL </em> Used to denote if the current table represents
423    * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
424    * internally by the HTableDescriptor constructors
425    *
426    * @param isMeta true if its either <code> -ROOT- </code> or
427    * <code> hbase:meta </code> region
428    */
429   protected void setMetaRegion(boolean isMeta) {
430     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
431   }
432 
433   /**
434    * Checks if the table is a <code>hbase:meta</code> table
435    *
436    * @return true if table is <code> hbase:meta </code> region.
437    */
438   public boolean isMetaTable() {
439     return isMetaRegion() && !isRootRegion();
440   }
441 
442   /**
443    * Getter for accessing the metadata associated with the key
444    *
445    * @param key The key.
446    * @return The value.
447    * @see #values
448    */
449   public byte[] getValue(byte[] key) {
450     return getValue(new ImmutableBytesWritable(key));
451   }
452 
453   private byte[] getValue(final ImmutableBytesWritable key) {
454     ImmutableBytesWritable ibw = values.get(key);
455     if (ibw == null)
456       return null;
457     return ibw.get();
458   }
459 
460   /**
461    * Getter for accessing the metadata associated with the key
462    *
463    * @param key The key.
464    * @return The value.
465    * @see #values
466    */
467   public String getValue(String key) {
468     byte[] value = getValue(Bytes.toBytes(key));
469     if (value == null)
470       return null;
471     return Bytes.toString(value);
472   }
473 
474   /**
475    * Getter for fetching an unmodifiable {@link #values} map.
476    *
477    * @return unmodifiable map {@link #values}.
478    * @see #values
479    */
480   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
481     // shallow pointer copy
482     return Collections.unmodifiableMap(values);
483   }
484 
485   /**
486    * Setter for storing metadata as a (key, value) pair in {@link #values} map
487    *
488    * @param key The key.
489    * @param value The value.
490    * @see #values
491    */
492   public void setValue(byte[] key, byte[] value) {
493     setValue(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value));
494   }
495 
496   /*
497    * @param key The key.
498    * @param value The value.
499    */
500   private void setValue(final ImmutableBytesWritable key,
501       final String value) {
502     setValue(key, new ImmutableBytesWritable(Bytes.toBytes(value)));
503   }
504 
505   /*
506    * Setter for storing metadata as a (key, value) pair in {@link #values} map
507    *
508    * @param key The key.
509    * @param value The value.
510    */
511   public void setValue(final ImmutableBytesWritable key,
512       final ImmutableBytesWritable value) {
513     if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
514       boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
515       LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
516           "use " + DURABILITY + " instead");
517       setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
518       return;
519     }
520     values.put(key, value);
521   }
522 
523   /**
524    * Setter for storing metadata as a (key, value) pair in {@link #values} map
525    *
526    * @param key The key.
527    * @param value The value.
528    * @see #values
529    */
530   public void setValue(String key, String value) {
531     if (value == null) {
532       remove(key);
533     } else {
534       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
535     }
536   }
537 
538   /**
539    * Remove metadata represented by the key from the {@link #values} map
540    *
541    * @param key Key whose key and value we're to remove from HTableDescriptor
542    * parameters.
543    */
544   public void remove(final String key) {
545     remove(new ImmutableBytesWritable(Bytes.toBytes(key)));
546   }
547 
548   /**
549    * Remove metadata represented by the key from the {@link #values} map
550    *
551    * @param key Key whose key and value we're to remove from HTableDescriptor
552    * parameters.
553    */
554   public void remove(ImmutableBytesWritable key) {
555     values.remove(key);
556   }
557 
558   /**
559    * Remove metadata represented by the key from the {@link #values} map
560    *
561    * @param key Key whose key and value we're to remove from HTableDescriptor
562    * parameters.
563    */
564   public void remove(final byte [] key) {
565     remove(new ImmutableBytesWritable(key));
566   }
567 
568   /**
569    * Check if the readOnly flag of the table is set. If the readOnly flag is
570    * set then the contents of the table can only be read from but not modified.
571    *
572    * @return true if all columns in the table should be read only
573    */
  /**
   * Check if the readOnly flag of the table is set. If the readOnly flag is
   * set then the contents of the table can only be read from but not modified.
   *
   * @return true if all columns in the table should be read only
   */
  public boolean isReadOnly() {
    return isSomething(READONLY_KEY, DEFAULT_READONLY);
  }

  /**
   * Setting the table as read only sets all the columns in the table as read
   * only. By default all tables are modifiable, but if the readOnly flag is
   * set to true then the contents of the table can only be read but not modified.
   *
   * @param readOnly True if all of the columns in the table should be read
   * only.
   */
  public void setReadOnly(final boolean readOnly) {
    setValue(READONLY_KEY, readOnly? TRUE: FALSE);
  }

  /**
   * Check if the compaction enable flag of the table is true. If flag is
   * false then no minor/major compactions will be done in real.
   *
   * @return true if table compaction enabled
   */
  public boolean isCompactionEnabled() {
    return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
  }

  /**
   * Setting the table compaction enable flag.
   *
   * @param isEnable True if enable compaction.
   */
  public void setCompactionEnabled(final boolean isEnable) {
    setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
  }
608 
609   /**
610    * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
611    * @param durability enum value
612    */
  /**
   * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
   * @param durability enum value
   */
  public void setDurability(Durability durability) {
    this.durability = durability;
    // Also persist the enum name in the values map so it serializes with the table.
    setValue(DURABILITY_KEY, durability.name());
  }

  /**
   * Returns the durability setting for the table.
   * Lazily resolves the cached field from the persisted value; an unknown
   * persisted name falls back to the default (with a warning) rather than failing.
   * NOTE(review): this lazy init is unsynchronized and the field is not volatile —
   * confirm single-threaded access or accept the benign duplicate computation.
   * @return durability setting for the table.
   */
  public Durability getDurability() {
    if (this.durability == null) {
      byte[] durabilityValue = getValue(DURABILITY_KEY);
      if (durabilityValue == null) {
        this.durability = DEFAULT_DURABLITY;
      } else {
        try {
          this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
        } catch (IllegalArgumentException ex) {
          LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
            + " is not known. Durability:" + Bytes.toString(durabilityValue));
          this.durability = DEFAULT_DURABLITY;
        }
      }
    }
    return this.durability;
  }
639 
640   /**
641    * Get the name of the table
642    *
643    * @return TableName
644    */
  /**
   * Get the name of the table
   *
   * @return TableName
   */
  public TableName getTableName() {
    return name;
  }

  /**
   * Get the name of the table as a byte array.
   *
   * @return name of table
   */
  public byte[] getName() {
    return name.getName();
  }

  /**
   * Get the name of the table as a String
   *
   * @return name of table as a String
   */
  public String getNameAsString() {
    return name.getNameAsString();
  }
666 
667   /**
668    * This sets the class associated with the region split policy which
669    * determines when a region split should occur.  The class used by
670    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
671    * @param clazz the class name
672    */
  /**
   * This sets the class associated with the region split policy which
   * determines when a region split should occur.  The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
   * @param clazz the class name
   */
  public void setRegionSplitPolicyClassName(String clazz) {
    setValue(SPLIT_POLICY, clazz);
  }

  /**
   * This gets the class associated with the region split policy which
   * determines when a region split should occur.  The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
   *
   * @return the class name of the region split policy for this table.
   * If this returns null, the default split policy is used.
   */
   public String getRegionSplitPolicyClassName() {
    return getValue(SPLIT_POLICY);
  }

  /**
   * Set the name of the table.
   *
   * @param name name of table
   * @deprecated use {@link #setName(TableName)} instead.
   */
  @Deprecated
  public void setName(byte[] name) {
    setName(TableName.valueOf(name));
  }

  /**
   * Set the name of the table and recompute the meta/root flags.
   * @param name name of table
   * @deprecated the table name should be immutable after construction.
   */
  @Deprecated
  public void setName(TableName name) {
    this.name = name;
    // Changing the name can change whether this descriptor denotes a catalog table.
    setMetaFlags(this.name);
  }
704 
705   /**
706    * Returns the maximum size upto which a region can grow to after which a region
707    * split is triggered. The region size is represented by the size of the biggest
708    * store file in that region.
709    *
710    * @return max hregion size for table, -1 if not set.
711    *
712    * @see #setMaxFileSize(long)
713    */
714   public long getMaxFileSize() {
715     byte [] value = getValue(MAX_FILESIZE_KEY);
716     if (value != null) {
717       return Long.parseLong(Bytes.toString(value));
718     }
719     return -1;
720   }
721 
722   /**
723    * Sets the maximum size upto which a region can grow to after which a region
724    * split is triggered. The region size is represented by the size of the biggest
725    * store file in that region, i.e. If the biggest store file grows beyond the
726    * maxFileSize, then the region split is triggered. This defaults to a value of
727    * 256 MB.
728    * <p>
729    * This is not an absolute value and might vary. Assume that a single row exceeds
730    * the maxFileSize then the storeFileSize will be greater than maxFileSize since
731    * a single row cannot be split across multiple regions
732    * </p>
733    *
734    * @param maxFileSize The maximum file size that a store file can grow to
735    * before a split is triggered.
736    */
737   public void setMaxFileSize(long maxFileSize) {
738     setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
739   }
740 
741   /**
742    * Returns the size of the memstore after which a flush to filesystem is triggered.
743    *
744    * @return memory cache flush size for each hregion, -1 if not set.
745    *
746    * @see #setMemStoreFlushSize(long)
747    */
748   public long getMemStoreFlushSize() {
749     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
750     if (value != null) {
751       return Long.parseLong(Bytes.toString(value));
752     }
753     return -1;
754   }
755 
756   /**
757    * Represents the maximum size of the memstore after which the contents of the
758    * memstore are flushed to the filesystem. This defaults to a size of 64 MB.
759    *
760    * @param memstoreFlushSize memory cache flush size for each hregion
761    */
762   public void setMemStoreFlushSize(long memstoreFlushSize) {
763     setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
764   }
765 
766   /**
767    * Adds a column family.
768    * @param family HColumnDescriptor of family to add.
769    */
770   public void addFamily(final HColumnDescriptor family) {
771     if (family.getName() == null || family.getName().length <= 0) {
772       throw new NullPointerException("Family name cannot be null or empty");
773     }
774     this.families.put(family.getName(), family);
775   }
776 
777   /**
778    * Checks to see if this table contains the given column family
779    * @param familyName Family name or column name.
780    * @return true if the table contains the specified family name
781    */
782   public boolean hasFamily(final byte [] familyName) {
783     return families.containsKey(familyName);
784   }
785 
786   /**
787    * @return Name of this table and then a map of all of the column family
788    * descriptors.
789    * @see #getNameAsString()
790    */
791   @Override
792   public String toString() {
793     StringBuilder s = new StringBuilder();
794     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
795     s.append(getValues(true));
796     for (HColumnDescriptor f : families.values()) {
797       s.append(", ").append(f);
798     }
799     return s.toString();
800   }
801 
802   /**
803    * @return Name of this table and then a map of all of the column family
804    * descriptors (with only the non-default column family attributes)
805    */
806   public String toStringCustomizedValues() {
807     StringBuilder s = new StringBuilder();
808     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
809     s.append(getValues(false));
810     for(HColumnDescriptor hcd : families.values()) {
811       s.append(", ").append(hcd.toStringCustomizedValues());
812     }
813     return s.toString();
814   }
815 
  /**
   * Renders this table's attribute and configuration values as the shell-style
   * string fragment used by {@code toString()}/{@code toStringCustomizedValues()}.
   * Output shape: {@code , {TABLE_ATTRIBUTES => {K => 'V', ..., METADATA => {...}}, CONFIGURATION => {...}}}.
   *
   * @param printDefaults if false, reserved keys whose value equals the known
   *        default are suppressed from the output
   * @return the accumulated text; empty when there is nothing worth printing
   */
  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    // step 1: set partitioning and pruning
    // Split keys into reserved (well-known + coprocessor) vs. user-defined;
    // TreeSet gives deterministic, sorted output order.
    Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
    Set<ImmutableBytesWritable> userKeys = new TreeSet<ImmutableBytesWritable>();
    for (ImmutableBytesWritable k : values.keySet()) {
      if (k == null || k.get() == null) continue;
      String key = Bytes.toString(k.get());
      // in this section, print out reserved keywords + coprocessor info
      if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
        userKeys.add(k);
        continue;
      }
      // only print out IS_ROOT/IS_META if true
      String value = Bytes.toString(values.get(k).get());
      if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
        if (Boolean.valueOf(value) == false) continue;
      }
      // see if a reserved key is a default value. may not want to print it out
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        reservedKeys.add(k);
      }
    }

    // early exit optimization
    // Nothing survived pruning and no per-table configuration: emit nothing.
    boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
    if (!hasAttributes && configuration.isEmpty()) return s;

    s.append(", {");
    // step 2: printing attributes
    if (hasAttributes) {
      s.append("TABLE_ATTRIBUTES => {");

      // print all reserved keys first
      boolean printCommaForAttr = false;
      for (ImmutableBytesWritable k : reservedKeys) {
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(key);
        s.append(" => ");
        s.append('\'').append(value).append('\'');
      }

      if (!userKeys.isEmpty()) {
        // print all non-reserved, advanced config keys as a separate subset
        // nested under a single METADATA => {...} entry; keys are quoted here,
        // unlike the reserved keys above.
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(HConstants.METADATA).append(" => ");
        s.append("{");
        boolean printCommaForCfg = false;
        for (ImmutableBytesWritable k : userKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForCfg) s.append(", ");
          printCommaForCfg = true;
          s.append('\'').append(key).append('\'');
          s.append(" => ");
          s.append('\'').append(value).append('\'');
        }
        s.append("}");
      }
    }

    // step 3: printing all configuration:
    if (!configuration.isEmpty()) {
      if (hasAttributes) {
        s.append(", ");
      }
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfig = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfig) s.append(", ");
        printCommaForConfig = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(e.getValue()).append('\'');
      }
      s.append("}");
    }
    s.append("}"); // end METHOD
    return s;
  }
904 
905   /**
906    * Compare the contents of the descriptor with another one passed as a parameter.
907    * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
908    * contents of the descriptors are compared.
909    *
910    * @return true if the contents of the the two descriptors exactly match
911    *
912    * @see java.lang.Object#equals(java.lang.Object)
913    */
914   @Override
915   public boolean equals(Object obj) {
916     if (this == obj) {
917       return true;
918     }
919     if (obj == null) {
920       return false;
921     }
922     if (!(obj instanceof HTableDescriptor)) {
923       return false;
924     }
925     return compareTo((HTableDescriptor)obj) == 0;
926   }
927 
928   /**
929    * @see java.lang.Object#hashCode()
930    */
931   @Override
932   public int hashCode() {
933     int result = this.name.hashCode();
934     result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode();
935     if (this.families != null && this.families.size() > 0) {
936       for (HColumnDescriptor e: this.families.values()) {
937         result ^= e.hashCode();
938       }
939     }
940     result ^= values.hashCode();
941     result ^= configuration.hashCode();
942     return result;
943   }
944 
945   /**
946    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
947    * and is used for de-serialization of the HTableDescriptor over RPC
948    * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
949    */
950   @Deprecated
951   @Override
952   public void readFields(DataInput in) throws IOException {
953     int version = in.readInt();
954     if (version < 3)
955       throw new IOException("versions < 3 are not supported (and never existed!?)");
956     // version 3+
957     name = TableName.valueOf(Bytes.readByteArray(in));
958     setRootRegion(in.readBoolean());
959     setMetaRegion(in.readBoolean());
960     values.clear();
961     configuration.clear();
962     int numVals = in.readInt();
963     for (int i = 0; i < numVals; i++) {
964       ImmutableBytesWritable key = new ImmutableBytesWritable();
965       ImmutableBytesWritable value = new ImmutableBytesWritable();
966       key.readFields(in);
967       value.readFields(in);
968       setValue(key, value);
969     }
970     families.clear();
971     int numFamilies = in.readInt();
972     for (int i = 0; i < numFamilies; i++) {
973       HColumnDescriptor c = new HColumnDescriptor();
974       c.readFields(in);
975       families.put(c.getName(), c);
976     }
977     if (version >= 7) {
978       int numConfigs = in.readInt();
979       for (int i = 0; i < numConfigs; i++) {
980         ImmutableBytesWritable key = new ImmutableBytesWritable();
981         ImmutableBytesWritable value = new ImmutableBytesWritable();
982         key.readFields(in);
983         value.readFields(in);
984         configuration.put(
985           Bytes.toString(key.get(), key.getOffset(), key.getLength()),
986           Bytes.toString(value.get(), value.getOffset(), value.getLength()));
987       }
988     }
989   }
990 
991   /**
992    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
993    * and is used for serialization of the HTableDescriptor over RPC
994    * @deprecated Writables are going away.
995    * Use {@link com.google.protobuf.MessageLite#toByteArray} instead.
996    */
997   @Deprecated
998   @Override
999   public void write(DataOutput out) throws IOException {
1000 	  out.writeInt(TABLE_DESCRIPTOR_VERSION);
1001     Bytes.writeByteArray(out, name.toBytes());
1002     out.writeBoolean(isRootRegion());
1003     out.writeBoolean(isMetaRegion());
1004     out.writeInt(values.size());
1005     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1006         values.entrySet()) {
1007       e.getKey().write(out);
1008       e.getValue().write(out);
1009     }
1010     out.writeInt(families.size());
1011     for(Iterator<HColumnDescriptor> it = families.values().iterator();
1012         it.hasNext(); ) {
1013       HColumnDescriptor family = it.next();
1014       family.write(out);
1015     }
1016     out.writeInt(configuration.size());
1017     for (Map.Entry<String, String> e : configuration.entrySet()) {
1018       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1019       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1020     }
1021   }
1022 
1023   // Comparable
1024 
1025   /**
1026    * Compares the descriptor with another descriptor which is passed as a parameter.
1027    * This compares the content of the two descriptors and not the reference.
1028    *
1029    * @return 0 if the contents of the descriptors are exactly matching,
1030    * 		 1 if there is a mismatch in the contents
1031    */
1032   @Override
1033   public int compareTo(final HTableDescriptor other) {
1034     int result = this.name.compareTo(other.name);
1035     if (result == 0) {
1036       result = families.size() - other.families.size();
1037     }
1038     if (result == 0 && families.size() != other.families.size()) {
1039       result = Integer.valueOf(families.size()).compareTo(
1040           Integer.valueOf(other.families.size()));
1041     }
1042     if (result == 0) {
1043       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1044           it2 = other.families.values().iterator(); it.hasNext(); ) {
1045         result = it.next().compareTo(it2.next());
1046         if (result != 0) {
1047           break;
1048         }
1049       }
1050     }
1051     if (result == 0) {
1052       // punt on comparison for ordering, just calculate difference
1053       result = this.values.hashCode() - other.values.hashCode();
1054       if (result < 0)
1055         result = -1;
1056       else if (result > 0)
1057         result = 1;
1058     }
1059     if (result == 0) {
1060       result = this.configuration.hashCode() - other.configuration.hashCode();
1061       if (result < 0)
1062         result = -1;
1063       else if (result > 0)
1064         result = 1;
1065     }
1066     return result;
1067   }
1068 
1069   /**
1070    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1071    * of all the column families of the table.
1072    *
1073    * @return Immutable collection of {@link HColumnDescriptor} of all the
1074    * column families.
1075    */
1076   public Collection<HColumnDescriptor> getFamilies() {
1077     return Collections.unmodifiableCollection(this.families.values());
1078   }
1079 
1080   /**
1081    * Returns the configured replicas per region
1082    */
1083   public int getRegionReplication() {
1084     byte[] val = getValue(REGION_REPLICATION_KEY);
1085     if (val == null || val.length == 0) {
1086       return DEFAULT_REGION_REPLICATION;
1087     }
1088     return Integer.parseInt(Bytes.toString(val));
1089   }
1090 
1091   /**
1092    * Sets the number of replicas per region.
1093    * @param regionReplication the replication factor per region
1094    */
1095   public void setRegionReplication(int regionReplication) {
1096     setValue(REGION_REPLICATION_KEY,
1097         new ImmutableBytesWritable(Bytes.toBytes(Integer.toString(regionReplication))));
1098   }
1099 
1100   /**
1101    * Returns all the column family names of the current table. The map of
1102    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1103    * This returns all the keys of the family map which represents the column
1104    * family names of the table.
1105    *
1106    * @return Immutable sorted set of the keys of the families.
1107    */
1108   public Set<byte[]> getFamiliesKeys() {
1109     return Collections.unmodifiableSet(this.families.keySet());
1110   }
1111 
1112   /**
1113    * Returns an array all the {@link HColumnDescriptor} of the column families
1114    * of the table.
1115    *
1116    * @return Array of all the HColumnDescriptors of the current table
1117    *
1118    * @see #getFamilies()
1119    */
1120   public HColumnDescriptor[] getColumnFamilies() {
1121     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1122     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1123   }
1124 
1125 
1126   /**
1127    * Returns the HColumnDescriptor for a specific column family with name as
1128    * specified by the parameter column.
1129    *
1130    * @param column Column family name
1131    * @return Column descriptor for the passed family name or the family on
1132    * passed in column.
1133    */
1134   public HColumnDescriptor getFamily(final byte [] column) {
1135     return this.families.get(column);
1136   }
1137 
1138 
1139   /**
1140    * Removes the HColumnDescriptor with name specified by the parameter column
1141    * from the table descriptor
1142    *
1143    * @param column Name of the column family to be removed.
1144    * @return Column descriptor for the passed family name or the family on
1145    * passed in column.
1146    */
1147   public HColumnDescriptor removeFamily(final byte [] column) {
1148     return this.families.remove(column);
1149   }
1150 
1151 
1152   /**
1153    * Add a table coprocessor to this table. The coprocessor
1154    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1155    * or Endpoint.
1156    * It won't check if the class can be loaded or not.
1157    * Whether a coprocessor is loadable or not will be determined when
1158    * a region is opened.
1159    * @param className Full class name.
1160    * @throws IOException
1161    */
1162   public void addCoprocessor(String className) throws IOException {
1163     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1164   }
1165 
1166 
1167   /**
1168    * Add a table coprocessor to this table. The coprocessor
1169    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1170    * or Endpoint.
1171    * It won't check if the class can be loaded or not.
1172    * Whether a coprocessor is loadable or not will be determined when
1173    * a region is opened.
1174    * @param jarFilePath Path of the jar file. If it's null, the class will be
1175    * loaded from default classloader.
1176    * @param className Full class name.
1177    * @param priority Priority
1178    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1179    * @throws IOException
1180    */
1181   public void addCoprocessor(String className, Path jarFilePath,
1182                              int priority, final Map<String, String> kvs)
1183   throws IOException {
1184     if (hasCoprocessor(className)) {
1185       throw new IOException("Coprocessor " + className + " already exists.");
1186     }
1187     // validate parameter kvs
1188     StringBuilder kvString = new StringBuilder();
1189     if (kvs != null) {
1190       for (Map.Entry<String, String> e: kvs.entrySet()) {
1191         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1192           throw new IOException("Illegal parameter key = " + e.getKey());
1193         }
1194         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1195           throw new IOException("Illegal parameter (" + e.getKey() +
1196               ") value = " + e.getValue());
1197         }
1198         if (kvString.length() != 0) {
1199           kvString.append(',');
1200         }
1201         kvString.append(e.getKey());
1202         kvString.append('=');
1203         kvString.append(e.getValue());
1204       }
1205     }
1206 
1207     // generate a coprocessor key
1208     int maxCoprocessorNumber = 0;
1209     Matcher keyMatcher;
1210     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1211         this.values.entrySet()) {
1212       keyMatcher =
1213           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1214               Bytes.toString(e.getKey().get()));
1215       if (!keyMatcher.matches()) {
1216         continue;
1217       }
1218       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)),
1219           maxCoprocessorNumber);
1220     }
1221     maxCoprocessorNumber++;
1222 
1223     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1224     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1225         "|" + className + "|" + Integer.toString(priority) + "|" +
1226         kvString.toString();
1227     setValue(key, value);
1228   }
1229 
1230 
1231   /**
1232    * Check if the table has an attached co-processor represented by the name className
1233    *
1234    * @param className - Class name of the co-processor
1235    * @return true of the table has a co-processor className
1236    */
1237   public boolean hasCoprocessor(String className) {
1238     Matcher keyMatcher;
1239     Matcher valueMatcher;
1240     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1241         this.values.entrySet()) {
1242       keyMatcher =
1243           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1244               Bytes.toString(e.getKey().get()));
1245       if (!keyMatcher.matches()) {
1246         continue;
1247       }
1248       valueMatcher =
1249         HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(
1250             Bytes.toString(e.getValue().get()));
1251       if (!valueMatcher.matches()) {
1252         continue;
1253       }
1254       // get className and compare
1255       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1256       if (clazz.equals(className.trim())) {
1257         return true;
1258       }
1259     }
1260     return false;
1261   }
1262 
1263   /**
1264    * Return the list of attached co-processor represented by their name className
1265    *
1266    * @return The list of co-processors classNames
1267    */
1268   public List<String> getCoprocessors() {
1269     List<String> result = new ArrayList<String>();
1270     Matcher keyMatcher;
1271     Matcher valueMatcher;
1272     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values.entrySet()) {
1273       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1274       if (!keyMatcher.matches()) {
1275         continue;
1276       }
1277       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1278           .toString(e.getValue().get()));
1279       if (!valueMatcher.matches()) {
1280         continue;
1281       }
1282       result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
1283     }
1284     return result;
1285   }
1286 
1287   /**
1288    * Remove a coprocessor from those set on the table
1289    * @param className Class name of the co-processor
1290    */
1291   public void removeCoprocessor(String className) {
1292     ImmutableBytesWritable match = null;
1293     Matcher keyMatcher;
1294     Matcher valueMatcher;
1295     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values
1296         .entrySet()) {
1297       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1298           .getKey().get()));
1299       if (!keyMatcher.matches()) {
1300         continue;
1301       }
1302       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1303           .toString(e.getValue().get()));
1304       if (!valueMatcher.matches()) {
1305         continue;
1306       }
1307       // get className and compare
1308       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1309       // remove the CP if it is present
1310       if (clazz.equals(className.trim())) {
1311         match = e.getKey();
1312         break;
1313       }
1314     }
1315     // if we found a match, remove it
1316     if (match != null)
1317       remove(match);
1318   }
1319 
1320   /**
1321    * Returns the {@link Path} object representing the table directory under
1322    * path rootdir
1323    *
1324    * Deprecated use FSUtils.getTableDir() instead.
1325    *
1326    * @param rootdir qualified path of HBase root directory
1327    * @param tableName name of table
1328    * @return {@link Path} for table
1329    */
1330   @Deprecated
1331   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1332     //This is bad I had to mirror code from FSUTils.getTableDir since
1333     //there is no module dependency between hbase-client and hbase-server
1334     TableName name = TableName.valueOf(tableName);
1335     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1336               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1337   }
1338 
  /** Table descriptor for <code>hbase:meta</code> catalog table */
  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
      TableName.META_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });

  // Attach the multi-row-mutation endpoint to hbase:meta at class-load time;
  // addCoprocessor only fails here if the entry already exists, which would be
  // a programming error, hence the wrap in an unchecked exception.
  static {
    try {
      META_TABLEDESC.addCoprocessor(
          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
          null, Coprocessor.PRIORITY_SYSTEM, null);
    } catch (IOException ex) {
      //LOG.warn("exception in loading coprocessor for the hbase:meta table");
      throw new RuntimeException(ex);
    }
  }
1366 
  // Column family holding namespace metadata in the namespace table.
  public final static String NAMESPACE_FAMILY_INFO = "info";
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  // Qualifier under which the serialized namespace descriptor is stored.
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1370 
  /** Table descriptor for namespace table */
  public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
      TableName.NAMESPACE_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });
1385 
1386   @Deprecated
1387   public void setOwner(User owner) {
1388     setOwnerString(owner != null ? owner.getShortName() : null);
1389   }
1390 
1391   // used by admin.rb:alter(table_name,*args) to update owner.
1392   @Deprecated
1393   public void setOwnerString(String ownerString) {
1394     if (ownerString != null) {
1395       setValue(OWNER_KEY, ownerString);
1396     } else {
1397       remove(OWNER_KEY);
1398     }
1399   }
1400 
1401   @Deprecated
1402   public String getOwnerString() {
1403     if (getValue(OWNER_KEY) != null) {
1404       return Bytes.toString(getValue(OWNER_KEY));
1405     }
1406     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1407     // hbase:meta and -ROOT- should return system user as owner, not null (see
1408     // MasterFileSystem.java:bootstrap()).
1409     return null;
1410   }
1411 
1412   /**
1413    * @return This instance serialized with pb with pb magic prefix
1414    * @see #parseFrom(byte[])
1415    */
1416   public byte [] toByteArray() {
1417     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1418   }
1419 
1420   /**
1421    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1422    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1423    * @throws DeserializationException
1424    * @throws IOException
1425    * @see #toByteArray()
1426    */
1427   public static HTableDescriptor parseFrom(final byte [] bytes)
1428   throws DeserializationException, IOException {
1429     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1430       return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor());
1431     }
1432     int pblen = ProtobufUtil.lengthOfPBMagic();
1433     TableSchema.Builder builder = TableSchema.newBuilder();
1434     TableSchema ts;
1435     try {
1436       ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1437     } catch (InvalidProtocolBufferException e) {
1438       throw new DeserializationException(e);
1439     }
1440     return convert(ts);
1441   }
1442 
1443   /**
1444    * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
1445    */
1446   public TableSchema convert() {
1447     TableSchema.Builder builder = TableSchema.newBuilder();
1448     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1449     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1450       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1451       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1452       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1453       builder.addAttributes(aBuilder.build());
1454     }
1455     for (HColumnDescriptor hcd: getColumnFamilies()) {
1456       builder.addColumnFamilies(hcd.convert());
1457     }
1458     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1459       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1460       aBuilder.setName(e.getKey());
1461       aBuilder.setValue(e.getValue());
1462       builder.addConfiguration(aBuilder.build());
1463     }
1464     return builder.build();
1465   }
1466 
1467   /**
1468    * @param ts A pb TableSchema instance.
1469    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1470    */
1471   public static HTableDescriptor convert(final TableSchema ts) {
1472     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1473     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1474     int index = 0;
1475     for (ColumnFamilySchema cfs: list) {
1476       hcds[index++] = HColumnDescriptor.convert(cfs);
1477     }
1478     HTableDescriptor htd = new HTableDescriptor(
1479         ProtobufUtil.toTableName(ts.getTableName()),
1480         hcds);
1481     for (BytesBytesPair a: ts.getAttributesList()) {
1482       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1483     }
1484     for (NameStringPair a: ts.getConfigurationList()) {
1485       htd.setConfiguration(a.getName(), a.getValue());
1486     }
1487     return htd;
1488   }
1489 
1490   /**
1491    * Getter for accessing the configuration value by key
1492    */
1493   public String getConfigurationValue(String key) {
1494     return configuration.get(key);
1495   }
1496 
1497   /**
1498    * Getter for fetching an unmodifiable {@link #configuration} map.
1499    */
1500   public Map<String, String> getConfiguration() {
1501     // shallow pointer copy
1502     return Collections.unmodifiableMap(configuration);
1503   }
1504 
1505   /**
1506    * Setter for storing a configuration setting in {@link #configuration} map.
1507    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1508    * @param value String value. If null, removes the setting.
1509    */
1510   public void setConfiguration(String key, String value) {
1511     if (value == null) {
1512       removeConfiguration(key);
1513     } else {
1514       configuration.put(key, value);
1515     }
1516   }
1517 
1518   /**
1519    * Remove a config setting represented by the key from the {@link #configuration} map
1520    */
1521   public void removeConfiguration(final String key) {
1522     configuration.remove(key);
1523   }
1524 }