View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.DataInput;
22  import java.io.DataOutput;
23  import java.io.IOException;
24  import java.util.ArrayList;
25  import java.util.Collection;
26  import java.util.Collections;
27  import java.util.HashMap;
28  import java.util.HashSet;
29  import java.util.Iterator;
30  import java.util.List;
31  import java.util.Map;
32  import java.util.Set;
33  import java.util.TreeMap;
34  import java.util.TreeSet;
35  import java.util.regex.Matcher;
36  
37  import org.apache.commons.logging.Log;
38  import org.apache.commons.logging.LogFactory;
39  import org.apache.hadoop.classification.InterfaceAudience;
40  import org.apache.hadoop.classification.InterfaceStability;
41  import org.apache.hadoop.fs.Path;
42  import org.apache.hadoop.hbase.client.Durability;
43  import org.apache.hadoop.hbase.exceptions.DeserializationException;
44  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
45  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
46  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
47  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
48  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
49  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
50  import org.apache.hadoop.hbase.regionserver.BloomType;
51  import org.apache.hadoop.hbase.security.User;
52  import org.apache.hadoop.hbase.util.Bytes;
53  import org.apache.hadoop.hbase.util.Writables;
54  import org.apache.hadoop.io.WritableComparable;
55  
56  import com.google.protobuf.HBaseZeroCopyByteString;
57  import com.google.protobuf.InvalidProtocolBufferException;
58  
59  /**
60   * HTableDescriptor contains the details about an HBase table  such as the descriptors of
61   * all the column families, is the table a catalog table, <code> -ROOT- </code> or
62   * <code> hbase:meta </code>, if the table is read only, the maximum size of the memstore,
63   * when the region split should occur, coprocessors associated with it etc...
64   */
65  @InterfaceAudience.Public
66  @InterfaceStability.Evolving
67  public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
68  
  /** Class logger. */
  private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);

  /**
   *  Serialization version of the Writable form of this descriptor.
   *  Changes prior to version 3 were not recorded here.
   *  Version 3 adds metadata as a map where keys and values are byte[].
   *  Version 4 adds indexes
   *  Version 5 removed transactional pollution -- e.g. indexes
   *  Version 6 changed metadata to BytesBytesPair in PB
   *  Version 7 adds table-level configuration
   */
  private static final byte TABLE_DESCRIPTOR_VERSION = 7;

  // Fully-qualified table name. Null only after the deprecated no-arg constructor,
  // until deserialization fills it in.
  private TableName name = null;

  /**
   * A map which holds the metadata information of the table. This metadata
   * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
   * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
   * Keys and values are stored as raw bytes; booleans are stored as the
   * strings "true"/"false" (see TRUE/FALSE below).
   */
  private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
    new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();

  /**
   * A map which holds the configuration specific to the table.
   * The keys of the map have the same names as config keys and override the defaults with
   * table-specific settings. Example usage may be for compactions, etc.
   */
  private final Map<String, String> configuration = new HashMap<String, String>();

  /** Metadata key under which the region split policy class name is stored. */
  public static final String SPLIT_POLICY = "SPLIT_POLICY";

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes the maximum size of the store file after which
   * a region split occurs
   *
   * @see #getMaxFileSize()
   */
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final ImmutableBytesWritable MAX_FILESIZE_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));

  // Metadata key for the table owner. NOTE(review): presumably consumed by the
  // security layer (the User import above) — confirm against callers.
  public static final String OWNER = "OWNER";
  public static final ImmutableBytesWritable OWNER_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(OWNER));

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if the table is Read Only
   *
   * @see #isReadOnly()
   */
  public static final String READONLY = "READONLY";
  private static final ImmutableBytesWritable READONLY_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(READONLY));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is compaction enabled
   *
   * @see #isCompactionEnabled()
   */
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final ImmutableBytesWritable COMPACTION_ENABLED_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which represents the maximum size of the memstore after which
   * its contents are flushed onto the disk
   *
   * @see #getMemStoreFlushSize()
   */
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final ImmutableBytesWritable MEMSTORE_FLUSHSIZE_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if the table is a -ROOT- region or not
   *
   * @see #isRootRegion()
   */
  public static final String IS_ROOT = "IS_ROOT";
  private static final ImmutableBytesWritable IS_ROOT_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT));

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if it is a catalog table, either
   * <code> hbase:meta </code> or <code> -ROOT- </code>
   *
   * @see #isMetaRegion()
   */
  public static final String IS_META = "IS_META";
  private static final ImmutableBytesWritable IS_META_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(IS_META));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes if the deferred log flush option is enabled.
   * Writes through this key are translated into a {@link Durability} setting
   * by {@link #setValue(ImmutableBytesWritable, ImmutableBytesWritable)}.
   * @deprecated Use {@link #DURABILITY} instead.
   */
  @Deprecated
  public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
  @Deprecated
  private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(DEFERRED_LOG_FLUSH));
177 
178   /**
179    * <em>INTERNAL</em> {@link Durability} setting for the table.
180    */
181   public static final String DURABILITY = "DURABILITY";
182   private static final ImmutableBytesWritable DURABILITY_KEY =
183       new ImmutableBytesWritable(Bytes.toBytes("DURABILITY"));
184 
185   /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
186   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
187 
  /*
   *  The below are ugly but better than creating them each time till we
   *  replace booleans being saved as Strings with plain booleans.  Need a
   *  migration script to do this.  TODO.
   */
  // Shared byte representations of "false"/"true" used for all boolean metadata.
  private static final ImmutableBytesWritable FALSE =
    new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString()));

  private static final ImmutableBytesWritable TRUE =
    new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));

  // Default for the deprecated DEFERRED_LOG_FLUSH attribute.
  private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;

  /**
   * Constant that denotes whether the table is READONLY by default and is false
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes the maximum default size of the memstore after which
   * the contents are flushed to the store files
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;

  // String form of each reserved attribute's default value, used by
  // getValues(boolean) to suppress attributes still at their default.
  private final static Map<String, String> DEFAULT_VALUES
    = new HashMap<String, String>();
  // Keys that are managed by HBase itself rather than user metadata.
  private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
    = new HashSet<ImmutableBytesWritable>();
  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
        String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
        String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
        String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
    for (String s : DEFAULT_VALUES.keySet()) {
      RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
    }
    RESERVED_KEYWORDS.add(IS_ROOT_KEY);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }

  /**
   * Cache of whether this is a meta table or not.
   */
  private volatile Boolean meta = null;
  /**
   * Cache of whether this is root table or not.
   */
  private volatile Boolean root = null;

  /**
   * Durability setting for the table. Lazily populated from {@link #values}
   * by {@link #getDurability()}.
   */
  private Durability durability = null;

  /**
   * Maps column family name to the respective HColumnDescriptors.
   * Sorted by raw byte comparison of the family names.
   */
  private final Map<byte [], HColumnDescriptor> families =
    new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
256 
257   /**
258    * <em> INTERNAL </em> Private constructor used internally creating table descriptors for
259    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
260    */
261   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
262     setName(name);
263     for(HColumnDescriptor descriptor : families) {
264       this.families.put(descriptor.getName(), descriptor);
265     }
266   }
267 
268   /**
269    * <em> INTERNAL </em>Private constructor used internally creating table descriptors for
270    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
271    */
272   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
273       Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
274     setName(name);
275     for(HColumnDescriptor descriptor : families) {
276       this.families.put(descriptor.getName(), descriptor);
277     }
278     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
279         values.entrySet()) {
280       setValue(entry.getKey(), entry.getValue());
281     }
282   }
283 
284   /**
285    * Default constructor which constructs an empty object.
286    * For deserializing an HTableDescriptor instance only.
287    * @deprecated Used by Writables and Writables are going away.
288    */
289   @Deprecated
290   public HTableDescriptor() {
291     super();
292   }
293 
294   /**
295    * Construct a table descriptor specifying a TableName object
296    * @param name Table name.
297    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
298    */
299   public HTableDescriptor(final TableName name) {
300     super();
301     setName(name);
302   }
303 
  /**
   * Construct a table descriptor specifying a byte array table name
   * @param name Table name.
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   * @deprecated use {@link #HTableDescriptor(TableName)} instead
   */
  @Deprecated
  public HTableDescriptor(final byte[] name) {
    this(TableName.valueOf(name));
  }

  /**
   * Construct a table descriptor specifying a String table name
   * @param name Table name.
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   * @deprecated use {@link #HTableDescriptor(TableName)} instead
   */
  @Deprecated
  public HTableDescriptor(final String name) {
    this(TableName.valueOf(name));
  }
323 
324   /**
325    * Construct a table descriptor by cloning the descriptor passed as a parameter.
326    * <p>
327    * Makes a deep copy of the supplied descriptor.
328    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
329    * @param desc The descriptor.
330    */
331   public HTableDescriptor(final HTableDescriptor desc) {
332     super();
333     setName(desc.name);
334     setMetaFlags(this.name);
335     for (HColumnDescriptor c: desc.families.values()) {
336       this.families.put(c.getName(), new HColumnDescriptor(c));
337     }
338     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
339         desc.values.entrySet()) {
340       setValue(e.getKey(), e.getValue());
341     }
342     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
343       this.configuration.put(e.getKey(), e.getValue());
344     }
345   }
346 
347   /*
348    * Set meta flags on this table.
349    * IS_ROOT_KEY is set if its a -ROOT- table
350    * IS_META_KEY is set either if its a -ROOT- or a hbase:meta table
351    * Called by constructors.
352    * @param name
353    */
354   private void setMetaFlags(final TableName name) {
355     setMetaRegion(isRootRegion() ||
356         name.equals(TableName.META_TABLE_NAME));
357   }
358 
359   /**
360    * Check if the descriptor represents a <code> -ROOT- </code> region.
361    *
362    * @return true if this is a <code> -ROOT- </code> region
363    */
364   public boolean isRootRegion() {
365     if (this.root == null) {
366       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
367     }
368     return this.root.booleanValue();
369   }
370 
371   /**
372    * <em> INTERNAL </em> Used to denote if the current table represents
373    * <code> -ROOT- </code> region. This is used internally by the
374    * HTableDescriptor constructors
375    *
376    * @param isRoot true if this is the <code> -ROOT- </code> region
377    */
378   protected void setRootRegion(boolean isRoot) {
379     // TODO: Make the value a boolean rather than String of boolean.
380     setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
381   }
382 
383   /**
384    * Checks if this table is <code> hbase:meta </code>
385    * region.
386    *
387    * @return true if this table is <code> hbase:meta </code>
388    * region
389    */
390   public boolean isMetaRegion() {
391     if (this.meta == null) {
392       this.meta = calculateIsMetaRegion();
393     }
394     return this.meta.booleanValue();
395   }
396 
397   private synchronized Boolean calculateIsMetaRegion() {
398     byte [] value = getValue(IS_META_KEY);
399     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
400   }
401 
402   private boolean isSomething(final ImmutableBytesWritable key,
403       final boolean valueIfNull) {
404     byte [] value = getValue(key);
405     if (value != null) {
406       return Boolean.valueOf(Bytes.toString(value));
407     }
408     return valueIfNull;
409   }
410 
411   /**
412    * <em> INTERNAL </em> Used to denote if the current table represents
413    * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
414    * internally by the HTableDescriptor constructors
415    *
416    * @param isMeta true if its either <code> -ROOT- </code> or
417    * <code> hbase:meta </code> region
418    */
419   protected void setMetaRegion(boolean isMeta) {
420     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
421   }
422 
423   /**
424    * Checks if the table is a <code>hbase:meta</code> table
425    *
426    * @return true if table is <code> hbase:meta </code> region.
427    */
428   public boolean isMetaTable() {
429     return isMetaRegion() && !isRootRegion();
430   }
431 
432   /**
433    * Getter for accessing the metadata associated with the key
434    *
435    * @param key The key.
436    * @return The value.
437    * @see #values
438    */
439   public byte[] getValue(byte[] key) {
440     return getValue(new ImmutableBytesWritable(key));
441   }
442 
443   private byte[] getValue(final ImmutableBytesWritable key) {
444     ImmutableBytesWritable ibw = values.get(key);
445     if (ibw == null)
446       return null;
447     return ibw.get();
448   }
449 
450   /**
451    * Getter for accessing the metadata associated with the key
452    *
453    * @param key The key.
454    * @return The value.
455    * @see #values
456    */
457   public String getValue(String key) {
458     byte[] value = getValue(Bytes.toBytes(key));
459     if (value == null)
460       return null;
461     return Bytes.toString(value);
462   }
463 
464   /**
465    * Getter for fetching an unmodifiable {@link #values} map.
466    *
467    * @return unmodifiable map {@link #values}.
468    * @see #values
469    */
470   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
471     // shallow pointer copy
472     return Collections.unmodifiableMap(values);
473   }
474 
475   /**
476    * Setter for storing metadata as a (key, value) pair in {@link #values} map
477    *
478    * @param key The key.
479    * @param value The value.
480    * @see #values
481    */
482   public void setValue(byte[] key, byte[] value) {
483     setValue(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value));
484   }
485 
486   /*
487    * @param key The key.
488    * @param value The value.
489    */
490   private void setValue(final ImmutableBytesWritable key,
491       final String value) {
492     setValue(key, new ImmutableBytesWritable(Bytes.toBytes(value)));
493   }
494 
495   /*
496    * Setter for storing metadata as a (key, value) pair in {@link #values} map
497    *
498    * @param key The key.
499    * @param value The value.
500    */
501   public void setValue(final ImmutableBytesWritable key,
502       final ImmutableBytesWritable value) {
503     if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
504       boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
505       LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
506           "use " + DURABILITY + " instead");
507       setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
508       return;
509     }
510     values.put(key, value);
511   }
512 
513   /**
514    * Setter for storing metadata as a (key, value) pair in {@link #values} map
515    *
516    * @param key The key.
517    * @param value The value.
518    * @see #values
519    */
520   public void setValue(String key, String value) {
521     if (value == null) {
522       remove(key);
523     } else {
524       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
525     }
526   }
527 
528   /**
529    * Remove metadata represented by the key from the {@link #values} map
530    *
531    * @param key Key whose key and value we're to remove from HTableDescriptor
532    * parameters.
533    */
534   public void remove(final String key) {
535     remove(new ImmutableBytesWritable(Bytes.toBytes(key)));
536   }
537 
538   /**
539    * Remove metadata represented by the key from the {@link #values} map
540    *
541    * @param key Key whose key and value we're to remove from HTableDescriptor
542    * parameters.
543    */
544   public void remove(ImmutableBytesWritable key) {
545     values.remove(key);
546   }
547 
548   /**
549    * Remove metadata represented by the key from the {@link #values} map
550    *
551    * @param key Key whose key and value we're to remove from HTableDescriptor
552    * parameters.
553    */
554   public void remove(final byte [] key) {
555     remove(new ImmutableBytesWritable(key));
556   }
557 
558   /**
559    * Check if the readOnly flag of the table is set. If the readOnly flag is
560    * set then the contents of the table can only be read from but not modified.
561    *
562    * @return true if all columns in the table should be read only
563    */
564   public boolean isReadOnly() {
565     return isSomething(READONLY_KEY, DEFAULT_READONLY);
566   }
567 
568   /**
569    * Setting the table as read only sets all the columns in the table as read
570    * only. By default all tables are modifiable, but if the readOnly flag is
571    * set to true then the contents of the table can only be read but not modified.
572    *
573    * @param readOnly True if all of the columns in the table should be read
574    * only.
575    */
576   public void setReadOnly(final boolean readOnly) {
577     setValue(READONLY_KEY, readOnly? TRUE: FALSE);
578   }
579 
580   /**
581    * Check if the compaction enable flag of the table is true. If flag is
582    * false then no minor/major compactions will be done in real.
583    *
584    * @return true if table compaction enabled
585    */
586   public boolean isCompactionEnabled() {
587     return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
588   }
589 
590   /**
591    * Setting the table compaction enable flag.
592    *
593    * @param isEnable True if enable compaction.
594    */
595   public void setCompactionEnabled(final boolean isEnable) {
596     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
597   }
598 
599   /**
600    * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
601    * @param durability enum value
602    */
603   public void setDurability(Durability durability) {
604     this.durability = durability;
605     setValue(DURABILITY_KEY, durability.name());
606   }
607 
608   /**
609    * Returns the durability setting for the table.
610    * @return durability setting for the table.
611    */
612   public Durability getDurability() {
613     if (this.durability == null) {
614       byte[] durabilityValue = getValue(DURABILITY_KEY);
615       if (durabilityValue == null) {
616         this.durability = DEFAULT_DURABLITY;
617       } else {
618         try {
619           this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
620         } catch (IllegalArgumentException ex) {
621           LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
622             + " is not known. Durability:" + Bytes.toString(durabilityValue));
623           this.durability = DEFAULT_DURABLITY;
624         }
625       }
626     }
627     return this.durability;
628   }
629 
  /**
   * Get the name of the table
   *
   * @return TableName
   */
  public TableName getTableName() {
    return name;
  }

  /**
   * Get the name of the table as a byte array.
   *
   * @return name of table
   */
  public byte[] getName() {
    return name.getName();
  }

  /**
   * Get the name of the table as a String
   *
   * @return name of table as a String
   */
  public String getNameAsString() {
    return name.getNameAsString();
  }
656 
  /**
   * This sets the class associated with the region split policy which
   * determines when a region split should occur.  The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
   * @param clazz the class name
   */
  public void setRegionSplitPolicyClassName(String clazz) {
    // Stored as an ordinary String attribute under SPLIT_POLICY.
    setValue(SPLIT_POLICY, clazz);
  }

  /**
   * This gets the class associated with the region split policy which
   * determines when a region split should occur.  The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
   *
   * @return the class name of the region split policy for this table.
   * If this returns null, the default split policy is used.
   */
  public String getRegionSplitPolicyClassName() {
    return getValue(SPLIT_POLICY);
  }
678 
  /**
   * Set the name of the table.
   *
   * @param name name of table
   * @deprecated use {@link #setName(TableName)} instead
   */
  @Deprecated
  public void setName(byte[] name) {
    setName(TableName.valueOf(name));
  }

  /**
   * Set the name of the table and recompute the meta flags for the new name.
   *
   * @param name name of table
   * @deprecated table names are intended to be immutable after construction
   */
  @Deprecated
  public void setName(TableName name) {
    this.name = name;
    // Re-derive IS_META for the new name (also called from constructors).
    setMetaFlags(this.name);
  }
694 
695   /**
696    * Returns the maximum size upto which a region can grow to after which a region
697    * split is triggered. The region size is represented by the size of the biggest
698    * store file in that region.
699    *
700    * @return max hregion size for table, -1 if not set.
701    *
702    * @see #setMaxFileSize(long)
703    */
704   public long getMaxFileSize() {
705     byte [] value = getValue(MAX_FILESIZE_KEY);
706     if (value != null) {
707       return Long.parseLong(Bytes.toString(value));
708     }
709     return -1;
710   }
711 
712   /**
713    * Sets the maximum size upto which a region can grow to after which a region
714    * split is triggered. The region size is represented by the size of the biggest
715    * store file in that region, i.e. If the biggest store file grows beyond the
716    * maxFileSize, then the region split is triggered. This defaults to a value of
717    * 256 MB.
718    * <p>
719    * This is not an absolute value and might vary. Assume that a single row exceeds
720    * the maxFileSize then the storeFileSize will be greater than maxFileSize since
721    * a single row cannot be split across multiple regions
722    * </p>
723    *
724    * @param maxFileSize The maximum file size that a store file can grow to
725    * before a split is triggered.
726    */
727   public void setMaxFileSize(long maxFileSize) {
728     setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
729   }
730 
731   /**
732    * Returns the size of the memstore after which a flush to filesystem is triggered.
733    *
734    * @return memory cache flush size for each hregion, -1 if not set.
735    *
736    * @see #setMemStoreFlushSize(long)
737    */
738   public long getMemStoreFlushSize() {
739     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
740     if (value != null) {
741       return Long.parseLong(Bytes.toString(value));
742     }
743     return -1;
744   }
745 
746   /**
747    * Represents the maximum size of the memstore after which the contents of the
748    * memstore are flushed to the filesystem. This defaults to a size of 64 MB.
749    *
750    * @param memstoreFlushSize memory cache flush size for each hregion
751    */
752   public void setMemStoreFlushSize(long memstoreFlushSize) {
753     setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
754   }
755 
756   /**
757    * Adds a column family.
758    * @param family HColumnDescriptor of family to add.
759    */
760   public void addFamily(final HColumnDescriptor family) {
761     if (family.getName() == null || family.getName().length <= 0) {
762       throw new NullPointerException("Family name cannot be null or empty");
763     }
764     this.families.put(family.getName(), family);
765   }
766 
767   /**
768    * Checks to see if this table contains the given column family
769    * @param familyName Family name or column name.
770    * @return true if the table contains the specified family name
771    */
772   public boolean hasFamily(final byte [] familyName) {
773     return families.containsKey(familyName);
774   }
775 
776   /**
777    * @return Name of this table and then a map of all of the column family
778    * descriptors.
779    * @see #getNameAsString()
780    */
781   @Override
782   public String toString() {
783     StringBuilder s = new StringBuilder();
784     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
785     s.append(getValues(true));
786     for (HColumnDescriptor f : families.values()) {
787       s.append(", ").append(f);
788     }
789     return s.toString();
790   }
791 
792   /**
793    * @return Name of this table and then a map of all of the column family
794    * descriptors (with only the non-default column family attributes)
795    */
796   public String toStringCustomizedValues() {
797     StringBuilder s = new StringBuilder();
798     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
799     s.append(getValues(false));
800     for(HColumnDescriptor hcd : families.values()) {
801       s.append(", ").append(hcd.toStringCustomizedValues());
802     }
803     return s.toString();
804   }
805 
  /**
   * Renders the table attributes and per-table configuration as the
   * shell-style string used by {@link #toString()} and
   * {@link #toStringCustomizedValues()}.
   *
   * @param printDefaults when false, reserved attributes still at their
   *   default value are omitted
   * @return the rendered attribute string (empty when there is nothing to show)
   */
  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    // step 1: set partitioning and pruning
    // reservedKeys: HBase-managed attributes to print; userKeys: everything else
    // (printed under the METADATA subsection). TreeSet gives deterministic order.
    Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
    Set<ImmutableBytesWritable> userKeys = new TreeSet<ImmutableBytesWritable>();
    for (ImmutableBytesWritable k : values.keySet()) {
      if (k == null || k.get() == null) continue;
      String key = Bytes.toString(k.get());
      // in this section, print out reserved keywords + coprocessor info
      if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
        userKeys.add(k);
        continue;
      }
      // only print out IS_ROOT/IS_META if true
      String value = Bytes.toString(values.get(k).get());
      if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
        if (Boolean.valueOf(value) == false) continue;
      }
      // see if a reserved key is a default value. may not want to print it out
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        reservedKeys.add(k);
      }
    }

    // early exit optimization
    boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
    if (!hasAttributes && configuration.isEmpty()) return s;

    s.append(", {");
    // step 2: printing attributes
    if (hasAttributes) {
      // NOTE(review): the '{' opened here and the one in ", {" above appear to
      // be closed by only one '}' at the end of this method — verify the final
      // output against the expected shell format before changing anything.
      s.append("TABLE_ATTRIBUTES => {");

      // print all reserved keys first
      boolean printCommaForAttr = false;
      for (ImmutableBytesWritable k : reservedKeys) {
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(key);
        s.append(" => ");
        s.append('\'').append(value).append('\'');
      }

      if (!userKeys.isEmpty()) {
        // print all non-reserved, advanced config keys as a separate subset
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(HConstants.METADATA).append(" => ");
        s.append("{");
        boolean printCommaForCfg = false;
        for (ImmutableBytesWritable k : userKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForCfg) s.append(", ");
          printCommaForCfg = true;
          s.append('\'').append(key).append('\'');
          s.append(" => ");
          s.append('\'').append(value).append('\'');
        }
        s.append("}");
      }
    }

    // step 3: printing all configuration:
    if (!configuration.isEmpty()) {
      if (hasAttributes) {
        s.append(", ");
      }
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfig = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfig) s.append(", ");
        printCommaForConfig = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(e.getValue()).append('\'');
      }
      s.append("}");
    }
    s.append("}"); // end METHOD
    return s;
  }
894 
895   /**
896    * Compare the contents of the descriptor with another one passed as a parameter.
897    * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
898    * contents of the descriptors are compared.
899    *
900    * @return true if the contents of the the two descriptors exactly match
901    *
902    * @see java.lang.Object#equals(java.lang.Object)
903    */
904   @Override
905   public boolean equals(Object obj) {
906     if (this == obj) {
907       return true;
908     }
909     if (obj == null) {
910       return false;
911     }
912     if (!(obj instanceof HTableDescriptor)) {
913       return false;
914     }
915     return compareTo((HTableDescriptor)obj) == 0;
916   }
917 
918   /**
919    * @see java.lang.Object#hashCode()
920    */
921   @Override
922   public int hashCode() {
923     int result = this.name.hashCode();
924     result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode();
925     if (this.families != null && this.families.size() > 0) {
926       for (HColumnDescriptor e: this.families.values()) {
927         result ^= e.hashCode();
928       }
929     }
930     result ^= values.hashCode();
931     result ^= configuration.hashCode();
932     return result;
933   }
934 
935   /**
936    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
937    * and is used for de-serialization of the HTableDescriptor over RPC
938    * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
939    */
940   @Deprecated
941   @Override
942   public void readFields(DataInput in) throws IOException {
943     int version = in.readInt();
944     if (version < 3)
945       throw new IOException("versions < 3 are not supported (and never existed!?)");
946     // version 3+
947     name = TableName.valueOf(Bytes.readByteArray(in));
948     setRootRegion(in.readBoolean());
949     setMetaRegion(in.readBoolean());
950     values.clear();
951     configuration.clear();
952     int numVals = in.readInt();
953     for (int i = 0; i < numVals; i++) {
954       ImmutableBytesWritable key = new ImmutableBytesWritable();
955       ImmutableBytesWritable value = new ImmutableBytesWritable();
956       key.readFields(in);
957       value.readFields(in);
958       setValue(key, value);
959     }
960     families.clear();
961     int numFamilies = in.readInt();
962     for (int i = 0; i < numFamilies; i++) {
963       HColumnDescriptor c = new HColumnDescriptor();
964       c.readFields(in);
965       families.put(c.getName(), c);
966     }
967     if (version >= 7) {
968       int numConfigs = in.readInt();
969       for (int i = 0; i < numConfigs; i++) {
970         ImmutableBytesWritable key = new ImmutableBytesWritable();
971         ImmutableBytesWritable value = new ImmutableBytesWritable();
972         key.readFields(in);
973         value.readFields(in);
974         configuration.put(
975           Bytes.toString(key.get(), key.getOffset(), key.getLength()),
976           Bytes.toString(value.get(), value.getOffset(), value.getLength()));
977       }
978     }
979   }
980 
981   /**
982    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
983    * and is used for serialization of the HTableDescriptor over RPC
984    * @deprecated Writables are going away.
985    * Use {@link com.google.protobuf.MessageLite#toByteArray} instead.
986    */
987   @Deprecated
988   @Override
989   public void write(DataOutput out) throws IOException {
990 	  out.writeInt(TABLE_DESCRIPTOR_VERSION);
991     Bytes.writeByteArray(out, name.toBytes());
992     out.writeBoolean(isRootRegion());
993     out.writeBoolean(isMetaRegion());
994     out.writeInt(values.size());
995     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
996         values.entrySet()) {
997       e.getKey().write(out);
998       e.getValue().write(out);
999     }
1000     out.writeInt(families.size());
1001     for(Iterator<HColumnDescriptor> it = families.values().iterator();
1002         it.hasNext(); ) {
1003       HColumnDescriptor family = it.next();
1004       family.write(out);
1005     }
1006     out.writeInt(configuration.size());
1007     for (Map.Entry<String, String> e : configuration.entrySet()) {
1008       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1009       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1010     }
1011   }
1012 
1013   // Comparable
1014 
1015   /**
1016    * Compares the descriptor with another descriptor which is passed as a parameter.
1017    * This compares the content of the two descriptors and not the reference.
1018    *
1019    * @return 0 if the contents of the descriptors are exactly matching,
1020    * 		 1 if there is a mismatch in the contents
1021    */
1022   @Override
1023   public int compareTo(final HTableDescriptor other) {
1024     int result = this.name.compareTo(other.name);
1025     if (result == 0) {
1026       result = families.size() - other.families.size();
1027     }
1028     if (result == 0 && families.size() != other.families.size()) {
1029       result = Integer.valueOf(families.size()).compareTo(
1030           Integer.valueOf(other.families.size()));
1031     }
1032     if (result == 0) {
1033       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1034           it2 = other.families.values().iterator(); it.hasNext(); ) {
1035         result = it.next().compareTo(it2.next());
1036         if (result != 0) {
1037           break;
1038         }
1039       }
1040     }
1041     if (result == 0) {
1042       // punt on comparison for ordering, just calculate difference
1043       result = this.values.hashCode() - other.values.hashCode();
1044       if (result < 0)
1045         result = -1;
1046       else if (result > 0)
1047         result = 1;
1048     }
1049     if (result == 0) {
1050       result = this.configuration.hashCode() - other.configuration.hashCode();
1051       if (result < 0)
1052         result = -1;
1053       else if (result > 0)
1054         result = 1;
1055     }
1056     return result;
1057   }
1058 
1059   /**
1060    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1061    * of all the column families of the table.
1062    *
1063    * @return Immutable collection of {@link HColumnDescriptor} of all the
1064    * column families.
1065    */
1066   public Collection<HColumnDescriptor> getFamilies() {
1067     return Collections.unmodifiableCollection(this.families.values());
1068   }
1069 
1070   /**
1071    * Returns all the column family names of the current table. The map of
1072    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1073    * This returns all the keys of the family map which represents the column
1074    * family names of the table.
1075    *
1076    * @return Immutable sorted set of the keys of the families.
1077    */
1078   public Set<byte[]> getFamiliesKeys() {
1079     return Collections.unmodifiableSet(this.families.keySet());
1080   }
1081 
1082   /**
1083    * Returns an array all the {@link HColumnDescriptor} of the column families
1084    * of the table.
1085    *
1086    * @return Array of all the HColumnDescriptors of the current table
1087    *
1088    * @see #getFamilies()
1089    */
1090   public HColumnDescriptor[] getColumnFamilies() {
1091     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1092     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1093   }
1094 
1095 
1096   /**
1097    * Returns the HColumnDescriptor for a specific column family with name as
1098    * specified by the parameter column.
1099    *
1100    * @param column Column family name
1101    * @return Column descriptor for the passed family name or the family on
1102    * passed in column.
1103    */
1104   public HColumnDescriptor getFamily(final byte [] column) {
1105     return this.families.get(column);
1106   }
1107 
1108 
1109   /**
1110    * Removes the HColumnDescriptor with name specified by the parameter column
1111    * from the table descriptor
1112    *
1113    * @param column Name of the column family to be removed.
1114    * @return Column descriptor for the passed family name or the family on
1115    * passed in column.
1116    */
1117   public HColumnDescriptor removeFamily(final byte [] column) {
1118     return this.families.remove(column);
1119   }
1120 
1121 
1122   /**
1123    * Add a table coprocessor to this table. The coprocessor
1124    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1125    * or Endpoint.
1126    * It won't check if the class can be loaded or not.
1127    * Whether a coprocessor is loadable or not will be determined when
1128    * a region is opened.
1129    * @param className Full class name.
1130    * @throws IOException
1131    */
1132   public void addCoprocessor(String className) throws IOException {
1133     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1134   }
1135 
1136 
1137   /**
1138    * Add a table coprocessor to this table. The coprocessor
1139    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1140    * or Endpoint.
1141    * It won't check if the class can be loaded or not.
1142    * Whether a coprocessor is loadable or not will be determined when
1143    * a region is opened.
1144    * @param jarFilePath Path of the jar file. If it's null, the class will be
1145    * loaded from default classloader.
1146    * @param className Full class name.
1147    * @param priority Priority
1148    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1149    * @throws IOException
1150    */
1151   public void addCoprocessor(String className, Path jarFilePath,
1152                              int priority, final Map<String, String> kvs)
1153   throws IOException {
1154     if (hasCoprocessor(className)) {
1155       throw new IOException("Coprocessor " + className + " already exists.");
1156     }
1157     // validate parameter kvs
1158     StringBuilder kvString = new StringBuilder();
1159     if (kvs != null) {
1160       for (Map.Entry<String, String> e: kvs.entrySet()) {
1161         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1162           throw new IOException("Illegal parameter key = " + e.getKey());
1163         }
1164         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1165           throw new IOException("Illegal parameter (" + e.getKey() +
1166               ") value = " + e.getValue());
1167         }
1168         if (kvString.length() != 0) {
1169           kvString.append(',');
1170         }
1171         kvString.append(e.getKey());
1172         kvString.append('=');
1173         kvString.append(e.getValue());
1174       }
1175     }
1176 
1177     // generate a coprocessor key
1178     int maxCoprocessorNumber = 0;
1179     Matcher keyMatcher;
1180     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1181         this.values.entrySet()) {
1182       keyMatcher =
1183           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1184               Bytes.toString(e.getKey().get()));
1185       if (!keyMatcher.matches()) {
1186         continue;
1187       }
1188       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)),
1189           maxCoprocessorNumber);
1190     }
1191     maxCoprocessorNumber++;
1192 
1193     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1194     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1195         "|" + className + "|" + Integer.toString(priority) + "|" +
1196         kvString.toString();
1197     setValue(key, value);
1198   }
1199 
1200 
1201   /**
1202    * Check if the table has an attached co-processor represented by the name className
1203    *
1204    * @param className - Class name of the co-processor
1205    * @return true of the table has a co-processor className
1206    */
1207   public boolean hasCoprocessor(String className) {
1208     Matcher keyMatcher;
1209     Matcher valueMatcher;
1210     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1211         this.values.entrySet()) {
1212       keyMatcher =
1213           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1214               Bytes.toString(e.getKey().get()));
1215       if (!keyMatcher.matches()) {
1216         continue;
1217       }
1218       valueMatcher =
1219         HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(
1220             Bytes.toString(e.getValue().get()));
1221       if (!valueMatcher.matches()) {
1222         continue;
1223       }
1224       // get className and compare
1225       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1226       if (clazz.equals(className.trim())) {
1227         return true;
1228       }
1229     }
1230     return false;
1231   }
1232 
1233   /**
1234    * Return the list of attached co-processor represented by their name className
1235    *
1236    * @return The list of co-processors classNames
1237    */
1238   public List<String> getCoprocessors() {
1239     List<String> result = new ArrayList<String>();
1240     Matcher keyMatcher;
1241     Matcher valueMatcher;
1242     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values.entrySet()) {
1243       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1244       if (!keyMatcher.matches()) {
1245         continue;
1246       }
1247       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1248           .toString(e.getValue().get()));
1249       if (!valueMatcher.matches()) {
1250         continue;
1251       }
1252       result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
1253     }
1254     return result;
1255   }
1256 
1257   /**
1258    * Remove a coprocessor from those set on the table
1259    * @param className Class name of the co-processor
1260    */
1261   public void removeCoprocessor(String className) {
1262     ImmutableBytesWritable match = null;
1263     Matcher keyMatcher;
1264     Matcher valueMatcher;
1265     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values
1266         .entrySet()) {
1267       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1268           .getKey().get()));
1269       if (!keyMatcher.matches()) {
1270         continue;
1271       }
1272       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1273           .toString(e.getValue().get()));
1274       if (!valueMatcher.matches()) {
1275         continue;
1276       }
1277       // get className and compare
1278       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1279       // remove the CP if it is present
1280       if (clazz.equals(className.trim())) {
1281         match = e.getKey();
1282         break;
1283       }
1284     }
1285     // if we found a match, remove it
1286     if (match != null)
1287       remove(match);
1288   }
1289 
1290   /**
1291    * Returns the {@link Path} object representing the table directory under
1292    * path rootdir
1293    *
1294    * Deprecated use FSUtils.getTableDir() instead.
1295    *
1296    * @param rootdir qualified path of HBase root directory
1297    * @param tableName name of table
1298    * @return {@link Path} for table
1299    */
1300   @Deprecated
1301   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1302     //This is bad I had to mirror code from FSUTils.getTableDir since
1303     //there is no module dependency between hbase-client and hbase-server
1304     TableName name = TableName.valueOf(tableName);
1305     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1306               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1307   }
1308 
  /** Table descriptor for <code>hbase:meta</code> catalog table */
  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
      TableName.META_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              // 8k block size; meta rows are small
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
      });
1322 
  static {
    try {
      // Attach the multi-row mutation endpoint to hbase:meta at system priority.
      META_TABLEDESC.addCoprocessor(
          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
          null, Coprocessor.PRIORITY_SYSTEM, null);
    } catch (IOException ex) {
      // Should not happen on a freshly-built descriptor; fail class init loudly.
      throw new RuntimeException(ex);
    }
  }
1333 
  /** Name of the column family used by the namespace table. */
  public final static String NAMESPACE_FAMILY_INFO = "info";
  /** {@link #NAMESPACE_FAMILY_INFO} pre-encoded as bytes. */
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  /** Qualifier ("d") for the namespace descriptor column. */
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1337 
  /** Table descriptor for namespace table */
  public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
      TableName.NAMESPACE_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              // 8k block size; namespace rows are small
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
      });
1349 
1350   @Deprecated
1351   public void setOwner(User owner) {
1352     setOwnerString(owner != null ? owner.getShortName() : null);
1353   }
1354 
1355   // used by admin.rb:alter(table_name,*args) to update owner.
1356   @Deprecated
1357   public void setOwnerString(String ownerString) {
1358     if (ownerString != null) {
1359       setValue(OWNER_KEY, ownerString);
1360     } else {
1361       remove(OWNER_KEY);
1362     }
1363   }
1364 
1365   @Deprecated
1366   public String getOwnerString() {
1367     if (getValue(OWNER_KEY) != null) {
1368       return Bytes.toString(getValue(OWNER_KEY));
1369     }
1370     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1371     // hbase:meta and -ROOT- should return system user as owner, not null (see
1372     // MasterFileSystem.java:bootstrap()).
1373     return null;
1374   }
1375 
1376   /**
1377    * @return This instance serialized with pb with pb magic prefix
1378    * @see #parseFrom(byte[])
1379    */
1380   public byte [] toByteArray() {
1381     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1382   }
1383 
1384   /**
1385    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1386    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1387    * @throws DeserializationException
1388    * @throws IOException
1389    * @see #toByteArray()
1390    */
1391   public static HTableDescriptor parseFrom(final byte [] bytes)
1392   throws DeserializationException, IOException {
1393     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1394       return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor());
1395     }
1396     int pblen = ProtobufUtil.lengthOfPBMagic();
1397     TableSchema.Builder builder = TableSchema.newBuilder();
1398     TableSchema ts;
1399     try {
1400       ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1401     } catch (InvalidProtocolBufferException e) {
1402       throw new DeserializationException(e);
1403     }
1404     return convert(ts);
1405   }
1406 
1407   /**
1408    * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
1409    */
1410   public TableSchema convert() {
1411     TableSchema.Builder builder = TableSchema.newBuilder();
1412     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1413     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1414       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1415       aBuilder.setFirst(HBaseZeroCopyByteString.wrap(e.getKey().get()));
1416       aBuilder.setSecond(HBaseZeroCopyByteString.wrap(e.getValue().get()));
1417       builder.addAttributes(aBuilder.build());
1418     }
1419     for (HColumnDescriptor hcd: getColumnFamilies()) {
1420       builder.addColumnFamilies(hcd.convert());
1421     }
1422     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1423       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1424       aBuilder.setName(e.getKey());
1425       aBuilder.setValue(e.getValue());
1426       builder.addConfiguration(aBuilder.build());
1427     }
1428     return builder.build();
1429   }
1430 
1431   /**
1432    * @param ts A pb TableSchema instance.
1433    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1434    */
1435   public static HTableDescriptor convert(final TableSchema ts) {
1436     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1437     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1438     int index = 0;
1439     for (ColumnFamilySchema cfs: list) {
1440       hcds[index++] = HColumnDescriptor.convert(cfs);
1441     }
1442     HTableDescriptor htd = new HTableDescriptor(
1443         ProtobufUtil.toTableName(ts.getTableName()),
1444         hcds);
1445     for (BytesBytesPair a: ts.getAttributesList()) {
1446       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1447     }
1448     for (NameStringPair a: ts.getConfigurationList()) {
1449       htd.setConfiguration(a.getName(), a.getValue());
1450     }
1451     return htd;
1452   }
1453 
1454   /**
1455    * Getter for accessing the configuration value by key
1456    */
1457   public String getConfigurationValue(String key) {
1458     return configuration.get(key);
1459   }
1460 
1461   /**
1462    * Getter for fetching an unmodifiable {@link #configuration} map.
1463    */
1464   public Map<String, String> getConfiguration() {
1465     // shallow pointer copy
1466     return Collections.unmodifiableMap(configuration);
1467   }
1468 
1469   /**
1470    * Setter for storing a configuration setting in {@link #configuration} map.
1471    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1472    * @param value String value. If null, removes the setting.
1473    */
1474   public void setConfiguration(String key, String value) {
1475     if (value == null) {
1476       removeConfiguration(key);
1477     } else {
1478       configuration.put(key, value);
1479     }
1480   }
1481 
1482   /**
1483    * Remove a config setting represented by the key from the {@link #configuration} map
1484    */
1485   public void removeConfiguration(final String key) {
1486     configuration.remove(key);
1487   }
1488 }