View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.IOException;
22  import java.util.ArrayList;
23  import java.util.Collection;
24  import java.util.Collections;
25  import java.util.HashMap;
26  import java.util.HashSet;
27  import java.util.Iterator;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.Set;
31  import java.util.TreeMap;
32  import java.util.TreeSet;
33  import java.util.regex.Matcher;
34  
35  import javax.annotation.Nonnull;
36  
37  import org.apache.commons.logging.Log;
38  import org.apache.commons.logging.LogFactory;
39  import org.apache.hadoop.fs.Path;
40  import org.apache.hadoop.hbase.classification.InterfaceAudience;
41  import org.apache.hadoop.hbase.classification.InterfaceStability;
42  import org.apache.hadoop.hbase.client.Durability;
43  import org.apache.hadoop.hbase.exceptions.DeserializationException;
44  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
45  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
46  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
47  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
48  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
49  import org.apache.hadoop.hbase.regionserver.BloomType;
50  import org.apache.hadoop.hbase.security.User;
51  import org.apache.hadoop.hbase.util.ByteStringer;
52  import org.apache.hadoop.hbase.util.Bytes;
53  
54  import com.google.protobuf.InvalidProtocolBufferException;
55  
56  /**
57   * HTableDescriptor contains the details about an HBase table  such as the descriptors of
58   * all the column families, is the table a catalog table, <code> -ROOT- </code> or
59   * <code> hbase:meta </code>, if the table is read only, the maximum size of the memstore,
60   * when the region split should occur, coprocessors associated with it etc...
61   */
62  @InterfaceAudience.Public
63  @InterfaceStability.Evolving
64  public class HTableDescriptor implements Comparable<HTableDescriptor> {
65  
66    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
67  
68    private TableName name = null;
69  
70    /**
71     * A map which holds the metadata information of the table. This metadata
72     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
73     * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
74     */
75    private final Map<Bytes, Bytes> values =
76        new HashMap<Bytes, Bytes>();
77  
78    /**
79     * A map which holds the configuration specific to the table.
80     * The keys of the map have the same names as config keys and override the defaults with
81     * table-specific settings. Example usage may be for compactions, etc.
82     */
83    private final Map<String, String> configuration = new HashMap<String, String>();
84  
85    public static final String SPLIT_POLICY = "SPLIT_POLICY";
86  
87    /**
88     * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
89     * attribute which denotes the maximum size of the store file after which
90     * a region split occurs
91     *
92     * @see #getMaxFileSize()
93     */
94    public static final String MAX_FILESIZE = "MAX_FILESIZE";
95    private static final Bytes MAX_FILESIZE_KEY =
96        new Bytes(Bytes.toBytes(MAX_FILESIZE));
97  
98    public static final String OWNER = "OWNER";
99    public static final Bytes OWNER_KEY =
100       new Bytes(Bytes.toBytes(OWNER));
101 
102   /**
103    * <em>INTERNAL</em> Used by rest interface to access this metadata
104    * attribute which denotes if the table is Read Only
105    *
106    * @see #isReadOnly()
107    */
108   public static final String READONLY = "READONLY";
109   private static final Bytes READONLY_KEY =
110       new Bytes(Bytes.toBytes(READONLY));
111 
112   /**
113    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
114    * attribute which denotes if the table is compaction enabled
115    *
116    * @see #isCompactionEnabled()
117    */
118   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
119   private static final Bytes COMPACTION_ENABLED_KEY =
120       new Bytes(Bytes.toBytes(COMPACTION_ENABLED));
121 
122   /**
123    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
124    * attribute which represents the maximum size of the memstore after which
125    * its contents are flushed onto the disk
126    *
127    * @see #getMemStoreFlushSize()
128    */
129   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
130   private static final Bytes MEMSTORE_FLUSHSIZE_KEY =
131       new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
132 
133   public static final String FLUSH_POLICY = "FLUSH_POLICY";
134 
135   /**
136    * <em>INTERNAL</em> Used by rest interface to access this metadata
137    * attribute which denotes if the table is a -ROOT- region or not
138    *
139    * @see #isRootRegion()
140    */
141   public static final String IS_ROOT = "IS_ROOT";
142   private static final Bytes IS_ROOT_KEY =
143       new Bytes(Bytes.toBytes(IS_ROOT));
144 
145   /**
146    * <em>INTERNAL</em> Used by rest interface to access this metadata
147    * attribute which denotes if it is a catalog table, either
148    * <code> hbase:meta </code> or <code> -ROOT- </code>
149    *
150    * @see #isMetaRegion()
151    */
152   public static final String IS_META = "IS_META";
153   private static final Bytes IS_META_KEY =
154       new Bytes(Bytes.toBytes(IS_META));
155 
156   /**
157    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
158    * attribute which denotes if the deferred log flush option is enabled.
159    * @deprecated Use {@link #DURABILITY} instead.
160    */
161   @Deprecated
162   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
163   @Deprecated
164   private static final Bytes DEFERRED_LOG_FLUSH_KEY =
165       new Bytes(Bytes.toBytes(DEFERRED_LOG_FLUSH));
166 
167   /**
168    * <em>INTERNAL</em> {@link Durability} setting for the table.
169    */
170   public static final String DURABILITY = "DURABILITY";
171   private static final Bytes DURABILITY_KEY =
172       new Bytes(Bytes.toBytes("DURABILITY"));
173 
174   /**
175    * <em>INTERNAL</em> number of region replicas for the table.
176    */
177   public static final String REGION_REPLICATION = "REGION_REPLICATION";
178   private static final Bytes REGION_REPLICATION_KEY =
179       new Bytes(Bytes.toBytes(REGION_REPLICATION));
180 
181   /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
182   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
183 
184   /*
185    *  The below are ugly but better than creating them each time till we
186    *  replace booleans being saved as Strings with plain booleans.  Need a
187    *  migration script to do this.  TODO.
188    */
189   private static final Bytes FALSE =
190       new Bytes(Bytes.toBytes(Boolean.FALSE.toString()));
191 
192   private static final Bytes TRUE =
193       new Bytes(Bytes.toBytes(Boolean.TRUE.toString()));
194 
195   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
196 
197   /**
198    * Constant that denotes whether the table is READONLY by default and is false
199    */
200   public static final boolean DEFAULT_READONLY = false;
201 
202   /**
203    * Constant that denotes whether the table is compaction enabled by default
204    */
205   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
206 
207   /**
208    * Constant that denotes the maximum default size of the memstore after which
209    * the contents are flushed to the store files
210    */
211   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
212 
213   public static final int DEFAULT_REGION_REPLICATION = 1;
214 
215   private final static Map<String, String> DEFAULT_VALUES
216     = new HashMap<String, String>();
217   private final static Set<Bytes> RESERVED_KEYWORDS
218       = new HashSet<Bytes>();
219 
  // One-time class initialization: record the default value of every
  // HBase-managed metadata key, and collect the reserved keys (everything
  // with a default, plus IS_ROOT/IS_META) so internal metadata can be told
  // apart from user-supplied metadata when rendering the descriptor.
  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
        String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
        String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
        String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    for (String s : DEFAULT_VALUES.keySet()) {
      RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
    }
    // IS_ROOT/IS_META are reserved but have no entry in DEFAULT_VALUES.
    RESERVED_KEYWORDS.add(IS_ROOT_KEY);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }
236 
237   /**
238    * Cache of whether this is a meta table or not.
239    */
240   private volatile Boolean meta = null;
241   /**
242    * Cache of whether this is root table or not.
243    */
244   private volatile Boolean root = null;
245 
246   /**
247    * Durability setting for the table
248    */
249   private Durability durability = null;
250 
251   /**
252    * Maps column family name to the respective HColumnDescriptors
253    */
254   private final Map<byte [], HColumnDescriptor> families =
255     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
256 
257   /**
258    * <em> INTERNAL </em> Private constructor used internally creating table descriptors for
259    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
260    */
261   @InterfaceAudience.Private
262   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
263     setName(name);
264     for(HColumnDescriptor descriptor : families) {
265       this.families.put(descriptor.getName(), descriptor);
266     }
267   }
268 
269   /**
270    * <em> INTERNAL </em>Private constructor used internally creating table descriptors for
271    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
272    */
273   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
274       Map<Bytes, Bytes> values) {
275     setName(name);
276     for(HColumnDescriptor descriptor : families) {
277       this.families.put(descriptor.getName(), descriptor);
278     }
279     for (Map.Entry<Bytes, Bytes> entry :
280         values.entrySet()) {
281       setValue(entry.getKey(), entry.getValue());
282     }
283   }
284 
  /**
   * Default constructor which constructs an empty object.
   * For deserializing an HTableDescriptor instance only.
   * <p>
   * Leaves {@link #name} null; callers are expected to populate state
   * afterwards (e.g. via {@link #setName(TableName)}).
   * @deprecated Used by Writables and Writables are going away.
   */
  @Deprecated
  public HTableDescriptor() {
    super();
  }
294 
295   /**
296    * Construct a table descriptor specifying a TableName object
297    * @param name Table name.
298    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
299    */
300   public HTableDescriptor(final TableName name) {
301     super();
302     setName(name);
303   }
304 
  /**
   * Construct a table descriptor specifying a byte array table name
   * @param name Table name.
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   * @deprecated Use {@link #HTableDescriptor(TableName)} instead.
   */
  @Deprecated
  public HTableDescriptor(final byte[] name) {
    this(TableName.valueOf(name));
  }
314 
  /**
   * Construct a table descriptor specifying a String table name
   * @param name Table name.
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   * @deprecated Use {@link #HTableDescriptor(TableName)} instead.
   */
  @Deprecated
  public HTableDescriptor(final String name) {
    this(TableName.valueOf(name));
  }
324 
325   /**
326    * Construct a table descriptor by cloning the descriptor passed as a parameter.
327    * <p>
328    * Makes a deep copy of the supplied descriptor.
329    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
330    * @param desc The descriptor.
331    */
332   public HTableDescriptor(final HTableDescriptor desc) {
333     super();
334     setName(desc.name);
335     setMetaFlags(this.name);
336     for (HColumnDescriptor c: desc.families.values()) {
337       this.families.put(c.getName(), new HColumnDescriptor(c));
338     }
339     for (Map.Entry<Bytes, Bytes> e :
340         desc.values.entrySet()) {
341       setValue(e.getKey(), e.getValue());
342     }
343     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
344       this.configuration.put(e.getKey(), e.getValue());
345     }
346   }
347 
  /*
   * Set meta flags on this table.
   * IS_ROOT_KEY is set if its a -ROOT- table
   * IS_META_KEY is set either if its a -ROOT- or a hbase:meta table
   * Called by constructors.
   * Note: only IS_META is written here; IS_ROOT is assumed to have been
   * set already (isRootRegion() reads it, defaulting to false).
   * @param name table name used to recognize hbase:meta
   */
  private void setMetaFlags(final TableName name) {
    setMetaRegion(isRootRegion() ||
        name.equals(TableName.META_TABLE_NAME));
  }
359 
  /**
   * Check if the descriptor represents a <code> -ROOT- </code> region.
   * The answer is computed from the IS_ROOT metadata entry on first call
   * and cached in {@link #root} thereafter.
   *
   * @return true if this is a <code> -ROOT- </code> region
   */
  public boolean isRootRegion() {
    if (this.root == null) {
      // Lazy init; 'root' is volatile, so a concurrent race at worst
      // recomputes the same value — presumably benign, TODO confirm.
      this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
    }
    return this.root.booleanValue();
  }
371 
  /**
   * <em> INTERNAL </em> Used to denote if the current table represents
   * <code> -ROOT- </code> region. This is used internally by the
   * HTableDescriptor constructors
   *
   * @param isRoot true if this is the <code> -ROOT- </code> region
   */
  protected void setRootRegion(boolean isRoot) {
    // TODO: Make the value a boolean rather than String of boolean.
    // Stored as the strings "true"/"false" via the shared TRUE/FALSE keys.
    setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
  }
383 
  /**
   * Checks if this table is <code> hbase:meta </code>
   * region. The result is computed once from the IS_META metadata entry
   * and cached in {@link #meta}.
   *
   * @return true if this table is <code> hbase:meta </code>
   * region
   */
  public boolean isMetaRegion() {
    if (this.meta == null) {
      // Lazy init of the volatile cache; see calculateIsMetaRegion().
      this.meta = calculateIsMetaRegion();
    }
    return this.meta.booleanValue();
  }
397 
398   private synchronized Boolean calculateIsMetaRegion() {
399     byte [] value = getValue(IS_META_KEY);
400     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
401   }
402 
403   private boolean isSomething(final Bytes key,
404       final boolean valueIfNull) {
405     byte [] value = getValue(key);
406     if (value != null) {
407       return Boolean.valueOf(Bytes.toString(value));
408     }
409     return valueIfNull;
410   }
411 
  /**
   * <em> INTERNAL </em> Used to denote if the current table represents
   * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
   * internally by the HTableDescriptor constructors
   *
   * @param isMeta true if its either <code> -ROOT- </code> or
   * <code> hbase:meta </code> region
   */
  protected void setMetaRegion(boolean isMeta) {
    // Stored as the strings "true"/"false"; see setRootRegion's TODO.
    setValue(IS_META_KEY, isMeta? TRUE: FALSE);
  }
423 
  /**
   * Checks if the table is a <code>hbase:meta</code> table. True only for
   * hbase:meta proper: -ROOT- also carries IS_META but is excluded here.
   *
   * @return true if table is <code> hbase:meta </code> region.
   */
  public boolean isMetaTable() {
    return isMetaRegion() && !isRootRegion();
  }
432 
  /**
   * Getter for accessing the metadata associated with the key
   *
   * @param key The key.
   * @return The value, or null if the key has no entry.
   * @see #values
   */
  public byte[] getValue(byte[] key) {
    // Wrap the raw bytes so the lookup uses Bytes' equals/hashCode.
    return getValue(new Bytes(key));
  }
443 
444   private byte[] getValue(final Bytes key) {
445     Bytes ibw = values.get(key);
446     if (ibw == null)
447       return null;
448     return ibw.get();
449   }
450 
451   /**
452    * Getter for accessing the metadata associated with the key
453    *
454    * @param key The key.
455    * @return The value.
456    * @see #values
457    */
458   public String getValue(String key) {
459     byte[] value = getValue(Bytes.toBytes(key));
460     if (value == null)
461       return null;
462     return Bytes.toString(value);
463   }
464 
  /**
   * Getter for fetching an unmodifiable {@link #values} map.
   * Note this is a live read-only <em>view</em>, not a snapshot: later
   * setValue/remove calls on this descriptor are visible through it.
   *
   * @return unmodifiable map {@link #values}.
   * @see #values
   */
  public Map<Bytes, Bytes> getValues() {
    // shallow pointer copy
    return Collections.unmodifiableMap(values);
  }
475 
  /**
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * @param key The key.
   * @param value The value.
   * @see #values
   */
  public HTableDescriptor setValue(byte[] key, byte[] value) {
    // NOTE(review): unlike setValue(String, String), a null value is not
    // translated into remove() here — presumably callers never pass null;
    // confirm before relying on it.
    setValue(new Bytes(key), new Bytes(value));
    return this;
  }
487 
488   /*
489    * @param key The key.
490    * @param value The value.
491    */
492   private HTableDescriptor setValue(final Bytes key,
493       final String value) {
494     setValue(key, new Bytes(Bytes.toBytes(value)));
495     return this;
496   }
497 
  /*
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * @param key The key.
   * @param value The value.
   * @return this, for chained invocation
   */
  public HTableDescriptor setValue(final Bytes key,
      final Bytes value) {
    // The deprecated DEFERRED_LOG_FLUSH key is never stored verbatim; it is
    // translated into the equivalent DURABILITY setting instead.
    if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
      boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
      LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
          "use " + DURABILITY + " instead");
      setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
      return this;
    }
    values.put(key, value);
    return this;
  }
516 
  /**
   * Setter for storing metadata as a (key, value) pair in {@link #values} map.
   * A null value removes the key instead of storing it.
   *
   * @param key The key.
   * @param value The value; null deletes any existing entry for the key.
   * @see #values
   */
  public HTableDescriptor setValue(String key, String value) {
    if (value == null) {
      remove(key);
    } else {
      setValue(Bytes.toBytes(key), Bytes.toBytes(value));
    }
    return this;
  }
532 
533   /**
534    * Remove metadata represented by the key from the {@link #values} map
535    *
536    * @param key Key whose key and value we're to remove from HTableDescriptor
537    * parameters.
538    */
539   public void remove(final String key) {
540     remove(new Bytes(Bytes.toBytes(key)));
541   }
542 
  /**
   * Remove metadata represented by the key from the {@link #values} map.
   * A no-op when the key is absent.
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters.
   */
  public void remove(Bytes key) {
    values.remove(key);
  }
552 
553   /**
554    * Remove metadata represented by the key from the {@link #values} map
555    *
556    * @param key Key whose key and value we're to remove from HTableDescriptor
557    * parameters.
558    */
559   public void remove(final byte [] key) {
560     remove(new Bytes(key));
561   }
562 
  /**
   * Check if the readOnly flag of the table is set. If the readOnly flag is
   * set then the contents of the table can only be read from but not modified.
   * Defaults to {@link #DEFAULT_READONLY} when the flag is unset.
   *
   * @return true if all columns in the table should be read only
   */
  public boolean isReadOnly() {
    return isSomething(READONLY_KEY, DEFAULT_READONLY);
  }
572 
  /**
   * Setting the table as read only sets all the columns in the table as read
   * only. By default all tables are modifiable, but if the readOnly flag is
   * set to true then the contents of the table can only be read but not modified.
   *
   * @param readOnly True if all of the columns in the table should be read
   * only.
   * @return this, for chained invocation
   */
  public HTableDescriptor setReadOnly(final boolean readOnly) {
    return setValue(READONLY_KEY, readOnly? TRUE: FALSE);
  }
584 
  /**
   * Check if the compaction enable flag of the table is true. If flag is
   * false then no minor/major compactions will be done in real.
   * Defaults to {@link #DEFAULT_COMPACTION_ENABLED} when unset.
   *
   * @return true if table compaction enabled
   */
  public boolean isCompactionEnabled() {
    return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
  }
594 
595   /**
596    * Setting the table compaction enable flag.
597    *
598    * @param isEnable True if enable compaction.
599    */
600   public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
601     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
602     return this;
603   }
604 
  /**
   * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
   * Updates both the cached field and the serialized metadata entry.
   * @param durability enum value
   * @return this, for chained invocation
   */
  public HTableDescriptor setDurability(Durability durability) {
    this.durability = durability;
    // Persist as the enum name so getDurability can parse it back.
    setValue(DURABILITY_KEY, durability.name());
    return this;
  }
614 
  /**
   * Returns the durability setting for the table, lazily parsed from the
   * DURABILITY metadata entry. An absent or unparseable value falls back to
   * {@link Durability#USE_DEFAULT}.
   * NOTE(review): the lazy init of the non-volatile {@code durability} field
   * is not synchronized — presumably single-threaded use is assumed; confirm.
   * @return durability setting for the table.
   */
  public Durability getDurability() {
    if (this.durability == null) {
      byte[] durabilityValue = getValue(DURABILITY_KEY);
      if (durabilityValue == null) {
        this.durability = DEFAULT_DURABLITY;
      } else {
        try {
          this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
        } catch (IllegalArgumentException ex) {
          // Unknown enum name (e.g. written by a newer version): warn and
          // fall back to the default rather than failing.
          LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
            + " is not known. Durability:" + Bytes.toString(durabilityValue));
          this.durability = DEFAULT_DURABLITY;
        }
      }
    }
    return this.durability;
  }
636 
637   /**
638    * Get the name of the table
639    *
640    * @return TableName
641    */
642   public TableName getTableName() {
643     return name;
644   }
645 
  /**
   * Get the name of the table as a byte array.
   *
   * @return name of table
   * @deprecated Use {@link #getTableName()} instead
   */
  @Deprecated
  public byte[] getName() {
    return name.getName();
  }
656 
657   /**
658    * Get the name of the table as a String
659    *
660    * @return name of table as a String
661    */
662   public String getNameAsString() {
663     return name.getNameAsString();
664   }
665 
666   /**
667    * This sets the class associated with the region split policy which
668    * determines when a region split should occur.  The class used by
669    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
670    * @param clazz the class name
671    */
672   public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
673     setValue(SPLIT_POLICY, clazz);
674     return this;
675   }
676 
677   /**
678    * This gets the class associated with the region split policy which
679    * determines when a region split should occur.  The class used by
680    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
681    *
682    * @return the class name of the region split policy for this table.
683    * If this returns null, the default split policy is used.
684    */
685    public String getRegionSplitPolicyClassName() {
686     return getValue(SPLIT_POLICY);
687   }
688 
  /**
   * Set the name of the table.
   *
   * @param name name of table
   * @return this, for chained invocation
   * @deprecated Use {@link #setName(TableName)} instead.
   */
  @Deprecated
  public HTableDescriptor setName(byte[] name) {
    setName(TableName.valueOf(name));
    return this;
  }
699 
  /**
   * Set the name of the table and refresh the IS_META flag derived from it.
   *
   * @param name name of table
   * @return this, for chained invocation
   * @deprecated The table name should be immutable after construction.
   */
  @Deprecated
  public HTableDescriptor setName(TableName name) {
    this.name = name;
    setMetaFlags(this.name);
    return this;
  }
706 
707   /**
708    * Returns the maximum size upto which a region can grow to after which a region
709    * split is triggered. The region size is represented by the size of the biggest
710    * store file in that region.
711    *
712    * @return max hregion size for table, -1 if not set.
713    *
714    * @see #setMaxFileSize(long)
715    */
716   public long getMaxFileSize() {
717     byte [] value = getValue(MAX_FILESIZE_KEY);
718     if (value != null) {
719       return Long.parseLong(Bytes.toString(value));
720     }
721     return -1;
722   }
723 
  /**
   * Sets the maximum size upto which a region can grow to after which a region
   * split is triggered. The region size is represented by the size of the biggest
   * store file in that region, i.e. If the biggest store file grows beyond the
   * maxFileSize, then the region split is triggered. When unset this defaults
   * to {@code HConstants.DEFAULT_MAX_FILE_SIZE}.
   * <p>
   * This is not an absolute value and might vary. Assume that a single row exceeds
   * the maxFileSize then the storeFileSize will be greater than maxFileSize since
   * a single row cannot be split across multiple regions
   * </p>
   *
   * @param maxFileSize The maximum file size that a store file can grow to
   * before a split is triggered.
   * @return this, for chained invocation
   */
  public HTableDescriptor setMaxFileSize(long maxFileSize) {
    // Stored as a decimal string; getMaxFileSize parses it back.
    setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    return this;
  }
743 
744   /**
745    * Returns the size of the memstore after which a flush to filesystem is triggered.
746    *
747    * @return memory cache flush size for each hregion, -1 if not set.
748    *
749    * @see #setMemStoreFlushSize(long)
750    */
751   public long getMemStoreFlushSize() {
752     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
753     if (value != null) {
754       return Long.parseLong(Bytes.toString(value));
755     }
756     return -1;
757   }
758 
  /**
   * Represents the maximum size of the memstore after which the contents of the
   * memstore are flushed to the filesystem. When unset this defaults to
   * {@link #DEFAULT_MEMSTORE_FLUSH_SIZE} (128 MB).
   *
   * @param memstoreFlushSize memory cache flush size for each hregion
   * @return this, for chained invocation
   */
  public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
    setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    return this;
  }
769 
  /**
   * This sets the class associated with the flush policy which determines the stores
   * need to be flushed when flushing a region. The class used by default is defined in
   * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
   * @param clazz the class name
   * @return this, for chained invocation
   */
  public HTableDescriptor setFlushPolicyClassName(String clazz) {
    setValue(FLUSH_POLICY, clazz);
    return this;
  }
780 
781   /**
782    * This gets the class associated with the flush policy which determines the stores need to be
783    * flushed when flushing a region. The class used by default is defined in
784    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
785    * @return the class name of the flush policy for this table. If this returns null, the default
786    *         flush policy is used.
787    */
788   public String getFlushPolicyClassName() {
789     return getValue(FLUSH_POLICY);
790   }
791 
792   /**
793    * Adds a column family.
794    * For the updating purpose please use {@link #modifyFamily(HColumnDescriptor)} instead.
795    * @param family HColumnDescriptor of family to add.
796    */
797   public HTableDescriptor addFamily(final HColumnDescriptor family) {
798     if (family.getName() == null || family.getName().length <= 0) {
799       throw new IllegalArgumentException("Family name cannot be null or empty");
800     }
801     if (hasFamily(family.getName())) {
802       throw new IllegalArgumentException("Family '" +
803         family.getNameAsString() + "' already exists so cannot be added");
804     }
805     this.families.put(family.getName(), family);
806     return this;
807   }
808 
809   /**
810    * Modifies the existing column family.
811    * @param family HColumnDescriptor of family to update
812    * @return this (for chained invocation)
813    */
814   public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
815     if (family.getName() == null || family.getName().length <= 0) {
816       throw new IllegalArgumentException("Family name cannot be null or empty");
817     }
818     if (!hasFamily(family.getName())) {
819       throw new IllegalArgumentException("Column family '" + family.getNameAsString()
820         + "' does not exist");
821     }
822     this.families.put(family.getName(), family);
823     return this;
824   }
825 
  /**
   * Checks to see if this table contains the given column family.
   * Lookup uses the raw-byte comparator the {@link #families} map was
   * built with (Bytes.BYTES_RAWCOMPARATOR).
   * @param familyName Family name or column name.
   * @return true if the table contains the specified family name
   */
  public boolean hasFamily(final byte [] familyName) {
    return families.containsKey(familyName);
  }
834 
  /**
   * @return Name of this table and then a map of all of the column family
   * descriptors.
   * @see #getNameAsString()
   */
  @Override
  public String toString() {
    StringBuilder s = new StringBuilder();
    s.append('\'').append(Bytes.toString(name.getName())).append('\'');
    // true => include attributes that still hold their default values.
    s.append(getValues(true));
    for (HColumnDescriptor f : families.values()) {
      s.append(", ").append(f);
    }
    return s.toString();
  }
850 
  /**
   * @return Name of this table and then a map of all of the column family
   * descriptors (with only the non-default column family attributes)
   */
  public String toStringCustomizedValues() {
    StringBuilder s = new StringBuilder();
    s.append('\'').append(Bytes.toString(name.getName())).append('\'');
    // false => suppress attributes that still hold their default values.
    s.append(getValues(false));
    for(HColumnDescriptor hcd : families.values()) {
      s.append(", ").append(hcd.toStringCustomizedValues());
    }
    return s.toString();
  }
864 
  /**
   * Renders this table's attributes as the shell-style string appended by
   * {@link #toString()} and {@link #toStringCustomizedValues()}.
   *
   * @param printDefaults if false, reserved keys whose value matches the
   *   known default are suppressed from the output
   * @return StringBuilder holding the rendered attributes; empty when there
   *   is nothing to print
   */
  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    // step 1: set partitioning and pruning
    // Split keys into reserved (well-known/coprocessor) vs. user-supplied;
    // TreeSet gives deterministic, sorted output order.
    Set<Bytes> reservedKeys = new TreeSet<Bytes>();
    Set<Bytes> userKeys = new TreeSet<Bytes>();
    for (Bytes k : values.keySet()) {
      if (k == null || k.get() == null) continue;
      String key = Bytes.toString(k.get());
      // in this section, print out reserved keywords + coprocessor info
      if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
        userKeys.add(k);
        continue;
      }
      // only print out IS_ROOT/IS_META if true
      String value = Bytes.toString(values.get(k).get());
      if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
        if (Boolean.valueOf(value) == false) continue;
      }
      // see if a reserved key is a default value. may not want to print it out
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        reservedKeys.add(k);
      }
    }

    // early exit optimization
    boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
    if (!hasAttributes && configuration.isEmpty()) return s;

    s.append(", {");
    // step 2: printing attributes
    if (hasAttributes) {
      s.append("TABLE_ATTRIBUTES => {");

      // print all reserved keys first
      boolean printCommaForAttr = false;
      for (Bytes k : reservedKeys) {
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(key);
        s.append(" => ");
        s.append('\'').append(value).append('\'');
      }

      if (!userKeys.isEmpty()) {
        // print all non-reserved, advanced config keys as a separate subset
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(HConstants.METADATA).append(" => ");
        s.append("{");
        boolean printCommaForCfg = false;
        for (Bytes k : userKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForCfg) s.append(", ");
          printCommaForCfg = true;
          s.append('\'').append(key).append('\'');
          s.append(" => ");
          s.append('\'').append(value).append('\'');
        }
        s.append("}");
      }
    }

    // step 3: printing all configuration:
    if (!configuration.isEmpty()) {
      if (hasAttributes) {
        s.append(", ");
      }
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfig = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfig) s.append(", ");
        printCommaForConfig = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(e.getValue()).append('\'');
      }
      s.append("}");
    }
    s.append("}"); // end METHOD
    return s;
  }
953 
954   /**
955    * Compare the contents of the descriptor with another one passed as a parameter.
956    * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
957    * contents of the descriptors are compared.
958    *
959    * @return true if the contents of the the two descriptors exactly match
960    *
961    * @see java.lang.Object#equals(java.lang.Object)
962    */
963   @Override
964   public boolean equals(Object obj) {
965     if (this == obj) {
966       return true;
967     }
968     if (obj == null) {
969       return false;
970     }
971     if (!(obj instanceof HTableDescriptor)) {
972       return false;
973     }
974     return compareTo((HTableDescriptor)obj) == 0;
975   }
976 
977   /**
978    * @see java.lang.Object#hashCode()
979    */
980   @Override
981   public int hashCode() {
982     int result = this.name.hashCode();
983     if (this.families.size() > 0) {
984       for (HColumnDescriptor e: this.families.values()) {
985         result ^= e.hashCode();
986       }
987     }
988     result ^= values.hashCode();
989     result ^= configuration.hashCode();
990     return result;
991   }
992 
993   // Comparable
994 
995   /**
996    * Compares the descriptor with another descriptor which is passed as a parameter.
997    * This compares the content of the two descriptors and not the reference.
998    *
999    * @return 0 if the contents of the descriptors are exactly matching,
1000    *         1 if there is a mismatch in the contents
1001    */
1002   @Override
1003   public int compareTo(@Nonnull final HTableDescriptor other) {
1004     int result = this.name.compareTo(other.name);
1005     if (result == 0) {
1006       result = families.size() - other.families.size();
1007     }
1008     if (result == 0 && families.size() != other.families.size()) {
1009       result = Integer.valueOf(families.size()).compareTo(
1010           Integer.valueOf(other.families.size()));
1011     }
1012     if (result == 0) {
1013       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1014           it2 = other.families.values().iterator(); it.hasNext(); ) {
1015         result = it.next().compareTo(it2.next());
1016         if (result != 0) {
1017           break;
1018         }
1019       }
1020     }
1021     if (result == 0) {
1022       // punt on comparison for ordering, just calculate difference
1023       result = this.values.hashCode() - other.values.hashCode();
1024       if (result < 0)
1025         result = -1;
1026       else if (result > 0)
1027         result = 1;
1028     }
1029     if (result == 0) {
1030       result = this.configuration.hashCode() - other.configuration.hashCode();
1031       if (result < 0)
1032         result = -1;
1033       else if (result > 0)
1034         result = 1;
1035     }
1036     return result;
1037   }
1038 
1039   /**
1040    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1041    * of all the column families of the table.
1042    *
1043    * @return Immutable collection of {@link HColumnDescriptor} of all the
1044    * column families.
1045    */
1046   public Collection<HColumnDescriptor> getFamilies() {
1047     return Collections.unmodifiableCollection(this.families.values());
1048   }
1049 
1050   /**
1051    * Returns the configured replicas per region
1052    */
1053   public int getRegionReplication() {
1054     byte[] val = getValue(REGION_REPLICATION_KEY);
1055     if (val == null || val.length == 0) {
1056       return DEFAULT_REGION_REPLICATION;
1057     }
1058     return Integer.parseInt(Bytes.toString(val));
1059   }
1060 
1061   /**
1062    * Sets the number of replicas per region.
1063    * @param regionReplication the replication factor per region
1064    */
1065   public HTableDescriptor setRegionReplication(int regionReplication) {
1066     setValue(REGION_REPLICATION_KEY,
1067         new Bytes(Bytes.toBytes(Integer.toString(regionReplication))));
1068     return this;
1069   }
1070 
1071   /**
1072    * Returns all the column family names of the current table. The map of
1073    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1074    * This returns all the keys of the family map which represents the column
1075    * family names of the table.
1076    *
1077    * @return Immutable sorted set of the keys of the families.
1078    */
1079   public Set<byte[]> getFamiliesKeys() {
1080     return Collections.unmodifiableSet(this.families.keySet());
1081   }
1082 
1083   /**
1084    * Returns an array all the {@link HColumnDescriptor} of the column families
1085    * of the table.
1086    *
1087    * @return Array of all the HColumnDescriptors of the current table
1088    *
1089    * @see #getFamilies()
1090    */
1091   public HColumnDescriptor[] getColumnFamilies() {
1092     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1093     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1094   }
1095 
1096 
1097   /**
1098    * Returns the HColumnDescriptor for a specific column family with name as
1099    * specified by the parameter column.
1100    *
1101    * @param column Column family name
1102    * @return Column descriptor for the passed family name or the family on
1103    * passed in column.
1104    */
1105   public HColumnDescriptor getFamily(final byte [] column) {
1106     return this.families.get(column);
1107   }
1108 
1109 
1110   /**
1111    * Removes the HColumnDescriptor with name specified by the parameter column
1112    * from the table descriptor
1113    *
1114    * @param column Name of the column family to be removed.
1115    * @return Column descriptor for the passed family name or the family on
1116    * passed in column.
1117    */
1118   public HColumnDescriptor removeFamily(final byte [] column) {
1119     return this.families.remove(column);
1120   }
1121 
1122 
1123   /**
1124    * Add a table coprocessor to this table. The coprocessor
1125    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1126    * or Endpoint.
1127    * It won't check if the class can be loaded or not.
1128    * Whether a coprocessor is loadable or not will be determined when
1129    * a region is opened.
1130    * @param className Full class name.
1131    * @throws IOException
1132    */
1133   public HTableDescriptor addCoprocessor(String className) throws IOException {
1134     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1135     return this;
1136   }
1137 
1138 
1139   /**
1140    * Add a table coprocessor to this table. The coprocessor
1141    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1142    * or Endpoint.
1143    * It won't check if the class can be loaded or not.
1144    * Whether a coprocessor is loadable or not will be determined when
1145    * a region is opened.
1146    * @param jarFilePath Path of the jar file. If it's null, the class will be
1147    * loaded from default classloader.
1148    * @param className Full class name.
1149    * @param priority Priority
1150    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1151    * @throws IOException
1152    */
1153   public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
1154                              int priority, final Map<String, String> kvs)
1155   throws IOException {
1156     if (hasCoprocessor(className)) {
1157       throw new IOException("Coprocessor " + className + " already exists.");
1158     }
1159     // validate parameter kvs
1160     StringBuilder kvString = new StringBuilder();
1161     if (kvs != null) {
1162       for (Map.Entry<String, String> e: kvs.entrySet()) {
1163         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1164           throw new IOException("Illegal parameter key = " + e.getKey());
1165         }
1166         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1167           throw new IOException("Illegal parameter (" + e.getKey() +
1168               ") value = " + e.getValue());
1169         }
1170         if (kvString.length() != 0) {
1171           kvString.append(',');
1172         }
1173         kvString.append(e.getKey());
1174         kvString.append('=');
1175         kvString.append(e.getValue());
1176       }
1177     }
1178 
1179     // generate a coprocessor key
1180     int maxCoprocessorNumber = 0;
1181     Matcher keyMatcher;
1182     for (Map.Entry<Bytes, Bytes> e :
1183         this.values.entrySet()) {
1184       keyMatcher =
1185           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1186               Bytes.toString(e.getKey().get()));
1187       if (!keyMatcher.matches()) {
1188         continue;
1189       }
1190       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)),
1191           maxCoprocessorNumber);
1192     }
1193     maxCoprocessorNumber++;
1194 
1195     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1196     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1197         "|" + className + "|" + Integer.toString(priority) + "|" +
1198         kvString.toString();
1199     setValue(key, value);
1200     return this;
1201   }
1202 
1203 
1204   /**
1205    * Check if the table has an attached co-processor represented by the name className
1206    *
1207    * @param className - Class name of the co-processor
1208    * @return true of the table has a co-processor className
1209    */
1210   public boolean hasCoprocessor(String className) {
1211     Matcher keyMatcher;
1212     Matcher valueMatcher;
1213     for (Map.Entry<Bytes, Bytes> e :
1214         this.values.entrySet()) {
1215       keyMatcher =
1216           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1217               Bytes.toString(e.getKey().get()));
1218       if (!keyMatcher.matches()) {
1219         continue;
1220       }
1221       valueMatcher =
1222         HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(
1223             Bytes.toString(e.getValue().get()));
1224       if (!valueMatcher.matches()) {
1225         continue;
1226       }
1227       // get className and compare
1228       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1229       if (clazz.equals(className.trim())) {
1230         return true;
1231       }
1232     }
1233     return false;
1234   }
1235 
1236   /**
1237    * Return the list of attached co-processor represented by their name className
1238    *
1239    * @return The list of co-processors classNames
1240    */
1241   public List<String> getCoprocessors() {
1242     List<String> result = new ArrayList<String>();
1243     Matcher keyMatcher;
1244     Matcher valueMatcher;
1245     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1246       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1247       if (!keyMatcher.matches()) {
1248         continue;
1249       }
1250       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1251           .toString(e.getValue().get()));
1252       if (!valueMatcher.matches()) {
1253         continue;
1254       }
1255       result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
1256     }
1257     return result;
1258   }
1259 
1260   /**
1261    * Remove a coprocessor from those set on the table
1262    * @param className Class name of the co-processor
1263    */
1264   public void removeCoprocessor(String className) {
1265     Bytes match = null;
1266     Matcher keyMatcher;
1267     Matcher valueMatcher;
1268     for (Map.Entry<Bytes, Bytes> e : this.values
1269         .entrySet()) {
1270       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1271           .getKey().get()));
1272       if (!keyMatcher.matches()) {
1273         continue;
1274       }
1275       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1276           .toString(e.getValue().get()));
1277       if (!valueMatcher.matches()) {
1278         continue;
1279       }
1280       // get className and compare
1281       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1282       // remove the CP if it is present
1283       if (clazz.equals(className.trim())) {
1284         match = e.getKey();
1285         break;
1286       }
1287     }
1288     // if we found a match, remove it
1289     if (match != null)
1290       remove(match);
1291   }
1292 
1293   /**
1294    * Returns the {@link Path} object representing the table directory under
1295    * path rootdir
1296    *
1297    * Deprecated use FSUtils.getTableDir() instead.
1298    *
1299    * @param rootdir qualified path of HBase root directory
1300    * @param tableName name of table
1301    * @return {@link Path} for table
1302    */
1303   @Deprecated
1304   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1305     //This is bad I had to mirror code from FSUTils.getTableDir since
1306     //there is no module dependency between hbase-client and hbase-server
1307     TableName name = TableName.valueOf(tableName);
1308     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1309               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1310   }
1311 
  /** Table descriptor for <code>hbase:meta</code> catalog table.
   * Deprecated, use TableDescriptors#get(TableName.META_TABLE) or
   * Admin#getTableDescriptor(TableName.META_TABLE) instead.
   */
  @Deprecated
  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
      TableName.META_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              // Local replication scope for the catalog family.
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });
1332 
  // Attach the multi-row mutation endpoint to the hbase:meta descriptor.
  static {
    try {
      META_TABLEDESC.addCoprocessor(
          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
          null, Coprocessor.PRIORITY_SYSTEM, null);
    } catch (IOException ex) {
      // A static initializer cannot throw a checked exception; wrap it so a
      // misconfiguration fails the class load loudly instead of silently.
      throw new RuntimeException(ex);
    }
  }
1343 
  /** Name of the column family of the namespace table. */
  public final static String NAMESPACE_FAMILY_INFO = "info";
  /** {@link #NAMESPACE_FAMILY_INFO} as bytes. */
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  /** Column qualifier ("d") used by the namespace table. */
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1347 
  /** Table descriptor for the namespace table, with its single "info" family. */
  public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
      TableName.NAMESPACE_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });
1362 
1363   @Deprecated
1364   public HTableDescriptor setOwner(User owner) {
1365     return setOwnerString(owner != null ? owner.getShortName() : null);
1366   }
1367 
1368   // used by admin.rb:alter(table_name,*args) to update owner.
1369   @Deprecated
1370   public HTableDescriptor setOwnerString(String ownerString) {
1371     if (ownerString != null) {
1372       setValue(OWNER_KEY, ownerString);
1373     } else {
1374       remove(OWNER_KEY);
1375     }
1376     return this;
1377   }
1378 
1379   @Deprecated
1380   public String getOwnerString() {
1381     if (getValue(OWNER_KEY) != null) {
1382       return Bytes.toString(getValue(OWNER_KEY));
1383     }
1384     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1385     // hbase:meta and -ROOT- should return system user as owner, not null (see
1386     // MasterFileSystem.java:bootstrap()).
1387     return null;
1388   }
1389 
1390   /**
1391    * @return This instance serialized with pb with pb magic prefix
1392    * @see #parseFrom(byte[])
1393    */
1394   public byte [] toByteArray() {
1395     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1396   }
1397 
1398   /**
1399    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1400    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1401    * @throws DeserializationException
1402    * @throws IOException
1403    * @see #toByteArray()
1404    */
1405   public static HTableDescriptor parseFrom(final byte [] bytes)
1406   throws DeserializationException, IOException {
1407     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1408       throw new DeserializationException("Expected PB encoded HTableDescriptor");
1409     }
1410     int pblen = ProtobufUtil.lengthOfPBMagic();
1411     TableSchema.Builder builder = TableSchema.newBuilder();
1412     TableSchema ts;
1413     try {
1414       ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1415     } catch (InvalidProtocolBufferException e) {
1416       throw new DeserializationException(e);
1417     }
1418     return convert(ts);
1419   }
1420 
1421   /**
1422    * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
1423    */
1424   public TableSchema convert() {
1425     TableSchema.Builder builder = TableSchema.newBuilder();
1426     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1427     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1428       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1429       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1430       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1431       builder.addAttributes(aBuilder.build());
1432     }
1433     for (HColumnDescriptor hcd: getColumnFamilies()) {
1434       builder.addColumnFamilies(hcd.convert());
1435     }
1436     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1437       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1438       aBuilder.setName(e.getKey());
1439       aBuilder.setValue(e.getValue());
1440       builder.addConfiguration(aBuilder.build());
1441     }
1442     return builder.build();
1443   }
1444 
1445   /**
1446    * @param ts A pb TableSchema instance.
1447    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1448    */
1449   public static HTableDescriptor convert(final TableSchema ts) {
1450     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1451     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1452     int index = 0;
1453     for (ColumnFamilySchema cfs: list) {
1454       hcds[index++] = HColumnDescriptor.convert(cfs);
1455     }
1456     HTableDescriptor htd = new HTableDescriptor(
1457         ProtobufUtil.toTableName(ts.getTableName()),
1458         hcds);
1459     for (BytesBytesPair a: ts.getAttributesList()) {
1460       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1461     }
1462     for (NameStringPair a: ts.getConfigurationList()) {
1463       htd.setConfiguration(a.getName(), a.getValue());
1464     }
1465     return htd;
1466   }
1467 
1468   /**
1469    * Getter for accessing the configuration value by key
1470    */
1471   public String getConfigurationValue(String key) {
1472     return configuration.get(key);
1473   }
1474 
1475   /**
1476    * Getter for fetching an unmodifiable {@link #configuration} map.
1477    */
1478   public Map<String, String> getConfiguration() {
1479     // shallow pointer copy
1480     return Collections.unmodifiableMap(configuration);
1481   }
1482 
1483   /**
1484    * Setter for storing a configuration setting in {@link #configuration} map.
1485    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1486    * @param value String value. If null, removes the setting.
1487    */
1488   public HTableDescriptor setConfiguration(String key, String value) {
1489     if (value == null) {
1490       removeConfiguration(key);
1491     } else {
1492       configuration.put(key, value);
1493     }
1494     return this;
1495   }
1496 
1497   /**
1498    * Remove a config setting represented by the key from the {@link #configuration} map
1499    */
1500   public void removeConfiguration(final String key) {
1501     configuration.remove(key);
1502   }
1503 }