1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.DataInput;
22  import java.io.DataOutput;
23  import java.io.IOException;
24  import java.util.ArrayList;
25  import java.util.Collection;
26  import java.util.Collections;
27  import java.util.HashMap;
28  import java.util.HashSet;
29  import java.util.Iterator;
30  import java.util.List;
31  import java.util.Map;
32  import java.util.Set;
33  import java.util.TreeMap;
34  import java.util.TreeSet;
35  import java.util.regex.Matcher;
36  
37  import org.apache.hadoop.hbase.util.ByteStringer;
38  import org.apache.commons.logging.Log;
39  import org.apache.commons.logging.LogFactory;
40  import org.apache.hadoop.hbase.classification.InterfaceAudience;
41  import org.apache.hadoop.hbase.classification.InterfaceStability;
42  import org.apache.hadoop.conf.Configuration;
43  import org.apache.hadoop.fs.Path;
44  import org.apache.hadoop.hbase.client.Durability;
45  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
46  import org.apache.hadoop.hbase.exceptions.DeserializationException;
47  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
48  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
49  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
50  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
51  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
52  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
53  import org.apache.hadoop.hbase.regionserver.BloomType;
54  import org.apache.hadoop.hbase.security.User;
55  import org.apache.hadoop.hbase.util.Bytes;
56  import org.apache.hadoop.hbase.util.Writables;
57  import org.apache.hadoop.io.WritableComparable;
58  
59  /**
60   * HTableDescriptor contains the details about an HBase table, such as the descriptors of
61   * all the column families, whether the table is a catalog table (<code> -ROOT- </code> or
62   * <code> hbase:meta </code>), whether the table is read only, the maximum size of the
63   * memstore, when a region split should occur, the coprocessors associated with it, etc.
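       *
       * <p>A minimal usage sketch (the table and family names here are hypothetical):
       * <pre>
       * HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
       * htd.addFamily(new HColumnDescriptor("cf"));
       * </pre>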
64   */
65  @InterfaceAudience.Public
66  @InterfaceStability.Evolving
67  public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
68  
69    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
70  
71    /**
72     *  Changes prior to version 3 were not recorded here.
73     *  Version 3 adds metadata as a map where keys and values are byte[].
74     *  Version 4 adds indexes
75     *  Version 5 removed transactional pollution -- e.g. indexes
76     *  Version 6 changed metadata to BytesBytesPair in PB
77     *  Version 7 adds table-level configuration
78     */
79    private static final byte TABLE_DESCRIPTOR_VERSION = 7;
80  
81    private TableName name = null;
82  
83    /**
84     * A map which holds the metadata information of the table. This metadata
85     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
86     * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
87     */
88    private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
89      new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
90  
91    /**
92     * A map which holds the configuration specific to the table.
93     * The keys of the map have the same names as config keys and override the defaults with
94     * table-specific settings. Example usage may be for compactions, etc.
95     */
96    private final Map<String, String> configuration = new HashMap<String, String>();
97  
98    public static final String SPLIT_POLICY = "SPLIT_POLICY";
99  
100   /**
101    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
102    * attribute which denotes the maximum size of the store file after which
103    * a region split occurs
104    *
105    * @see #getMaxFileSize()
106    */
107   public static final String MAX_FILESIZE = "MAX_FILESIZE";
108   private static final ImmutableBytesWritable MAX_FILESIZE_KEY =
109     new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));
110 
111   public static final String OWNER = "OWNER";
112   public static final ImmutableBytesWritable OWNER_KEY =
113     new ImmutableBytesWritable(Bytes.toBytes(OWNER));
114 
115   /**
116    * <em>INTERNAL</em> Used by rest interface to access this metadata
117    * attribute which denotes if the table is Read Only
118    *
119    * @see #isReadOnly()
120    */
121   public static final String READONLY = "READONLY";
122   private static final ImmutableBytesWritable READONLY_KEY =
123     new ImmutableBytesWritable(Bytes.toBytes(READONLY));
124 
125   /**
126    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
127    * attribute which denotes if the table is compaction enabled
128    *
129    * @see #isCompactionEnabled()
130    */
131   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
132   private static final ImmutableBytesWritable COMPACTION_ENABLED_KEY =
133     new ImmutableBytesWritable(Bytes.toBytes(COMPACTION_ENABLED));
134 
135   /**
136    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
137    * attribute which represents the maximum size of the memstore after which
138    * its contents are flushed onto the disk
139    *
140    * @see #getMemStoreFlushSize()
141    */
142   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
143   private static final ImmutableBytesWritable MEMSTORE_FLUSHSIZE_KEY =
144     new ImmutableBytesWritable(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
145 
146   public static final String FLUSH_POLICY = "FLUSH_POLICY";
147 
148   /**
149    * <em>INTERNAL</em> Used by rest interface to access this metadata
150    * attribute which denotes if the table is a -ROOT- region or not
151    *
152    * @see #isRootRegion()
153    */
154   public static final String IS_ROOT = "IS_ROOT";
155   private static final ImmutableBytesWritable IS_ROOT_KEY =
156     new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT));
157 
158   /**
159    * <em>INTERNAL</em> Used by rest interface to access this metadata
160    * attribute which denotes if it is a catalog table, either
161    * <code> hbase:meta </code> or <code> -ROOT- </code>
162    *
163    * @see #isMetaRegion()
164    */
165   public static final String IS_META = "IS_META";
166   private static final ImmutableBytesWritable IS_META_KEY =
167     new ImmutableBytesWritable(Bytes.toBytes(IS_META));
168 
169   /**
170    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
171    * attribute which denotes if the deferred log flush option is enabled.
172    * @deprecated Use {@link #DURABILITY} instead.
173    */
174   @Deprecated
175   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
176   @Deprecated
177   private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY =
178     new ImmutableBytesWritable(Bytes.toBytes(DEFERRED_LOG_FLUSH));
179 
180   /**
181    * <em>INTERNAL</em> {@link Durability} setting for the table.
182    */
183   public static final String DURABILITY = "DURABILITY";
184   private static final ImmutableBytesWritable DURABILITY_KEY =
185       new ImmutableBytesWritable(Bytes.toBytes("DURABILITY"));
186 
187   /**
188    * <em>INTERNAL</em> number of region replicas for the table.
189    */
190   public static final String REGION_REPLICATION = "REGION_REPLICATION";
191   private static final ImmutableBytesWritable REGION_REPLICATION_KEY =
192       new ImmutableBytesWritable(Bytes.toBytes(REGION_REPLICATION));
193 
194   /**
195    * <em>INTERNAL</em> flag to indicate whether or not the memstore should be replicated
196    * for read-replicas (CONSISTENCY => TIMELINE).
197    */
198   public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
199   private static final ImmutableBytesWritable REGION_MEMSTORE_REPLICATION_KEY =
200       new ImmutableBytesWritable(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));
201 
202   /** Default durability for HTD is USE_DEFAULT, which defers to the HBase-wide global default value */
203   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
204 
205   /*
206    *  The below are ugly but better than creating them each time till we
207    *  replace booleans being saved as Strings with plain booleans.  Need a
208    *  migration script to do this.  TODO.
209    */
210   private static final ImmutableBytesWritable FALSE =
211     new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString()));
212 
213   private static final ImmutableBytesWritable TRUE =
214     new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));
215 
216   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
217 
218   /**
219    * Constant that denotes the default READONLY value for a table, which is false
220    */
221   public static final boolean DEFAULT_READONLY = false;
222 
223   /**
224    * Constant that denotes whether the table is compaction enabled by default
225    */
226   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
227 
228   /**
229    * Constant that denotes the default maximum size of the memstore, after which
230    * its contents are flushed to the store files
231    */
232   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
233 
234   public static final int DEFAULT_REGION_REPLICATION = 1;
235 
236   public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;
237 
238   private final static Map<String, String> DEFAULT_VALUES
239     = new HashMap<String, String>();
240   private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
241     = new HashSet<ImmutableBytesWritable>();
242   static {
243     DEFAULT_VALUES.put(MAX_FILESIZE,
244         String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
245     DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
246     DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
247         String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
248     DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
249         String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
250     DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
251     DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
252     for (String s : DEFAULT_VALUES.keySet()) {
253       RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
254     }
255     RESERVED_KEYWORDS.add(IS_ROOT_KEY);
256     RESERVED_KEYWORDS.add(IS_META_KEY);
257   }
258 
259   /**
260    * Cache of whether this is a meta table or not.
261    */
262   private volatile Boolean meta = null;
263   /**
264    * Cache of whether this is root table or not.
265    */
266   private volatile Boolean root = null;
267 
268   /**
269    * Durability setting for the table
270    */
271   private Durability durability = null;
272 
273   /**
274    * Maps column family name to the respective HColumnDescriptors
275    */
276   private final Map<byte [], HColumnDescriptor> families =
277     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
278 
279   /**
280    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
281    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
282    */
283   @InterfaceAudience.Private
284   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
285     setName(name);
286     for(HColumnDescriptor descriptor : families) {
287       this.families.put(descriptor.getName(), descriptor);
288     }
289   }
290 
291   /**
292    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
293    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
294    */
295   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
296       Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
297     setName(name);
298     for(HColumnDescriptor descriptor : families) {
299       this.families.put(descriptor.getName(), descriptor);
300     }
301     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
302         values.entrySet()) {
303       setValue(entry.getKey(), entry.getValue());
304     }
305   }
306 
307   /**
308    * Default constructor which constructs an empty object.
309    * For deserializing an HTableDescriptor instance only.
310    * @deprecated As of release 0.96
311    *             (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
312    *             This will be removed in HBase 2.0.0.
313    *             Used by Writables and Writables are going away.
314    */
315   @Deprecated
316   public HTableDescriptor() {
317     super();
318   }
319 
320   /**
321    * Construct a table descriptor specifying a TableName object
322    * @param name Table name.
323    * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
324    */
325   public HTableDescriptor(final TableName name) {
326     super();
327     setName(name);
328   }
329 
330   /**
331    * Construct a table descriptor specifying a byte array table name
332    * @param name Table name.
333    * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
       * @deprecated Use {@link #HTableDescriptor(TableName)} instead.
334    */
335   @Deprecated
336   public HTableDescriptor(final byte[] name) {
337     this(TableName.valueOf(name));
338   }
339 
340   /**
341    * Construct a table descriptor specifying a String table name
342    * @param name Table name.
343    * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
       * @deprecated Use {@link #HTableDescriptor(TableName)} instead.
344    */
345   @Deprecated
346   public HTableDescriptor(final String name) {
347     this(TableName.valueOf(name));
348   }
349 
350   /**
351    * Construct a table descriptor by cloning the descriptor passed as a parameter.
352    * <p>
353    * Makes a deep copy of the supplied descriptor.
354    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
355    * @param desc The descriptor.
356    */
357   public HTableDescriptor(final HTableDescriptor desc) {
358     super();
359     setName(desc.name);
360     setMetaFlags(this.name);
361     for (HColumnDescriptor c: desc.families.values()) {
362       this.families.put(c.getName(), new HColumnDescriptor(c));
363     }
364     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
365         desc.values.entrySet()) {
366       setValue(e.getKey(), e.getValue());
367     }
368     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
369       this.configuration.put(e.getKey(), e.getValue());
370     }
371   }
372 
373   /*
374    * Set meta flags on this table.
375    * IS_ROOT_KEY is set if it is a -ROOT- table
376    * IS_META_KEY is set if it is either a -ROOT- or a hbase:meta table
377    * Called by constructors.
378    * @param name
379    */
380   private void setMetaFlags(final TableName name) {
381     setMetaRegion(isRootRegion() ||
382         name.equals(TableName.META_TABLE_NAME));
383   }
384 
385   /**
386    * Check if the descriptor represents a <code> -ROOT- </code> region.
387    *
388    * @return true if this is a <code> -ROOT- </code> region
389    */
390   public boolean isRootRegion() {
391     if (this.root == null) {
392       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
393     }
394     return this.root.booleanValue();
395   }
396 
397   /**
398    * <em> INTERNAL </em> Used to denote if the current table represents
399    * the <code> -ROOT- </code> region. This is used internally by the
400    * HTableDescriptor constructors
401    *
402    * @param isRoot true if this is the <code> -ROOT- </code> region
403    */
404   protected void setRootRegion(boolean isRoot) {
405     // TODO: Make the value a boolean rather than String of boolean.
406     setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
407   }
408 
409   /**
410    * Checks if this table is the <code> hbase:meta </code>
411    * region.
412    *
413    * @return true if this table is the <code> hbase:meta </code>
414    * region
415    */
416   public boolean isMetaRegion() {
417     if (this.meta == null) {
418       this.meta = calculateIsMetaRegion();
419     }
420     return this.meta.booleanValue();
421   }
422 
423   private synchronized Boolean calculateIsMetaRegion() {
424     byte [] value = getValue(IS_META_KEY);
425     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
426   }
427 
428   private boolean isSomething(final ImmutableBytesWritable key,
429       final boolean valueIfNull) {
430     byte [] value = getValue(key);
431     if (value != null) {
432       return Boolean.valueOf(Bytes.toString(value));
433     }
434     return valueIfNull;
435   }
436 
437   /**
438    * <em> INTERNAL </em> Used to denote if the current table represents
439    * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
440    * internally by the HTableDescriptor constructors
441    *
442    * @param isMeta true if it is either <code> -ROOT- </code> or
443    * <code> hbase:meta </code> region
444    */
445   protected void setMetaRegion(boolean isMeta) {
446     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
447   }
448 
449   /**
450    * Checks if the table is a <code>hbase:meta</code> table
451    *
452    * @return true if table is <code> hbase:meta </code> region.
453    */
454   public boolean isMetaTable() {
455     return isMetaRegion() && !isRootRegion();
456   }
457 
458   /**
459    * Getter for accessing the metadata associated with the key
460    *
461    * @param key The key.
462    * @return The value.
463    * @see #values
464    */
465   public byte[] getValue(byte[] key) {
466     return getValue(new ImmutableBytesWritable(key));
467   }
468 
469   private byte[] getValue(final ImmutableBytesWritable key) {
470     ImmutableBytesWritable ibw = values.get(key);
471     if (ibw == null)
472       return null;
473     return ibw.get();
474   }
475 
476   /**
477    * Getter for accessing the metadata associated with the key
478    *
479    * @param key The key.
480    * @return The value.
481    * @see #values
482    */
483   public String getValue(String key) {
484     byte[] value = getValue(Bytes.toBytes(key));
485     if (value == null)
486       return null;
487     return Bytes.toString(value);
488   }
489 
490   /**
491    * Getter for fetching an unmodifiable {@link #values} map.
492    *
493    * @return unmodifiable map {@link #values}.
494    * @see #values
495    */
496   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
497     // shallow pointer copy
498     return Collections.unmodifiableMap(values);
499   }
500 
501   /**
502    * Setter for storing metadata as a (key, value) pair in {@link #values} map
503    *
504    * @param key The key.
505    * @param value The value.
506    * @see #values
507    */
508   public HTableDescriptor setValue(byte[] key, byte[] value) {
509     setValue(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value));
510     return this;
511   }
512 
513   /*
514    * @param key The key.
515    * @param value The value.
516    */
517   private HTableDescriptor setValue(final ImmutableBytesWritable key,
518       final String value) {
519     setValue(key, new ImmutableBytesWritable(Bytes.toBytes(value)));
520     return this;
521   }
522 
523   /*
524    * Setter for storing metadata as a (key, value) pair in {@link #values} map
525    *
526    * @param key The key.
527    * @param value The value.
528    */
529   public HTableDescriptor setValue(final ImmutableBytesWritable key,
530       final ImmutableBytesWritable value) {
531     if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
532       boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
533       LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
534           "use " + DURABILITY + " instead");
535       setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
536       return this;
537     }
538     values.put(key, value);
539     return this;
540   }
541 
542   /**
543    * Setter for storing metadata as a (key, value) pair in {@link #values} map
544    *
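       * <p>For example, storing and reading back an application-defined attribute
       * (the key name below is hypothetical):
       * <pre>
       * htd.setValue("MY_APP_VERSION", "1.4");
       * String v = htd.getValue("MY_APP_VERSION"); // returns "1.4"
       * </pre>
       *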
545    * @param key The key.
546    * @param value The value.
547    * @see #values
548    */
549   public HTableDescriptor setValue(String key, String value) {
550     if (value == null) {
551       remove(key);
552     } else {
553       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
554     }
555     return this;
556   }
557 
558   /**
559    * Remove metadata represented by the key from the {@link #values} map
560    *
561    * @param key Key whose entry (key and value) is to be removed from the
562    * HTableDescriptor parameters.
563    */
564   public void remove(final String key) {
565     remove(new ImmutableBytesWritable(Bytes.toBytes(key)));
566   }
567 
568   /**
569    * Remove metadata represented by the key from the {@link #values} map
570    *
571    * @param key Key whose entry (key and value) is to be removed from the
572    * HTableDescriptor parameters.
573    */
574   public void remove(ImmutableBytesWritable key) {
575     values.remove(key);
576   }
577 
578   /**
579    * Remove metadata represented by the key from the {@link #values} map
580    *
581    * @param key Key whose entry (key and value) is to be removed from the
582    * HTableDescriptor parameters.
583    */
584   public void remove(final byte [] key) {
585     remove(new ImmutableBytesWritable(key));
586   }
587 
588   /**
589    * Check if the readOnly flag of the table is set. If the readOnly flag is
590    * set, then the contents of the table can only be read, not modified.
591    *
592    * @return true if all columns in the table should be read only
593    */
594   public boolean isReadOnly() {
595     return isSomething(READONLY_KEY, DEFAULT_READONLY);
596   }
597 
598   /**
599    * Setting the table as read only sets all the columns in the table as read
600    * only. By default all tables are modifiable, but if the readOnly flag is
601    * set to true, then the contents of the table can only be read, not modified.
602    *
603    * @param readOnly True if all of the columns in the table should be read
604    * only.
605    */
606   public HTableDescriptor setReadOnly(final boolean readOnly) {
607     return setValue(READONLY_KEY, readOnly? TRUE: FALSE);
608   }
609 
610   /**
611    * Check if the compaction enable flag of the table is true. If the flag is
612    * false, then no minor or major compactions will actually be performed.
613    *
614    * @return true if compaction is enabled for the table
615    */
616   public boolean isCompactionEnabled() {
617     return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
618   }
619 
620   /**
621    * Set the table compaction enable flag.
622    *
623    * @param isEnable True to enable compaction.
624    */
625   public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
626     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
627     return this;
628   }
629 
630   /**
631    * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
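       * <p>For example, a sketch that trades durability guarantees for lower write
       * latency by writing WAL edits asynchronously:
       * <pre>
       * htd.setDurability(Durability.ASYNC_WAL);
       * </pre>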
632    * @param durability enum value
633    */
634   public HTableDescriptor setDurability(Durability durability) {
635     this.durability = durability;
636     setValue(DURABILITY_KEY, durability.name());
637     return this;
638   }
639 
640   /**
641    * Returns the durability setting for the table.
642    * @return durability setting for the table.
643    */
644   public Durability getDurability() {
645     if (this.durability == null) {
646       byte[] durabilityValue = getValue(DURABILITY_KEY);
647       if (durabilityValue == null) {
648         this.durability = DEFAULT_DURABLITY;
649       } else {
650         try {
651           this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
652         } catch (IllegalArgumentException ex) {
653           LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
654             + " is not known. Durability:" + Bytes.toString(durabilityValue));
655           this.durability = DEFAULT_DURABLITY;
656         }
657       }
658     }
659     return this.durability;
660   }
661 
662   /**
663    * Get the name of the table
664    *
665    * @return TableName
666    */
667   public TableName getTableName() {
668     return name;
669   }
670 
671   /**
672    * Get the name of the table as a byte array.
673    *
674    * @return name of table
675    * @deprecated Use {@link #getTableName()} instead
676    */
677   @Deprecated
678   public byte[] getName() {
679     return name.getName();
680   }
681 
682   /**
683    * Get the name of the table as a String
684    *
685    * @return name of table as a String
686    */
687   public String getNameAsString() {
688     return name.getNameAsString();
689   }
690 
691   /**
692    * This sets the class associated with the region split policy which
693    * determines when a region split should occur.  The class used by
694    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
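       * <p>For example, a sketch selecting the constant-size policy that ships
       * with HBase:
       * <pre>
       * htd.setRegionSplitPolicyClassName(
       *   "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");
       * </pre>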
695    * @param clazz the class name
696    */
697   public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
698     setValue(SPLIT_POLICY, clazz);
699     return this;
700   }
701 
702   /**
703    * This gets the class associated with the region split policy which
704    * determines when a region split should occur.  The class used by
705    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
706    *
707    * @return the class name of the region split policy for this table.
708    * If this returns null, the default split policy is used.
709    */
710   public String getRegionSplitPolicyClassName() {
711     return getValue(SPLIT_POLICY);
712   }
713 
714   /**
715    * Set the name of the table.
716    *
717    * @param name name of table
718    */
719   @Deprecated
720   public HTableDescriptor setName(byte[] name) {
721     setName(TableName.valueOf(name));
722     return this;
723   }
724 
725   @Deprecated
726   public HTableDescriptor setName(TableName name) {
727     this.name = name;
728     setMetaFlags(this.name);
729     return this;
730   }
731 
732   /**
733    * Returns the maximum size up to which a region can grow, after which a region
734    * split is triggered. The region size is represented by the size of the biggest
735    * store file in that region.
736    *
737    * @return max hregion size for table, -1 if not set.
738    *
739    * @see #setMaxFileSize(long)
740    */
741   public long getMaxFileSize() {
742     byte [] value = getValue(MAX_FILESIZE_KEY);
743     if (value != null) {
744       return Long.parseLong(Bytes.toString(value));
745     }
746     return -1;
747   }
748 
749   /**
750    * Sets the maximum size up to which a region can grow, after which a region
751    * split is triggered. The region size is represented by the size of the biggest
752    * store file in that region, i.e. if the biggest store file grows beyond the
753    * maxFileSize, then the region split is triggered. This defaults to
754    * {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
755    * <p>
756    * This is not an absolute value and might vary. If a single row exceeds
757    * the maxFileSize, then the store file size will be greater than maxFileSize,
758    * since a single row cannot be split across multiple regions.
759    * </p>
760    *
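       * <p>For example (a sketch; the chosen size is arbitrary):
       * <pre>
       * htd.setMaxFileSize(10L * 1024 * 1024 * 1024); // split once the biggest store file passes 10 GB
       * </pre>
       *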
761    * @param maxFileSize The maximum file size that a store file can grow to
762    * before a split is triggered.
763    */
764   public HTableDescriptor setMaxFileSize(long maxFileSize) {
765     setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
766     return this;
767   }
768 
769   /**
770    * Returns the size of the memstore after which a flush to filesystem is triggered.
771    *
772    * @return memory cache flush size for each hregion, -1 if not set.
773    *
774    * @see #setMemStoreFlushSize(long)
775    */
776   public long getMemStoreFlushSize() {
777     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
778     if (value != null) {
779       return Long.parseLong(Bytes.toString(value));
780     }
781     return -1;
782   }
783 
784   /**
785    * Sets the maximum size of the memstore, after which the contents of the
786    * memstore are flushed to the filesystem. This defaults to
       * {@link #DEFAULT_MEMSTORE_FLUSH_SIZE} (128 MB).
787    *
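       * <p>For example (a sketch; the chosen size is arbitrary):
       * <pre>
       * htd.setMemStoreFlushSize(256L * 1024 * 1024); // flush memstores at 256 MB
       * </pre>
       *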
788    * @param memstoreFlushSize memory cache flush size for each hregion
789    */
790   public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
791     setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
792     return this;
793   }
794 
795   /**
796    * This sets the class associated with the flush policy, which determines which stores
797    * need to be flushed when flushing a region. The class used by default is defined in
798    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
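       * <p>For example (the policy class named here is hypothetical):
       * <pre>
       * htd.setFlushPolicyClassName("org.myorg.MyFlushPolicy");
       * </pre>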
799    * @param clazz the class name
800    */
801   public HTableDescriptor setFlushPolicyClassName(String clazz) {
802     setValue(FLUSH_POLICY, clazz);
803     return this;
804   }
805 
806   /**
807    * This gets the class associated with the flush policy, which determines which stores
808    * need to be flushed when flushing a region. The class used by default is defined in
809    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
810    * @return the class name of the flush policy for this table. If this returns null, the default
811    *         flush policy is used.
812    */
813   public String getFlushPolicyClassName() {
814     return getValue(FLUSH_POLICY);
815   }
816 
817   /**
818    * Adds a column family.
819    * To update an existing column family, use {@link #modifyFamily(HColumnDescriptor)} instead.
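       * <p>For example (the family name is hypothetical):
       * <pre>
       * htd.addFamily(new HColumnDescriptor("cf").setMaxVersions(3));
       * </pre>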
820    * @param family HColumnDescriptor of family to add.
821    */
822   public HTableDescriptor addFamily(final HColumnDescriptor family) {
823     if (family.getName() == null || family.getName().length <= 0) {
824       throw new IllegalArgumentException("Family name cannot be null or empty");
825     }
826     if (hasFamily(family.getName())) {
827       throw new IllegalArgumentException("Family '" +
828         family.getNameAsString() + "' already exists so cannot be added");
829     }
830     this.families.put(family.getName(), family);
831     return this;
832   }
833 
834   /**
835    * Modifies the existing column family.
836    * @param family HColumnDescriptor of family to update
837    * @return this (for chained invocation)
838    */
839   public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
840     if (family.getName() == null || family.getName().length <= 0) {
841       throw new IllegalArgumentException("Family name cannot be null or empty");
842     }
843     if (!hasFamily(family.getName())) {
844       throw new IllegalArgumentException("Column family '" + family.getNameAsString()
845         + "' does not exist");
846     }
847     this.families.put(family.getName(), family);
848     return this;
849   }
850 
851   /**
852    * Checks to see if this table contains the given column family
853    * @param familyName Family name or column name.
854    * @return true if the table contains the specified family name
855    */
856   public boolean hasFamily(final byte [] familyName) {
857     return families.containsKey(familyName);
858   }
859 
860   /**
861    * @return Name of this table and then a map of all of the column family
862    * descriptors.
863    * @see #getNameAsString()
864    */
865   @Override
866   public String toString() {
867     StringBuilder s = new StringBuilder();
868     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
869     s.append(getValues(true));
870     for (HColumnDescriptor f : families.values()) {
871       s.append(", ").append(f);
872     }
873     return s.toString();
874   }
875 
876   /**
877    * @return Name of this table and then a map of all of the column family
878    * descriptors (with only the non-default column family attributes)
879    */
880   public String toStringCustomizedValues() {
881     StringBuilder s = new StringBuilder();
882     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
883     s.append(getValues(false));
884     for(HColumnDescriptor hcd : families.values()) {
885       s.append(", ").append(hcd.toStringCustomizedValues());
886     }
887     return s.toString();
888   }
889 
890   /**
891    * @return map of all table attributes formatted into string.
892    */
893   public String toStringTableAttributes() {
894    return getValues(true).toString();
895   }
896 
897   private StringBuilder getValues(boolean printDefaults) {
898     StringBuilder s = new StringBuilder();
899 
900     // step 1: set partitioning and pruning
901     Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
902     Set<ImmutableBytesWritable> userKeys = new TreeSet<ImmutableBytesWritable>();
903     for (ImmutableBytesWritable k : values.keySet()) {
904       if (k == null || k.get() == null) continue;
905       String key = Bytes.toString(k.get());
906       // in this section, print out reserved keywords + coprocessor info
907       if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
908         userKeys.add(k);
909         continue;
910       }
911       // only print out IS_ROOT/IS_META if true
912       String value = Bytes.toString(values.get(k).get());
913       if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
914         if (!Boolean.valueOf(value)) continue;
915       }
916       // see if a reserved key is a default value. may not want to print it out
917       if (printDefaults
918           || !DEFAULT_VALUES.containsKey(key)
919           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
920         reservedKeys.add(k);
921       }
922     }
923 
924     // early exit optimization
925     boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
926     if (!hasAttributes && configuration.isEmpty()) return s;
927 
928     s.append(", {");
929     // step 2: printing attributes
930     if (hasAttributes) {
931       s.append("TABLE_ATTRIBUTES => {");
932 
933       // print all reserved keys first
934       boolean printCommaForAttr = false;
935       for (ImmutableBytesWritable k : reservedKeys) {
936         String key = Bytes.toString(k.get());
937         String value = Bytes.toStringBinary(values.get(k).get());
938         if (printCommaForAttr) s.append(", ");
939         printCommaForAttr = true;
940         s.append(key);
941         s.append(" => ");
942         s.append('\'').append(value).append('\'');
943       }
944 
945       if (!userKeys.isEmpty()) {
946         // print all non-reserved, advanced config keys as a separate subset
947         if (printCommaForAttr) s.append(", ");
948         printCommaForAttr = true;
949         s.append(HConstants.METADATA).append(" => ");
950         s.append("{");
951         boolean printCommaForCfg = false;
952         for (ImmutableBytesWritable k : userKeys) {
953           String key = Bytes.toString(k.get());
954           String value = Bytes.toStringBinary(values.get(k).get());
955           if (printCommaForCfg) s.append(", ");
956           printCommaForCfg = true;
957           s.append('\'').append(key).append('\'');
958           s.append(" => ");
959           s.append('\'').append(value).append('\'');
960         }
961         s.append("}");
962       }
963     }
964 
965     // step 3: printing all configuration:
966     if (!configuration.isEmpty()) {
967       if (hasAttributes) {
968         s.append(", ");
969       }
970       s.append(HConstants.CONFIGURATION).append(" => ");
971       s.append('{');
972       boolean printCommaForConfig = false;
973       for (Map.Entry<String, String> e : configuration.entrySet()) {
974         if (printCommaForConfig) s.append(", ");
975         printCommaForConfig = true;
976         s.append('\'').append(e.getKey()).append('\'');
977         s.append(" => ");
978         s.append('\'').append(e.getValue()).append('\'');
979       }
980       s.append("}");
981     }
982     s.append("}"); // end METHOD
983     return s;
984   }
985 
986   /**
987    * Compare the contents of the descriptor with another one passed as a parameter.
988    * Checks if the obj passed is an instance of HTableDescriptor; if so, the
989    * contents of the two descriptors are compared.
990    *
991    * @return true if the contents of the two descriptors exactly match
992    *
993    * @see java.lang.Object#equals(java.lang.Object)
994    */
995   @Override
996   public boolean equals(Object obj) {
997     if (this == obj) {
998       return true;
999     }
1000     if (obj == null) {
1001       return false;
1002     }
1003     if (!(obj instanceof HTableDescriptor)) {
1004       return false;
1005     }
1006     return compareTo((HTableDescriptor)obj) == 0;
1007   }
1008 
1009   /**
1010    * @see java.lang.Object#hashCode()
1011    */
1012   @Override
1013   public int hashCode() {
1014     int result = this.name.hashCode();
1015     result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode();
1016     if (this.families != null && this.families.size() > 0) {
1017       for (HColumnDescriptor e: this.families.values()) {
1018         result ^= e.hashCode();
1019       }
1020     }
1021     result ^= values.hashCode();
1022     result ^= configuration.hashCode();
1023     return result;
1024   }
1025 
1026   /**
1027    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
1028    * and is used for de-serialization of the HTableDescriptor over RPC
1029    * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
1030    */
1031   @Deprecated
1032   @Override
1033   public void readFields(DataInput in) throws IOException {
1034     int version = in.readInt();
1035     if (version < 3)
1036       throw new IOException("versions < 3 are not supported (and never existed!?)");
1037     // version 3+
1038     name = TableName.valueOf(Bytes.readByteArray(in));
1039     setRootRegion(in.readBoolean());
1040     setMetaRegion(in.readBoolean());
1041     values.clear();
1042     configuration.clear();
1043     int numVals = in.readInt();
1044     for (int i = 0; i < numVals; i++) {
1045       ImmutableBytesWritable key = new ImmutableBytesWritable();
1046       ImmutableBytesWritable value = new ImmutableBytesWritable();
1047       key.readFields(in);
1048       value.readFields(in);
1049       setValue(key, value);
1050     }
1051     families.clear();
1052     int numFamilies = in.readInt();
1053     for (int i = 0; i < numFamilies; i++) {
1054       HColumnDescriptor c = new HColumnDescriptor();
1055       c.readFields(in);
1056       families.put(c.getName(), c);
1057     }
1058     if (version >= 7) {
1059       int numConfigs = in.readInt();
1060       for (int i = 0; i < numConfigs; i++) {
1061         ImmutableBytesWritable key = new ImmutableBytesWritable();
1062         ImmutableBytesWritable value = new ImmutableBytesWritable();
1063         key.readFields(in);
1064         value.readFields(in);
1065         configuration.put(
1066           Bytes.toString(key.get(), key.getOffset(), key.getLength()),
1067           Bytes.toString(value.get(), value.getOffset(), value.getLength()));
1068       }
1069     }
1070   }
1071 
1072   /**
1073    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
1074    * and is used for serialization of the HTableDescriptor over RPC
1075    * @deprecated Writables are going away.
1076    * Use {@link com.google.protobuf.MessageLite#toByteArray} instead.
1077    */
1078   @Deprecated
1079   @Override
1080   public void write(DataOutput out) throws IOException {
1081     out.writeInt(TABLE_DESCRIPTOR_VERSION);
1082     Bytes.writeByteArray(out, name.toBytes());
1083     out.writeBoolean(isRootRegion());
1084     out.writeBoolean(isMetaRegion());
1085     out.writeInt(values.size());
1086     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1087         values.entrySet()) {
1088       e.getKey().write(out);
1089       e.getValue().write(out);
1090     }
1091     out.writeInt(families.size());
1092     for(Iterator<HColumnDescriptor> it = families.values().iterator();
1093         it.hasNext(); ) {
1094       HColumnDescriptor family = it.next();
1095       family.write(out);
1096     }
1097     out.writeInt(configuration.size());
1098     for (Map.Entry<String, String> e : configuration.entrySet()) {
1099       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1100       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1101     }
1102   }
1103 
1104   // Comparable
1105 
1106   /**
1107    * Compares the descriptor with another descriptor which is passed as a parameter.
1108    * This compares the content of the two descriptors and not the reference.
1109    *
1110    * @return 0 if the contents of the descriptors match exactly;
1111    *         a non-zero value if there is a mismatch
1112    */
1113   @Override
1114   public int compareTo(final HTableDescriptor other) {
1115     int result = this.name.compareTo(other.name);
1116     if (result == 0) {
1117       result = families.size() - other.families.size();
1118     }
1123     if (result == 0) {
1124       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1125           it2 = other.families.values().iterator(); it.hasNext(); ) {
1126         result = it.next().compareTo(it2.next());
1127         if (result != 0) {
1128           break;
1129         }
1130       }
1131     }
1132     if (result == 0) {
1133       // punt on comparison for ordering, just calculate difference
1134       result = this.values.hashCode() - other.values.hashCode();
1135       if (result < 0)
1136         result = -1;
1137       else if (result > 0)
1138         result = 1;
1139     }
1140     if (result == 0) {
1141       result = this.configuration.hashCode() - other.configuration.hashCode();
1142       if (result < 0)
1143         result = -1;
1144       else if (result > 0)
1145         result = 1;
1146     }
1147     return result;
1148   }
1149 
1150   /**
1151    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1152    * of all the column families of the table.
1153    *
1154    * @return Immutable collection of {@link HColumnDescriptor} of all the
1155    * column families.
1156    */
1157   public Collection<HColumnDescriptor> getFamilies() {
1158     return Collections.unmodifiableCollection(this.families.values());
1159   }
1160 
1161   /**
1162    * Returns the configured replicas per region
1163    */
1164   public int getRegionReplication() {
1165     byte[] val = getValue(REGION_REPLICATION_KEY);
1166     if (val == null || val.length == 0) {
1167       return DEFAULT_REGION_REPLICATION;
1168     }
1169     return Integer.parseInt(Bytes.toString(val));
1170   }
1171 
1172   /**
1173    * Sets the number of replicas per region.
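       * <p>For example, a sketch configuring one primary plus two read replicas
       * per region:
       * <pre>
       * htd.setRegionReplication(3);
       * </pre>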
1174    * @param regionReplication the replication factor per region
1175    */
1176   public HTableDescriptor setRegionReplication(int regionReplication) {
1177     setValue(REGION_REPLICATION_KEY,
1178         new ImmutableBytesWritable(Bytes.toBytes(Integer.toString(regionReplication))));
1179     return this;
1180   }
1181 
1182   /**
1183    * @return true if the read-replicas memstore replication is enabled.
1184    */
1185   public boolean hasRegionMemstoreReplication() {
1186     return isSomething(REGION_MEMSTORE_REPLICATION_KEY, DEFAULT_REGION_MEMSTORE_REPLICATION);
1187   }
1188 
1189   /**
1190    * Enable or disable the memstore replication from the primary region to the replicas.
1191    * The replication will be used only for meta operations (e.g. flush, compaction, ...)
1192    *
1193    * @param memstoreReplication true if the new data written to the primary region
1194    *                                 should be replicated.
1195    *                            false if the secondaries can tolerate receiving new
1196    *                                  data only when the primary flushes the memstore.
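       *
       * <p>For example (a sketch):
       * <pre>
       * htd.setRegionMemstoreReplication(false);
       * </pre>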
1197    */
1198   public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) {
1199     setValue(REGION_MEMSTORE_REPLICATION_KEY, memstoreReplication ? TRUE : FALSE);
1200     // If the memstore replication is setup, we do not have to wait for observing a flush event
1201     // from primary before starting to serve reads, because gaps from replication is not applicable
1202     setConfiguration(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
1203       Boolean.toString(memstoreReplication));
1204     return this;
1205   }
1206 
1207   /**
1208    * Returns all the column family names of the current table. The map of
1209    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1210    * This returns all the keys of the family map which represents the column
1211    * family names of the table.
1212    *
1213    * @return Immutable sorted set of the keys of the families.
1214    */
1215   public Set<byte[]> getFamiliesKeys() {
1216     return Collections.unmodifiableSet(this.families.keySet());
1217   }
1218 
1219   /**
1220    * Returns an array of all the {@link HColumnDescriptor}s of the column families
1221    * of the table.
1222    *
1223    * @return Array of all the HColumnDescriptors of the current table
1224    *
1225    * @see #getFamilies()
1226    */
1227   public HColumnDescriptor[] getColumnFamilies() {
1228     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1229     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1230   }
1231 
1232 
1233   /**
1234    * Returns the HColumnDescriptor for a specific column family with name as
1235    * specified by the parameter column.
1236    *
1237    * @param column Column family name
1238    * @return Column descriptor for the passed family name, or null if no
1239    * such family exists.
1240    */
1241   public HColumnDescriptor getFamily(final byte [] column) {
1242     return this.families.get(column);
1243   }
1244 
1245 
1246   /**
1247    * Removes the HColumnDescriptor with name specified by the parameter column
1248    * from the table descriptor
1249    *
1250    * @param column Name of the column family to be removed.
1251    * @return Column descriptor of the removed family, or null if the family
1252    * was not present.
1253    */
1254   public HColumnDescriptor removeFamily(final byte [] column) {
1255     return this.families.remove(column);
1256   }
1257 
1258   /**
1259    * Add a table coprocessor to this table. The coprocessor
1260    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1261    * or Endpoint.
1262    * It does not check whether the class can be loaded.
1263    * Whether a coprocessor is loadable is determined when
1264    * a region is opened.
1265    * @param className Full class name.
1266    * @throws IOException
1267    */
1268   public HTableDescriptor addCoprocessor(String className) throws IOException {
1269     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1270     return this;
1271   }
1272 
1273   /**
1274    * Add a table coprocessor to this table. The coprocessor
1275    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1276    * or Endpoint.
1277    * It does not check whether the class can be loaded.
1278    * Whether a coprocessor is loadable is determined when
1279    * a region is opened.
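       * <p>For example (the jar path, class name and arguments are hypothetical):
       * <pre>
       * Map&lt;String, String&gt; args = new HashMap&lt;String, String&gt;();
       * args.put("arg1", "1");
       * htd.addCoprocessor("org.myorg.MyRegionObserver",
       *   new Path("hdfs:///cp/my-coprocessor.jar"), Coprocessor.PRIORITY_USER, args);
       * </pre>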
1280    * @param jarFilePath Path of the jar file. If it's null, the class will be
1281    * loaded from default classloader.
1282    * @param className Full class name.
1283    * @param priority Priority
1284    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1285    * @throws IOException
1286    */
1287   public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
1288                              int priority, final Map<String, String> kvs)
1289   throws IOException {
1290     checkHasCoprocessor(className);
1291 
1292     // Validate parameter kvs and then add key/values to kvString.
1293     StringBuilder kvString = new StringBuilder();
1294     if (kvs != null) {
1295       for (Map.Entry<String, String> e: kvs.entrySet()) {
1296         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1297           throw new IOException("Illegal parameter key = " + e.getKey());
1298         }
1299         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1300           throw new IOException("Illegal parameter (" + e.getKey() +
1301               ") value = " + e.getValue());
1302         }
1303         if (kvString.length() != 0) {
1304           kvString.append(',');
1305         }
1306         kvString.append(e.getKey());
1307         kvString.append('=');
1308         kvString.append(e.getValue());
1309       }
1310     }
1311 
1312     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1313         "|" + className + "|" + Integer.toString(priority) + "|" +
1314         kvString.toString();
1315     return addCoprocessorToMap(value);
1316   }
1317 
1318   /**
1319    * Add a table coprocessor to this table. The coprocessor
1320    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1321    * or Endpoint.
1322    * It does not check whether the class can be loaded.
1323    * Whether a coprocessor is loadable is determined when
1324    * a region is opened.
1325    * @param specStr The coprocessor specification, all in one String, formatted so that it
1326    * matches {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
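       * <p>For example (the jar path and class name are hypothetical), following the
       * <code>path|class|priority|key=value,...</code> layout that
       * {@link #addCoprocessor(String, Path, int, Map)} builds:
       * <pre>
       * htd.addCoprocessorWithSpec(
       *   "hdfs:///cp/my-coprocessor.jar|org.myorg.MyRegionObserver|1001|arg1=1");
       * </pre>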
1327    * @throws IOException
1328    */
1329   public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
1330     String className = getCoprocessorClassNameFromSpecStr(specStr);
1331     if (className == null) {
1332       throw new IllegalArgumentException("Format does not match " +
1333         HConstants.CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr);
1334     }
1335     checkHasCoprocessor(className);
1336     return addCoprocessorToMap(specStr);
1337   }
1338 
1339   private void checkHasCoprocessor(final String className) throws IOException {
1340     if (hasCoprocessor(className)) {
1341       throw new IOException("Coprocessor " + className + " already exists.");
1342     }
1343   }
1344 
1345   /**
1346    * Add coprocessor to values Map
1347    * @param specStr The coprocessor specification, all in one String, formatted so that it
1348    * matches {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1349    * @return Returns <code>this</code>
1350    */
1351   private HTableDescriptor addCoprocessorToMap(final String specStr) {
1352     if (specStr == null) return this;
1353     // generate a coprocessor key
1354     int maxCoprocessorNumber = 0;
1355     Matcher keyMatcher;
1356     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1357         this.values.entrySet()) {
1358       keyMatcher =
1359           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1360               Bytes.toString(e.getKey().get()));
1361       if (!keyMatcher.matches()) {
1362         continue;
1363       }
1364       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
1365     }
1366     maxCoprocessorNumber++;
1367     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1368     this.values.put(new ImmutableBytesWritable(Bytes.toBytes(key)),
1369       new ImmutableBytesWritable(Bytes.toBytes(specStr)));
1370     return this;
1371   }
1372 
1373   /**
1374    * Check if the table has an attached co-processor represented by the name className
1375    *
1376    * @param classNameToMatch - Class name of the co-processor
1377    * @return true if the table has the co-processor <code>classNameToMatch</code>
1378    */
1379   public boolean hasCoprocessor(String classNameToMatch) {
1380     Matcher keyMatcher;
1382     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1383         this.values.entrySet()) {
1384       keyMatcher =
1385           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1386               Bytes.toString(e.getKey().get()));
1387       if (!keyMatcher.matches()) {
1388         continue;
1389       }
1390       String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
1391       if (className == null) continue;
1392       if (className.equals(classNameToMatch.trim())) {
1393         return true;
1394       }
1395     }
1396     return false;
1397   }
1398 
1399   /**
1400    * Return the list of attached co-processors, represented by their class names
1401    *
1402    * @return The list of co-processor class names
1403    */
1404   public List<String> getCoprocessors() {
1405     List<String> result = new ArrayList<String>();
1406     Matcher keyMatcher;
1408     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values.entrySet()) {
1409       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1410       if (!keyMatcher.matches()) {
1411         continue;
1412       }
1413       String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
1414       if (className == null) continue;
1415       result.add(className); // classname is the 2nd field
1416     }
1417     return result;
1418   }
1419 
1420   /**
1421    * @param spec String formatted as per {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1422    * @return Class name parsed from the passed-in <code>spec</code>, or null if it does not match the pattern
1423    */
1424   private static String getCoprocessorClassNameFromSpecStr(final String spec) {
1425     Matcher matcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
1426     // Classname is the 2nd field
1427     return matcher.matches()? matcher.group(2).trim(): null;
1428   }
1429 
1430   /**
1431    * Remove a coprocessor from those set on the table
1432    * @param className Class name of the co-processor
1433    */
1434   public void removeCoprocessor(String className) {
1435     ImmutableBytesWritable match = null;
1436     Matcher keyMatcher;
1437     Matcher valueMatcher;
1438     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values
1439         .entrySet()) {
1440       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1441           .getKey().get()));
1442       if (!keyMatcher.matches()) {
1443         continue;
1444       }
1445       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1446           .toString(e.getValue().get()));
1447       if (!valueMatcher.matches()) {
1448         continue;
1449       }
1450       // get className and compare
1451       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1452       // remove the CP if it is present
1453       if (clazz.equals(className.trim())) {
1454         match = e.getKey();
1455         break;
1456       }
1457     }
1458     // if we found a match, remove it
1459     if (match != null)
1460       remove(match);
1461   }
1462 
1463   /**
1464    * Returns the {@link Path} object representing the table directory under
1465    * path rootdir
1466    *
1467    * @deprecated Use FSUtils.getTableDir() instead.
1468    *
1469    * @param rootdir qualified path of HBase root directory
1470    * @param tableName name of table
1471    * @return {@link Path} for table
1472    */
1473   @Deprecated
1474   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1475     // Mirrors code from FSUtils.getTableDir because there is no module
1476     // dependency between hbase-client and hbase-server.
1477     TableName name = TableName.valueOf(tableName);
1478     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1479               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1480   }
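
  // Worked example (editor's addition): with rootdir "hdfs://nn:8020/hbase",
  // table "ns1:t1" resolves to "hdfs://nn:8020/hbase/data/ns1/t1", and a table
  // "t2" in the default namespace to "hdfs://nn:8020/hbase/data/default/t2".
  //
  //   Path dir = HTableDescriptor.getTableDir(
  //       new Path("hdfs://nn:8020/hbase"), Bytes.toBytes("ns1:t1"));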
1481 
1482   /**
1483    * Table descriptor for <code>hbase:meta</code> catalog table
1484    * @deprecated Use TableDescriptors#get(TableName.META_TABLE_NAME) or
1485    * HBaseAdmin#getTableDescriptor(TableName.META_TABLE_NAME) instead.
1486    */
1487   @Deprecated
1488   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
1489       TableName.META_TABLE_NAME,
1490       new HColumnDescriptor[] {
1491           new HColumnDescriptor(HConstants.CATALOG_FAMILY)
1492               // Ten is an arbitrary number.  Keep versions to help debugging.
1493               .setMaxVersions(HConstants.DEFAULT_HBASE_META_VERSIONS)
1494               .setInMemory(true)
1495               .setBlocksize(HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)
1496               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1497               // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
1498               .setBloomFilterType(BloomType.NONE)
1499               // Enable cache of data blocks in L1 if more than one caching tier deployed:
1500               // e.g. if using CombinedBlockCache (BucketCache).
1501               .setCacheDataInL1(true)
1502       });
1503 
1504   static {
1505     try {
1506       META_TABLEDESC.addCoprocessor(
1507           "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
1508           null, Coprocessor.PRIORITY_SYSTEM, null);
1509     } catch (IOException ex) {
1510       // Should not happen: the spec for this built-in coprocessor is well-formed.
1511       throw new RuntimeException(ex);
1512     }
1513   }
1514 
1515   public final static String NAMESPACE_FAMILY_INFO = "info";
1516   public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
1517   public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1518 
1519   /** Table descriptor for namespace table */
1520   public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
1521       TableName.NAMESPACE_TABLE_NAME,
1522       new HColumnDescriptor[] {
1523           new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
1524               // Ten is an arbitrary number.  Keep versions to help debugging.
1525               .setMaxVersions(10)
1526               .setInMemory(true)
1527               .setBlocksize(8 * 1024)
1528               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1529               // Enable cache of data blocks in L1 if more than one caching tier deployed:
1530               // e.g. if using CombinedBlockCache (BucketCache).
1531               .setCacheDataInL1(true)
1532       });
1533 
1534   @Deprecated
1535   public HTableDescriptor setOwner(User owner) {
1536     return setOwnerString(owner != null ? owner.getShortName() : null);
1537   }
1538 
1539   // Used by admin.rb:alter(table_name, *args) to update the owner.
1540   @Deprecated
1541   public HTableDescriptor setOwnerString(String ownerString) {
1542     if (ownerString != null) {
1543       setValue(OWNER_KEY, ownerString);
1544     } else {
1545       remove(OWNER_KEY);
1546     }
1547     return this;
1548   }
1549 
1550   @Deprecated
1551   public String getOwnerString() {
1552     if (getValue(OWNER_KEY) != null) {
1553       return Bytes.toString(getValue(OWNER_KEY));
1554     }
1555     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1556     // hbase:meta and -ROOT- should return system user as owner, not null (see
1557     // MasterFileSystem.java:bootstrap()).
1558     return null;
1559   }
1560 
1561   /**
1562    * @return This instance serialized as a pb byte array with the pb magic prefix
1563    * @see #parseFrom(byte[])
1564    */
1565   public byte [] toByteArray() {
1566     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1567   }
1568 
1569   /**
1570    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1571    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1572    * @throws DeserializationException
1573    * @throws IOException
1574    * @see #toByteArray()
1575    */
1576   public static HTableDescriptor parseFrom(final byte [] bytes)
1577   throws DeserializationException, IOException {
1578     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1579       return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor());
1580     }
1581     int pblen = ProtobufUtil.lengthOfPBMagic();
1582     TableSchema.Builder builder = TableSchema.newBuilder();
1583     TableSchema ts;
1584     try {
1585       ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
1586       ts = builder.build();
1587     } catch (IOException e) {
1588       throw new DeserializationException(e);
1589     }
1590     return convert(ts);
1591   }
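
  // Round-trip sketch (editor's addition): serialize a descriptor and parse it
  // back. parseFrom also accepts legacy Writable-serialized bytes that lack
  // the pb magic prefix.
  //
  //   byte[] bytes = htd.toByteArray();
  //   HTableDescriptor copy = HTableDescriptor.parseFrom(bytes);
  //   assert copy.equals(htd);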
1592 
1593   /**
1594    * @return A pb TableSchema instance converted from the current {@link HTableDescriptor}.
1595    */
1596   public TableSchema convert() {
1597     TableSchema.Builder builder = TableSchema.newBuilder();
1598     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1599     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1600       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1601       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1602       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1603       builder.addAttributes(aBuilder.build());
1604     }
1605     for (HColumnDescriptor hcd: getColumnFamilies()) {
1606       builder.addColumnFamilies(hcd.convert());
1607     }
1608     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1609       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1610       aBuilder.setName(e.getKey());
1611       aBuilder.setValue(e.getValue());
1612       builder.addConfiguration(aBuilder.build());
1613     }
1614     return builder.build();
1615   }
1616 
1617   /**
1618    * @param ts A pb TableSchema instance.
1619    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1620    */
1621   public static HTableDescriptor convert(final TableSchema ts) {
1622     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1623     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1624     int index = 0;
1625     for (ColumnFamilySchema cfs: list) {
1626       hcds[index++] = HColumnDescriptor.convert(cfs);
1627     }
1628     HTableDescriptor htd = new HTableDescriptor(
1629         ProtobufUtil.toTableName(ts.getTableName()),
1630         hcds);
1631     for (BytesBytesPair a: ts.getAttributesList()) {
1632       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1633     }
1634     for (NameStringPair a: ts.getConfigurationList()) {
1635       htd.setConfiguration(a.getName(), a.getValue());
1636     }
1637     return htd;
1638   }
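
  // Editor's note: convert() and convert(TableSchema) are inverses; they back
  // the toByteArray()/parseFrom() round trip above.
  //
  //   TableSchema ts = htd.convert();
  //   HTableDescriptor same = HTableDescriptor.convert(ts);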
1639 
1640   /**
1641    * Getter for accessing the configuration value by key
1642    */
1643   public String getConfigurationValue(String key) {
1644     return configuration.get(key);
1645   }
1646 
1647   /**
1648    * Getter for fetching an unmodifiable {@link #configuration} map.
1649    */
1650   public Map<String, String> getConfiguration() {
1651     // Unmodifiable view backed by the underlying map; not a copy.
1652     return Collections.unmodifiableMap(configuration);
1653   }
1654 
1655   /**
1656    * Setter for storing a configuration setting in {@link #configuration} map.
1657    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1658    * @param value String value. If null, removes the setting.
1659    */
1660   public HTableDescriptor setConfiguration(String key, String value) {
1661     if (value == null) {
1662       removeConfiguration(key);
1663     } else {
1664       configuration.put(key, value);
1665     }
1666     return this;
1667   }
1668 
1669   /**
1670    * Remove a config setting represented by the key from the {@link #configuration} map
1671    */
1672   public void removeConfiguration(final String key) {
1673     configuration.remove(key);
1674   }
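
  // Illustrative sketch (editor's addition): storing, reading, and clearing a
  // per-table configuration override. The key is a real XML config key, used
  // here only as an example.
  //
  //   htd.setConfiguration("hbase.hstore.blockingStoreFiles", "20");
  //   String v = htd.getConfigurationValue("hbase.hstore.blockingStoreFiles"); // "20"
  //   htd.setConfiguration("hbase.hstore.blockingStoreFiles", null); // removes it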
1675 
1676   public static HTableDescriptor metaTableDescriptor(final Configuration conf)
1677       throws IOException {
1678     HTableDescriptor metaDescriptor = new HTableDescriptor(
1679       TableName.META_TABLE_NAME,
1680       new HColumnDescriptor[] {
1681         new HColumnDescriptor(HConstants.CATALOG_FAMILY)
1682           .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
1683             HConstants.DEFAULT_HBASE_META_VERSIONS))
1684           .setInMemory(true)
1685           .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
1686             HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
1687           .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1688           // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
1689           .setBloomFilterType(BloomType.NONE)
1690           .setCacheDataInL1(true)
1691          });
1692     metaDescriptor.addCoprocessor(
1693       "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
1694       null, Coprocessor.PRIORITY_SYSTEM, null);
1695     return metaDescriptor;
1696   }
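
  // Illustrative sketch (editor's addition): building a meta table descriptor
  // that honors site configuration overrides.
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   HTableDescriptor meta = HTableDescriptor.metaTableDescriptor(conf);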
1697 
1698 }