/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.regex.Matcher;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * HTableDescriptor contains the details about an HBase table, such as the descriptors of
 * all the column families, whether the table is a catalog table (<code>-ROOT-</code> or
 * <code>hbase:meta</code>), whether the table is read-only, the maximum size of the memstore,
 * when the region split should occur, the coprocessors associated with it, etc.
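 * <p>
 * A minimal usage sketch (the table and family names are illustrative only):
 * <pre>{@code
 * HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
 * htd.addFamily(new HColumnDescriptor("cf"));
 * }</pre>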
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HTableDescriptor implements Comparable<HTableDescriptor> {

  private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);

  private TableName name = null;

  /**
   * A map which holds the metadata information of the table. This metadata
   * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
   * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE, etc.
   */
  private final Map<Bytes, Bytes> values =
      new HashMap<Bytes, Bytes>();

  /**
   * A map which holds the configuration specific to the table.
   * The keys of the map have the same names as config keys and override the defaults with
   * table-specific settings. Example usage may be for compactions, etc.
   */
  private final Map<String, String> configuration = new HashMap<String, String>();

  public static final String SPLIT_POLICY = "SPLIT_POLICY";

  /**
   * <em>INTERNAL</em> Used by the HBase Shell interface to access this metadata
   * attribute which denotes the maximum size of the store file after which
   * a region split occurs
   *
   * @see #getMaxFileSize()
   */
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final Bytes MAX_FILESIZE_KEY =
      new Bytes(Bytes.toBytes(MAX_FILESIZE));

  public static final String OWNER = "OWNER";
  public static final Bytes OWNER_KEY =
      new Bytes(Bytes.toBytes(OWNER));

  /**
   * <em>INTERNAL</em> Used by the REST interface to access this metadata
   * attribute which denotes if the table is read-only
   *
   * @see #isReadOnly()
   */
  public static final String READONLY = "READONLY";
  private static final Bytes READONLY_KEY =
      new Bytes(Bytes.toBytes(READONLY));

  /**
   * <em>INTERNAL</em> Used by the HBase Shell interface to access this metadata
   * attribute which denotes if compaction is enabled for the table
   *
   * @see #isCompactionEnabled()
   */
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final Bytes COMPACTION_ENABLED_KEY =
      new Bytes(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * <em>INTERNAL</em> Used by the HBase Shell interface to access this metadata
   * attribute which represents the maximum size of the memstore after which
   * its contents are flushed onto the disk
   *
   * @see #getMemStoreFlushSize()
   */
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final Bytes MEMSTORE_FLUSHSIZE_KEY =
      new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  public static final String FLUSH_POLICY = "FLUSH_POLICY";

  /**
   * <em>INTERNAL</em> Used by the REST interface to access this metadata
   * attribute which denotes if the table is a -ROOT- region or not
   *
   * @see #isRootRegion()
   */
  public static final String IS_ROOT = "IS_ROOT";
  private static final Bytes IS_ROOT_KEY =
      new Bytes(Bytes.toBytes(IS_ROOT));

  /**
   * <em>INTERNAL</em> Used by the REST interface to access this metadata
   * attribute which denotes if it is a catalog table, either
   * <code> hbase:meta </code> or <code> -ROOT- </code>
   *
   * @see #isMetaRegion()
   */
  public static final String IS_META = "IS_META";
  private static final Bytes IS_META_KEY =
      new Bytes(Bytes.toBytes(IS_META));

  /**
   * <em>INTERNAL</em> Used by the HBase Shell interface to access this metadata
   * attribute which denotes if the deferred log flush option is enabled.
   * @deprecated Use {@link #DURABILITY} instead.
   */
  @Deprecated
  public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
  @Deprecated
  private static final Bytes DEFERRED_LOG_FLUSH_KEY =
      new Bytes(Bytes.toBytes(DEFERRED_LOG_FLUSH));

  /**
   * <em>INTERNAL</em> {@link Durability} setting for the table.
   */
  public static final String DURABILITY = "DURABILITY";
  private static final Bytes DURABILITY_KEY =
      new Bytes(Bytes.toBytes("DURABILITY"));

  /**
   * <em>INTERNAL</em> number of region replicas for the table.
   */
  public static final String REGION_REPLICATION = "REGION_REPLICATION";
  private static final Bytes REGION_REPLICATION_KEY =
      new Bytes(Bytes.toBytes(REGION_REPLICATION));

  /**
   * <em>INTERNAL</em> flag to indicate whether or not the memstore should be replicated
   * for read-replicas (CONSISTENCY =&gt; TIMELINE).
   */
  public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
  private static final Bytes REGION_MEMSTORE_REPLICATION_KEY =
      new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));

  /**
   * <em>INTERNAL</em> Used by the shell/REST interface to access this metadata
   * attribute which denotes whether the table should be considered by the region normalizer.
   *
   * @see #isNormalizationEnabled()
   */
  public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
  private static final Bytes NORMALIZATION_ENABLED_KEY =
      new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));

  /** Default durability for HTD is USE_DEFAULT, which defaults to the HBase-global default value */
  private static final Durability DEFAULT_DURABILITY = Durability.USE_DEFAULT;

  /*
   *  The below are ugly but better than creating them each time till we
   *  replace booleans being saved as Strings with plain booleans.  Need a
   *  migration script to do this.  TODO.
   */
  private static final Bytes FALSE =
      new Bytes(Bytes.toBytes(Boolean.FALSE.toString()));

  private static final Bytes TRUE =
      new Bytes(Bytes.toBytes(Boolean.TRUE.toString()));

  private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;

  /**
   * Constant that denotes whether the table is read-only by default; the default is false.
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes whether the table is normalized by default.
   */
  public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;

  /**
   * Constant that denotes the maximum default size of the memstore after which
   * the contents are flushed to the store files
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;

  private static final Map<String, String> DEFAULT_VALUES
    = new HashMap<String, String>();
  private static final Set<Bytes> RESERVED_KEYWORDS
      = new HashSet<Bytes>();

  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
        String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
        String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
        String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABILITY.name()); //use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
    for (String s : DEFAULT_VALUES.keySet()) {
      RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
    }
    RESERVED_KEYWORDS.add(IS_ROOT_KEY);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }

  /**
   * Cache of whether this is a meta table or not.
   */
  private volatile Boolean meta = null;
  /**
   * Cache of whether this is a root table or not.
   */
  private volatile Boolean root = null;

  /**
   * Durability setting for the table
   */
  private Durability durability = null;

  /**
   * Maps column family name to the respective HColumnDescriptors
   */
  private final Map<byte [], HColumnDescriptor> families =
    new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);

  /**
   * <em>INTERNAL</em> Private constructor used internally to create table descriptors for
   * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
   */
  @InterfaceAudience.Private
  protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
    setName(name);
    for(HColumnDescriptor descriptor : families) {
      this.families.put(descriptor.getName(), descriptor);
    }
  }

  /**
   * <em>INTERNAL</em> Private constructor used internally to create table descriptors for
   * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
   */
  protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
      Map<Bytes, Bytes> values) {
    setName(name);
    for(HColumnDescriptor descriptor : families) {
      this.families.put(descriptor.getName(), descriptor);
    }
    for (Map.Entry<Bytes, Bytes> entry :
        values.entrySet()) {
      setValue(entry.getKey(), entry.getValue());
    }
  }

  /**
   * Default constructor which constructs an empty object.
   * For deserializing an HTableDescriptor instance only.
   * @deprecated As of release 0.96 (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
   *             This was made protected in 2.0.0 and will be removed in HBase 3.0.0.
   *             Used by Writables and Writables are going away.
   */
  @Deprecated
  protected HTableDescriptor() {
    super();
  }

  /**
   * Construct a table descriptor specifying a TableName object
   * @param name Table name.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 (HBASE-174) HBASE: Un-openable tablename bug</a>
   */
  public HTableDescriptor(final TableName name) {
    super();
    setName(name);
  }

  /**
   * Construct a table descriptor specifying a byte array table name
   * @param name Table name.
   * @deprecated Use {@link #HTableDescriptor(TableName)} instead.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 (HBASE-174) HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final byte[] name) {
    this(TableName.valueOf(name));
  }

  /**
   * Construct a table descriptor specifying a String table name
   * @param name Table name.
   * @deprecated Use {@link #HTableDescriptor(TableName)} instead.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 (HBASE-174) HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final String name) {
    this(TableName.valueOf(name));
  }

  /**
   * Construct a table descriptor by cloning the descriptor passed as a parameter.
   * <p>
   * Makes a deep copy of the supplied descriptor.
   * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
   * @param desc The descriptor.
   */
  public HTableDescriptor(final HTableDescriptor desc) {
    this(desc.name, desc);
  }

  /**
   * Construct a table descriptor by cloning the descriptor passed as a parameter
   * but using a different table name.
   * <p>
   * Makes a deep copy of the supplied descriptor.
   * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
   * @param name Table name.
   * @param desc The descriptor.
   */
  public HTableDescriptor(final TableName name, final HTableDescriptor desc) {
    super();
    setName(name);
    setMetaFlags(this.name);
    for (HColumnDescriptor c: desc.families.values()) {
      this.families.put(c.getName(), new HColumnDescriptor(c));
    }
    for (Map.Entry<Bytes, Bytes> e :
        desc.values.entrySet()) {
      setValue(e.getKey(), e.getValue());
    }
    for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
      this.configuration.put(e.getKey(), e.getValue());
    }
  }

  /*
   * Set meta flags on this table.
   * IS_ROOT_KEY is set if it's a -ROOT- table.
   * IS_META_KEY is set if it's either a -ROOT- or a hbase:meta table.
   * Called by constructors.
   * @param name
   */
  private void setMetaFlags(final TableName name) {
    setMetaRegion(isRootRegion() ||
        name.equals(TableName.META_TABLE_NAME));
  }

  /**
   * Check if the descriptor represents a <code> -ROOT- </code> region.
   *
   * @return true if this is a <code> -ROOT- </code> region
   */
  public boolean isRootRegion() {
    if (this.root == null) {
      this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
    }
    return this.root.booleanValue();
  }

  /**
   * <em>INTERNAL</em> Used to denote if the current table represents the
   * <code> -ROOT- </code> region. This is used internally by the
   * HTableDescriptor constructors
   *
   * @param isRoot true if this is the <code> -ROOT- </code> region
   */
  protected void setRootRegion(boolean isRoot) {
    // TODO: Make the value a boolean rather than String of boolean.
    setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
  }

  /**
   * Checks if this table is the <code> hbase:meta </code>
   * region.
   *
   * @return true if this table is the <code> hbase:meta </code>
   * region
   */
  public boolean isMetaRegion() {
    if (this.meta == null) {
      this.meta = calculateIsMetaRegion();
    }
    return this.meta.booleanValue();
  }

  private synchronized Boolean calculateIsMetaRegion() {
    byte [] value = getValue(IS_META_KEY);
    return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
  }

  private boolean isSomething(final Bytes key,
      final boolean valueIfNull) {
    byte [] value = getValue(key);
    if (value != null) {
      return Boolean.valueOf(Bytes.toString(value));
    }
    return valueIfNull;
  }

  /**
   * <em>INTERNAL</em> Used to denote if the current table represents the
   * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
   * internally by the HTableDescriptor constructors
   *
   * @param isMeta true if it's either the <code> -ROOT- </code> or
   * <code> hbase:meta </code> region
   */
  protected void setMetaRegion(boolean isMeta) {
    setValue(IS_META_KEY, isMeta? TRUE: FALSE);
  }

  /**
   * Checks if the table is a <code>hbase:meta</code> table
   *
   * @return true if the table is the <code> hbase:meta </code> region.
   */
  public boolean isMetaTable() {
    return isMetaRegion() && !isRootRegion();
  }

  /**
   * Getter for accessing the metadata associated with the key
   *
   * @param key The key.
   * @return The value.
   * @see #values
   */
  public byte[] getValue(byte[] key) {
    return getValue(new Bytes(key));
  }

  private byte[] getValue(final Bytes key) {
    Bytes ibw = values.get(key);
    if (ibw == null)
      return null;
    return ibw.get();
  }

  /**
   * Getter for accessing the metadata associated with the key
   *
   * @param key The key.
   * @return The value.
   * @see #values
   */
  public String getValue(String key) {
    byte[] value = getValue(Bytes.toBytes(key));
    if (value == null)
      return null;
    return Bytes.toString(value);
  }

  /**
   * Getter for fetching an unmodifiable {@link #values} map.
   *
   * @return unmodifiable map {@link #values}.
   * @see #values
   */
  public Map<Bytes, Bytes> getValues() {
    // shallow pointer copy
    return Collections.unmodifiableMap(values);
  }

  /**
   * Setter for storing metadata as a (key, value) pair in the {@link #values} map
   *
   * @param key The key.
   * @param value The value.
   * @see #values
   */
  public HTableDescriptor setValue(byte[] key, byte[] value) {
    setValue(new Bytes(key), new Bytes(value));
    return this;
  }

  /*
   * @param key The key.
   * @param value The value.
   */
  private HTableDescriptor setValue(final Bytes key,
      final String value) {
    setValue(key, new Bytes(Bytes.toBytes(value)));
    return this;
  }

  /*
   * Setter for storing metadata as a (key, value) pair in the {@link #values} map
   *
   * @param key The key.
   * @param value The value.
   */
  public HTableDescriptor setValue(final Bytes key, final Bytes value) {
    if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
      boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
      LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
          "use " + DURABILITY + " instead");
      setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABILITY);
      return this;
    }
    values.put(key, value);
    return this;
  }

  /**
   * Setter for storing metadata as a (key, value) pair in the {@link #values} map
   *
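   * A small sketch (the attribute key below is purely illustrative):
   * <pre>{@code
   * htd.setValue("MY_APP_VERSION", "1.7");      // store an arbitrary table attribute
   * String v = htd.getValue("MY_APP_VERSION");  // read it back
   * htd.setValue("MY_APP_VERSION", null);       // a null value removes the key
   * }</pre>
   *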
   * @param key The key.
   * @param value The value.
   * @see #values
   */
  public HTableDescriptor setValue(String key, String value) {
    if (value == null) {
      remove(key);
    } else {
      setValue(Bytes.toBytes(key), Bytes.toBytes(value));
    }
    return this;
  }

  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters.
   */
  public void remove(final String key) {
    remove(new Bytes(Bytes.toBytes(key)));
  }

  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters.
   */
  public void remove(Bytes key) {
    values.remove(key);
  }

  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters.
   */
  public void remove(final byte [] key) {
    remove(new Bytes(key));
  }

  /**
   * Check if the readOnly flag of the table is set. If the readOnly flag is
   * set, the contents of the table can only be read, not modified.
   *
   * @return true if all columns in the table should be read only
   */
  public boolean isReadOnly() {
    return isSomething(READONLY_KEY, DEFAULT_READONLY);
  }

  /**
   * Setting the table as read only sets all the columns in the table as read
   * only. By default all tables are modifiable, but if the readOnly flag is
   * set to true then the contents of the table can only be read but not modified.
   *
   * @param readOnly True if all of the columns in the table should be read
   * only.
   */
  public HTableDescriptor setReadOnly(final boolean readOnly) {
    return setValue(READONLY_KEY, readOnly? TRUE: FALSE);
  }

  /**
   * Check if the compaction enable flag of the table is true. If the flag is
   * false, no minor/major compactions will actually be performed.
   *
   * @return true if compaction is enabled for the table
   */
  public boolean isCompactionEnabled() {
    return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
  }

  /**
   * Set the table compaction enable flag.
   *
   * @param isEnable True if compaction should be enabled.
   */
  public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
    setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
    return this;
  }

  /**
   * Check if the normalization enable flag of the table is true. If the flag is
   * false, the region normalizer won't attempt to normalize this table.
   *
   * @return true if region normalization is enabled for this table
   */
  public boolean isNormalizationEnabled() {
    return isSomething(NORMALIZATION_ENABLED_KEY, DEFAULT_NORMALIZATION_ENABLED);
  }

  /**
   * Set the table normalization enable flag.
   *
   * @param isEnable True if normalization should be enabled.
   */
  public HTableDescriptor setNormalizationEnabled(final boolean isEnable) {
    setValue(NORMALIZATION_ENABLED_KEY, isEnable ? TRUE : FALSE);
    return this;
  }

  /**
   * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
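   * <p>
   * For example:
   * <pre>{@code
   * htd.setDurability(Durability.ASYNC_WAL); // WAL edits for this table are synced asynchronously
   * }</pre>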
   * @param durability enum value
   */
  public HTableDescriptor setDurability(Durability durability) {
    this.durability = durability;
    setValue(DURABILITY_KEY, durability.name());
    return this;
  }

  /**
   * Returns the durability setting for the table.
   * @return durability setting for the table.
   */
  public Durability getDurability() {
    if (this.durability == null) {
      byte[] durabilityValue = getValue(DURABILITY_KEY);
      if (durabilityValue == null) {
        this.durability = DEFAULT_DURABILITY;
      } else {
        try {
          this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
        } catch (IllegalArgumentException ex) {
          LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
            + " is not known. Durability:" + Bytes.toString(durabilityValue));
          this.durability = DEFAULT_DURABILITY;
        }
      }
    }
    return this.durability;
  }

  /**
   * Get the name of the table
   *
   * @return TableName
   */
  public TableName getTableName() {
    return name;
  }

  /**
   * Get the name of the table as a byte array.
   *
   * @return name of table
   * @deprecated Use {@link #getTableName()} instead
   */
  @Deprecated
  public byte[] getName() {
    return name.getName();
  }

  /**
   * Get the name of the table as a String
   *
   * @return name of table as a String
   */
  public String getNameAsString() {
    return name.getNameAsString();
  }

  /**
   * This sets the class associated with the region split policy which
   * determines when a region split should occur.  The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
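   * <p>
   * A hedged example; ConstantSizeRegionSplitPolicy ships with HBase, but any concrete
   * RegionSplitPolicy subclass can be named here:
   * <pre>{@code
   * htd.setRegionSplitPolicyClassName(
   *     "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");
   * }</pre>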
   * @param clazz the class name
   */
  public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
    setValue(SPLIT_POLICY, clazz);
    return this;
  }

  /**
   * This gets the class associated with the region split policy which
   * determines when a region split should occur.  The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
   *
   * @return the class name of the region split policy for this table.
   * If this returns null, the default split policy is used.
   */
  public String getRegionSplitPolicyClassName() {
    return getValue(SPLIT_POLICY);
  }

  /**
   * Set the name of the table.
   *
   * @param name name of table
   */
  @Deprecated
  public HTableDescriptor setName(byte[] name) {
    setName(TableName.valueOf(name));
    return this;
  }

  @Deprecated
  public HTableDescriptor setName(TableName name) {
    this.name = name;
    setMetaFlags(this.name);
    return this;
  }

  /**
   * Returns the maximum size up to which a region can grow, beyond which a region
   * split is triggered. The region size is represented by the size of the biggest
   * store file in that region.
   *
   * @return max hregion size for table, -1 if not set.
   *
   * @see #setMaxFileSize(long)
   */
  public long getMaxFileSize() {
    byte [] value = getValue(MAX_FILESIZE_KEY);
    if (value != null) {
      return Long.parseLong(Bytes.toString(value));
    }
    return -1;
  }

  /**
   * Sets the maximum size up to which a region can grow, beyond which a region
   * split is triggered. The region size is represented by the size of the biggest
   * store file in that region, i.e. if the biggest store file grows beyond the
   * maxFileSize, then the region split is triggered. This defaults to
   * {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
   * <p>
   * This is not an absolute value and might vary. If a single row exceeds
   * the maxFileSize, then the storeFileSize will be greater than maxFileSize, since
   * a single row cannot be split across multiple regions.
   * </p>
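   * <p>
   * A sketch (the 10 GB figure is illustrative, not a recommendation):
   * <pre>{@code
   * htd.setMaxFileSize(10L * 1024 * 1024 * 1024); // split once the largest store file passes ~10 GB
   * }</pre>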
   *
   * @param maxFileSize The maximum file size that a store file can grow to
   * before a split is triggered.
   */
  public HTableDescriptor setMaxFileSize(long maxFileSize) {
    setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    return this;
  }

  /**
   * Returns the size of the memstore after which a flush to filesystem is triggered.
   *
   * @return memory cache flush size for each hregion, -1 if not set.
   *
   * @see #setMemStoreFlushSize(long)
   */
  public long getMemStoreFlushSize() {
    byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
    if (value != null) {
      return Long.parseLong(Bytes.toString(value));
    }
    return -1;
  }

  /**
   * Represents the maximum size of the memstore after which the contents of the
   * memstore are flushed to the filesystem. This defaults to
   * {@link #DEFAULT_MEMSTORE_FLUSH_SIZE} (128 MB).
   *
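   * A sketch (the 256 MB value is illustrative only):
   * <pre>{@code
   * htd.setMemStoreFlushSize(256L * 1024 * 1024); // flush each region's memstore at 256 MB
   * }</pre>
   *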
   * @param memstoreFlushSize memory cache flush size for each hregion
   */
  public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
    setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    return this;
  }

  /**
   * This sets the class associated with the flush policy which determines which stores
   * need to be flushed when flushing a region. The class used by default is defined in
   * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
   * @param clazz the class name
   */
  public HTableDescriptor setFlushPolicyClassName(String clazz) {
    setValue(FLUSH_POLICY, clazz);
    return this;
  }

  /**
   * This gets the class associated with the flush policy which determines which stores need to be
   * flushed when flushing a region. The class used by default is defined in
   * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
   * @return the class name of the flush policy for this table. If this returns null, the default
   *         flush policy is used.
   */
  public String getFlushPolicyClassName() {
    return getValue(FLUSH_POLICY);
  }

  /**
   * Adds a column family.
   * To update an existing family, use {@link #modifyFamily(HColumnDescriptor)} instead.
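   * <p>
   * For instance (the family name "cf" is illustrative):
   * <pre>{@code
   * htd.addFamily(new HColumnDescriptor("cf").setMaxVersions(3));
   * }</pre>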
   * @param family HColumnDescriptor of family to add.
   */
  public HTableDescriptor addFamily(final HColumnDescriptor family) {
    if (family.getName() == null || family.getName().length <= 0) {
      throw new IllegalArgumentException("Family name cannot be null or empty");
    }
    if (hasFamily(family.getName())) {
      throw new IllegalArgumentException("Family '" +
        family.getNameAsString() + "' already exists so cannot be added");
    }
    this.families.put(family.getName(), family);
    return this;
  }

  /**
   * Modifies the existing column family.
   * @param family HColumnDescriptor of family to update
   * @return this (for chained invocation)
   */
  public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
    if (family.getName() == null || family.getName().length <= 0) {
      throw new IllegalArgumentException("Family name cannot be null or empty");
    }
    if (!hasFamily(family.getName())) {
      throw new IllegalArgumentException("Column family '" + family.getNameAsString()
        + "' does not exist");
    }
    this.families.put(family.getName(), family);
    return this;
  }

  /**
   * Checks to see if this table contains the given column family
   * @param familyName Family name or column name.
   * @return true if the table contains the specified family name
   */
  public boolean hasFamily(final byte [] familyName) {
    return families.containsKey(familyName);
  }

  /**
   * @return Name of this table and then a map of all of the column family
   * descriptors.
   * @see #getNameAsString()
   */
  @Override
  public String toString() {
    StringBuilder s = new StringBuilder();
    s.append('\'').append(Bytes.toString(name.getName())).append('\'');
    s.append(getValues(true));
    for (HColumnDescriptor f : families.values()) {
      s.append(", ").append(f);
    }
    return s.toString();
  }

  /**
   * @return Name of this table and then a map of all of the column family
   * descriptors (with only the non-default column family attributes)
   */
  public String toStringCustomizedValues() {
    StringBuilder s = new StringBuilder();
    s.append('\'').append(Bytes.toString(name.getName())).append('\'');
    s.append(getValues(false));
    for(HColumnDescriptor hcd : families.values()) {
      s.append(", ").append(hcd.toStringCustomizedValues());
    }
    return s.toString();
  }

  /**
   * @return map of all table attributes formatted into string.
   */
  public String toStringTableAttributes() {
    return getValues(true).toString();
  }

  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    // step 1: set partitioning and pruning
    Set<Bytes> reservedKeys = new TreeSet<Bytes>();
    Set<Bytes> userKeys = new TreeSet<Bytes>();
    for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
      if (entry.getKey() == null || entry.getKey().get() == null) continue;
      String key = Bytes.toString(entry.getKey().get());
      // in this section, print out reserved keywords + coprocessor info
      if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
        userKeys.add(entry.getKey());
        continue;
      }
      // only print out IS_ROOT/IS_META if true
      String value = Bytes.toString(entry.getValue().get());
      if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
        if (Boolean.valueOf(value) == false) continue;
      }
      // see if a reserved key is a default value. may not want to print it out
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        reservedKeys.add(entry.getKey());
      }
    }

    // early exit optimization
    boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
    if (!hasAttributes && configuration.isEmpty()) return s;

    s.append(", {");
    // step 2: printing attributes
    if (hasAttributes) {
      s.append("TABLE_ATTRIBUTES => {");

      // print all reserved keys first
      boolean printCommaForAttr = false;
      for (Bytes k : reservedKeys) {
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(key);
        s.append(" => ");
        s.append('\'').append(value).append('\'');
      }

      if (!userKeys.isEmpty()) {
        // print all non-reserved, advanced config keys as a separate subset
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(HConstants.METADATA).append(" => ");
        s.append("{");
        boolean printCommaForCfg = false;
        for (Bytes k : userKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForCfg) s.append(", ");
          printCommaForCfg = true;
          s.append('\'').append(key).append('\'');
          s.append(" => ");
          s.append('\'').append(value).append('\'');
        }
        s.append("}");
      }
    }

    // step 3: printing all configuration:
    if (!configuration.isEmpty()) {
      if (hasAttributes) {
        s.append(", ");
      }
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfig = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfig) s.append(", ");
        printCommaForConfig = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(e.getValue()).append('\'');
      }
      s.append("}");
    }
    s.append("}"); // end METHOD
    return s;
  }

  /**
   * Compare the contents of the descriptor with another one passed as a parameter.
   * Checks if the obj passed is an instance of HTableDescriptor, and if yes then the
   * contents of the descriptors are compared.
   *
   * @return true if the contents of the two descriptors exactly match
   *
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (!(obj instanceof HTableDescriptor)) {
      return false;
    }
    return compareTo((HTableDescriptor)obj) == 0;
  }

  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    int result = this.name.hashCode();
    if (this.families.size() > 0) {
      for (HColumnDescriptor e: this.families.values()) {
        result ^= e.hashCode();
      }
    }
    result ^= values.hashCode();
    result ^= configuration.hashCode();
    return result;
  }

  // Comparable

  /**
   * Compares the descriptor with another descriptor which is passed as a parameter.
   * This compares the content of the two descriptors and not the reference.
   *
   * @return 0 if the contents of the descriptors match exactly; a non-zero
   *         value otherwise
   */
  @Override
  public int compareTo(final HTableDescriptor other) {
    int result = this.name.compareTo(other.name);
    if (result == 0) {
      result = families.size() - other.families.size();
    }
    if (result == 0) {
      for (Iterator<HColumnDescriptor> it = families.values().iterator(),
          it2 = other.families.values().iterator(); it.hasNext(); ) {
        result = it.next().compareTo(it2.next());
        if (result != 0) {
          break;
        }
      }
    }
    if (result == 0) {
      // punt on comparison for ordering, just calculate difference
      result = this.values.hashCode() - other.values.hashCode();
      if (result < 0)
        result = -1;
      else if (result > 0)
        result = 1;
    }
    if (result == 0) {
      result = this.configuration.hashCode() - other.configuration.hashCode();
      if (result < 0)
        result = -1;
      else if (result > 0)
        result = 1;
    }
    return result;
  }

  /**
   * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
   * of all the column families of the table.
   *
   * @return Immutable collection of {@link HColumnDescriptor} of all the
   * column families.
   */
  public Collection<HColumnDescriptor> getFamilies() {
    return Collections.unmodifiableCollection(this.families.values());
  }

  /**
   * Returns the configured replicas per region
   */
  public int getRegionReplication() {
    byte[] val = getValue(REGION_REPLICATION_KEY);
    if (val == null || val.length == 0) {
      return DEFAULT_REGION_REPLICATION;
    }
    return Integer.parseInt(Bytes.toString(val));
  }

  /**
   * Sets the number of replicas per region.
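   * <p>
   * For example, one primary plus two read replicas per region:
   * <pre>{@code
   * htd.setRegionReplication(3);
   * }</pre>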
   * @param regionReplication the replication factor per region
   */
  public HTableDescriptor setRegionReplication(int regionReplication) {
    setValue(REGION_REPLICATION_KEY,
        new Bytes(Bytes.toBytes(Integer.toString(regionReplication))));
    return this;
  }

  /**
   * @return true if the read-replicas memstore replication is enabled.
   */
  public boolean hasRegionMemstoreReplication() {
    return isSomething(REGION_MEMSTORE_REPLICATION_KEY, DEFAULT_REGION_MEMSTORE_REPLICATION);
  }

  /**
   * Enable or disable the memstore replication from the primary region to the replicas.
   * The replication will be used only for meta operations (e.g. flush, compaction, ...)
   *
   * @param memstoreReplication true if the new data written to the primary region
   *                                 should be replicated.
   *                            false if the secondaries can tolerate seeing new
   *                                  data only when the primary flushes the memstore.
   */
  public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) {
    setValue(REGION_MEMSTORE_REPLICATION_KEY, memstoreReplication ? TRUE : FALSE);
    // If the memstore replication is set up, we do not have to wait for observing a flush event
    // from the primary before starting to serve reads, because gaps from replication do not apply
    setConfiguration(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
      Boolean.toString(memstoreReplication));
    return this;
  }

  /**
   * Returns all the column family names of the current table. The map of
   * HTableDescriptor contains mapping of family name to HColumnDescriptors.
   * This returns all the keys of the family map which represents the column
   * family names of the table.
   *
   * @return Immutable sorted set of the keys of the families.
   */
  public Set<byte[]> getFamiliesKeys() {
    return Collections.unmodifiableSet(this.families.keySet());
  }

  /**
   * Returns an array of all the {@link HColumnDescriptor} of the column families
   * of the table.
   *
   * @return Array of all the HColumnDescriptors of the current table
   *
   * @see #getFamilies()
   */
  public HColumnDescriptor[] getColumnFamilies() {
    Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
    return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
  }


  /**
   * Returns the HColumnDescriptor for a specific column family with name as
   * specified by the parameter column.
   *
   * @param column Column family name
   * @return Column descriptor for the passed family name, or null if the
   * family does not exist.
   */
  public HColumnDescriptor getFamily(final byte [] column) {
    return this.families.get(column);
  }


  /**
   * Removes the HColumnDescriptor with name specified by the parameter column
   * from the table descriptor
   *
   * @param column Name of the column family to be removed.
   * @return Column descriptor of the removed family, or null if the family
   * did not exist.
   */
  public HColumnDescriptor removeFamily(final byte [] column) {
    return this.families.remove(column);
  }

  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
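   * <p>
   * A sketch; the MultiRowMutationEndpoint below ships with HBase, but any observer or
   * endpoint class name can be given:
   * <pre>{@code
   * htd.addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint");
   * }</pre>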
   * @param className Full class name.
   * @throws IOException
   */
  public HTableDescriptor addCoprocessor(String className) throws IOException {
    addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
    return this;
  }

  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
   * @param className Full class name.
   * @param jarFilePath Path of the jar file. If it's null, the class will be
   * loaded from the default classloader.
   * @param priority Priority
   * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
   * @throws IOException
   */
  public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
                             int priority, final Map<String, String> kvs)
  throws IOException {
    checkHasCoprocessor(className);

    // Validate parameter kvs and then add key/values to kvString.
    StringBuilder kvString = new StringBuilder();
    if (kvs != null) {
      for (Map.Entry<String, String> e: kvs.entrySet()) {
        if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
          throw new IOException("Illegal parameter key = " + e.getKey());
        }
        if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
          throw new IOException("Illegal parameter (" + e.getKey() +
              ") value = " + e.getValue());
        }
        if (kvString.length() != 0) {
          kvString.append(',');
        }
        kvString.append(e.getKey());
        kvString.append('=');
        kvString.append(e.getValue());
      }
    }

    String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
        "|" + className + "|" + Integer.toString(priority) + "|" +
        kvString.toString();
    return addCoprocessorToMap(value);
  }

  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
   * @param specStr The coprocessor specification, all in one String, formatted so it matches
   * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
   * @throws IOException
   */
  public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
    String className = getCoprocessorClassNameFromSpecStr(specStr);
    if (className == null) {
      throw new IllegalArgumentException("Format does not match " +
        HConstants.CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr);
    }
    checkHasCoprocessor(className);
    return addCoprocessorToMap(specStr);
  }

  private void checkHasCoprocessor(final String className) throws IOException {
    if (hasCoprocessor(className)) {
      throw new IOException("Coprocessor " + className + " already exists.");
    }
  }

  /**
   * Add coprocessor to values Map
   * @param specStr The coprocessor specification, all in one String, formatted so it matches
   * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
   * @return Returns <code>this</code>
   */
  private HTableDescriptor addCoprocessorToMap(final String specStr) {
    if (specStr == null) return this;
    // generate a coprocessor key
    int maxCoprocessorNumber = 0;
    Matcher keyMatcher;
    for (Map.Entry<Bytes, Bytes> e: this.values.entrySet()) {
      keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
    }
    maxCoprocessorNumber++;
    String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
    this.values.put(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
    return this;
  }

  /**
   * Check if the table has an attached co-processor represented by the name className
   *
   * @param classNameToMatch - Class name of the co-processor
   * @return true if the table has the given co-processor
   */
  public boolean hasCoprocessor(String classNameToMatch) {
    Matcher keyMatcher;
    for (Map.Entry<Bytes, Bytes> e :
        this.values.entrySet()) {
      keyMatcher =
          HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
              Bytes.toString(e.getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
      if (className == null) continue;
      if (className.equals(classNameToMatch.trim())) {
        return true;
      }
    }
    return false;
  }

  /**
   * Return the list of attached co-processors, represented by their class names.
   *
   * @return The list of co-processor class names
   */
  public List<String> getCoprocessors() {
    List<String> result = new ArrayList<String>();
    Matcher keyMatcher;
    for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
      keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
      if (className == null) continue;
      result.add(className); // classname is the 2nd field
    }
    return result;
  }

  /**
   * @param spec String formatted as per {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
   * @return Class name parsed from the passed in <code>spec</code>, or null if it does not match
   */
  private static String getCoprocessorClassNameFromSpecStr(final String spec) {
    Matcher matcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    // Classname is the 2nd field
    return matcher != null && matcher.matches()? matcher.group(2).trim(): null;
  }

  /**
   * Remove a coprocessor from those set on the table
   * @param className Class name of the co-processor
   */
  public void removeCoprocessor(String className) {
    Bytes match = null;
    Matcher keyMatcher;
    Matcher valueMatcher;
    for (Map.Entry<Bytes, Bytes> e : this.values
        .entrySet()) {
      keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
          .getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
          .toString(e.getValue().get()));
      if (!valueMatcher.matches()) {
        continue;
      }
      // get className and compare
      String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
      // remove the CP if it is present
      if (clazz.equals(className.trim())) {
        match = e.getKey();
        break;
      }
    }
    // if we found a match, remove it
    if (match != null)
      remove(match);
  }

  /**
   * Returns the {@link Path} object representing the table directory under
   * path rootdir
   *
   * Deprecated: use FSUtils.getTableDir() instead.
   *
   * @param rootdir qualified path of HBase root directory
   * @param tableName name of table
   * @return {@link Path} for table
   */
  @Deprecated
  public static Path getTableDir(Path rootdir, final byte [] tableName) {
    //This is bad. I had to mirror code from FSUtils.getTableDir since
    //there is no module dependency between hbase-client and hbase-server
    TableName name = TableName.valueOf(tableName);
    return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
              new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
  }

  /** Table descriptor for the <code>hbase:meta</code> catalog table.
   * Deprecated: use TableDescriptors#get(TableName.META_TABLE) or
   * Admin#getTableDescriptor(TableName.META_TABLE) instead.
   */
  @Deprecated
  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
      TableName.META_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
              // Ten is an arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true),
          new HColumnDescriptor(HConstants.TABLE_FAMILY)
              // Ten is an arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });

  static {
    try {
      META_TABLEDESC.addCoprocessor(
          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
          null, Coprocessor.PRIORITY_SYSTEM, null);
    } catch (IOException ex) {
      //LOG.warn("exception in loading coprocessor for the hbase:meta table");
      throw new RuntimeException(ex);
    }
  }

  public static final String NAMESPACE_FAMILY_INFO = "info";
  public static final byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  public static final byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /** Table descriptor for the namespace table */
  public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
      TableName.NAMESPACE_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
              // Ten is an arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });

  @Deprecated
  public HTableDescriptor setOwner(User owner) {
    return setOwnerString(owner != null ? owner.getShortName() : null);
  }

  // used by admin.rb:alter(table_name,*args) to update owner.
  @Deprecated
  public HTableDescriptor setOwnerString(String ownerString) {
    if (ownerString != null) {
      setValue(OWNER_KEY, ownerString);
    } else {
      remove(OWNER_KEY);
    }
    return this;
  }

  @Deprecated
  public String getOwnerString() {
    if (getValue(OWNER_KEY) != null) {
      return Bytes.toString(getValue(OWNER_KEY));
    }
    // Note that every table should have an owner (i.e. should have OWNER_KEY set).
    // hbase:meta and -ROOT- should return system user as owner, not null (see
    // MasterFileSystem.java:bootstrap()).
    return null;
  }

  /**
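   * Serializes this descriptor. A round-trip sketch (variable names are illustrative):
   * <pre>{@code
   * byte[] pb = htd.toByteArray();
   * HTableDescriptor copy = HTableDescriptor.parseFrom(pb);
   * assert htd.equals(copy);
   * }</pre>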
   * @return This instance serialized with pb with the pb magic prefix
   * @see #parseFrom(byte[])
   */
  public byte[] toByteArray() {
    return ProtobufUtil.prependPBMagic(ProtobufUtil.convertToTableSchema(this).toByteArray());
  }

  /**
   * @param bytes A pb serialized {@link HTableDescriptor} instance with the pb magic prefix
   * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
   * @throws DeserializationException
   * @throws IOException
   * @see #toByteArray()
   */
  public static HTableDescriptor parseFrom(final byte [] bytes)
  throws DeserializationException, IOException {
    if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
      throw new DeserializationException("Expected PB encoded HTableDescriptor");
    }
    int pblen = ProtobufUtil.lengthOfPBMagic();
    TableSchema.Builder builder = TableSchema.newBuilder();
    TableSchema ts;
    try {
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      ts = builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return ProtobufUtil.convertToHTableDesc(ts);
  }

  /**
   * Getter for accessing the configuration value by key
   */
  public String getConfigurationValue(String key) {
    return configuration.get(key);
  }

  /**
   * Getter for fetching an unmodifiable {@link #configuration} map.
   */
  public Map<String, String> getConfiguration() {
    // shallow pointer copy
    return Collections.unmodifiableMap(configuration);
  }

  /**
   * Setter for storing a configuration setting in the {@link #configuration} map.
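   * <p>
   * A sketch (the key and value are illustrative; any per-table override works the same way):
   * <pre>{@code
   * htd.setConfiguration("hbase.hstore.compaction.min", "5"); // table-level config override
   * }</pre>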
   * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
   * @param value String value. If null, removes the setting.
   */
  public HTableDescriptor setConfiguration(String key, String value) {
    if (value == null) {
      removeConfiguration(key);
    } else {
      configuration.put(key, value);
    }
    return this;
  }

  /**
   * Remove a config setting represented by the key from the {@link #configuration} map
   */
  public void removeConfiguration(final String key) {
    configuration.remove(key);
  }
}