/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.regex.Matcher;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * HTableDescriptor contains the details about an HBase table, such as the descriptors of
 * all the column families, whether the table is a catalog table (<code> -ROOT- </code> or
 * <code> hbase:meta </code>), whether the table is read only, the maximum size of the
 * memstore, when the region split should occur, the coprocessors associated with it, etc.
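 * <p>
 * A minimal usage sketch (the table and family names below are illustrative only):
 * <pre>
 * HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
 * htd.addFamily(new HColumnDescriptor("cf"));    // a table needs at least one family
 * htd.setMaxFileSize(10L * 1024 * 1024 * 1024);  // optional table-level attributes
 * </pre>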
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HTableDescriptor implements Comparable<HTableDescriptor> {

  private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);

  private TableName name = null;

  /**
   * A map which holds the metadata information of the table. This metadata
   * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
   * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
   */
  private final Map<Bytes, Bytes> values =
      new HashMap<Bytes, Bytes>();

  /**
   * A map which holds the configuration specific to the table.
   * The keys of the map have the same names as config keys and override the defaults with
   * table-specific settings. Example usage may be for compactions, etc.
   */
  private final Map<String, String> configuration = new HashMap<String, String>();

  public static final String SPLIT_POLICY = "SPLIT_POLICY";

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes the maximum size of the store file after which
   * a region split occurs
   *
   * @see #getMaxFileSize()
   */
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final Bytes MAX_FILESIZE_KEY =
      new Bytes(Bytes.toBytes(MAX_FILESIZE));

  public static final String OWNER = "OWNER";
  public static final Bytes OWNER_KEY =
      new Bytes(Bytes.toBytes(OWNER));

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if the table is Read Only
   *
   * @see #isReadOnly()
   */
  public static final String READONLY = "READONLY";
  private static final Bytes READONLY_KEY =
      new Bytes(Bytes.toBytes(READONLY));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is compaction enabled
   *
   * @see #isCompactionEnabled()
   */
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final Bytes COMPACTION_ENABLED_KEY =
      new Bytes(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which represents the maximum size of the memstore after which
   * its contents are flushed onto the disk
   *
   * @see #getMemStoreFlushSize()
   */
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final Bytes MEMSTORE_FLUSHSIZE_KEY =
      new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  public static final String FLUSH_POLICY = "FLUSH_POLICY";

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if the table is a -ROOT- region or not
   *
   * @see #isRootRegion()
   */
  public static final String IS_ROOT = "IS_ROOT";
  private static final Bytes IS_ROOT_KEY =
      new Bytes(Bytes.toBytes(IS_ROOT));

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if it is a catalog table, either
   * <code> hbase:meta </code> or <code> -ROOT- </code>
   *
   * @see #isMetaRegion()
   */
  public static final String IS_META = "IS_META";
  private static final Bytes IS_META_KEY =
      new Bytes(Bytes.toBytes(IS_META));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes if the deferred log flush option is enabled.
   * @deprecated Use {@link #DURABILITY} instead.
   */
  @Deprecated
  public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
  @Deprecated
  private static final Bytes DEFERRED_LOG_FLUSH_KEY =
      new Bytes(Bytes.toBytes(DEFERRED_LOG_FLUSH));

  /**
   * <em>INTERNAL</em> {@link Durability} setting for the table.
   */
  public static final String DURABILITY = "DURABILITY";
  private static final Bytes DURABILITY_KEY =
      new Bytes(Bytes.toBytes("DURABILITY"));

  /**
   * <em>INTERNAL</em> number of region replicas for the table.
   */
  public static final String REGION_REPLICATION = "REGION_REPLICATION";
  private static final Bytes REGION_REPLICATION_KEY =
      new Bytes(Bytes.toBytes(REGION_REPLICATION));

  /**
   * <em>INTERNAL</em> flag to indicate whether or not the memstore should be replicated
   * for read-replicas (CONSISTENCY =&gt; TIMELINE).
   */
  public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
  private static final Bytes REGION_MEMSTORE_REPLICATION_KEY =
      new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));

  /**
   * <em>INTERNAL</em> Used by shell/rest interface to access this metadata
   * attribute which denotes if the table should be treated by region normalizer.
   *
   * @see #isNormalizationEnabled()
   */
  public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
  private static final Bytes NORMALIZATION_ENABLED_KEY =
      new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));

  /** Default durability for HTD is USE_DEFAULT, which defaults to the HBase-global default value */
  private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;

  /*
   *  The below are ugly but better than creating them each time till we
   *  replace booleans being saved as Strings with plain booleans.  Need a
   *  migration script to do this.  TODO.
   */
  private static final Bytes FALSE =
      new Bytes(Bytes.toBytes(Boolean.FALSE.toString()));

  private static final Bytes TRUE =
      new Bytes(Bytes.toBytes(Boolean.TRUE.toString()));

  private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;

  /**
   * Constant that denotes whether the table is READONLY by default; the default is false.
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes whether the table is normalized by default.
   */
  public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;

  /**
   * Constant that denotes the default maximum size of the memstore, after which
   * its contents are flushed to the store files.
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;

  private final static Map<String, String> DEFAULT_VALUES
    = new HashMap<String, String>();
  private final static Set<Bytes> RESERVED_KEYWORDS
      = new HashSet<Bytes>();

  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
        String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
        String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
        String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    for (String s : DEFAULT_VALUES.keySet()) {
      RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
    }
    RESERVED_KEYWORDS.add(IS_ROOT_KEY);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }

  /**
   * Cache of whether this is a meta table or not.
   */
  private volatile Boolean meta = null;
  /**
   * Cache of whether this is root table or not.
   */
  private volatile Boolean root = null;

  /**
   * Durability setting for the table
   */
  private Durability durability = null;

  /**
   * Maps column family name to the respective HColumnDescriptors
   */
  private final Map<byte [], HColumnDescriptor> families =
    new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);

  /**
   * <em> INTERNAL </em> Private constructor used internally for creating table descriptors for
   * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
   */
  @InterfaceAudience.Private
  protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
    setName(name);
    for(HColumnDescriptor descriptor : families) {
      this.families.put(descriptor.getName(), descriptor);
    }
  }

  /**
   * <em> INTERNAL </em> Private constructor used internally for creating table descriptors for
   * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
   */
  protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
      Map<Bytes, Bytes> values) {
    setName(name);
    for(HColumnDescriptor descriptor : families) {
      this.families.put(descriptor.getName(), descriptor);
    }
    for (Map.Entry<Bytes, Bytes> entry :
        values.entrySet()) {
      setValue(entry.getKey(), entry.getValue());
    }
  }

  /**
   * Default constructor which constructs an empty object.
   * For deserializing an HTableDescriptor instance only.
   * @deprecated As of release 0.96 (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
   *             This was made protected in 2.0.0 and will be removed in HBase 3.0.0.
   *             Used by Writables and Writables are going away.
   */
  @Deprecated
  protected HTableDescriptor() {
    super();
  }

  /**
   * Construct a table descriptor specifying a TableName object
   * @param name Table name.
   * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  public HTableDescriptor(final TableName name) {
    super();
    setName(name);
  }

  /**
   * Construct a table descriptor specifying a byte array table name
   * @param name Table name.
   * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final byte[] name) {
    this(TableName.valueOf(name));
  }

  /**
   * Construct a table descriptor specifying a String table name
   * @param name Table name.
   * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final String name) {
    this(TableName.valueOf(name));
  }

  /**
   * Construct a table descriptor by cloning the descriptor passed as a parameter.
   * <p>
   * Makes a deep copy of the supplied descriptor.
   * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
   * @param desc The descriptor.
   */
  public HTableDescriptor(final HTableDescriptor desc) {
    super();
    setName(desc.name);
    setMetaFlags(this.name);
    for (HColumnDescriptor c: desc.families.values()) {
      this.families.put(c.getName(), new HColumnDescriptor(c));
    }
    for (Map.Entry<Bytes, Bytes> e :
        desc.values.entrySet()) {
      setValue(e.getKey(), e.getValue());
    }
    for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
      this.configuration.put(e.getKey(), e.getValue());
    }
  }

  /*
   * Set meta flags on this table.
   * IS_ROOT_KEY is set if it is a -ROOT- table.
   * IS_META_KEY is set if it is either a -ROOT- or a hbase:meta table.
   * Called by constructors.
   * @param name
   */
  private void setMetaFlags(final TableName name) {
    setMetaRegion(isRootRegion() ||
        name.equals(TableName.META_TABLE_NAME));
  }

  /**
   * Check if the descriptor represents a <code> -ROOT- </code> region.
   *
   * @return true if this is a <code> -ROOT- </code> region
   */
  public boolean isRootRegion() {
    if (this.root == null) {
      this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
    }
    return this.root.booleanValue();
  }

  /**
   * <em> INTERNAL </em> Used to denote if the current table represents
   * <code> -ROOT- </code> region. This is used internally by the
   * HTableDescriptor constructors
   *
   * @param isRoot true if this is the <code> -ROOT- </code> region
   */
  protected void setRootRegion(boolean isRoot) {
    // TODO: Make the value a boolean rather than String of boolean.
    setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
  }

  /**
   * Checks if this table is <code> hbase:meta </code>
   * region.
   *
   * @return true if this table is <code> hbase:meta </code>
   * region
   */
  public boolean isMetaRegion() {
    if (this.meta == null) {
      this.meta = calculateIsMetaRegion();
    }
    return this.meta.booleanValue();
  }

  private synchronized Boolean calculateIsMetaRegion() {
    byte [] value = getValue(IS_META_KEY);
    return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
  }

  private boolean isSomething(final Bytes key,
      final boolean valueIfNull) {
    byte [] value = getValue(key);
    if (value != null) {
      return Boolean.valueOf(Bytes.toString(value));
    }
    return valueIfNull;
  }

  /**
   * <em> INTERNAL </em> Used to denote if the current table represents
   * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
   * internally by the HTableDescriptor constructors
   *
   * @param isMeta true if it is either the <code> -ROOT- </code> or
   * <code> hbase:meta </code> region
   */
  protected void setMetaRegion(boolean isMeta) {
    setValue(IS_META_KEY, isMeta? TRUE: FALSE);
  }

  /**
   * Checks if the table is a <code>hbase:meta</code> table
   *
   * @return true if table is <code> hbase:meta </code> region.
   */
  public boolean isMetaTable() {
    return isMetaRegion() && !isRootRegion();
  }

  /**
   * Getter for accessing the metadata associated with the key
   *
   * @param key The key.
   * @return The value.
   * @see #values
   */
  public byte[] getValue(byte[] key) {
    return getValue(new Bytes(key));
  }

  private byte[] getValue(final Bytes key) {
    Bytes ibw = values.get(key);
    if (ibw == null)
      return null;
    return ibw.get();
  }

  /**
   * Getter for accessing the metadata associated with the key
   *
   * @param key The key.
   * @return The value.
   * @see #values
   */
  public String getValue(String key) {
    byte[] value = getValue(Bytes.toBytes(key));
    if (value == null)
      return null;
    return Bytes.toString(value);
  }

  /**
   * Getter for fetching an unmodifiable {@link #values} map.
   *
   * @return unmodifiable map {@link #values}.
   * @see #values
   */
  public Map<Bytes, Bytes> getValues() {
    // shallow pointer copy
    return Collections.unmodifiableMap(values);
  }

  /**
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * @param key The key.
   * @param value The value.
   * @see #values
   */
  public HTableDescriptor setValue(byte[] key, byte[] value) {
    setValue(new Bytes(key), new Bytes(value));
    return this;
  }

  /*
   * @param key The key.
   * @param value The value.
   */
  private HTableDescriptor setValue(final Bytes key,
      final String value) {
    setValue(key, new Bytes(Bytes.toBytes(value)));
    return this;
  }

  /*
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * @param key The key.
   * @param value The value.
   */
  public HTableDescriptor setValue(final Bytes key, final Bytes value) {
    if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
      boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
      LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
          "use " + DURABILITY + " instead");
      setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
      return this;
    }
    Matcher matcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(key.get()));
    if (matcher.matches()) {
      LOG.warn("Use addCoprocessor* methods to add a coprocessor instead");
    }
    values.put(key, value);
    return this;
  }

  /**
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * @param key The key.
   * @param value The value.
   * @see #values
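   * <p>
   * A short sketch (the key name is illustrative, not a reserved attribute):
   * <pre>
   * htd.setValue("APPLICATION", "billing");   // stored in the table's values map
   * String v = htd.getValue("APPLICATION");   // "billing"
   * htd.setValue("APPLICATION", null);        // a null value removes the key
   * </pre>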
   */
  public HTableDescriptor setValue(String key, String value) {
    if (value == null) {
      remove(key);
    } else {
      setValue(Bytes.toBytes(key), Bytes.toBytes(value));
    }
    return this;
  }

  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key of the entry to remove from the HTableDescriptor
   * parameters.
   */
  public void remove(final String key) {
    remove(new Bytes(Bytes.toBytes(key)));
  }

  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key of the entry to remove from the HTableDescriptor
   * parameters.
   */
  public void remove(Bytes key) {
    values.remove(key);
  }

  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key of the entry to remove from the HTableDescriptor
   * parameters.
   */
  public void remove(final byte [] key) {
    remove(new Bytes(key));
  }

  /**
   * Check if the readOnly flag of the table is set. If the readOnly flag is
   * set then the contents of the table can only be read, not modified.
   *
   * @return true if all columns in the table should be read only
   */
  public boolean isReadOnly() {
    return isSomething(READONLY_KEY, DEFAULT_READONLY);
  }

  /**
   * Setting the table as read only sets all the columns in the table as read
   * only. By default all tables are modifiable, but if the readOnly flag is
   * set to true then the contents of the table can only be read but not modified.
   *
   * @param readOnly True if all of the columns in the table should be read
   * only.
   */
  public HTableDescriptor setReadOnly(final boolean readOnly) {
    return setValue(READONLY_KEY, readOnly? TRUE: FALSE);
  }

  /**
   * Check if the compaction enable flag of the table is true. If the flag is
   * false then no minor/major compactions will be performed.
   *
   * @return true if table compaction enabled
   */
  public boolean isCompactionEnabled() {
    return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
  }

  /**
   * Setting the table compaction enable flag.
   *
   * @param isEnable True to enable compaction.
   */
  public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
    setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
    return this;
  }

  /**
   * Check if the normalization enable flag of the table is true. If the flag is
   * false then the region normalizer won't attempt to normalize this table.
   *
   * @return true if region normalization is enabled for this table
   */
  public boolean isNormalizationEnabled() {
    return isSomething(NORMALIZATION_ENABLED_KEY, DEFAULT_NORMALIZATION_ENABLED);
  }

  /**
   * Setting the table normalization enable flag.
   *
   * @param isEnable True to enable normalization.
   */
  public HTableDescriptor setNormalizationEnabled(final boolean isEnable) {
    setValue(NORMALIZATION_ENABLED_KEY, isEnable ? TRUE : FALSE);
    return this;
  }

  /**
   * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
   * @param durability enum value
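   * <p>
   * For example, a table that can tolerate asynchronous WAL sync might use:
   * <pre>
   * htd.setDurability(Durability.ASYNC_WAL);
   * </pre>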
   */
  public HTableDescriptor setDurability(Durability durability) {
    this.durability = durability;
    setValue(DURABILITY_KEY, durability.name());
    return this;
  }

  /**
   * Returns the durability setting for the table.
   * @return durability setting for the table.
   */
  public Durability getDurability() {
    if (this.durability == null) {
      byte[] durabilityValue = getValue(DURABILITY_KEY);
      if (durabilityValue == null) {
        this.durability = DEFAULT_DURABLITY;
      } else {
        try {
          this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
        } catch (IllegalArgumentException ex) {
          LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
            + " is not known. Durability:" + Bytes.toString(durabilityValue));
          this.durability = DEFAULT_DURABLITY;
        }
      }
    }
    return this.durability;
  }

  /**
   * Get the name of the table
   *
   * @return TableName
   */
  public TableName getTableName() {
    return name;
  }

  /**
   * Get the name of the table as a byte array.
   *
   * @return name of table
   * @deprecated Use {@link #getTableName()} instead
   */
  @Deprecated
  public byte[] getName() {
    return name.getName();
  }

  /**
   * Get the name of the table as a String
   *
   * @return name of table as a String
   */
  public String getNameAsString() {
    return name.getNameAsString();
  }

  /**
   * This sets the class associated with the region split policy which
   * determines when a region split should occur.  The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
   * @param clazz the class name
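   * <p>
   * A sketch; the policy below ships with HBase, but any fully-qualified class name on the
   * region server classpath can be used:
   * <pre>
   * htd.setRegionSplitPolicyClassName(
   *   "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");
   * </pre>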
   */
  public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
    setValue(SPLIT_POLICY, clazz);
    return this;
  }

  /**
   * This gets the class associated with the region split policy which
   * determines when a region split should occur.  The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
   *
   * @return the class name of the region split policy for this table.
   * If this returns null, the default split policy is used.
   */
  public String getRegionSplitPolicyClassName() {
    return getValue(SPLIT_POLICY);
  }

  /**
   * Set the name of the table.
   *
   * @param name name of table
   */
  @Deprecated
  public HTableDescriptor setName(byte[] name) {
    setName(TableName.valueOf(name));
    return this;
  }

  @Deprecated
  public HTableDescriptor setName(TableName name) {
    this.name = name;
    setMetaFlags(this.name);
    return this;
  }

  /**
   * Returns the maximum size up to which a region can grow, after which a region
   * split is triggered. The region size is represented by the size of the biggest
   * store file in that region.
   *
   * @return max hregion size for table, -1 if not set.
   *
   * @see #setMaxFileSize(long)
   */
  public long getMaxFileSize() {
    byte [] value = getValue(MAX_FILESIZE_KEY);
    if (value != null) {
      return Long.parseLong(Bytes.toString(value));
    }
    return -1;
  }

  /**
   * Sets the maximum size up to which a region can grow, after which a region
   * split is triggered. The region size is represented by the size of the biggest
   * store file in that region, i.e. if the biggest store file grows beyond the
   * maxFileSize, then the region split is triggered. This defaults to
   * {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
   * <p>
   * This is not an absolute value and might vary. Assume that a single row exceeds
   * the maxFileSize then the storeFileSize will be greater than maxFileSize since
   * a single row cannot be split across multiple regions
   * </p>
   *
   * @param maxFileSize The maximum file size that a store file can grow to
   * before a split is triggered.
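   * <p>
   * A sketch of a typical setting (10 GB, an illustrative figure):
   * <pre>
   * htd.setMaxFileSize(10L * 1024 * 1024 * 1024);
   * </pre>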
   */
  public HTableDescriptor setMaxFileSize(long maxFileSize) {
    setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    return this;
  }

  /**
   * Returns the size of the memstore after which a flush to filesystem is triggered.
   *
   * @return memory cache flush size for each hregion, -1 if not set.
   *
   * @see #setMemStoreFlushSize(long)
   */
  public long getMemStoreFlushSize() {
    byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
    if (value != null) {
      return Long.parseLong(Bytes.toString(value));
    }
    return -1;
  }

  /**
   * Sets the maximum size of the memstore after which the contents of the
   * memstore are flushed to the filesystem. This defaults to a size of 128 MB
   * ({@link #DEFAULT_MEMSTORE_FLUSH_SIZE}).
   *
   * @param memstoreFlushSize memory cache flush size for each hregion
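   * <p>
   * For example, flushing at 256 MB instead of the default (an illustrative choice):
   * <pre>
   * htd.setMemStoreFlushSize(256L * 1024 * 1024);
   * </pre>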
   */
  public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
    setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    return this;
  }

  /**
   * This sets the class associated with the flush policy which determines which stores
   * need to be flushed when flushing a region. The class used by default is defined in
   * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
   * @param clazz the class name
   */
  public HTableDescriptor setFlushPolicyClassName(String clazz) {
    setValue(FLUSH_POLICY, clazz);
    return this;
  }

  /**
   * This gets the class associated with the flush policy which determines which stores need to be
   * flushed when flushing a region. The class used by default is defined in
   * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
   * @return the class name of the flush policy for this table. If this returns null, the default
   *         flush policy is used.
   */
  public String getFlushPolicyClassName() {
    return getValue(FLUSH_POLICY);
  }

  /**
   * Adds a column family.
   * To update an existing family, use {@link #modifyFamily(HColumnDescriptor)} instead.
   * @param family HColumnDescriptor of family to add.
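   * <p>
   * A short sketch (the family name is illustrative):
   * <pre>
   * htd.addFamily(new HColumnDescriptor("cf").setMaxVersions(3));
   * </pre>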
   */
  public HTableDescriptor addFamily(final HColumnDescriptor family) {
    if (family.getName() == null || family.getName().length <= 0) {
      throw new IllegalArgumentException("Family name cannot be null or empty");
    }
    if (hasFamily(family.getName())) {
      throw new IllegalArgumentException("Family '" +
        family.getNameAsString() + "' already exists so cannot be added");
    }
    this.families.put(family.getName(), family);
    return this;
  }

  /**
   * Modifies the existing column family.
   * @param family HColumnDescriptor of family to update
   * @return this (for chained invocation)
   */
  public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
    if (family.getName() == null || family.getName().length <= 0) {
      throw new IllegalArgumentException("Family name cannot be null or empty");
    }
    if (!hasFamily(family.getName())) {
      throw new IllegalArgumentException("Column family '" + family.getNameAsString()
        + "' does not exist");
    }
    this.families.put(family.getName(), family);
    return this;
  }

  /**
   * Checks to see if this table contains the given column family
   * @param familyName Family name or column name.
   * @return true if the table contains the specified family name
   */
  public boolean hasFamily(final byte [] familyName) {
    return families.containsKey(familyName);
  }

  /**
   * @return Name of this table and then a map of all of the column family
   * descriptors.
   * @see #getNameAsString()
   */
  @Override
  public String toString() {
    StringBuilder s = new StringBuilder();
    s.append('\'').append(Bytes.toString(name.getName())).append('\'');
    s.append(getValues(true));
    for (HColumnDescriptor f : families.values()) {
      s.append(", ").append(f);
    }
    return s.toString();
  }

  /**
   * @return Name of this table and then a map of all of the column family
   * descriptors (with only the non-default column family attributes)
   */
  public String toStringCustomizedValues() {
    StringBuilder s = new StringBuilder();
    s.append('\'').append(Bytes.toString(name.getName())).append('\'');
    s.append(getValues(false));
    for(HColumnDescriptor hcd : families.values()) {
      s.append(", ").append(hcd.toStringCustomizedValues());
    }
    return s.toString();
  }

  /**
   * @return map of all table attributes formatted into string.
   */
  public String toStringTableAttributes() {
    return getValues(true).toString();
  }

  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    // step 1: set partitioning and pruning
    Set<Bytes> reservedKeys = new TreeSet<Bytes>();
    Set<Bytes> userKeys = new TreeSet<Bytes>();
    for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
      if (entry.getKey() == null || entry.getKey().get() == null) continue;
      String key = Bytes.toString(entry.getKey().get());
      // in this section, print out reserved keywords + coprocessor info
      if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
        userKeys.add(entry.getKey());
        continue;
      }
      // only print out IS_ROOT/IS_META if true
      String value = Bytes.toString(entry.getValue().get());
      if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
        if (Boolean.valueOf(value) == false) continue;
      }
      // see if a reserved key is a default value. may not want to print it out
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        reservedKeys.add(entry.getKey());
      }
    }

    // early exit optimization
    boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
    if (!hasAttributes && configuration.isEmpty()) return s;

    s.append(", {");
    // step 2: printing attributes
    if (hasAttributes) {
      s.append("TABLE_ATTRIBUTES => {");

      // print all reserved keys first
      boolean printCommaForAttr = false;
      for (Bytes k : reservedKeys) {
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(key);
        s.append(" => ");
        s.append('\'').append(value).append('\'');
      }

      if (!userKeys.isEmpty()) {
        // print all non-reserved, advanced config keys as a separate subset
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(HConstants.METADATA).append(" => ");
        s.append("{");
        boolean printCommaForCfg = false;
        for (Bytes k : userKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForCfg) s.append(", ");
          printCommaForCfg = true;
          s.append('\'').append(key).append('\'');
          s.append(" => ");
          s.append('\'').append(value).append('\'');
        }
        s.append("}");
      }
    }

    // step 3: printing all configuration:
    if (!configuration.isEmpty()) {
      if (hasAttributes) {
        s.append(", ");
      }
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfig = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfig) s.append(", ");
        printCommaForConfig = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(e.getValue()).append('\'');
      }
      s.append("}");
    }
    s.append("}"); // end METHOD
    return s;
  }

  /**
   * Compare the contents of the descriptor with another one passed as a parameter.
   * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
   * contents of the descriptors are compared.
   *
   * @return true if the contents of the two descriptors exactly match
   *
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (!(obj instanceof HTableDescriptor)) {
      return false;
    }
    return compareTo((HTableDescriptor)obj) == 0;
  }

  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    int result = this.name.hashCode();
    if (this.families.size() > 0) {
      for (HColumnDescriptor e: this.families.values()) {
        result ^= e.hashCode();
      }
    }
    result ^= values.hashCode();
    result ^= configuration.hashCode();
    return result;
  }

  // Comparable

  /**
   * Compares the descriptor with another descriptor which is passed as a parameter.
   * This compares the content of the two descriptors and not the reference.
   *
   * @return 0 if the contents of the descriptors exactly match; a non-zero
   *         value if there is a mismatch in the contents
   */
  @Override
  public int compareTo(final HTableDescriptor other) {
    int result = this.name.compareTo(other.name);
    if (result == 0) {
      result = families.size() - other.families.size();
    }
    if (result == 0) {
      for (Iterator<HColumnDescriptor> it = families.values().iterator(),
          it2 = other.families.values().iterator(); it.hasNext(); ) {
        result = it.next().compareTo(it2.next());
        if (result != 0) {
          break;
        }
      }
    }
    if (result == 0) {
      // punt on comparison for ordering, just calculate difference
      result = this.values.hashCode() - other.values.hashCode();
      if (result < 0)
        result = -1;
      else if (result > 0)
        result = 1;
    }
    if (result == 0) {
      result = this.configuration.hashCode() - other.configuration.hashCode();
      if (result < 0)
        result = -1;
      else if (result > 0)
        result = 1;
    }
    return result;
  }

  /**
   * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
   * of all the column families of the table.
   *
   * @return Immutable collection of {@link HColumnDescriptor} of all the
   * column families.
   */
  public Collection<HColumnDescriptor> getFamilies() {
    return Collections.unmodifiableCollection(this.families.values());
  }

  /**
   * Returns the configured replicas per region
   */
  public int getRegionReplication() {
    byte[] val = getValue(REGION_REPLICATION_KEY);
    if (val == null || val.length == 0) {
      return DEFAULT_REGION_REPLICATION;
    }
    return Integer.parseInt(Bytes.toString(val));
  }

  /**
   * Sets the number of replicas per region.
   * @param regionReplication the replication factor per region
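   * <p>
   * For example, one primary plus two read replicas per region:
   * <pre>
   * htd.setRegionReplication(3);
   * </pre>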
   */
  public HTableDescriptor setRegionReplication(int regionReplication) {
    setValue(REGION_REPLICATION_KEY,
        new Bytes(Bytes.toBytes(Integer.toString(regionReplication))));
    return this;
  }

  /**
   * @return true if the read-replicas memstore replication is enabled.
   */
  public boolean hasRegionMemstoreReplication() {
    return isSomething(REGION_MEMSTORE_REPLICATION_KEY, DEFAULT_REGION_MEMSTORE_REPLICATION);
  }

  /**
   * Enable or Disable the memstore replication from the primary region to the replicas.
   * The replication will be used only for meta operations (e.g. flush, compaction, ...)
   *
   * @param memstoreReplication true if the new data written to the primary region
   *                                 should be replicated.
   *                            false if the secondaries can tolerate receiving new
   *                                  data only when the primary flushes the memstore.
   */
  public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) {
    setValue(REGION_MEMSTORE_REPLICATION_KEY, memstoreReplication ? TRUE : FALSE);
    // If the memstore replication is setup, we do not have to wait for observing a flush event
    // from primary before starting to serve reads, because gaps from replication is not applicable
    setConfiguration(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
      Boolean.toString(memstoreReplication));
    return this;
  }

  /**
   * Returns all the column family names of the current table. The map of
   * HTableDescriptor contains mapping of family name to HColumnDescriptors.
   * This returns all the keys of the family map which represents the column
   * family names of the table.
   *
   * @return Immutable sorted set of the keys of the families.
   */
  public Set<byte[]> getFamiliesKeys() {
    return Collections.unmodifiableSet(this.families.keySet());
  }

  /**
   * Returns an array of all the {@link HColumnDescriptor} of the column families
   * of the table.
   *
   * @return Array of all the HColumnDescriptors of the current table
   *
   * @see #getFamilies()
   */
  public HColumnDescriptor[] getColumnFamilies() {
    Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
    return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
  }


  /**
   * Returns the HColumnDescriptor for a specific column family with name as
   * specified by the parameter column.
   *
   * @param column Column family name
   * @return Column descriptor for the passed family name, or null if no such
   * family exists.
   */
  public HColumnDescriptor getFamily(final byte [] column) {
    return this.families.get(column);
  }


  /**
   * Removes the HColumnDescriptor with name specified by the parameter column
   * from the table descriptor
   *
   * @param column Name of the column family to be removed.
   * @return Column descriptor of the removed family, or null if the family
   * was not present.
   */
  public HColumnDescriptor removeFamily(final byte [] column) {
    return this.families.remove(column);
  }

  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
   * @param className Full class name.
   * @throws IOException
   */
  public HTableDescriptor addCoprocessor(String className) throws IOException {
    addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
    return this;
  }

  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
   * @param className Full class name.
   * @param jarFilePath Path of the jar file. If it's null, the class will be
   * loaded from default classloader.
   * @param priority Priority
   * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
   * @throws IOException
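   * <p>
   * A sketch (the jar path and observer class name below are illustrative):
   * <pre>
   * htd.addCoprocessor("com.example.MyRegionObserver",
   *   new Path("hdfs:///coprocessors/my-observer.jar"),
   *   Coprocessor.PRIORITY_USER, null);
   * </pre>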
   */
  public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
                             int priority, final Map<String, String> kvs)
  throws IOException {
    checkHasCoprocessor(className);

    // Validate parameter kvs and then add key/values to kvString.
    StringBuilder kvString = new StringBuilder();
    if (kvs != null) {
      for (Map.Entry<String, String> e: kvs.entrySet()) {
        if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
          throw new IOException("Illegal parameter key = " + e.getKey());
        }
        if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
          throw new IOException("Illegal parameter (" + e.getKey() +
              ") value = " + e.getValue());
        }
        if (kvString.length() != 0) {
          kvString.append(',');
        }
        kvString.append(e.getKey());
        kvString.append('=');
        kvString.append(e.getValue());
      }
    }

    String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
        "|" + className + "|" + Integer.toString(priority) + "|" +
        kvString.toString();
    return addCoprocessorToMap(value);
  }

  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
   * @param specStr The Coprocessor specification all in one String formatted so it matches
   * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
   * @throws IOException
   */
  // Pity about ugly method name. addCoprocessor(String) already taken above.
  public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
    String className = getCoprocessorClassNameFromSpecStr(specStr);
    if (className == null) {
      throw new IllegalArgumentException("Format does not match " +
        HConstants.CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr);
    }
    checkHasCoprocessor(className);
    return addCoprocessorToMap(specStr);
  }

  private void checkHasCoprocessor(final String className) throws IOException {
    if (hasCoprocessor(className)) {
      throw new IOException("Coprocessor " + className + " already exists.");
    }
  }

  /**
   * Add coprocessor to values Map
   * @param specStr The Coprocessor specification all in one String formatted so it matches
   * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
   * @return Returns <code>this</code>
   */
  private HTableDescriptor addCoprocessorToMap(final String specStr) {
    if (specStr == null) return this;
    // generate a coprocessor key
    int maxCoprocessorNumber = 0;
    Matcher keyMatcher;
    for (Map.Entry<Bytes, Bytes> e: this.values.entrySet()) {
      keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
    }
    maxCoprocessorNumber++;
    String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
    this.values.put(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
    return this;
  }

  /**
   * Check if the table has an attached co-processor represented by the name className
   *
   * @param classNameToMatch - Class name of the co-processor
   * @return true if the table has a co-processor className
   */
  public boolean hasCoprocessor(String classNameToMatch) {
    Matcher keyMatcher;
    for (Map.Entry<Bytes, Bytes> e :
        this.values.entrySet()) {
      keyMatcher =
          HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
              Bytes.toString(e.getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
      if (className == null) continue;
      if (className.equals(classNameToMatch.trim())) {
        return true;
      }
    }
    return false;
  }

  /**
   * Return the list of attached co-processors represented by their class names
   *
   * @return The list of co-processor classNames
   */
  public List<String> getCoprocessors() {
    List<String> result = new ArrayList<String>();
    Matcher keyMatcher;
    for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
      keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
      if (className == null) continue;
      result.add(className); // classname is the 2nd field
    }
    return result;
  }

  /**
   * @param spec String formatted as per {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
   * @return Class parsed from passed in <code>spec</code> or null if no match or classpath found
   */
  private static String getCoprocessorClassNameFromSpecStr(final String spec) {
    Matcher matcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    // Classname is the 2nd field
    return matcher != null && matcher.matches()? matcher.group(2).trim(): null;
  }

  /**
   * Remove a coprocessor from those set on the table
   * @param className Class name of the co-processor
   */
  public void removeCoprocessor(String className) {
    Bytes match = null;
    Matcher keyMatcher;
    Matcher valueMatcher;
    for (Map.Entry<Bytes, Bytes> e : this.values
        .entrySet()) {
      keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
          .getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
          .toString(e.getValue().get()));
      if (!valueMatcher.matches()) {
        continue;
      }
      // get className and compare
      String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
      // remove the CP if it is present
      if (clazz.equals(className.trim())) {
        match = e.getKey();
        break;
      }
    }
    // if we found a match, remove it
    if (match != null)
      remove(match);
  }

  /**
   * Returns the {@link Path} object representing the table directory under
   * path rootdir
   *
   * Deprecated use FSUtils.getTableDir() instead.
   *
   * @param rootdir qualified path of HBase root directory
   * @param tableName name of table
   * @return {@link Path} for table
   */
  @Deprecated
  public static Path getTableDir(Path rootdir, final byte [] tableName) {
    //This is bad I had to mirror code from FSUtils.getTableDir since
    //there is no module dependency between hbase-client and hbase-server
    TableName name = TableName.valueOf(tableName);
    return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
              new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
  }

  /** Table descriptor for <code>hbase:meta</code> catalog table.
   * Deprecated; use TableDescriptors#get(TableName.META_TABLE) or
   * Admin#getTableDescriptor(TableName.META_TABLE) instead.
   */
  @Deprecated
  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
      TableName.META_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true),
          new HColumnDescriptor(HConstants.TABLE_FAMILY)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });

  static {
    try {
      META_TABLEDESC.addCoprocessor(
          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
          null, Coprocessor.PRIORITY_SYSTEM, null);
    } catch (IOException ex) {
      //LOG.warn("exception in loading coprocessor for the hbase:meta table");
      throw new RuntimeException(ex);
    }
  }

  public final static String NAMESPACE_FAMILY_INFO = "info";
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /** Table descriptor for namespace table */
  public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
      TableName.NAMESPACE_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });

  @Deprecated
  public HTableDescriptor setOwner(User owner) {
    return setOwnerString(owner != null ? owner.getShortName() : null);
  }

  // used by admin.rb:alter(table_name,*args) to update owner.
  @Deprecated
  public HTableDescriptor setOwnerString(String ownerString) {
    if (ownerString != null) {
      setValue(OWNER_KEY, ownerString);
    } else {
      remove(OWNER_KEY);
    }
    return this;
  }

  @Deprecated
  public String getOwnerString() {
    if (getValue(OWNER_KEY) != null) {
      return Bytes.toString(getValue(OWNER_KEY));
    }
    // Note that every table should have an owner (i.e. should have OWNER_KEY set).
    // hbase:meta and -ROOT- should return system user as owner, not null (see
    // MasterFileSystem.java:bootstrap()).
    return null;
  }

  /**
   * @return This instance serialized with pb with pb magic prefix
   * @see #parseFrom(byte[])
   */
  public byte [] toByteArray() {
    return ProtobufUtil.prependPBMagic(convert().toByteArray());
  }

  /**
   * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
   * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
   * @throws DeserializationException
   * @throws IOException
   * @see #toByteArray()
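   * <p>
   * A round-trip sketch:
   * <pre>
   * byte [] bytes = htd.toByteArray();
   * HTableDescriptor copy = HTableDescriptor.parseFrom(bytes);
   * assert htd.equals(copy); // contents survive serialization
   * </pre>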
   */
  public static HTableDescriptor parseFrom(final byte [] bytes)
  throws DeserializationException, IOException {
    if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
      throw new DeserializationException("Expected PB encoded HTableDescriptor");
    }
    int pblen = ProtobufUtil.lengthOfPBMagic();
    TableSchema.Builder builder = TableSchema.newBuilder();
    TableSchema ts;
    try {
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      ts = builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return convert(ts);
  }

  /**
   * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
   */
  public TableSchema convert() {
    TableSchema.Builder builder = TableSchema.newBuilder();
    builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
    for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
      BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
      aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
      aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
      builder.addAttributes(aBuilder.build());
    }
    for (HColumnDescriptor hcd: getColumnFamilies()) {
      builder.addColumnFamilies(hcd.convert());
    }
    for (Map.Entry<String, String> e : this.configuration.entrySet()) {
      NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
      aBuilder.setName(e.getKey());
      aBuilder.setValue(e.getValue());
      builder.addConfiguration(aBuilder.build());
    }
    return builder.build();
  }

  /**
   * @param ts A pb TableSchema instance.
   * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
   */
  public static HTableDescriptor convert(final TableSchema ts) {
    List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
    HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
    int index = 0;
    for (ColumnFamilySchema cfs: list) {
      hcds[index++] = HColumnDescriptor.convert(cfs);
    }
    HTableDescriptor htd = new HTableDescriptor(
        ProtobufUtil.toTableName(ts.getTableName()),
        hcds);
    for (BytesBytesPair a: ts.getAttributesList()) {
      htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
    }
    for (NameStringPair a: ts.getConfigurationList()) {
      htd.setConfiguration(a.getName(), a.getValue());
    }
    return htd;
  }

  /**
   * Getter for accessing the configuration value by key
   */
  public String getConfigurationValue(String key) {
    return configuration.get(key);
  }

  /**
   * Getter for fetching an unmodifiable {@link #configuration} map.
   */
  public Map<String, String> getConfiguration() {
    // shallow pointer copy
    return Collections.unmodifiableMap(configuration);
  }

  /**
   * Setter for storing a configuration setting in {@link #configuration} map.
   * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
   * @param value String value. If null, removes the setting.
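   * <p>
   * For example, overriding a compaction setting for this table only (the value is
   * illustrative):
   * <pre>
   * htd.setConfiguration("hbase.hstore.compaction.min", "5");
   * </pre>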
   */
  public HTableDescriptor setConfiguration(String key, String value) {
    if (value == null) {
      removeConfiguration(key);
    } else {
      configuration.put(key, value);
    }
    return this;
  }

  /**
   * Remove a config setting represented by the key from the {@link #configuration} map
   */
  public void removeConfiguration(final String key) {
    configuration.remove(key);
  }
}