1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.util.Collections;
22  import java.util.HashMap;
23  import java.util.HashSet;
24  import java.util.Map;
25  import java.util.Set;
26  
27  import org.apache.hadoop.hbase.classification.InterfaceAudience;
28  import org.apache.hadoop.hbase.classification.InterfaceStability;
29  import org.apache.hadoop.hbase.exceptions.DeserializationException;
30  import org.apache.hadoop.hbase.io.compress.Compression;
31  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
32  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
33  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
34  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
35  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
36  import org.apache.hadoop.hbase.regionserver.BloomType;
37  import org.apache.hadoop.hbase.util.ByteStringer;
38  import org.apache.hadoop.hbase.util.Bytes;
39  import org.apache.hadoop.hbase.util.PrettyPrinter;
40  import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
41  
42  import com.google.common.base.Preconditions;
43  import com.google.protobuf.InvalidProtocolBufferException;
44  
45  /**
46   * An HColumnDescriptor contains information about a column family such as the
47   * number of versions, compression settings, etc.
48   *
49   * It is used as input when creating a table or adding a column.
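 *
 * <p>Example usage, a minimal sketch (the <code>HTableDescriptor</code> and
 * <code>Admin</code> calls shown are the usual companions of this class, not part of it):
 * <pre>
 * HColumnDescriptor hcd = new HColumnDescriptor("info");
 * hcd.setMaxVersions(3);
 * hcd.setCompressionType(Compression.Algorithm.GZ);
 * hcd.setTimeToLive(24 * 60 * 60);   // one day, in seconds
 *
 * HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
 * htd.addFamily(hcd);
 * admin.createTable(htd);            // or admin.addColumn(tableName, hcd) for an existing table
 * </pre>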
50   */
51  @InterfaceAudience.Public
52  @InterfaceStability.Evolving
53  public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
54    // For future backward compatibility
55  
56    // Version  3 was when column names became byte arrays and when we picked up
57    // the Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
58    // Version  5 was when bloom filter descriptors were removed.
59    // Version  6 adds metadata as a map where keys and values are byte[].
60    // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
61    // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
62    // Version  9 -- add data block encoding
63    // Version 10 -- change metadata to standard type.
64    // Version 11 -- add column family level configuration.
65    private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;
66  
67    // These constants are used as FileInfo keys
68    public static final String COMPRESSION = "COMPRESSION";
69    public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
70    public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
71        "ENCODE_ON_DISK";
72    public static final String DATA_BLOCK_ENCODING =
73        "DATA_BLOCK_ENCODING";
74    /**
75     * Key for the BLOCKCACHE attribute.
76     * A more exact name would be CACHE_DATA_ON_READ because this flag sets whether or not we
77     * cache DATA blocks.  We always cache INDEX and BLOOM blocks; caching these blocks cannot be
78     * disabled.
79     */
80    public static final String BLOCKCACHE = "BLOCKCACHE";
81    public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
82    public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
83    public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
84    public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
85    /**
86     * Key for caching data into L1 if the cache is set up with more than one tier.
87     * To set in the shell, do something like this:
88     * <code>hbase(main):003:0> create 't', {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}}</code>
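   * <p>The equivalent through the Java API, as a minimal sketch:
   * <pre>
   * hcd.setCacheDataInL1(true);
   * </pre>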
89     */
90    public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
91  
92    /**
93     * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
94     * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
95     * family will be loaded into the cache as soon as the file is opened. These
96     * loads will not count as cache misses.
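   * <p>A one-line sketch of enabling this through the descriptor:
   * <pre>
   * hcd.setPrefetchBlocksOnOpen(true);
   * </pre>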
97     */
98    public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";
99  
100   /**
101    * Size of storefile/hfile 'blocks'.  Default is {@link #DEFAULT_BLOCKSIZE}.
102   * Use smaller block sizes for faster random-access at the expense of larger
103    * indices (more memory consumption).
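   * <p>A minimal sketch of trading index size for random-read latency (the 8 KB
   * figure is purely illustrative):
   * <pre>
   * hcd.setBlocksize(8 * 1024);   // smaller blocks, larger block index
   * </pre>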
104    */
105   public static final String BLOCKSIZE = "BLOCKSIZE";
106 
107   public static final String LENGTH = "LENGTH";
108   public static final String TTL = "TTL";
109   public static final String BLOOMFILTER = "BLOOMFILTER";
110   public static final String FOREVER = "FOREVER";
111   public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
112   public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
113   public static final String MIN_VERSIONS = "MIN_VERSIONS";
114   public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
115   public static final String COMPRESS_TAGS = "COMPRESS_TAGS";
116 
117   public static final String ENCRYPTION = "ENCRYPTION";
118   public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";
119 
120   /**
121    * Default compression type.
122    */
123   public static final String DEFAULT_COMPRESSION =
124     Compression.Algorithm.NONE.getName();
125 
126   /**
127    * Default value of the flag that enables data block encoding on disk, as
128    * opposed to encoding in cache only. We encode blocks everywhere by default,
129    * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
130    */
131   public static final boolean DEFAULT_ENCODE_ON_DISK = true;
132 
133   /** Default data block encoding algorithm. */
134   public static final String DEFAULT_DATA_BLOCK_ENCODING =
135       DataBlockEncoding.NONE.toString();
136 
137   /**
138    * Default number of versions of a record to keep.
139    */
140   public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
141     "hbase.column.max.version", 1);
142 
143   /**
144    * Default is not to keep a minimum of versions.
145    */
146   public static final int DEFAULT_MIN_VERSIONS = 0;
147 
148   /*
149    * Cache the blocksize value here.
150    * Question: is it OK to cache, since when we re-enable we create a new HCD?
151    */
152   private volatile Integer blocksize = null;
153 
154   /**
155    * Default setting for whether to try and serve this column family from memory or not.
156    */
157   public static final boolean DEFAULT_IN_MEMORY = false;
158 
159   /**
160    * Default setting for preventing deleted cells from being collected immediately.
161    */
162   public static final KeepDeletedCells DEFAULT_KEEP_DELETED = KeepDeletedCells.FALSE;
163 
164   /**
165    * Default setting for whether to use a block cache or not.
166    */
167   public static final boolean DEFAULT_BLOCKCACHE = true;
168 
169   /**
170    * Default setting for whether to cache data blocks on write if block caching
171    * is enabled.
172    */
173   public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
174 
175   /**
176    * Default setting for whether to cache data blocks in the L1 tier.  Only makes sense if more than
177    * one tier is in operation: i.e. if we have an L1 and an L2.  This will be the case if we are
178    * using BucketCache.
179    */
180   public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false;
181 
182   /**
183    * Default setting for whether to cache index blocks on write if block
184    * caching is enabled.
185    */
186   public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;
187 
188   /**
189    * Default size of blocks in files stored to the filesystem (hfiles).
190    */
191   public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;
192 
193   /**
194    * Default setting for whether or not to use bloomfilters.
195    */
196   public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();
197 
198   /**
199    * Default setting for whether to cache bloom filter blocks on write if block
200    * caching is enabled.
201    */
202   public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;
203 
204   /**
205    * Default time to live of cell contents.
206    */
207   public static final int DEFAULT_TTL = HConstants.FOREVER;
208 
209   /**
210    * Default scope.
211    */
212   public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;
213 
214   /**
215    * Default setting for whether to evict cached blocks from the blockcache on
216    * close.
217    */
218   public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;
219 
220   /**
221    * Default setting for whether to compress tags along with any type of DataBlockEncoding.
222    */
223   public static final boolean DEFAULT_COMPRESS_TAGS = true;
224 
225   /**
226    * Default setting for whether to prefetch blocks into the blockcache on open.
227    */
228   public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;
229 
230   private final static Map<String, String> DEFAULT_VALUES
231     = new HashMap<String, String>();
232   private final static Set<Bytes> RESERVED_KEYWORDS
233       = new HashSet<Bytes>();
234 
235   static {
236       DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
237       DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
238       DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
239       DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
240       DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
241       DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
242       DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
243       DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
244       DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
245       DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
246       DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
247       DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
248       DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1));
249       DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
250       DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
251       DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
252       DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
253       for (String s : DEFAULT_VALUES.keySet()) {
254         RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
255       }
256     RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION)));
257     RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY)));
258   }
259 
260   private static final int UNINITIALIZED = -1;
261 
262   // Column family name
263   private byte [] name;
264 
265   // Column metadata
266   private final Map<Bytes, Bytes> values =
267       new HashMap<Bytes, Bytes>();
268 
269   /**
270    * A map which holds the configuration specific to the column family.
271    * The keys of the map have the same names as config keys and override the defaults with
272    * cf-specific settings. Example uses include compaction settings, etc.
273    */
274   private final Map<String, String> configuration = new HashMap<String, String>();
275 
276   /*
277    * Cache the max versions rather than calculate it every time.
278    */
279   private int cachedMaxVersions = UNINITIALIZED;
280 
281   /**
282    * Default constructor. Must be present for Writable.
283    * @deprecated Used by Writables and Writables are going away.
284    */
285   @Deprecated
286   // Make this private rather than remove after the deprecation period elapses.  It's needed by pb
287   // deserializations.
288   public HColumnDescriptor() {
289     this.name = null;
290   }
291 
292   /**
293    * Construct a column descriptor specifying only the family name.
294    * The other attributes are defaulted.
295    *
296    * @param familyName Column family name. Must be 'printable' -- digit or
297    * letter -- and may not contain a <code>:</code>
298    */
299   public HColumnDescriptor(final String familyName) {
300     this(Bytes.toBytes(familyName));
301   }
302 
303   /**
304    * Construct a column descriptor specifying only the family name.
305    * The other attributes are defaulted.
306    *
307    * @param familyName Column family name. Must be 'printable' -- digit or
308    * letter -- and may not contain a <code>:</code>
309    */
310   public HColumnDescriptor(final byte [] familyName) {
311     this (familyName == null || familyName.length <= 0?
312       HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
313       DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
314       DEFAULT_TTL, DEFAULT_BLOOMFILTER);
315   }
316 
317   /**
318    * Constructor.
319    * Makes a deep copy of the supplied descriptor.
320    * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
321    * @param desc The descriptor.
322    */
323   public HColumnDescriptor(HColumnDescriptor desc) {
324     super();
325     this.name = desc.name.clone();
326     for (Map.Entry<Bytes, Bytes> e :
327         desc.values.entrySet()) {
328       this.values.put(e.getKey(), e.getValue());
329     }
330     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
331       this.configuration.put(e.getKey(), e.getValue());
332     }
333     setMaxVersions(desc.getMaxVersions());
334   }
335 
336   /**
337    * Constructor
338    * @param familyName Column family name. Must be 'printable' -- digit or
339    * letter -- and may not contain a <code>:</code>
340    * @param maxVersions Maximum number of versions to keep
341    * @param compression Compression type
342    * @param inMemory If true, column data should be kept in an HRegionServer's
343    * cache
344    * @param blockCacheEnabled If true, MapFile blocks should be cached
345    * @param timeToLive Time-to-live of cell contents, in seconds
346    * (use HConstants.FOREVER for unlimited TTL)
347    * @param bloomFilter Bloom filter type for this column
348    *
349    * @throws IllegalArgumentException if passed a family name that is made of
350    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
351    * a <code>:</code>
352    * @throws IllegalArgumentException if the number of versions is &lt;= 0
353    * @deprecated use {@link #HColumnDescriptor(String)} and setters
354    */
355   @Deprecated
356   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
357       final String compression, final boolean inMemory,
358       final boolean blockCacheEnabled,
359       final int timeToLive, final String bloomFilter) {
360     this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
361       DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
362   }
363 
364   /**
365    * Constructor
366    * @param familyName Column family name. Must be 'printable' -- digit or
367    * letter -- and may not contain a <code>:</code>
368    * @param maxVersions Maximum number of versions to keep
369    * @param compression Compression type
370    * @param inMemory If true, column data should be kept in an HRegionServer's
371    * cache
372    * @param blockCacheEnabled If true, MapFile blocks should be cached
373    * @param blocksize Block size to use when writing out storefiles.  Use
374    * smaller block sizes for faster random-access at expense of larger indices
375    * (more memory consumption).  Default is usually 64k.
376    * @param timeToLive Time-to-live of cell contents, in seconds
377    * (use HConstants.FOREVER for unlimited TTL)
378    * @param bloomFilter Bloom filter type for this column
379    * @param scope The scope tag for this column
380    *
381    * @throws IllegalArgumentException if passed a family name that is made of
382    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
383    * a <code>:</code>
384    * @throws IllegalArgumentException if the number of versions is &lt;= 0
385    * @deprecated use {@link #HColumnDescriptor(String)} and setters
386    */
387   @Deprecated
388   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
389       final String compression, final boolean inMemory,
390       final boolean blockCacheEnabled, final int blocksize,
391       final int timeToLive, final String bloomFilter, final int scope) {
392     this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED,
393         compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
394         inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
395         scope);
396   }
397 
398   /**
399    * Constructor
400    * @param familyName Column family name. Must be 'printable' -- digit or
401    * letter -- and may not contain a <code>:</code>
402    * @param minVersions Minimum number of versions to keep
403    * @param maxVersions Maximum number of versions to keep
404    * @param keepDeletedCells Whether to retain deleted cells until they expire
405    *        up to maxVersions versions.
406    * @param compression Compression type
407    * @param encodeOnDisk whether to use the specified data block encoding
408    *        on disk. If false, the encoding will be used in cache only.
409    * @param dataBlockEncoding data block encoding
410    * @param inMemory If true, column data should be kept in an HRegionServer's
411    * cache
412    * @param blockCacheEnabled If true, MapFile blocks should be cached
413    * @param blocksize Block size to use when writing out storefiles.  Use
414    * smaller blocksizes for faster random-access at expense of larger indices
415    * (more memory consumption).  Default is usually 64k.
416    * @param timeToLive Time-to-live of cell contents, in seconds
417    * (use HConstants.FOREVER for unlimited TTL)
418    * @param bloomFilter Bloom filter type for this column
419    * @param scope The scope tag for this column
420    *
421    * @throws IllegalArgumentException if passed a family name that is made of
422    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
423    * a <code>:</code>
424    * @throws IllegalArgumentException if the number of versions is &lt;= 0
425    * @deprecated use {@link #HColumnDescriptor(String)} and setters
426    */
427   @Deprecated
428   public HColumnDescriptor(final byte[] familyName, final int minVersions,
429       final int maxVersions, final KeepDeletedCells keepDeletedCells,
430       final String compression, final boolean encodeOnDisk,
431       final String dataBlockEncoding, final boolean inMemory,
432       final boolean blockCacheEnabled, final int blocksize,
433       final int timeToLive, final String bloomFilter, final int scope) {
434     isLegalFamilyName(familyName);
435     this.name = familyName;
436 
437     if (maxVersions <= 0) {
438       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
439       // Until there is support, consider 0 or < 0 -- a configuration error.
440       throw new IllegalArgumentException("Maximum versions must be positive");
441     }
442 
443     if (minVersions > 0) {
444       if (timeToLive == HConstants.FOREVER) {
445         throw new IllegalArgumentException("Minimum versions requires TTL.");
446       }
447       if (minVersions >= maxVersions) {
448         throw new IllegalArgumentException("Minimum versions must be < "
449             + "maximum versions.");
450       }
451     }
452 
453     setMaxVersions(maxVersions);
454     setMinVersions(minVersions);
455     setKeepDeletedCells(keepDeletedCells);
456     setInMemory(inMemory);
457     setBlockCacheEnabled(blockCacheEnabled);
458     setTimeToLive(timeToLive);
459     setCompressionType(Compression.Algorithm.
460       valueOf(compression.toUpperCase()));
461     setDataBlockEncoding(DataBlockEncoding.
462         valueOf(dataBlockEncoding.toUpperCase()));
463     setBloomFilterType(BloomType.
464       valueOf(bloomFilter.toUpperCase()));
465     setBlocksize(blocksize);
466     setScope(scope);
467   }
468 
469   /**
470    * @param b Family name.
471    * @return <code>b</code>
472    * @throws IllegalArgumentException If not null and not a legitimate family
473    * name: i.e. 'printable' and may not contain a ':' (null passes are allowed because
474    * <code>b</code> can be null when deserializing).  It cannot start with a '.'
475    * either. Also, a family name cannot be empty or equal "recovered.edits".
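   * <p>A small illustrative sketch of what passes and what throws:
   * <pre>
   * HColumnDescriptor.isLegalFamilyName(Bytes.toBytes("info"));    // ok, returned as-is
   * HColumnDescriptor.isLegalFamilyName(Bytes.toBytes("a:b"));     // throws IllegalArgumentException
   * HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(".meta"));   // throws IllegalArgumentException
   * </pre>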
476    */
477   public static byte [] isLegalFamilyName(final byte [] b) {
478     if (b == null) {
479       return b;
480     }
481     Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
482     if (b[0] == '.') {
483       throw new IllegalArgumentException("Family names cannot start with a " +
484         "period: " + Bytes.toString(b));
485     }
486     for (int i = 0; i < b.length; i++) {
487       if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
488         throw new IllegalArgumentException("Illegal character <" + b[i] +
489         ">. Family names cannot contain control characters, colons, or slashes: " +
490           Bytes.toString(b));
491       }
492     }
493     byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
494     if (Bytes.equals(recoveredEdit, b)) {
495       throw new IllegalArgumentException("Family name cannot be: " +
496           HConstants.RECOVERED_EDITS_DIR);
497     }
498     return b;
499   }
500 
501   /**
502    * @return Name of this column family
503    */
504   public byte [] getName() {
505     return name;
506   }
507 
508   /**
509    * @return Name of this column family
510    */
511   public String getNameAsString() {
512     return Bytes.toString(this.name);
513   }
514 
515   /**
516    * @param key The key.
517    * @return The value.
518    */
519   public byte[] getValue(byte[] key) {
520     Bytes ibw = values.get(new Bytes(key));
521     if (ibw == null)
522       return null;
523     return ibw.get();
524   }
525 
526   /**
527    * @param key The key.
528    * @return The value as a string.
529    */
530   public String getValue(String key) {
531     byte[] value = getValue(Bytes.toBytes(key));
532     if (value == null)
533       return null;
534     return Bytes.toString(value);
535   }
536 
537   /**
538    * @return All values.
539    */
540   public Map<Bytes, Bytes> getValues() {
541     // shallow pointer copy
542     return Collections.unmodifiableMap(values);
543   }
544 
545   /**
546    * @param key The key.
547    * @param value The value.
548    * @return this (for chained invocation)
549    */
550   public HColumnDescriptor setValue(byte[] key, byte[] value) {
551     values.put(new Bytes(key),
552         new Bytes(value));
553     return this;
554   }
555 
556   /**
557    * @param key Key whose entry (key and value) we're to remove from the HCD parameters.
558    */
559   public void remove(final byte [] key) {
560     values.remove(new Bytes(key));
561   }
562 
563   /**
564    * @param key The key.
565    * @param value The value.
566    * @return this (for chained invocation)
567    */
568   public HColumnDescriptor setValue(String key, String value) {
569     if (value == null) {
570       remove(Bytes.toBytes(key));
571     } else {
572       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
573     }
574     return this;
575   }
576 
577   /** @return compression type being used for the column family */
578   public Compression.Algorithm getCompression() {
579     String n = getValue(COMPRESSION);
580     if (n == null) {
581       return Compression.Algorithm.NONE;
582     }
583     return Compression.Algorithm.valueOf(n.toUpperCase());
584   }
585 
586   /** @return compression type being used for the column family for major
587       compaction */
588   public Compression.Algorithm getCompactionCompression() {
589     String n = getValue(COMPRESSION_COMPACT);
590     if (n == null) {
591       return getCompression();
592     }
593     return Compression.Algorithm.valueOf(n.toUpperCase());
594   }
595 
596   /** @return maximum number of versions */
597   public int getMaxVersions() {
598     if (this.cachedMaxVersions == UNINITIALIZED) {
599       String v = getValue(HConstants.VERSIONS);
600       this.cachedMaxVersions = Integer.parseInt(v);
601     }
602     return this.cachedMaxVersions;
603   }
604 
605   /**
606    * @param maxVersions maximum number of versions
607    * @return this (for chained invocation)
608    */
609   public HColumnDescriptor setMaxVersions(int maxVersions) {
610     if (maxVersions <= 0) {
611       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
612       // Until there is support, consider 0 or < 0 -- a configuration error.
613       throw new IllegalArgumentException("Maximum versions must be positive");
614     }
615     if (maxVersions < this.getMinVersions()) {
616         throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
617             + " while minVersion is " + this.getMinVersions()
618             + ". Maximum versions must be >= minimum versions ");
619     }
620     setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
621     cachedMaxVersions = maxVersions;
622     return this;
623   }
624 
625   /**
626    * @return The storefile/hfile blocksize for this column family.
627    */
628   public synchronized int getBlocksize() {
629     if (this.blocksize == null) {
630       String value = getValue(BLOCKSIZE);
631       this.blocksize = (value != null)?
632         Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
633     }
634     return this.blocksize.intValue();
635 
636   }
637 
638   /**
639    * @param s Blocksize to use when writing out storefiles/hfiles on this
640    * column family.
641    * @return this (for chained invocation)
642    */
643   public HColumnDescriptor setBlocksize(int s) {
644     setValue(BLOCKSIZE, Integer.toString(s));
645     this.blocksize = null;
646     return this;
647   }
648 
649   /**
650    * @return Compression type setting.
651    */
652   public Compression.Algorithm getCompressionType() {
653     return getCompression();
654   }
655 
656   /**
657    * Compression types supported in hbase.
658    * LZO is not bundled as part of the hbase distribution.
659    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
660    * for how to enable it.
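   * <p>A minimal sketch (SNAPPY is only an example; whichever codec is chosen
   * must be available on the cluster):
   * <pre>
   * hcd.setCompressionType(Compression.Algorithm.SNAPPY);
   * hcd.setCompactionCompressionType(Compression.Algorithm.GZ);   // optional, used for compactions only
   * </pre>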
661    * @param type Compression type setting.
662    * @return this (for chained invocation)
663    */
664   public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
665     return setValue(COMPRESSION, type.getName().toUpperCase());
666   }
667 
668   /**
669    * @return data block encoding algorithm used on disk
670    * @deprecated See getDataBlockEncoding()
671    */
672   @Deprecated
673   public DataBlockEncoding getDataBlockEncodingOnDisk() {
674     return getDataBlockEncoding();
675   }
676 
677   /**
678    * This method does nothing now. Flag ENCODE_ON_DISK is not used
679    * any more. Data blocks have the same encoding in cache as on disk.
680    * @return this (for chained invocation)
681    * @deprecated This does nothing now.
682    */
683   @Deprecated
684   public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
685     return this;
686   }
687 
688   /**
689    * @return the data block encoding algorithm used in block cache and
690    *         optionally on disk
691    */
692   public DataBlockEncoding getDataBlockEncoding() {
693     String type = getValue(DATA_BLOCK_ENCODING);
694     if (type == null) {
695       type = DEFAULT_DATA_BLOCK_ENCODING;
696     }
697     return DataBlockEncoding.valueOf(type);
698   }
699 
700   /**
701    * Set data block encoding algorithm used in block cache.
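   * <p>A minimal sketch (FAST_DIFF is just one of the available encodings):
   * <pre>
   * hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
   * </pre>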
702    * @param type What kind of data block encoding will be used.
703    * @return this (for chained invocation)
704    */
705   public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
706     String name;
707     if (type != null) {
708       name = type.toString();
709     } else {
710       name = DataBlockEncoding.NONE.toString();
711     }
712     return setValue(DATA_BLOCK_ENCODING, name);
713   }
714 
715   /**
716    * Set whether the tags should be compressed along with DataBlockEncoding. When no
717    * DataBlockEncoding is in use, this has no effect.
718    *
719    * @param compressTags
720    * @return this (for chained invocation)
721    */
722   public HColumnDescriptor setCompressTags(boolean compressTags) {
723     return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
724   }
725 
726   /**
727    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
728    *         DataBlockEncoding is in use, this has no effect.
729    * @deprecated Use {@link #isCompressTags()} instead
730    */
731   @Deprecated
732   public boolean shouldCompressTags() {
733     String compressTagsStr = getValue(COMPRESS_TAGS);
734     boolean compressTags = DEFAULT_COMPRESS_TAGS;
735     if (compressTagsStr != null) {
736       compressTags = Boolean.valueOf(compressTagsStr);
737     }
738     return compressTags;
739   }
740 
741   /**
742    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
743    *         DataBlockEncoding is in use, this has no effect.
744    */
745   public boolean isCompressTags() {
746     String compressTagsStr = getValue(COMPRESS_TAGS);
747     boolean compressTags = DEFAULT_COMPRESS_TAGS;
748     if (compressTagsStr != null) {
749       compressTags = Boolean.valueOf(compressTagsStr);
750     }
751     return compressTags;
752   }
753 
754   /**
755    * @return Compression type setting.
756    */
757   public Compression.Algorithm getCompactionCompressionType() {
758     return getCompactionCompression();
759   }
760 
761   /**
762    * Compression types supported in hbase.
763    * LZO is not bundled as part of the hbase distribution.
764    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
765    * for how to enable it.
766    * @param type Compression type setting.
767    * @return this (for chained invocation)
768    */
769   public HColumnDescriptor setCompactionCompressionType(
770       Compression.Algorithm type) {
771     return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase());
772   }
773 
774   /**
775    * @return True if we are to favor keeping all values for this column family in the
776    * HRegionServer cache.
777    */
778   public boolean isInMemory() {
779     String value = getValue(HConstants.IN_MEMORY);
780     if (value != null)
781       return Boolean.valueOf(value).booleanValue();
782     return DEFAULT_IN_MEMORY;
783   }
784 
785   /**
786    * @param inMemory True if we are to favor keeping all values for this column family in the
787    * HRegionServer cache
788    * @return this (for chained invocation)
789    */
790   public HColumnDescriptor setInMemory(boolean inMemory) {
791     return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
792   }
793 
794   public KeepDeletedCells getKeepDeletedCells() {
795     String value = getValue(KEEP_DELETED_CELLS);
796     if (value != null) {
797       // toUpperCase for backwards compatibility
798       return KeepDeletedCells.valueOf(value.toUpperCase());
799     }
800     return DEFAULT_KEEP_DELETED;
801   }
802 
803   /**
804    * @param keepDeletedCells True if deleted rows should not be collected
805    * immediately.
806    * @return this (for chained invocation)
807    * @deprecated use {@link #setKeepDeletedCells(KeepDeletedCells)}
808    */
809   @Deprecated
810   public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
811     return setValue(KEEP_DELETED_CELLS, (keepDeletedCells ? KeepDeletedCells.TRUE
812         : KeepDeletedCells.FALSE).toString());
813   }
814 
815   /**
816    * @param keepDeletedCells True if deleted rows should not be collected
817    * immediately.
818    * @return this (for chained invocation)
819    */
820   public HColumnDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) {
821     return setValue(KEEP_DELETED_CELLS, keepDeletedCells.toString());
822   }
823 
824   /**
825    * @return Time-to-live of cell contents, in seconds.
826    */
827   public int getTimeToLive() {
828     String value = getValue(TTL);
829     return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL;
830   }
831 
832   /**
833    * @param timeToLive Time-to-live of cell contents, in seconds.
834    * @return this (for chained invocation)
835    */
836   public HColumnDescriptor setTimeToLive(int timeToLive) {
837     return setValue(TTL, Integer.toString(timeToLive));
838   }
839 
840   /**
841    * @return The minimum number of versions to keep.
842    */
843   public int getMinVersions() {
844     String value = getValue(MIN_VERSIONS);
845     return (value != null)? Integer.valueOf(value).intValue(): 0;
846   }
847 
848   /**
849    * @param minVersions The minimum number of versions to keep.
850    * (used when timeToLive is set)
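   * <p>A minimal sketch (values are illustrative only): expire cells after ten
   * minutes, but always retain at least one version even once the TTL has passed.
   * <pre>
   * hcd.setTimeToLive(600);
   * hcd.setMinVersions(1);
   * </pre>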
851    * @return this (for chained invocation)
852    */
853   public HColumnDescriptor setMinVersions(int minVersions) {
854     return setValue(MIN_VERSIONS, Integer.toString(minVersions));
855   }
856 
857   /**
858    * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX
859    * and BLOOM type blocks).
860    */
861   public boolean isBlockCacheEnabled() {
862     String value = getValue(BLOCKCACHE);
863     if (value != null)
864       return Boolean.valueOf(value).booleanValue();
865     return DEFAULT_BLOCKCACHE;
866   }
867 
868   /**
869    * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache
870    * INDEX and BLOOM blocks; you cannot turn this off).
871    * @return this (for chained invocation)
872    */
873   public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
874     return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
875   }
876 
877   /**
878    * @return bloom filter type used for new StoreFiles in ColumnFamily
879    */
880   public BloomType getBloomFilterType() {
881     String n = getValue(BLOOMFILTER);
882     if (n == null) {
883       n = DEFAULT_BLOOMFILTER;
884     }
885     return BloomType.valueOf(n.toUpperCase());
886   }
887 
888   /**
889    * @param bt bloom filter type
890    * @return this (for chained invocation)
891    */
892   public HColumnDescriptor setBloomFilterType(final BloomType bt) {
893     return setValue(BLOOMFILTER, bt.toString());
894   }
895 
896   /**
897    * @return the scope tag
898    */
899   public int getScope() {
900     byte[] value = getValue(REPLICATION_SCOPE_BYTES);
901     if (value != null) {
902       return Integer.valueOf(Bytes.toString(value));
903     }
904     return DEFAULT_REPLICATION_SCOPE;
905   }
906 
907   /**
908    * @param scope the scope tag
909    * @return this (for chained invocation)
910    */
911   public HColumnDescriptor setScope(int scope) {
912     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
913   }
914 
915   /**
916    * @return true if we should cache data blocks on write
917    * @deprecated Use {@link #isCacheDataOnWrite()} instead
918    */
919   @Deprecated
920   public boolean shouldCacheDataOnWrite() {
921     return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
922   }
923 
924   /**
925    * @return true if we should cache data blocks on write
926    */
927   public boolean isCacheDataOnWrite() {
928     return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
929   }
930 
931   /**
932    * @param value true if we should cache data blocks on write
933    * @return this (for chained invocation)
934    */
935   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
936     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
937   }
938 
939   /**
940    * @return true if we should cache data blocks in the L1 cache (if the block cache deployment
941    * has more than one tier; e.g. we are using CombinedBlockCache).
942    * @deprecated Use {@link #isCacheDataInL1()} instead
943    */
944   @Deprecated
945   public boolean shouldCacheDataInL1() {
946     return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
947   }
948 
949   /**
950    * @return true if we should cache data blocks in the L1 cache (if the block cache deployment has more
951    *         than one tier; e.g. we are using CombinedBlockCache).
952    */
953   public boolean isCacheDataInL1() {
954     return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
955   }
956 
957   /**
958    * @param value true if we should cache data blocks in the L1 cache (if the block cache deployment
959    * has more than one tier; e.g. we are using CombinedBlockCache).
960    * @return this (for chained invocation)
961    */
962   public HColumnDescriptor setCacheDataInL1(boolean value) {
963     return setValue(CACHE_DATA_IN_L1, Boolean.toString(value));
964   }
965 
966   private boolean setAndGetBoolean(final String key, final boolean defaultSetting) {
967     String value = getValue(key);
968     if (value != null) return Boolean.valueOf(value).booleanValue();
969     return defaultSetting;
970   }
971 
972   /**
973    * @return true if we should cache index blocks on write
974    * @deprecated Use {@link #isCacheIndexesOnWrite()} instead
975    */
976   @Deprecated
977   public boolean shouldCacheIndexesOnWrite() {
978     return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
979   }
980 
981   /**
982    * @return true if we should cache index blocks on write
983    */
984   public boolean isCacheIndexesOnWrite() {
985     return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
986   }
987 
988   /**
989    * @param value true if we should cache index blocks on write
990    * @return this (for chained invocation)
991    */
992   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
993     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
994   }
995 
996   /**
997    * @return true if we should cache bloomfilter blocks on write
998    * @deprecated Use {@link #isCacheBloomsOnWrite()} instead
999    */
1000   @Deprecated
1001   public boolean shouldCacheBloomsOnWrite() {
1002     return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
1003   }
1004 
1005   /**
1006    * @return true if we should cache bloomfilter blocks on write
1007    */
1008   public boolean isCacheBloomsOnWrite() {
1009     return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
1010   }
1011 
1012   /**
1013    * @param value true if we should cache bloomfilter blocks on write
1014    * @return this (for chained invocation)
1015    */
1016   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
1017     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
1018   }
1019 
1020   /**
1021    * @return true if we should evict cached blocks from the blockcache on
1022    * close
1023    * @deprecated Use {@link #isEvictBlocksOnClose()} instead
1024    */
1025   @Deprecated
1026   public boolean shouldEvictBlocksOnClose() {
1027     return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
1028   }
1029 
1030   /**
1031    * @return true if we should evict cached blocks from the blockcache on close
1032    */
1033   public boolean isEvictBlocksOnClose() {
1034     return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
1035   }
1036 
1037   /**
1038    * @param value true if we should evict cached blocks from the blockcache on
1039    * close
1040    * @return this (for chained invocation)
1041    */
1042   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
1043     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
1044   }
1045 
1046   /**
1047    * @return true if we should prefetch blocks into the blockcache on open
1048    * @deprecated Use {@link #isPrefetchBlocksOnOpen()} instead
1049    */
1050   @Deprecated
1051   public boolean shouldPrefetchBlocksOnOpen() {
1052     return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
1053   }
1054 
1055   /**
1056    * @return true if we should prefetch blocks into the blockcache on open
1057    */
1058   public boolean isPrefetchBlocksOnOpen() {
1059     return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
1060   }
1061 
1062   /**
1063    * @param value true if we should prefetch blocks into the blockcache on open
1064    * @return this (for chained invocation)
1065    */
1066   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
1067     return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
1068   }
1069 
1070   /**
1071    * @see java.lang.Object#toString()
1072    */
1073   @Override
1074   public String toString() {
1075     StringBuilder s = new StringBuilder();
1076 
1077     s.append('{');
1078     s.append(HConstants.NAME);
1079     s.append(" => '");
1080     s.append(Bytes.toString(name));
1081     s.append("'");
1082     s.append(getValues(true));
1083     s.append('}');
1084     return s.toString();
1085   }
1086 
1087   /**
1088    * @return Column family descriptor with only the customized attributes.
1089    */
1090   public String toStringCustomizedValues() {
1091     StringBuilder s = new StringBuilder();
1092     s.append('{');
1093     s.append(HConstants.NAME);
1094     s.append(" => '");
1095     s.append(Bytes.toString(name));
1096     s.append("'");
1097     s.append(getValues(false));
1098     s.append('}');
1099     return s.toString();
1100   }
1101 
1102   private StringBuilder getValues(boolean printDefaults) {
1103     StringBuilder s = new StringBuilder();
1104 
1105     boolean hasConfigKeys = false;
1106 
1107     // print all reserved keys first
1108     for (Bytes k : values.keySet()) {
1109       if (!RESERVED_KEYWORDS.contains(k)) {
1110         hasConfigKeys = true;
1111         continue;
1112       }
1113       String key = Bytes.toString(k.get());
1114       String value = Bytes.toStringBinary(values.get(k).get());
1115       if (printDefaults
1116           || !DEFAULT_VALUES.containsKey(key)
1117           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
1118         s.append(", ");
1119         s.append(key);
1120         s.append(" => ");
1121         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
1122       }
1123     }
1124 
1125     // print all non-reserved, advanced config keys as a separate subset
1126     if (hasConfigKeys) {
1127       s.append(", ");
1128       s.append(HConstants.METADATA).append(" => ");
1129       s.append('{');
1130       boolean printComma = false;
1131       for (Bytes k : values.keySet()) {
1132         if (RESERVED_KEYWORDS.contains(k)) {
1133           continue;
1134         }
1135         String key = Bytes.toString(k.get());
1136         String value = Bytes.toStringBinary(values.get(k).get());
1137         if (printComma) {
1138           s.append(", ");
1139         }
1140         printComma = true;
1141         s.append('\'').append(key).append('\'');
1142         s.append(" => ");
1143         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
1144       }
1145       s.append('}');
1146     }
1147 
1148     if (!configuration.isEmpty()) {
1149       s.append(", ");
1150       s.append(HConstants.CONFIGURATION).append(" => ");
1151       s.append('{');
1152       boolean printCommaForConfiguration = false;
1153       for (Map.Entry<String, String> e : configuration.entrySet()) {
1154         if (printCommaForConfiguration) s.append(", ");
1155         printCommaForConfiguration = true;
1156         s.append('\'').append(e.getKey()).append('\'');
1157         s.append(" => ");
1158         s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
1159       }
1160       s.append("}");
1161     }
1162     return s;
1163   }
1164 
1165   public static Unit getUnit(String key) {
1166     Unit unit;
1167     /* TTL for now, we can add more as we need */
1168     if (key.equals(HColumnDescriptor.TTL)) {
1169       unit = Unit.TIME_INTERVAL;
1170     } else {
1171       unit = Unit.NONE;
1172     }
1173     return unit;
1174   }
1175 
1176   public static Map<String, String> getDefaultValues() {
1177     return Collections.unmodifiableMap(DEFAULT_VALUES);
1178   }
1179 
1180   /**
1181    * @see java.lang.Object#equals(java.lang.Object)
1182    */
1183   @Override
1184   public boolean equals(Object obj) {
1185     if (this == obj) {
1186       return true;
1187     }
1188     if (obj == null) {
1189       return false;
1190     }
1191     if (!(obj instanceof HColumnDescriptor)) {
1192       return false;
1193     }
1194     return compareTo((HColumnDescriptor)obj) == 0;
1195   }
1196 
1197   /**
1198    * @see java.lang.Object#hashCode()
1199    */
1200   @Override
1201   public int hashCode() {
1202     int result = Bytes.hashCode(this.name);
1203     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
1204     result ^= values.hashCode();
1205     result ^= configuration.hashCode();
1206     return result;
1207   }
1208 
1209   // Comparable
1210   @Override
1211   public int compareTo(HColumnDescriptor o) {
1212     int result = Bytes.compareTo(this.name, o.getName());
1213     if (result == 0) {
1214       // punt on comparison for ordering, just calculate difference
1215       result = this.values.hashCode() - o.values.hashCode();
1216       if (result < 0)
1217         result = -1;
1218       else if (result > 0)
1219         result = 1;
1220     }
1221     if (result == 0) {
1222       result = this.configuration.hashCode() - o.configuration.hashCode();
1223       if (result < 0)
1224         result = -1;
1225       else if (result > 0)
1226         result = 1;
1227     }
1228     return result;
1229   }
1230 
1231   /**
1232    * @return This instance serialized with pb, with the pb magic prefix prepended
1233    * @see #parseFrom(byte[])
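   * <p>A minimal round-trip sketch with the companion parser:
   * <pre>
   * byte [] bytes = hcd.toByteArray();
   * HColumnDescriptor copy = HColumnDescriptor.parseFrom(bytes);
   * </pre>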
1234    */
1235   public byte [] toByteArray() {
1236     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1237   }
1238 
1239   /**
1240    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
1241    * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
1242    * @throws DeserializationException
1243    * @see #toByteArray()
1244    */
1245   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
1246     if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
1247     int pblen = ProtobufUtil.lengthOfPBMagic();
1248     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1249     ColumnFamilySchema cfs = null;
1250     try {
1251       cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1252     } catch (InvalidProtocolBufferException e) {
1253       throw new DeserializationException(e);
1254     }
1255     return convert(cfs);
1256   }
1257 
1258   /**
1259    * @param cfs
1260    * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
1261    */
1262   public static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
1263     // Use the empty constructor so we preserve the initial values set on construction for things
1264     // like maxVersion.  Otherwise, we pick up wrong values on deserialization which makes for
1265     // unrelated-looking test failures that are hard to trace back to here.
1266     HColumnDescriptor hcd = new HColumnDescriptor();
1267     hcd.name = cfs.getName().toByteArray();
1268     for (BytesBytesPair a: cfs.getAttributesList()) {
1269       hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1270     }
1271     for (NameStringPair a: cfs.getConfigurationList()) {
1272       hcd.setConfiguration(a.getName(), a.getValue());
1273     }
1274     return hcd;
1275   }
1276 
1277   /**
1278    * @return This instance converted to the pb column family type
1279    */
1280   public ColumnFamilySchema convert() {
1281     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1282     builder.setName(ByteStringer.wrap(getName()));
1283     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1284       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1285       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1286       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1287       builder.addAttributes(aBuilder.build());
1288     }
1289     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1290       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1291       aBuilder.setName(e.getKey());
1292       aBuilder.setValue(e.getValue());
1293       builder.addConfiguration(aBuilder.build());
1294     }
1295     return builder.build();
1296   }
1297 
1298   /**
1299    * Getter for accessing the configuration value by key.
1300    */
1301   public String getConfigurationValue(String key) {
1302     return configuration.get(key);
1303   }
1304 
1305   /**
1306    * Getter for fetching an unmodifiable {@link #configuration} map.
1307    */
1308   public Map<String, String> getConfiguration() {
1309     // shallow pointer copy
1310     return Collections.unmodifiableMap(configuration);
1311   }
1312 
1313   /**
1314    * Setter for storing a configuration setting in {@link #configuration} map.
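   * <p>A minimal sketch of a per-family override (the key shown is an ordinary
   * HBase configuration property, picked only as an example):
   * <pre>
   * hcd.setConfiguration("hbase.hstore.compaction.min", "5");
   * </pre>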
1315    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1316    * @param value String value. If null, removes the configuration.
1317    */
1318   public HColumnDescriptor setConfiguration(String key, String value) {
1319     if (value == null) {
1320       removeConfiguration(key);
1321     } else {
1322       configuration.put(key, value);
1323     }
1324     return this;
1325   }
1326 
1327   /**
1328    * Remove a configuration setting represented by the key from the {@link #configuration} map.
1329    */
1330   public void removeConfiguration(final String key) {
1331     configuration.remove(key);
1332   }
1333 
1334   /**
1335    * Return the encryption algorithm in use by this family
1336    */
1337   public String getEncryptionType() {
1338     return getValue(ENCRYPTION);
1339   }
1340 
1341   /**
1342    * Set the encryption algorithm for use with this family
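   * <p>A minimal sketch (assumes transparent encryption is configured on the cluster
   * and "AES" is an available cipher; <code>wrappedKeyBytes</code> is hypothetical,
   * previously wrapped key material from the cluster's key provider):
   * <pre>
   * hcd.setEncryptionType("AES");
   * hcd.setEncryptionKey(wrappedKeyBytes);
   * </pre>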
1343    * @param algorithm the encryption algorithm name
1344    */
1345   public HColumnDescriptor setEncryptionType(String algorithm) {
1346     setValue(ENCRYPTION, algorithm);
1347     return this;
1348   }
1349 
1350   /** Return the raw crypto key attribute for the family, or null if not set  */
1351   public byte[] getEncryptionKey() {
1352     return getValue(Bytes.toBytes(ENCRYPTION_KEY));
1353   }
1354 
1355   /** Set the raw crypto key attribute for the family */
1356   public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
1357     setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
1358     return this;
1359   }
1360 }