View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

import com.google.common.base.Preconditions;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PrettyPrinter;
import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
43  
44  /**
45   * An HColumnDescriptor contains information about a column family such as the
46   * number of versions, compression settings, etc.
47   *
48   * It is used as input when creating a table or adding a column.
49   */
50  @InterfaceAudience.Public
51  @InterfaceStability.Evolving
52  public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
  // For future backward compatibility

  // Version  3 was when column names become byte arrays and when we picked up
  // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
  // Version  5 was when bloom filter descriptors were removed.
  // Version  6 adds metadata as a map where keys and values are byte[].
  // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
  // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
  // Version  9 -- add data block encoding
  // Version 10 -- change metadata to standard type.
  // Version 11 -- add column family level configuration.
  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;

  // These constants are used as FileInfo keys
  public static final String COMPRESSION = "COMPRESSION";
  public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
  public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
      "ENCODE_ON_DISK";
  public static final String DATA_BLOCK_ENCODING =
      "DATA_BLOCK_ENCODING";
  /**
   * Key for the BLOCKCACHE attribute.
   * A more exact name would be CACHE_DATA_ON_READ because this flag sets whether or not we
   * cache DATA blocks.  We always cache INDEX and BLOOM blocks; caching these blocks cannot be
   * disabled.
   */
  public static final String BLOCKCACHE = "BLOCKCACHE";
  public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
  public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
  public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
  public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
  /**
   * Key for cache data into L1 if cache is set up with more than one tier.
   * To set in the shell, do something like this:
   * <code>hbase(main):003:0> create 't', {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}}</code>
   */
  public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";

  /**
   * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
   * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
   * family will be loaded into the cache as soon as the file is opened. These
   * loads will not count as cache misses.
   */
  public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";

  /**
   * Size of storefile/hfile 'blocks'.  Default is {@link #DEFAULT_BLOCKSIZE}.
   * Use smaller block sizes for faster random-access at expense of larger
   * indices (more memory consumption).
   */
  public static final String BLOCKSIZE = "BLOCKSIZE";

  public static final String LENGTH = "LENGTH";
  public static final String TTL = "TTL";
  public static final String BLOOMFILTER = "BLOOMFILTER";
  public static final String FOREVER = "FOREVER";
  public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
  public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
  public static final String MIN_VERSIONS = "MIN_VERSIONS";
  public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
  public static final String COMPRESS_TAGS = "COMPRESS_TAGS";

  // Attribute keys for transparent encryption; reserved in RESERVED_KEYWORDS below.
  public static final String ENCRYPTION = "ENCRYPTION";
  public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";

  /**
   * Default compression type.
   */
  public static final String DEFAULT_COMPRESSION =
    Compression.Algorithm.NONE.getName();

  /**
   * Default value of the flag that enables data block encoding on disk, as
   * opposed to encoding in cache only. We encode blocks everywhere by default,
   * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
   */
  public static final boolean DEFAULT_ENCODE_ON_DISK = true;

  /** Default data block encoding algorithm. */
  public static final String DEFAULT_DATA_BLOCK_ENCODING =
      DataBlockEncoding.NONE.toString();

  /**
   * Default number of versions of a record to keep.
   * NOTE(review): read once from "hbase.column.max.version" at class-load time,
   * so later changes to that configuration key are not picked up.
   */
  public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
    "hbase.column.max.version", 1);

  /**
   * Default is not to keep a minimum of versions.
   */
  public static final int DEFAULT_MIN_VERSIONS = 0;

  /*
   * Lazily-computed cache of the BLOCKSIZE attribute; invalidated by
   * setBlocksize() and recomputed by getBlocksize().
   * Question: its OK to cache since when we're reenable, we create a new HCD?
   */
  private volatile Integer blocksize = null;
152 
  /**
   * Default setting for whether to try and serve this column family from memory or not.
   */
  public static final boolean DEFAULT_IN_MEMORY = false;

  /**
   * Default setting for preventing deleted cells from being collected immediately.
   */
  public static final boolean DEFAULT_KEEP_DELETED = false;

  /**
   * Default setting for whether to use a block cache or not.
   */
  public static final boolean DEFAULT_BLOCKCACHE = true;

  /**
   * Default setting for whether to cache data blocks on write if block caching
   * is enabled.
   */
  public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;

  /**
   * Default setting for whether to cache data blocks in L1 tier.  Only makes sense if more than
   * one tier in operations: i.e. if we have an L1 and a L2.  This will be the cases if we are
   * using BucketCache.
   */
  public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false;

  /**
   * Default setting for whether to cache index blocks on write if block
   * caching is enabled.
   */
  public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;

  /**
   * Default size of blocks in files stored to the filesystem (hfiles).
   */
  public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;

  /**
   * Default setting for whether or not to use bloomfilters.
   */
  public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();

  /**
   * Default setting for whether to cache bloom filter blocks on write if block
   * caching is enabled.
   */
  public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;

  /**
   * Default time to live of cell contents.
   */
  public static final int DEFAULT_TTL = HConstants.FOREVER;

  /**
   * Default scope.
   */
  public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;

  /**
   * Default setting for whether to evict cached blocks from the blockcache on
   * close.
   */
  public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;

  /**
   * Default compress tags along with any type of DataBlockEncoding.
   */
  public static final boolean DEFAULT_COMPRESS_TAGS = true;

  /**
   * Default setting for whether to prefetch blocks into the blockcache on open.
   */
  public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;

  // Default value for each standard attribute key; also used to seed
  // RESERVED_KEYWORDS in the static initializer below.
  private final static Map<String, String> DEFAULT_VALUES
    = new HashMap<String, String>();
  // All DEFAULT_VALUES keys plus the encryption keys (see static initializer).
  private final static Set<Bytes> RESERVED_KEYWORDS
      = new HashSet<Bytes>();
233 
  // Populates DEFAULT_VALUES, then derives RESERVED_KEYWORDS from its keys
  // plus the two encryption attribute names.
  static {
      DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
      DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
      DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
      DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
      DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
      DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
      DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
      DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
      DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
      DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
      DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
      DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
      DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1));
      DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
      DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
      DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
      DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
      for (String s : DEFAULT_VALUES.keySet()) {
        RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
      }
    RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION)));
    RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY)));
  }
258 
  // Sentinel meaning cachedMaxVersions has not been computed yet.
  private static final int UNINITIALIZED = -1;

  // Column family name
  private byte [] name;

  // Column metadata: attribute key -> value, both as byte wrappers.
  private final Map<Bytes, Bytes> values =
      new HashMap<Bytes, Bytes>();

  /**
   * A map which holds the configuration specific to the column family.
   * The keys of the map have the same names as config keys and override the defaults with
   * cf-specific settings. Example usage may be for compactions, etc.
   */
  private final Map<String, String> configuration = new HashMap<String, String>();

  /*
   * Cache the max versions rather than calculate it every time.
   * UNINITIALIZED until first read by getMaxVersions().
   */
  private int cachedMaxVersions = UNINITIALIZED;
279 
  /**
   * Default constructor. Must be present for Writable.
   * Leaves the family name null; only legitimate for deserialization paths.
   * @deprecated Used by Writables and Writables are going away.
   */
  @Deprecated
  // Make this private rather than remove after deprecation period elapses.  Its needed by pb
  // deserializations.
  public HColumnDescriptor() {
    this.name = null;
  }
290 
  /**
   * Construct a column descriptor specifying only the family name.
   * The other attributes are defaulted.
   *
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   */
  public HColumnDescriptor(final String familyName) {
    this(Bytes.toBytes(familyName));
  }
301 
302   /**
303    * Construct a column descriptor specifying only the family name
304    * The other attributes are defaulted.
305    *
306    * @param familyName Column family name. Must be 'printable' -- digit or
307    * letter -- and may not contain a <code>:<code>
308    */
309   public HColumnDescriptor(final byte [] familyName) {
310     this (familyName == null || familyName.length <= 0?
311       HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
312       DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
313       DEFAULT_TTL, DEFAULT_BLOOMFILTER);
314   }
315 
316   /**
317    * Constructor.
318    * Makes a deep copy of the supplied descriptor.
319    * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
320    * @param desc The descriptor.
321    */
322   public HColumnDescriptor(HColumnDescriptor desc) {
323     super();
324     this.name = desc.name.clone();
325     for (Map.Entry<Bytes, Bytes> e :
326         desc.values.entrySet()) {
327       this.values.put(e.getKey(), e.getValue());
328     }
329     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
330       this.configuration.put(e.getKey(), e.getValue());
331     }
332     setMaxVersions(desc.getMaxVersions());
333   }
334 
  /**
   * Constructor
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   * @param maxVersions Maximum number of versions to keep
   * @param compression Compression type
   * @param inMemory If true, column data should be kept in an HRegionServer's
   * cache
   * @param blockCacheEnabled If true, MapFile blocks should be cached
   * @param timeToLive Time-to-live of cell contents, in seconds
   * (use HConstants.FOREVER for unlimited TTL)
   * @param bloomFilter Bloom filter type for this column
   *
   * @throws IllegalArgumentException if passed a family name that is made of
   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
   * a <code>:</code>
   * @throws IllegalArgumentException if the number of versions is &lt;= 0
   * @deprecated use {@link #HColumnDescriptor(String)} and setters
   */
  @Deprecated
  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
      final String compression, final boolean inMemory,
      final boolean blockCacheEnabled,
      final int timeToLive, final String bloomFilter) {
    // Delegates, supplying the default blocksize and replication scope.
    this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
      DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
  }
362 
  /**
   * Constructor
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   * @param maxVersions Maximum number of versions to keep
   * @param compression Compression type
   * @param inMemory If true, column data should be kept in an HRegionServer's
   * cache
   * @param blockCacheEnabled If true, MapFile blocks should be cached
   * @param blocksize Block size to use when writing out storefiles.  Use
   * smaller block sizes for faster random-access at expense of larger indices
   * (more memory consumption).  Default is usually 64k.
   * @param timeToLive Time-to-live of cell contents, in seconds
   * (use HConstants.FOREVER for unlimited TTL)
   * @param bloomFilter Bloom filter type for this column
   * @param scope The scope tag for this column
   *
   * @throws IllegalArgumentException if passed a family name that is made of
   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
   * a <code>:</code>
   * @throws IllegalArgumentException if the number of versions is &lt;= 0
   * @deprecated use {@link #HColumnDescriptor(String)} and setters
   */
  @Deprecated
  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
      final String compression, final boolean inMemory,
      final boolean blockCacheEnabled, final int blocksize,
      final int timeToLive, final String bloomFilter, final int scope) {
    // Delegates, supplying defaults for min versions, keep-deleted-cells and
    // data block encoding.
    this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED,
        compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
        inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
        scope);
  }
396 
  /**
   * Constructor
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   * @param minVersions Minimum number of versions to keep
   * @param maxVersions Maximum number of versions to keep
   * @param keepDeletedCells Whether to retain deleted cells until they expire
   *        up to maxVersions versions.
   * @param compression Compression type
   * @param encodeOnDisk whether to use the specified data block encoding
   *        on disk. If false, the encoding will be used in cache only.
   *        NOTE(review): this argument is never read by the body; the
   *        ENCODE_ON_DISK flag is obsolete (see {@link #setEncodeOnDisk(boolean)}).
   * @param dataBlockEncoding data block encoding
   * @param inMemory If true, column data should be kept in an HRegionServer's
   * cache
   * @param blockCacheEnabled If true, MapFile blocks should be cached
   * @param blocksize Block size to use when writing out storefiles.  Use
   * smaller blocksizes for faster random-access at expense of larger indices
   * (more memory consumption).  Default is usually 64k.
   * @param timeToLive Time-to-live of cell contents, in seconds
   * (use HConstants.FOREVER for unlimited TTL)
   * @param bloomFilter Bloom filter type for this column
   * @param scope The scope tag for this column
   *
   * @throws IllegalArgumentException if passed a family name that is made of
   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
   * a <code>:</code>
   * @throws IllegalArgumentException if the number of versions is &lt;= 0
   * @deprecated use {@link #HColumnDescriptor(String)} and setters
   */
  @Deprecated
  public HColumnDescriptor(final byte[] familyName, final int minVersions,
      final int maxVersions, final boolean keepDeletedCells,
      final String compression, final boolean encodeOnDisk,
      final String dataBlockEncoding, final boolean inMemory,
      final boolean blockCacheEnabled, final int blocksize,
      final int timeToLive, final String bloomFilter, final int scope) {
    // Validate the family name up front; throws IllegalArgumentException.
    isLegalFamilyName(familyName);
    this.name = familyName;

    if (maxVersions <= 0) {
      // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
      // Until there is support, consider 0 or < 0 -- a configuration error.
      throw new IllegalArgumentException("Maximum versions must be positive");
    }

    if (minVersions > 0) {
      // A minimum-versions floor only makes sense when cells can expire.
      if (timeToLive == HConstants.FOREVER) {
        throw new IllegalArgumentException("Minimum versions requires TTL.");
      }
      if (minVersions >= maxVersions) {
        throw new IllegalArgumentException("Minimum versions must be < "
            + "maximum versions.");
      }
    }

    setMaxVersions(maxVersions);
    setMinVersions(minVersions);
    setKeepDeletedCells(keepDeletedCells);
    setInMemory(inMemory);
    setBlockCacheEnabled(blockCacheEnabled);
    setTimeToLive(timeToLive);
    // NOTE(review): default-locale toUpperCase() before enum valueOf; a fixed
    // locale (Locale.ROOT) would be safer -- confirm before changing.
    setCompressionType(Compression.Algorithm.
      valueOf(compression.toUpperCase()));
    setDataBlockEncoding(DataBlockEncoding.
        valueOf(dataBlockEncoding.toUpperCase()));
    setBloomFilterType(BloomType.
      valueOf(bloomFilter.toUpperCase()));
    setBlocksize(blocksize);
    setScope(scope);
  }
467 
468   /**
469    * @param b Family name.
470    * @return <code>b</code>
471    * @throws IllegalArgumentException If not null and not a legitimate family
472    * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
473    * <code>b</code> can be null when deserializing).  Cannot start with a '.'
474    * either. Also Family can not be an empty value or equal "recovered.edits".
475    */
476   public static byte [] isLegalFamilyName(final byte [] b) {
477     if (b == null) {
478       return b;
479     }
480     Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
481     if (b[0] == '.') {
482       throw new IllegalArgumentException("Family names cannot start with a " +
483         "period: " + Bytes.toString(b));
484     }
485     for (int i = 0; i < b.length; i++) {
486       if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
487         throw new IllegalArgumentException("Illegal character <" + b[i] +
488           ">. Family names cannot contain control characters or colons: " +
489           Bytes.toString(b));
490       }
491     }
492     byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
493     if (Bytes.equals(recoveredEdit, b)) {
494       throw new IllegalArgumentException("Family name cannot be: " +
495           HConstants.RECOVERED_EDITS_DIR);
496     }
497     return b;
498   }
499 
500   /**
501    * @return Name of this column family
502    */
503   public byte [] getName() {
504     return name;
505   }
506 
507   /**
508    * @return Name of this column family
509    */
510   public String getNameAsString() {
511     return Bytes.toString(this.name);
512   }
513 
514   /**
515    * @param key The key.
516    * @return The value.
517    */
518   public byte[] getValue(byte[] key) {
519     Bytes ibw = values.get(new Bytes(key));
520     if (ibw == null)
521       return null;
522     return ibw.get();
523   }
524 
525   /**
526    * @param key The key.
527    * @return The value as a string.
528    */
529   public String getValue(String key) {
530     byte[] value = getValue(Bytes.toBytes(key));
531     if (value == null)
532       return null;
533     return Bytes.toString(value);
534   }
535 
536   /**
537    * @return All values.
538    */
539   public Map<Bytes, Bytes> getValues() {
540     // shallow pointer copy
541     return Collections.unmodifiableMap(values);
542   }
543 
544   /**
545    * @param key The key.
546    * @param value The value.
547    * @return this (for chained invocation)
548    */
549   public HColumnDescriptor setValue(byte[] key, byte[] value) {
550     values.put(new Bytes(key),
551         new Bytes(value));
552     return this;
553   }
554 
555   /**
556    * @param key Key whose key and value we're to remove from HCD parameters.
557    */
558   public void remove(final byte [] key) {
559     values.remove(new Bytes(key));
560   }
561 
562   /**
563    * @param key The key.
564    * @param value The value.
565    * @return this (for chained invocation)
566    */
567   public HColumnDescriptor setValue(String key, String value) {
568     if (value == null) {
569       remove(Bytes.toBytes(key));
570     } else {
571       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
572     }
573     return this;
574   }
575 
576   /** @return compression type being used for the column family */
577   public Compression.Algorithm getCompression() {
578     String n = getValue(COMPRESSION);
579     if (n == null) {
580       return Compression.Algorithm.NONE;
581     }
582     return Compression.Algorithm.valueOf(n.toUpperCase());
583   }
584 
585   /** @return compression type being used for the column family for major
586       compression */
587   public Compression.Algorithm getCompactionCompression() {
588     String n = getValue(COMPRESSION_COMPACT);
589     if (n == null) {
590       return getCompression();
591     }
592     return Compression.Algorithm.valueOf(n.toUpperCase());
593   }
594 
  /**
   * @return maximum number of versions
   */
  public int getMaxVersions() {
    if (this.cachedMaxVersions == UNINITIALIZED) {
      // NOTE(review): assumes the VERSIONS attribute is present and numeric;
      // Integer.parseInt throws NumberFormatException otherwise -- confirm
      // every construction path sets it (the constructors call setMaxVersions).
      String v = getValue(HConstants.VERSIONS);
      this.cachedMaxVersions = Integer.parseInt(v);
    }
    return this.cachedMaxVersions;
  }
603 
604   /**
605    * @param maxVersions maximum number of versions
606    * @return this (for chained invocation)
607    */
608   public HColumnDescriptor setMaxVersions(int maxVersions) {
609     if (maxVersions <= 0) {
610       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
611       // Until there is support, consider 0 or < 0 -- a configuration error.
612       throw new IllegalArgumentException("Maximum versions must be positive");
613     }    
614     if (maxVersions < this.getMinVersions()) {      
615         throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
616             + " while minVersion is " + this.getMinVersions()
617             + ". Maximum versions must be >= minimum versions ");      
618     }
619     setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
620     cachedMaxVersions = maxVersions;
621     return this;
622   }
623 
  /**
   * @return The storefile/hfile blocksize for this column family, falling back
   * to {@link #DEFAULT_BLOCKSIZE} when the BLOCKSIZE attribute is unset.
   * Lazily computed and cached in the volatile {@link #blocksize} field;
   * synchronized so the compute-and-store is done once per invalidation.
   */
  public synchronized int getBlocksize() {
    if (this.blocksize == null) {
      String value = getValue(BLOCKSIZE);
      // Integer.decode also accepts hex/octal forms such as "0x10000".
      this.blocksize = (value != null)?
        Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
    }
    return this.blocksize.intValue();
  }
635 
636   /**
637    * @param s Blocksize to use when writing out storefiles/hfiles on this
638    * column family.
639    * @return this (for chained invocation)
640    */
641   public HColumnDescriptor setBlocksize(int s) {
642     setValue(BLOCKSIZE, Integer.toString(s));
643     this.blocksize = null;
644     return this;
645   }
646 
  /**
   * @return Compression type setting. Alias for {@link #getCompression()}.
   */
  public Compression.Algorithm getCompressionType() {
    return getCompression();
  }
653 
654   /**
655    * Compression types supported in hbase.
656    * LZO is not bundled as part of the hbase distribution.
657    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
658    * for how to enable it.
659    * @param type Compression type setting.
660    * @return this (for chained invocation)
661    */
662   public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
663     return setValue(COMPRESSION, type.getName().toUpperCase());
664   }
665 
  /**
   * @return data block encoding algorithm used on disk. Since on-disk and
   * in-cache encodings are no longer distinct, this simply delegates to
   * {@link #getDataBlockEncoding()}.
   */
  @Deprecated
  public DataBlockEncoding getDataBlockEncodingOnDisk() {
    return getDataBlockEncoding();
  }
671 
  /**
   * This method does nothing now. Flag ENCODE_ON_DISK is not used
   * any more. Data blocks have the same encoding in cache as on disk.
   * @param encodeOnDisk ignored
   * @return this (for chained invocation)
   */
  @Deprecated
  public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
    return this;
  }
681 
682   /**
683    * @return the data block encoding algorithm used in block cache and
684    *         optionally on disk
685    */
686   public DataBlockEncoding getDataBlockEncoding() {
687     String type = getValue(DATA_BLOCK_ENCODING);
688     if (type == null) {
689       type = DEFAULT_DATA_BLOCK_ENCODING;
690     }
691     return DataBlockEncoding.valueOf(type);
692   }
693 
694   /**
695    * Set data block encoding algorithm used in block cache.
696    * @param type What kind of data block encoding will be used.
697    * @return this (for chained invocation)
698    */
699   public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
700     String name;
701     if (type != null) {
702       name = type.toString();
703     } else {
704       name = DataBlockEncoding.NONE.toString();
705     }
706     return setValue(DATA_BLOCK_ENCODING, name);
707   }
708 
709   /**
710    * Set whether the tags should be compressed along with DataBlockEncoding. When no
711    * DataBlockEncoding is been used, this is having no effect.
712    * 
713    * @param compressTags
714    * @return this (for chained invocation)
715    */
716   public HColumnDescriptor setCompressTags(boolean compressTags) {
717     return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
718   }
719 
720   /**
721    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
722    *         DataBlockEncoding is been used, this is having no effect.
723    */
724   public boolean shouldCompressTags() {
725     String compressTagsStr = getValue(COMPRESS_TAGS);
726     boolean compressTags = DEFAULT_COMPRESS_TAGS;
727     if (compressTagsStr != null) {
728       compressTags = Boolean.valueOf(compressTagsStr);
729     }
730     return compressTags;
731   }
732 
  /**
   * @return Compression type setting used on compaction.
   * Alias for {@link #getCompactionCompression()}.
   */
  public Compression.Algorithm getCompactionCompressionType() {
    return getCompactionCompression();
  }
739 
740   /**
741    * Compression types supported in hbase.
742    * LZO is not bundled as part of the hbase distribution.
743    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
744    * for how to enable it.
745    * @param type Compression type setting.
746    * @return this (for chained invocation)
747    */
748   public HColumnDescriptor setCompactionCompressionType(
749       Compression.Algorithm type) {
750     return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase());
751   }
752 
753   /**
754    * @return True if we are to favor keeping all values for this column family in the 
755    * HRegionServer cache.
756    */
757   public boolean isInMemory() {
758     String value = getValue(HConstants.IN_MEMORY);
759     if (value != null)
760       return Boolean.valueOf(value).booleanValue();
761     return DEFAULT_IN_MEMORY;
762   }
763 
764   /**
765    * @param inMemory True if we are to favor keeping all values for this column family in the
766    * HRegionServer cache
767    * @return this (for chained invocation)
768    */
769   public HColumnDescriptor setInMemory(boolean inMemory) {
770     return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
771   }
772 
773   public boolean getKeepDeletedCells() {
774     String value = getValue(KEEP_DELETED_CELLS);
775     if (value != null) {
776       return Boolean.valueOf(value).booleanValue();
777     }
778     return DEFAULT_KEEP_DELETED;
779   }
780 
781   /**
782    * @param keepDeletedCells True if deleted rows should not be collected
783    * immediately.
784    * @return this (for chained invocation)
785    */
786   public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
787     return setValue(KEEP_DELETED_CELLS, Boolean.toString(keepDeletedCells));
788   }
789 
790   /**
791    * @return Time-to-live of cell contents, in seconds.
792    */
793   public int getTimeToLive() {
794     String value = getValue(TTL);
795     return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL;
796   }
797 
798   /**
799    * @param timeToLive Time-to-live of cell contents, in seconds.
800    * @return this (for chained invocation)
801    */
802   public HColumnDescriptor setTimeToLive(int timeToLive) {
803     return setValue(TTL, Integer.toString(timeToLive));
804   }
805 
806   /**
807    * @return The minimum number of versions to keep.
808    */
809   public int getMinVersions() {
810     String value = getValue(MIN_VERSIONS);
811     return (value != null)? Integer.valueOf(value).intValue(): 0;
812   }
813 
814   /**
815    * @param minVersions The minimum number of versions to keep.
816    * (used when timeToLive is set)
817    * @return this (for chained invocation)
818    */
819   public HColumnDescriptor setMinVersions(int minVersions) {
820     return setValue(MIN_VERSIONS, Integer.toString(minVersions));
821   }
822 
823   /**
824    * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX
825    * and BLOOM type blocks).
826    */
827   public boolean isBlockCacheEnabled() {
828     String value = getValue(BLOCKCACHE);
829     if (value != null)
830       return Boolean.valueOf(value).booleanValue();
831     return DEFAULT_BLOCKCACHE;
832   }
833 
834   /**
835    * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache
836    * INDEX and BLOOM blocks; you cannot turn this off).
837    * @return this (for chained invocation)
838    */
839   public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
840     return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
841   }
842 
843   /**
844    * @return bloom filter type used for new StoreFiles in ColumnFamily
845    */
846   public BloomType getBloomFilterType() {
847     String n = getValue(BLOOMFILTER);
848     if (n == null) {
849       n = DEFAULT_BLOOMFILTER;
850     }
851     return BloomType.valueOf(n.toUpperCase());
852   }
853 
854   /**
855    * @param bt bloom filter type
856    * @return this (for chained invocation)
857    */
858   public HColumnDescriptor setBloomFilterType(final BloomType bt) {
859     return setValue(BLOOMFILTER, bt.toString());
860   }
861 
862    /**
863     * @return the scope tag
864     */
865   public int getScope() {
866     byte[] value = getValue(REPLICATION_SCOPE_BYTES);
867     if (value != null) {
868       return Integer.valueOf(Bytes.toString(value));
869     }
870     return DEFAULT_REPLICATION_SCOPE;
871   }
872 
873  /**
874   * @param scope the scope tag
875   * @return this (for chained invocation)
876   */
877   public HColumnDescriptor setScope(int scope) {
878     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
879   }
880 
881   /**
882    * @return true if we should cache data blocks on write
883    */
884   public boolean shouldCacheDataOnWrite() {
885     return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
886   }
887 
888   /**
889    * @param value true if we should cache data blocks on write
890    * @return this (for chained invocation)
891    */
892   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
893     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
894   }
895 
896   /**
897    * @return true if we should cache data blocks in the L1 cache (if block cache deploy
898    * has more than one tier; e.g. we are using CombinedBlockCache).
899    */
900   public boolean shouldCacheDataInL1() {
901     return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
902   }
903 
904   /**
905    * @param value true if we should cache data blocks in the L1 cache (if block cache deploy
906    * has more than one tier; e.g. we are using CombinedBlockCache).
907    * @return this (for chained invocation)
908    */
909   public HColumnDescriptor setCacheDataInL1(boolean value) {
910     return setValue(CACHE_DATA_IN_L1, Boolean.toString(value));
911   }
912 
913   private boolean setAndGetBoolean(final String key, final boolean defaultSetting) {
914     String value = getValue(key);
915     if (value != null) return Boolean.valueOf(value).booleanValue();
916     return defaultSetting;
917   }
918 
919   /**
920    * @return true if we should cache index blocks on write
921    */
922   public boolean shouldCacheIndexesOnWrite() {
923     return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
924   }
925 
926   /**
927    * @param value true if we should cache index blocks on write
928    * @return this (for chained invocation)
929    */
930   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
931     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
932   }
933 
934   /**
935    * @return true if we should cache bloomfilter blocks on write
936    */
937   public boolean shouldCacheBloomsOnWrite() {
938     return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
939   }
940 
941   /**
942    * @param value true if we should cache bloomfilter blocks on write
943    * @return this (for chained invocation)
944    */
945   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
946     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
947   }
948 
949   /**
950    * @return true if we should evict cached blocks from the blockcache on
951    * close
952    */
953   public boolean shouldEvictBlocksOnClose() {
954     return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
955   }
956 
957   /**
958    * @param value true if we should evict cached blocks from the blockcache on
959    * close
960    * @return this (for chained invocation)
961    */
962   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
963     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
964   }
965 
966   /**
967    * @return true if we should prefetch blocks into the blockcache on open
968    */
969   public boolean shouldPrefetchBlocksOnOpen() {
970     return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
971   }
972 
973   /**
974    * @param value true if we should prefetch blocks into the blockcache on open
975    * @return this (for chained invocation)
976    */
977   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
978     return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
979   }
980 
981   /**
982    * @see java.lang.Object#toString()
983    */
984   @Override
985   public String toString() {
986     StringBuilder s = new StringBuilder();
987 
988     s.append('{');
989     s.append(HConstants.NAME);
990     s.append(" => '");
991     s.append(Bytes.toString(name));
992     s.append("'");
993     s.append(getValues(true));
994     s.append('}');
995     return s.toString();
996   }
997 
998   /**
999    * @return Column family descriptor with only the customized attributes.
1000    */
1001   public String toStringCustomizedValues() {
1002     StringBuilder s = new StringBuilder();
1003     s.append('{');
1004     s.append(HConstants.NAME);
1005     s.append(" => '");
1006     s.append(Bytes.toString(name));
1007     s.append("'");
1008     s.append(getValues(false));
1009     s.append('}');
1010     return s.toString();
1011   }
1012 
1013   private StringBuilder getValues(boolean printDefaults) {
1014     StringBuilder s = new StringBuilder();
1015 
1016     boolean hasConfigKeys = false;
1017 
1018     // print all reserved keys first
1019     for (Bytes k : values.keySet()) {
1020       if (!RESERVED_KEYWORDS.contains(k)) {
1021         hasConfigKeys = true;
1022         continue;
1023       }
1024       String key = Bytes.toString(k.get());
1025       String value = Bytes.toStringBinary(values.get(k).get());
1026       if (printDefaults
1027           || !DEFAULT_VALUES.containsKey(key)
1028           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
1029         s.append(", ");
1030         s.append(key);
1031         s.append(" => ");
1032         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
1033       }
1034     }
1035 
1036     // print all non-reserved, advanced config keys as a separate subset
1037     if (hasConfigKeys) {
1038       s.append(", ");
1039       s.append(HConstants.METADATA).append(" => ");
1040       s.append('{');
1041       boolean printComma = false;
1042       for (Bytes k : values.keySet()) {
1043         if (RESERVED_KEYWORDS.contains(k)) {
1044           continue;
1045         }
1046         String key = Bytes.toString(k.get());
1047         String value = Bytes.toStringBinary(values.get(k).get());
1048         if (printComma) {
1049           s.append(", ");
1050         }
1051         printComma = true;
1052         s.append('\'').append(key).append('\'');
1053         s.append(" => ");
1054         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
1055       }
1056       s.append('}');
1057     }
1058 
1059     if (!configuration.isEmpty()) {
1060       s.append(", ");
1061       s.append(HConstants.CONFIGURATION).append(" => ");
1062       s.append('{');
1063       boolean printCommaForConfiguration = false;
1064       for (Map.Entry<String, String> e : configuration.entrySet()) {
1065         if (printCommaForConfiguration) s.append(", ");
1066         printCommaForConfiguration = true;
1067         s.append('\'').append(e.getKey()).append('\'');
1068         s.append(" => ");
1069         s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
1070       }
1071       s.append("}");
1072     }
1073     return s;
1074   }
1075 
1076   public static Unit getUnit(String key) {
1077     Unit unit;
1078       /* TTL for now, we can add more as we neeed */
1079     if (key.equals(HColumnDescriptor.TTL)) {
1080       unit = Unit.TIME_INTERVAL;
1081     } else {
1082       unit = Unit.NONE;
1083     }
1084     return unit;
1085   }
1086 
1087   public static Map<String, String> getDefaultValues() {
1088     return Collections.unmodifiableMap(DEFAULT_VALUES);
1089   }
1090 
1091   /**
1092    * @see java.lang.Object#equals(java.lang.Object)
1093    */
1094   @Override
1095   public boolean equals(Object obj) {
1096     if (this == obj) {
1097       return true;
1098     }
1099     if (obj == null) {
1100       return false;
1101     }
1102     if (!(obj instanceof HColumnDescriptor)) {
1103       return false;
1104     }
1105     return compareTo((HColumnDescriptor)obj) == 0;
1106   }
1107 
1108   /**
1109    * @see java.lang.Object#hashCode()
1110    */
1111   @Override
1112   public int hashCode() {
1113     int result = Bytes.hashCode(this.name);
1114     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
1115     result ^= values.hashCode();
1116     result ^= configuration.hashCode();
1117     return result;
1118   }
1119 
1120   // Comparable
1121   public int compareTo(HColumnDescriptor o) {
1122     int result = Bytes.compareTo(this.name, o.getName());
1123     if (result == 0) {
1124       // punt on comparison for ordering, just calculate difference
1125       result = this.values.hashCode() - o.values.hashCode();
1126       if (result < 0)
1127         result = -1;
1128       else if (result > 0)
1129         result = 1;
1130     }
1131     if (result == 0) {
1132       result = this.configuration.hashCode() - o.configuration.hashCode();
1133       if (result < 0)
1134         result = -1;
1135       else if (result > 0)
1136         result = 1;
1137     }
1138     return result;
1139   }
1140 
1141   /**
1142    * @return This instance serialized with pb with pb magic prefix
1143    * @see #parseFrom(byte[])
1144    */
1145   public byte [] toByteArray() {
1146     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1147   }
1148 
1149   /**
1150    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
1151    * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
1152    * @throws DeserializationException
1153    * @see #toByteArray()
1154    */
1155   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
1156     if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
1157     int pblen = ProtobufUtil.lengthOfPBMagic();
1158     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1159     ColumnFamilySchema cfs = null;
1160     try {
1161       cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1162     } catch (InvalidProtocolBufferException e) {
1163       throw new DeserializationException(e);
1164     }
1165     return convert(cfs);
1166   }
1167 
1168   /**
1169    * @param cfs
1170    * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
1171    */
1172   public static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
1173     // Use the empty constructor so we preserve the initial values set on construction for things
1174     // like maxVersion.  Otherwise, we pick up wrong values on deserialization which makes for
1175     // unrelated-looking test failures that are hard to trace back to here.
1176     HColumnDescriptor hcd = new HColumnDescriptor();
1177     hcd.name = cfs.getName().toByteArray();
1178     for (BytesBytesPair a: cfs.getAttributesList()) {
1179       hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1180     }
1181     for (NameStringPair a: cfs.getConfigurationList()) {
1182       hcd.setConfiguration(a.getName(), a.getValue());
1183     }
1184     return hcd;
1185   }
1186 
1187   /**
1188    * @return Convert this instance to a the pb column family type
1189    */
1190   public ColumnFamilySchema convert() {
1191     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1192     builder.setName(ByteStringer.wrap(getName()));
1193     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1194       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1195       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1196       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1197       builder.addAttributes(aBuilder.build());
1198     }
1199     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1200       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1201       aBuilder.setName(e.getKey());
1202       aBuilder.setValue(e.getValue());
1203       builder.addConfiguration(aBuilder.build());
1204     }
1205     return builder.build();
1206   }
1207 
1208   /**
1209    * Getter for accessing the configuration value by key.
1210    */
1211   public String getConfigurationValue(String key) {
1212     return configuration.get(key);
1213   }
1214 
1215   /**
1216    * Getter for fetching an unmodifiable {@link #configuration} map.
1217    */
1218   public Map<String, String> getConfiguration() {
1219     // shallow pointer copy
1220     return Collections.unmodifiableMap(configuration);
1221   }
1222 
1223   /**
1224    * Setter for storing a configuration setting in {@link #configuration} map.
1225    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1226    * @param value String value. If null, removes the configuration.
1227    */
1228   public void setConfiguration(String key, String value) {
1229     if (value == null) {
1230       removeConfiguration(key);
1231     } else {
1232       configuration.put(key, value);
1233     }
1234   }
1235 
1236   /**
1237    * Remove a configuration setting represented by the key from the {@link #configuration} map.
1238    */
1239   public void removeConfiguration(final String key) {
1240     configuration.remove(key);
1241   }
1242 
1243   /**
1244    * Return the encryption algorithm in use by this family
1245    */
1246   public String getEncryptionType() {
1247     return getValue(ENCRYPTION);
1248   }
1249 
1250   /**
1251    * Set the encryption algorithm for use with this family
1252    * @param algorithm
1253    */
1254   public HColumnDescriptor setEncryptionType(String algorithm) {
1255     setValue(ENCRYPTION, algorithm);
1256     return this;
1257   }
1258 
1259   /** Return the raw crypto key attribute for the family, or null if not set  */
1260   public byte[] getEncryptionKey() {
1261     return getValue(Bytes.toBytes(ENCRYPTION_KEY));
1262   }
1263 
1264   /** Set the raw crypto key attribute for the family */
1265   public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
1266     setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
1267     return this;
1268   }
1269 }