1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.util.Collections;
22  import java.util.HashMap;
23  import java.util.HashSet;
24  import java.util.Map;
25  import java.util.Set;
26  
27  import org.apache.hadoop.hbase.classification.InterfaceAudience;
28  import org.apache.hadoop.hbase.classification.InterfaceStability;
29  import org.apache.hadoop.hbase.exceptions.DeserializationException;
30  import org.apache.hadoop.hbase.io.compress.Compression;
31  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
32  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
33  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
34  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
35  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
36  import org.apache.hadoop.hbase.regionserver.BloomType;
37  import org.apache.hadoop.hbase.util.ByteStringer;
38  import org.apache.hadoop.hbase.util.Bytes;
39  import org.apache.hadoop.hbase.util.PrettyPrinter;
40  import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
41  
42  import com.google.common.base.Preconditions;
43  import com.google.protobuf.InvalidProtocolBufferException;
44  
45  /**
46   * An HColumnDescriptor contains information about a column family such as the
47   * number of versions, compression settings, etc.
48   *
49   * It is used as input when creating a table or adding a column.
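 *
 * <p>A minimal usage sketch (assumes the companion {@code HTableDescriptor},
 * {@code TableName} and admin classes; the attribute values shown are examples only):</p>
 * <pre>
 * HColumnDescriptor hcd = new HColumnDescriptor("info");
 * hcd.setMaxVersions(3);                                // keep up to three versions per cell
 * hcd.setCompressionType(Compression.Algorithm.NONE);   // or GZ, SNAPPY, ... if the codec is available
 * hcd.setBloomFilterType(BloomType.ROW);                // the default bloom filter type
 * HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
 * htd.addFamily(hcd);                                   // then pass htd to the admin createTable call
 * </pre>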
50   */
51  @InterfaceAudience.Public
52  @InterfaceStability.Evolving
53  public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
54    // For future backward compatibility
55  
56    // Version  3 was when column names became byte arrays and when we picked up
57    // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
58    // Version  5 was when bloom filter descriptors were removed.
59    // Version  6 adds metadata as a map where keys and values are byte[].
60    // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
61    // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
62    // Version  9 -- add data block encoding
63    // Version 10 -- change metadata to standard type.
64    // Version 11 -- add column family level configuration.
65    private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;
66  
67    // These constants are used as FileInfo keys
68    public static final String COMPRESSION = "COMPRESSION";
69    public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
70    public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
71        "ENCODE_ON_DISK";
72    public static final String DATA_BLOCK_ENCODING =
73        "DATA_BLOCK_ENCODING";
74    /**
75     * Key for the BLOCKCACHE attribute.
76     * A more exact name would be CACHE_DATA_ON_READ because this flag sets whether or not we
77     * cache DATA blocks.  We always cache INDEX and BLOOM blocks; caching these blocks cannot be
78     * disabled.
79     */
80    public static final String BLOCKCACHE = "BLOCKCACHE";
81    public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
82    public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
83    public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
84    public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
85    /**
86    * Key for caching data in the L1 tier when the cache is set up with more than one tier.
87     * To set in the shell, do something like this:
88     * <code>hbase(main):003:0> create 't', {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}}</code>
89     */
90    public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
91  
92    /**
93     * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
94     * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
95     * family will be loaded into the cache as soon as the file is opened. These
96     * loads will not count as cache misses.
97     */
98    public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";
99  
100   /**
101    * Size of storefile/hfile 'blocks'.  Default is {@link #DEFAULT_BLOCKSIZE}.
102    * Use smaller block sizes for faster random access at the expense of larger
103    * indices (more memory consumption).
104    */
105   public static final String BLOCKSIZE = "BLOCKSIZE";
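
  // Illustrative sketch: a family served mostly by small random gets might trade a larger
  // block index for lower read latency, e.g. hcd.setBlocksize(16 * 1024) instead of the
  // 64 KB HConstants.DEFAULT_BLOCKSIZE.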
106 
107   public static final String LENGTH = "LENGTH";
108   public static final String TTL = "TTL";
109   public static final String BLOOMFILTER = "BLOOMFILTER";
110   public static final String FOREVER = "FOREVER";
111   public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
112   public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
113   public static final String MIN_VERSIONS = "MIN_VERSIONS";
114   public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
115   public static final String COMPRESS_TAGS = "COMPRESS_TAGS";
116 
117   public static final String ENCRYPTION = "ENCRYPTION";
118   public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";
119 
120   /**
121    * Default compression type.
122    */
123   public static final String DEFAULT_COMPRESSION =
124     Compression.Algorithm.NONE.getName();
125 
126   /**
127    * Default value of the flag that enables data block encoding on disk, as
128    * opposed to encoding in cache only. We encode blocks everywhere by default,
129    * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
130    */
131   public static final boolean DEFAULT_ENCODE_ON_DISK = true;
132 
133   /** Default data block encoding algorithm. */
134   public static final String DEFAULT_DATA_BLOCK_ENCODING =
135       DataBlockEncoding.NONE.toString();
136 
137   /**
138    * Default number of versions of a record to keep.
139    */
140   public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
141     "hbase.column.max.version", 1);
142 
143   /**
144    * Default is not to keep a minimum of versions.
145    */
146   public static final int DEFAULT_MIN_VERSIONS = 0;
147 
148   /*
149    * Cache the blocksize value here rather than reparse it from the values map each time.
150    * It is OK to cache it because a new HCD is created whenever the column family is re-enabled.
151    */
152   private volatile Integer blocksize = null;
153 
154   /**
155    * Default setting for whether to try and serve this column family from memory or not.
156    */
157   public static final boolean DEFAULT_IN_MEMORY = false;
158 
159   /**
160    * Default setting for preventing deleted cells from being collected immediately.
161    */
162   public static final boolean DEFAULT_KEEP_DELETED = false;
163 
164   /**
165    * Default setting for whether to use a block cache or not.
166    */
167   public static final boolean DEFAULT_BLOCKCACHE = true;
168 
169   /**
170    * Default setting for whether to cache data blocks on write if block caching
171    * is enabled.
172    */
173   public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
174 
175   /**
176    * Default setting for whether to cache data blocks in the L1 tier.  Only makes sense if more than
177    * one tier is in operation, i.e. if we have both an L1 and an L2.  This will be the case if we are
178    * using BucketCache.
179    */
180   public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false;
181 
182   /**
183    * Default setting for whether to cache index blocks on write if block
184    * caching is enabled.
185    */
186   public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;
187 
188   /**
189    * Default size of blocks in files stored to the filesystem (hfiles).
190    */
191   public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;
192 
193   /**
194    * Default setting for whether or not to use bloomfilters.
195    */
196   public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();
197 
198   /**
199    * Default setting for whether to cache bloom filter blocks on write if block
200    * caching is enabled.
201    */
202   public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;
203 
204   /**
205    * Default time to live of cell contents.
206    */
207   public static final int DEFAULT_TTL = HConstants.FOREVER;
208 
209   /**
210    * Default scope.
211    */
212   public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;
213 
214   /**
215    * Default setting for whether to evict cached blocks from the blockcache on
216    * close.
217    */
218   public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;
219 
220   /**
221    * Default setting for whether to compress tags along with any type of DataBlockEncoding.
222    */
223   public static final boolean DEFAULT_COMPRESS_TAGS = true;
224 
225   /*
226    * Default setting for whether to prefetch blocks into the blockcache on open.
227    */
228   public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;
229 
230   private final static Map<String, String> DEFAULT_VALUES
231     = new HashMap<String, String>();
232   private final static Set<Bytes> RESERVED_KEYWORDS
233       = new HashSet<Bytes>();
234 
235   static {
236       DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
237       DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
238       DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
239       DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
240       DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
241       DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
242       DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
243       DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
244       DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
245       DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
246       DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
247       DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
248       DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1));
249       DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
250       DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
251       DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
252       DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
253       for (String s : DEFAULT_VALUES.keySet()) {
254         RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
255       }
256     RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION)));
257     RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY)));
258   }
259 
260   private static final int UNINITIALIZED = -1;
261 
262   // Column family name
263   private byte [] name;
264 
265   // Column metadata
266   private final Map<Bytes, Bytes> values =
267       new HashMap<Bytes, Bytes>();
268 
269   /**
270    * A map which holds the configuration specific to the column family.
271    * The keys of the map have the same names as config keys and override the defaults with
272    * cf-specific settings. Example usage may be for compactions, etc.
273    */
274   private final Map<String, String> configuration = new HashMap<String, String>();
275 
276   /*
277    * Cache the max versions rather than calculate it every time.
278    */
279   private int cachedMaxVersions = UNINITIALIZED;
280 
281   /**
282    * Default constructor. Must be present for Writable.
283    * @deprecated Used by Writables and Writables are going away.
284    */
285   @Deprecated
286   // Make this private rather than remove it after the deprecation period elapses.  It's needed by pb
287   // deserialization.
288   public HColumnDescriptor() {
289     this.name = null;
290   }
291 
292   /**
293    * Construct a column descriptor specifying only the family name.
294    * The other attributes are defaulted.
295    *
296    * @param familyName Column family name. Must be 'printable' -- digit or
297    * letter -- and may not contain a <code>:</code>
298    */
299   public HColumnDescriptor(final String familyName) {
300     this(Bytes.toBytes(familyName));
301   }
302 
303   /**
304    * Construct a column descriptor specifying only the family name.
305    * The other attributes are defaulted.
306    *
307    * @param familyName Column family name. Must be 'printable' -- digit or
308    * letter -- and may not contain a <code>:</code>
309    */
310   public HColumnDescriptor(final byte [] familyName) {
311     this (familyName == null || familyName.length <= 0?
312       HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
313       DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
314       DEFAULT_TTL, DEFAULT_BLOOMFILTER);
315   }
316 
317   /**
318    * Constructor.
319    * Makes a deep copy of the supplied descriptor.
320    * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
321    * @param desc The descriptor.
322    */
323   public HColumnDescriptor(HColumnDescriptor desc) {
324     super();
325     this.name = desc.name.clone();
326     for (Map.Entry<Bytes, Bytes> e :
327         desc.values.entrySet()) {
328       this.values.put(e.getKey(), e.getValue());
329     }
330     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
331       this.configuration.put(e.getKey(), e.getValue());
332     }
333     setMaxVersions(desc.getMaxVersions());
334   }
335 
336   /**
337    * Constructor
338    * @param familyName Column family name. Must be 'printable' -- digit or
339    * letter -- and may not contain a <code>:</code>
340    * @param maxVersions Maximum number of versions to keep
341    * @param compression Compression type
342    * @param inMemory If true, column data should be kept in an HRegionServer's
343    * cache
344    * @param blockCacheEnabled If true, MapFile blocks should be cached
345    * @param timeToLive Time-to-live of cell contents, in seconds
346    * (use HConstants.FOREVER for unlimited TTL)
347    * @param bloomFilter Bloom filter type for this column
348    *
349    * @throws IllegalArgumentException if passed a family name that is made of
350    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
351    * a <code>:</code>
352    * @throws IllegalArgumentException if the number of versions is &lt;= 0
353    * @deprecated use {@link #HColumnDescriptor(String)} and setters
354    */
355   @Deprecated
356   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
357       final String compression, final boolean inMemory,
358       final boolean blockCacheEnabled,
359       final int timeToLive, final String bloomFilter) {
360     this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
361       DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
362   }
363 
364   /**
365    * Constructor
366    * @param familyName Column family name. Must be 'printable' -- digit or
367    * letter -- and may not contain a <code>:</code>
368    * @param maxVersions Maximum number of versions to keep
369    * @param compression Compression type
370    * @param inMemory If true, column data should be kept in an HRegionServer's
371    * cache
372    * @param blockCacheEnabled If true, MapFile blocks should be cached
373    * @param blocksize Block size to use when writing out storefiles.  Use
374    * smaller block sizes for faster random access at the expense of larger indices
375    * (more memory consumption).  Default is usually 64k.
376    * @param timeToLive Time-to-live of cell contents, in seconds
377    * (use HConstants.FOREVER for unlimited TTL)
378    * @param bloomFilter Bloom filter type for this column
379    * @param scope The scope tag for this column
380    *
381    * @throws IllegalArgumentException if passed a family name that is made of
382    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
383    * a <code>:</code>
384    * @throws IllegalArgumentException if the number of versions is &lt;= 0
385    * @deprecated use {@link #HColumnDescriptor(String)} and setters
386    */
387   @Deprecated
388   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
389       final String compression, final boolean inMemory,
390       final boolean blockCacheEnabled, final int blocksize,
391       final int timeToLive, final String bloomFilter, final int scope) {
392     this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED,
393         compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
394         inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
395         scope);
396   }
397 
398   /**
399    * Constructor
400    * @param familyName Column family name. Must be 'printable' -- digit or
401    * letter -- and may not contain a <code>:</code>
402    * @param minVersions Minimum number of versions to keep
403    * @param maxVersions Maximum number of versions to keep
404    * @param keepDeletedCells Whether to retain deleted cells until they expire,
405    *        up to maxVersions versions.
406    * @param compression Compression type
407    * @param encodeOnDisk whether to use the specified data block encoding
408    *        on disk. If false, the encoding will be used in cache only.
409    * @param dataBlockEncoding data block encoding
410    * @param inMemory If true, column data should be kept in an HRegionServer's
411    * cache
412    * @param blockCacheEnabled If true, MapFile blocks should be cached
413    * @param blocksize Block size to use when writing out storefiles.  Use
414    * smaller blocksizes for faster random access at the expense of larger indices
415    * (more memory consumption).  Default is usually 64k.
416    * @param timeToLive Time-to-live of cell contents, in seconds
417    * (use HConstants.FOREVER for unlimited TTL)
418    * @param bloomFilter Bloom filter type for this column
419    * @param scope The scope tag for this column
420    *
421    * @throws IllegalArgumentException if passed a family name that is made of
422    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
423    * a <code>:</code>
424    * @throws IllegalArgumentException if the number of versions is &lt;= 0
425    * @deprecated use {@link #HColumnDescriptor(String)} and setters
426    */
427   @Deprecated
428   public HColumnDescriptor(final byte[] familyName, final int minVersions,
429       final int maxVersions, final boolean keepDeletedCells,
430       final String compression, final boolean encodeOnDisk,
431       final String dataBlockEncoding, final boolean inMemory,
432       final boolean blockCacheEnabled, final int blocksize,
433       final int timeToLive, final String bloomFilter, final int scope) {
434     isLegalFamilyName(familyName);
435     this.name = familyName;
436 
437     if (maxVersions <= 0) {
438       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
439       // Until there is support, consider 0 or < 0 -- a configuration error.
440       throw new IllegalArgumentException("Maximum versions must be positive");
441     }
442 
443     if (minVersions > 0) {
444       if (timeToLive == HConstants.FOREVER) {
445         throw new IllegalArgumentException("Minimum versions requires TTL.");
446       }
447       if (minVersions >= maxVersions) {
448         throw new IllegalArgumentException("Minimum versions must be < "
449             + "maximum versions.");
450       }
451     }
452 
453     setMaxVersions(maxVersions);
454     setMinVersions(minVersions);
455     setKeepDeletedCells(keepDeletedCells);
456     setInMemory(inMemory);
457     setBlockCacheEnabled(blockCacheEnabled);
458     setTimeToLive(timeToLive);
459     setCompressionType(Compression.Algorithm.
460       valueOf(compression.toUpperCase()));
461     setDataBlockEncoding(DataBlockEncoding.
462         valueOf(dataBlockEncoding.toUpperCase()));
463     setBloomFilterType(BloomType.
464       valueOf(bloomFilter.toUpperCase()));
465     setBlocksize(blocksize);
466     setScope(scope);
467   }
468 
469   /**
470    * @param b Family name.
471    * @return <code>b</code>
472    * @throws IllegalArgumentException If not null and not a legitimate family
473    * name: i.e. it must be 'printable' and must not contain a ':' (null is allowed because
474    * <code>b</code> can be null when deserializing).  It also cannot start with a '.',
475    * be empty, or equal "recovered.edits".
476    */
477   public static byte [] isLegalFamilyName(final byte [] b) {
478     if (b == null) {
479       return b;
480     }
481     Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
482     if (b[0] == '.') {
483       throw new IllegalArgumentException("Family names cannot start with a " +
484         "period: " + Bytes.toString(b));
485     }
486     for (int i = 0; i < b.length; i++) {
487       if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
488         throw new IllegalArgumentException("Illegal character <" + b[i] +
489           ">. Family names cannot contain control characters, colons, or slashes: " +
490           Bytes.toString(b));
491       }
492     }
493     byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
494     if (Bytes.equals(recoveredEdit, b)) {
495       throw new IllegalArgumentException("Family name cannot be: " +
496           HConstants.RECOVERED_EDITS_DIR);
497     }
498     return b;
499   }
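
  // Illustrative examples: names such as "info" or "colfam_1" pass the checks above, while
  // ".meta" (leading period), "a:b" (colon), an empty name, and "recovered.edits" are all
  // rejected with an IllegalArgumentException.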
500 
501   /**
502    * @return Name of this column family
503    */
504   public byte [] getName() {
505     return name;
506   }
507 
508   /**
509    * @return Name of this column family
510    */
511   public String getNameAsString() {
512     return Bytes.toString(this.name);
513   }
514 
515   /**
516    * @param key The key.
517    * @return The value.
518    */
519   public byte[] getValue(byte[] key) {
520     Bytes ibw = values.get(new Bytes(key));
521     if (ibw == null)
522       return null;
523     return ibw.get();
524   }
525 
526   /**
527    * @param key The key.
528    * @return The value as a string.
529    */
530   public String getValue(String key) {
531     byte[] value = getValue(Bytes.toBytes(key));
532     if (value == null)
533       return null;
534     return Bytes.toString(value);
535   }
536 
537   /**
538    * @return All values.
539    */
540   public Map<Bytes, Bytes> getValues() {
541     // shallow pointer copy
542     return Collections.unmodifiableMap(values);
543   }
544 
545   /**
546    * @param key The key.
547    * @param value The value.
548    * @return this (for chained invocation)
549    */
550   public HColumnDescriptor setValue(byte[] key, byte[] value) {
551     values.put(new Bytes(key),
552         new Bytes(value));
553     return this;
554   }
555 
556   /**
557    * @param key Key whose entry (key and value) we are to remove from the HCD parameters.
558    */
559   public void remove(final byte [] key) {
560     values.remove(new Bytes(key));
561   }
562 
563   /**
564    * @param key The key.
565    * @param value The value.
566    * @return this (for chained invocation)
567    */
568   public HColumnDescriptor setValue(String key, String value) {
569     if (value == null) {
570       remove(Bytes.toBytes(key));
571     } else {
572       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
573     }
574     return this;
575   }
576 
577   /** @return compression type being used for the column family */
578   public Compression.Algorithm getCompression() {
579     String n = getValue(COMPRESSION);
580     if (n == null) {
581       return Compression.Algorithm.NONE;
582     }
583     return Compression.Algorithm.valueOf(n.toUpperCase());
584   }
585 
586   /** @return compression type being used for the column family for major
587       compaction */
588   public Compression.Algorithm getCompactionCompression() {
589     String n = getValue(COMPRESSION_COMPACT);
590     if (n == null) {
591       return getCompression();
592     }
593     return Compression.Algorithm.valueOf(n.toUpperCase());
594   }
595 
596   /** @return maximum number of versions */
597   public int getMaxVersions() {
598     if (this.cachedMaxVersions == UNINITIALIZED) {
599       String v = getValue(HConstants.VERSIONS);
600       this.cachedMaxVersions = Integer.parseInt(v);
601     }
602     return this.cachedMaxVersions;
603   }
604 
605   /**
606    * @param maxVersions maximum number of versions
607    * @return this (for chained invocation)
608    */
609   public HColumnDescriptor setMaxVersions(int maxVersions) {
610     if (maxVersions <= 0) {
611       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
612       // Until there is support, consider 0 or < 0 -- a configuration error.
613       throw new IllegalArgumentException("Maximum versions must be positive");
614     }
615     if (maxVersions < this.getMinVersions()) {
616         throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
617             + " while minVersion is " + this.getMinVersions()
618             + ". Maximum versions must be >= minimum versions ");
619     }
620     setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
621     cachedMaxVersions = maxVersions;
622     return this;
623   }
624 
625   /**
626    * @return The storefile/hfile blocksize for this column family.
627    */
628   public synchronized int getBlocksize() {
629     if (this.blocksize == null) {
630       String value = getValue(BLOCKSIZE);
631       this.blocksize = (value != null)?
632         Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
633     }
634     return this.blocksize.intValue();
635   }
636 
637   /**
638    * @param s Blocksize to use when writing out storefiles/hfiles on this
639    * column family.
640    * @return this (for chained invocation)
641    */
642   public HColumnDescriptor setBlocksize(int s) {
643     setValue(BLOCKSIZE, Integer.toString(s));
644     this.blocksize = null;
645     return this;
646   }
647 
648   /**
649    * @return Compression type setting.
650    */
651   public Compression.Algorithm getCompressionType() {
652     return getCompression();
653   }
654 
655   /**
656    * Compression types supported in hbase.
657    * LZO is not bundled as part of the hbase distribution.
658    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
659    * for how to enable it.
660    * @param type Compression type setting.
661    * @return this (for chained invocation)
662    */
663   public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
664     return setValue(COMPRESSION, type.getName().toUpperCase());
665   }
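
  // Illustrative sketch: compression is chosen from the Compression.Algorithm enum, e.g.
  //   hcd.setCompressionType(Compression.Algorithm.GZ);
  // Codecs such as LZO are not bundled and must be installed separately, as noted above.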
666 
667   /** @return data block encoding algorithm used on disk */
668   @Deprecated
669   public DataBlockEncoding getDataBlockEncodingOnDisk() {
670     return getDataBlockEncoding();
671   }
672 
673   /**
674    * This method does nothing now. Flag ENCODE_ON_DISK is not used
675    * any more. Data blocks have the same encoding in cache as on disk.
676    * @return this (for chained invocation)
677    */
678   @Deprecated
679   public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
680     return this;
681   }
682 
683   /**
684    * @return the data block encoding algorithm used in block cache and
685    *         optionally on disk
686    */
687   public DataBlockEncoding getDataBlockEncoding() {
688     String type = getValue(DATA_BLOCK_ENCODING);
689     if (type == null) {
690       type = DEFAULT_DATA_BLOCK_ENCODING;
691     }
692     return DataBlockEncoding.valueOf(type);
693   }
694 
695   /**
696    * Set data block encoding algorithm used in block cache.
697    * @param type What kind of data block encoding will be used.
698    * @return this (for chained invocation)
699    */
700   public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
701     String name;
702     if (type != null) {
703       name = type.toString();
704     } else {
705       name = DataBlockEncoding.NONE.toString();
706     }
707     return setValue(DATA_BLOCK_ENCODING, name);
708   }
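
  // Illustrative sketch: e.g. hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
  // passing null falls back to DataBlockEncoding.NONE, as implemented above.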
709 
710   /**
711    * Set whether the tags should be compressed along with DataBlockEncoding. When no
712    * DataBlockEncoding is in use, this has no effect.
713    *
714    * @param compressTags
715    * @return this (for chained invocation)
716    */
717   public HColumnDescriptor setCompressTags(boolean compressTags) {
718     return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
719   }
720 
721   /**
722    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
723    *         DataBlockEncoding is in use, this has no effect.
724    */
725   public boolean shouldCompressTags() {
726     String compressTagsStr = getValue(COMPRESS_TAGS);
727     boolean compressTags = DEFAULT_COMPRESS_TAGS;
728     if (compressTagsStr != null) {
729       compressTags = Boolean.valueOf(compressTagsStr);
730     }
731     return compressTags;
732   }
733 
734   /**
735    * @return Compression type setting.
736    */
737   public Compression.Algorithm getCompactionCompressionType() {
738     return getCompactionCompression();
739   }
740 
741   /**
742    * Compression types supported in hbase.
743    * LZO is not bundled as part of the hbase distribution.
744    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
745    * for how to enable it.
746    * @param type Compression type setting.
747    * @return this (for chained invocation)
748    */
749   public HColumnDescriptor setCompactionCompressionType(
750       Compression.Algorithm type) {
751     return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase());
752   }
753 
754   /**
755    * @return True if we are to favor keeping all values for this column family in the
756    * HRegionServer cache.
757    */
758   public boolean isInMemory() {
759     String value = getValue(HConstants.IN_MEMORY);
760     if (value != null)
761       return Boolean.valueOf(value).booleanValue();
762     return DEFAULT_IN_MEMORY;
763   }
764 
765   /**
766    * @param inMemory True if we are to favor keeping all values for this column family in the
767    * HRegionServer cache
768    * @return this (for chained invocation)
769    */
770   public HColumnDescriptor setInMemory(boolean inMemory) {
771     return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
772   }
773 
774   public boolean getKeepDeletedCells() {
775     String value = getValue(KEEP_DELETED_CELLS);
776     if (value != null) {
777       return Boolean.valueOf(value).booleanValue();
778     }
779     return DEFAULT_KEEP_DELETED;
780   }
781 
782   /**
783    * @param keepDeletedCells True if deleted cells should not be collected
784    * immediately.
785    * @return this (for chained invocation)
786    */
787   public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
788     return setValue(KEEP_DELETED_CELLS, Boolean.toString(keepDeletedCells));
789   }
790 
791   /**
792    * @return Time-to-live of cell contents, in seconds.
793    */
794   public int getTimeToLive() {
795     String value = getValue(TTL);
796     return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL;
797   }
798 
799   /**
800    * @param timeToLive Time-to-live of cell contents, in seconds.
801    * @return this (for chained invocation)
802    */
803   public HColumnDescriptor setTimeToLive(int timeToLive) {
804     return setValue(TTL, Integer.toString(timeToLive));
805   }
806 
807   /**
808    * @return The minimum number of versions to keep.
809    */
810   public int getMinVersions() {
811     String value = getValue(MIN_VERSIONS);
812     return (value != null)? Integer.valueOf(value).intValue(): 0;
813   }
814 
815   /**
816    * @param minVersions The minimum number of versions to keep.
817    * (used when timeToLive is set)
818    * @return this (for chained invocation)
819    */
820   public HColumnDescriptor setMinVersions(int minVersions) {
821     return setValue(MIN_VERSIONS, Integer.toString(minVersions));
822   }
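
  // Illustrative sketch: minimum versions only take effect together with a finite TTL and
  // must stay below the maximum, e.g.
  //   hcd.setTimeToLive(86400);   // expire cells after one day
  //   hcd.setMinVersions(1);      // but always retain at least one version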
823 
824   /**
825    * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX
826    * and BLOOM type blocks).
827    */
828   public boolean isBlockCacheEnabled() {
829     String value = getValue(BLOCKCACHE);
830     if (value != null)
831       return Boolean.valueOf(value).booleanValue();
832     return DEFAULT_BLOCKCACHE;
833   }
834 
835   /**
836    * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache
837    * INDEX and BLOOM blocks; you cannot turn this off).
838    * @return this (for chained invocation)
839    */
840   public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
841     return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
842   }
843 
844   /**
845    * @return bloom filter type used for new StoreFiles in ColumnFamily
846    */
847   public BloomType getBloomFilterType() {
848     String n = getValue(BLOOMFILTER);
849     if (n == null) {
850       n = DEFAULT_BLOOMFILTER;
851     }
852     return BloomType.valueOf(n.toUpperCase());
853   }
854 
855   /**
856    * @param bt bloom filter type
857    * @return this (for chained invocation)
858    */
859   public HColumnDescriptor setBloomFilterType(final BloomType bt) {
860     return setValue(BLOOMFILTER, bt.toString());
861   }
862 
863   /**
864    * @return the scope tag
865    */
866   public int getScope() {
867     byte[] value = getValue(REPLICATION_SCOPE_BYTES);
868     if (value != null) {
869       return Integer.valueOf(Bytes.toString(value));
870     }
871     return DEFAULT_REPLICATION_SCOPE;
872   }
873 
874   /**
875    * @param scope the scope tag
876    * @return this (for chained invocation)
877    */
878   public HColumnDescriptor setScope(int scope) {
879     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
880   }
881 
882   /**
883    * @return true if we should cache data blocks on write
884    */
885   public boolean shouldCacheDataOnWrite() {
886     return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
887   }
888 
889   /**
890    * @param value true if we should cache data blocks on write
891    * @return this (for chained invocation)
892    */
893   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
894     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
895   }
896 
897   /**
898    * @return true if we should cache data blocks in the L1 cache (if the block cache deployment
899    * has more than one tier; e.g. we are using CombinedBlockCache).
900    */
901   public boolean shouldCacheDataInL1() {
902     return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
903   }
904 
905   /**
906    * @param value true if we should cache data blocks in the L1 cache (if the block cache deployment
907    * has more than one tier; e.g. we are using CombinedBlockCache).
908    * @return this (for chained invocation)
909    */
910   public HColumnDescriptor setCacheDataInL1(boolean value) {
911     return setValue(CACHE_DATA_IN_L1, Boolean.toString(value));
912   }
913 
914   private boolean setAndGetBoolean(final String key, final boolean defaultSetting) {
915     String value = getValue(key);
916     if (value != null) return Boolean.valueOf(value).booleanValue();
917     return defaultSetting;
918   }
919 
920   /**
921    * @return true if we should cache index blocks on write
922    */
923   public boolean shouldCacheIndexesOnWrite() {
924     return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
925   }
926 
927   /**
928    * @param value true if we should cache index blocks on write
929    * @return this (for chained invocation)
930    */
931   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
932     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
933   }
934 
935   /**
936    * @return true if we should cache bloomfilter blocks on write
937    */
938   public boolean shouldCacheBloomsOnWrite() {
939     return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
940   }
941 
942   /**
943    * @param value true if we should cache bloomfilter blocks on write
944    * @return this (for chained invocation)
945    */
946   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
947     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
948   }
949 
950   /**
951    * @return true if we should evict cached blocks from the blockcache on
952    * close
953    */
954   public boolean shouldEvictBlocksOnClose() {
955     return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
956   }
957 
958   /**
959    * @param value true if we should evict cached blocks from the blockcache on
960    * close
961    * @return this (for chained invocation)
962    */
963   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
964     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
965   }
966 
967   /**
968    * @return true if we should prefetch blocks into the blockcache on open
969    */
970   public boolean shouldPrefetchBlocksOnOpen() {
971     return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
972   }
973 
974   /**
975    * @param value true if we should prefetch blocks into the blockcache on open
976    * @return this (for chained invocation)
977    */
978   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
979     return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
980   }
981 
982   /**
983    * @see java.lang.Object#toString()
984    */
985   @Override
986   public String toString() {
987     StringBuilder s = new StringBuilder();
988 
989     s.append('{');
990     s.append(HConstants.NAME);
991     s.append(" => '");
992     s.append(Bytes.toString(name));
993     s.append("'");
994     s.append(getValues(true));
995     s.append('}');
996     return s.toString();
997   }
998 
999   /**
1000    * @return Column family descriptor with only the customized attributes.
1001    */
1002   public String toStringCustomizedValues() {
1003     StringBuilder s = new StringBuilder();
1004     s.append('{');
1005     s.append(HConstants.NAME);
1006     s.append(" => '");
1007     s.append(Bytes.toString(name));
1008     s.append("'");
1009     s.append(getValues(false));
1010     s.append('}');
1011     return s.toString();
1012   }
1013 
1014   private StringBuilder getValues(boolean printDefaults) {
1015     StringBuilder s = new StringBuilder();
1016 
1017     boolean hasConfigKeys = false;
1018 
1019     // print all reserved keys first
1020     for (Bytes k : values.keySet()) {
1021       if (!RESERVED_KEYWORDS.contains(k)) {
1022         hasConfigKeys = true;
1023         continue;
1024       }
1025       String key = Bytes.toString(k.get());
1026       String value = Bytes.toStringBinary(values.get(k).get());
1027       if (printDefaults
1028           || !DEFAULT_VALUES.containsKey(key)
1029           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
1030         s.append(", ");
1031         s.append(key);
1032         s.append(" => ");
1033         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
1034       }
1035     }
1036 
1037     // print all non-reserved, advanced config keys as a separate subset
1038     if (hasConfigKeys) {
1039       s.append(", ");
1040       s.append(HConstants.METADATA).append(" => ");
1041       s.append('{');
1042       boolean printComma = false;
1043       for (Bytes k : values.keySet()) {
1044         if (RESERVED_KEYWORDS.contains(k)) {
1045           continue;
1046         }
1047         String key = Bytes.toString(k.get());
1048         String value = Bytes.toStringBinary(values.get(k).get());
1049         if (printComma) {
1050           s.append(", ");
1051         }
1052         printComma = true;
1053         s.append('\'').append(key).append('\'');
1054         s.append(" => ");
1055         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
1056       }
1057       s.append('}');
1058     }
1059 
1060     if (!configuration.isEmpty()) {
1061       s.append(", ");
1062       s.append(HConstants.CONFIGURATION).append(" => ");
1063       s.append('{');
1064       boolean printCommaForConfiguration = false;
1065       for (Map.Entry<String, String> e : configuration.entrySet()) {
1066         if (printCommaForConfiguration) s.append(", ");
1067         printCommaForConfiguration = true;
1068         s.append('\'').append(e.getKey()).append('\'');
1069         s.append(" => ");
1070         s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
1071       }
1072       s.append("}");
1073     }
1074     return s;
1075   }
1076 
1077   public static Unit getUnit(String key) {
1078     Unit unit;
1079       /* TTL for now, we can add more as we need */
1080     if (key.equals(HColumnDescriptor.TTL)) {
1081       unit = Unit.TIME_INTERVAL;
1082     } else {
1083       unit = Unit.NONE;
1084     }
1085     return unit;
1086   }
1087 
1088   public static Map<String, String> getDefaultValues() {
1089     return Collections.unmodifiableMap(DEFAULT_VALUES);
1090   }
1091 
1092   /**
1093    * @see java.lang.Object#equals(java.lang.Object)
1094    */
1095   @Override
1096   public boolean equals(Object obj) {
1097     if (this == obj) {
1098       return true;
1099     }
1100     if (obj == null) {
1101       return false;
1102     }
1103     if (!(obj instanceof HColumnDescriptor)) {
1104       return false;
1105     }
1106     return compareTo((HColumnDescriptor)obj) == 0;
1107   }
1108 
1109   /**
1110    * @see java.lang.Object#hashCode()
1111    */
1112   @Override
1113   public int hashCode() {
1114     int result = Bytes.hashCode(this.name);
1115     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
1116     result ^= values.hashCode();
1117     result ^= configuration.hashCode();
1118     return result;
1119   }
1120 
1121   // Comparable
1122   @Override
1123   public int compareTo(HColumnDescriptor o) {
1124     int result = Bytes.compareTo(this.name, o.getName());
1125     if (result == 0) {
1126       // punt on comparison for ordering, just calculate difference
1127       result = this.values.hashCode() - o.values.hashCode();
1128       if (result < 0)
1129         result = -1;
1130       else if (result > 0)
1131         result = 1;
1132     }
1133     if (result == 0) {
1134       result = this.configuration.hashCode() - o.configuration.hashCode();
1135       if (result < 0)
1136         result = -1;
1137       else if (result > 0)
1138         result = 1;
1139     }
1140     return result;
1141   }
1142 
1143   /**
1144    * @return This instance serialized with pb, with the pb magic prefix prepended
1145    * @see #parseFrom(byte[])
1146    */
1147   public byte [] toByteArray() {
1148     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1149   }
1150 
1151   /**
1152    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
1153    * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
1154    * @throws DeserializationException
1155    * @see #toByteArray()
1156    */
1157   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
1158     if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
1159     int pblen = ProtobufUtil.lengthOfPBMagic();
1160     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1161     ColumnFamilySchema cfs = null;
1162     try {
1163       cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1164     } catch (InvalidProtocolBufferException e) {
1165       throw new DeserializationException(e);
1166     }
1167     return convert(cfs);
1168   }
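
  // Illustrative round trip: toByteArray() prepends the pb magic prefix that parseFrom()
  // expects, so
  //   byte[] bytes = hcd.toByteArray();
  //   HColumnDescriptor copy = HColumnDescriptor.parseFrom(bytes);  // may throw DeserializationException
  // yields a descriptor equal to the original.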
1169 
1170   /**
1171    * @param cfs A protobuf ColumnFamilySchema instance
1172    * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
1173    */
1174   public static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
1175     // Use the empty constructor so we preserve the initial values set on construction for things
1176     // like maxVersion.  Otherwise, we pick up wrong values on deserialization which makes for
1177     // unrelated-looking test failures that are hard to trace back to here.
1178     HColumnDescriptor hcd = new HColumnDescriptor();
1179     hcd.name = cfs.getName().toByteArray();
1180     for (BytesBytesPair a: cfs.getAttributesList()) {
1181       hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1182     }
1183     for (NameStringPair a: cfs.getConfigurationList()) {
1184       hcd.setConfiguration(a.getName(), a.getValue());
1185     }
1186     return hcd;
1187   }
1188 
1189   /**
1190    * @return This instance converted to the pb column family type
1191    */
1192   public ColumnFamilySchema convert() {
1193     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1194     builder.setName(ByteStringer.wrap(getName()));
1195     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1196       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1197       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1198       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1199       builder.addAttributes(aBuilder.build());
1200     }
1201     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1202       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1203       aBuilder.setName(e.getKey());
1204       aBuilder.setValue(e.getValue());
1205       builder.addConfiguration(aBuilder.build());
1206     }
1207     return builder.build();
1208   }
1209 
1210   /**
1211    * Getter for accessing the configuration value by key.
1212    */
1213   public String getConfigurationValue(String key) {
1214     return configuration.get(key);
1215   }
1216 
1217   /**
1218    * Getter for fetching an unmodifiable {@link #configuration} map.
1219    */
1220   public Map<String, String> getConfiguration() {
1221     // shallow pointer copy
1222     return Collections.unmodifiableMap(configuration);
1223   }
1224 
1225   /**
1226    * Setter for storing a configuration setting in {@link #configuration} map.
1227    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1228    * @param value String value. If null, removes the configuration.
1229    */
1230   public HColumnDescriptor setConfiguration(String key, String value) {
1231     if (value == null) {
1232       removeConfiguration(key);
1233     } else {
1234       configuration.put(key, value);
1235     }
1236     return this;
1237   }
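
  // Illustrative sketch: configuration entries use the same keys as the XML config and
  // override them for this family only, e.g. (example key)
  //   hcd.setConfiguration("hbase.hstore.compaction.min", "5");
  //   hcd.setConfiguration("hbase.hstore.compaction.min", null);  // null removes the override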
1238 
1239   /**
1240    * Remove a configuration setting represented by the key from the {@link #configuration} map.
1241    */
1242   public void removeConfiguration(final String key) {
1243     configuration.remove(key);
1244   }
1245 
1246   /**
1247    * Return the encryption algorithm in use by this family
1248    */
1249   public String getEncryptionType() {
1250     return getValue(ENCRYPTION);
1251   }
1252 
1253   /**
1254    * Set the encryption algorithm for use with this family
1255    * @param algorithm
1256    */
1257   public HColumnDescriptor setEncryptionType(String algorithm) {
1258     setValue(ENCRYPTION, algorithm);
1259     return this;
1260   }
1261 
1262   /** Return the raw crypto key attribute for the family, or null if not set  */
1263   public byte[] getEncryptionKey() {
1264     return getValue(Bytes.toBytes(ENCRYPTION_KEY));
1265   }
1266 
1267   /** Set the raw crypto key attribute for the family */
1268   public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
1269     setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
1270     return this;
1271   }
1272 }