View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PrettyPrinter;
import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;

import com.google.common.base.Preconditions;
import com.google.protobuf.InvalidProtocolBufferException;
44  
45  /**
46   * An HColumnDescriptor contains information about a column family such as the
47   * number of versions, compression settings, etc.
48   *
49   * It is used as input when creating a table or adding a column.
50   */
51  @InterfaceAudience.Public
52  @InterfaceStability.Evolving
53  public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
54    // For future backward compatibility
55  
56    // Version  3 was when column names become byte arrays and when we picked up
57    // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
58    // Version  5 was when bloom filter descriptors were removed.
59    // Version  6 adds metadata as a map where keys and values are byte[].
60    // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
61    // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
62    // Version  9 -- add data block encoding
63    // Version 10 -- change metadata to standard type.
64    // Version 11 -- add column family level configuration.
65    private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;
66  
67    // These constants are used as FileInfo keys
68    public static final String COMPRESSION = "COMPRESSION";
69    public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
70    public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
71        "ENCODE_ON_DISK";
72    public static final String DATA_BLOCK_ENCODING =
73        "DATA_BLOCK_ENCODING";
74    /**
75     * Key for the BLOCKCACHE attribute.
76     * A more exact name would be CACHE_DATA_ON_READ because this flag sets whether or not we
77     * cache DATA blocks.  We always cache INDEX and BLOOM blocks; caching these blocks cannot be
78     * disabled.
79     */
80    public static final String BLOCKCACHE = "BLOCKCACHE";
81    public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
82    public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
83    public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
84    public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
85    /**
86     * Key for cache data into L1 if cache is set up with more than one tier.
87     * To set in the shell, do something like this:
88     * <code>hbase(main):003:0> create 't', {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}}</code>
89     */
90    public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
91  
92    /**
93     * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
94     * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
95     * family will be loaded into the cache as soon as the file is opened. These
96     * loads will not count as cache misses.
97     */
98    public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";
99  
100   /**
101    * Size of storefile/hfile 'blocks'.  Default is {@link #DEFAULT_BLOCKSIZE}.
102    * Use smaller block sizes for faster random-access at expense of larger
103    * indices (more memory consumption).
104    */
105   public static final String BLOCKSIZE = "BLOCKSIZE";
106 
107   public static final String LENGTH = "LENGTH";
108   public static final String TTL = "TTL";
109   public static final String BLOOMFILTER = "BLOOMFILTER";
110   public static final String FOREVER = "FOREVER";
111   public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
112   public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
113   public static final String MIN_VERSIONS = "MIN_VERSIONS";
114   public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
115   public static final String COMPRESS_TAGS = "COMPRESS_TAGS";
116 
117   public static final String ENCRYPTION = "ENCRYPTION";
118   public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";
119 
120   /**
121    * Default compression type.
122    */
123   public static final String DEFAULT_COMPRESSION =
124     Compression.Algorithm.NONE.getName();
125 
126   /**
127    * Default value of the flag that enables data block encoding on disk, as
128    * opposed to encoding in cache only. We encode blocks everywhere by default,
129    * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
130    */
131   public static final boolean DEFAULT_ENCODE_ON_DISK = true;
132 
133   /** Default data block encoding algorithm. */
134   public static final String DEFAULT_DATA_BLOCK_ENCODING =
135       DataBlockEncoding.NONE.toString();
136 
137   /**
138    * Default number of versions of a record to keep.
139    */
140   public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
141     "hbase.column.max.version", 1);
142 
143   /**
144    * Default is not to keep a minimum of versions.
145    */
146   public static final int DEFAULT_MIN_VERSIONS = 0;
147 
148   /*
149    * Cache here the HCD value.
150    * Question: its OK to cache since when we're reenable, we create a new HCD?
151    */
152   private volatile Integer blocksize = null;
153 
154   /**
155    * Default setting for whether to try and serve this column family from memory or not.
156    */
157   public static final boolean DEFAULT_IN_MEMORY = false;
158 
159   /**
160    * Default setting for preventing deleted from being collected immediately.
161    */
162   public static final boolean DEFAULT_KEEP_DELETED = false;
163 
164   /**
165    * Default setting for whether to use a block cache or not.
166    */
167   public static final boolean DEFAULT_BLOCKCACHE = true;
168 
169   /**
170    * Default setting for whether to cache data blocks on write if block caching
171    * is enabled.
172    */
173   public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
174 
175   /**
176    * Default setting for whether to cache data blocks in L1 tier.  Only makes sense if more than
177    * one tier in operations: i.e. if we have an L1 and a L2.  This will be the cases if we are
178    * using BucketCache.
179    */
180   public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false;
181 
182   /**
183    * Default setting for whether to cache index blocks on write if block
184    * caching is enabled.
185    */
186   public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;
187 
188   /**
189    * Default size of blocks in files stored to the filesytem (hfiles).
190    */
191   public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;
192 
193   /**
194    * Default setting for whether or not to use bloomfilters.
195    */
196   public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();
197 
198   /**
199    * Default setting for whether to cache bloom filter blocks on write if block
200    * caching is enabled.
201    */
202   public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;
203 
204   /**
205    * Default time to live of cell contents.
206    */
207   public static final int DEFAULT_TTL = HConstants.FOREVER;
208 
209   /**
210    * Default scope.
211    */
212   public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;
213 
214   /**
215    * Default setting for whether to evict cached blocks from the blockcache on
216    * close.
217    */
218   public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;
219 
220   /**
221    * Default compress tags along with any type of DataBlockEncoding.
222    */
223   public static final boolean DEFAULT_COMPRESS_TAGS = true;
224 
225   /*
226    * Default setting for whether to prefetch blocks into the blockcache on open.
227    */
228   public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;
229 
230   private final static Map<String, String> DEFAULT_VALUES
231     = new HashMap<String, String>();
232   private final static Set<Bytes> RESERVED_KEYWORDS
233       = new HashSet<Bytes>();
234 
235   static {
236       DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
237       DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
238       DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
239       DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
240       DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
241       DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
242       DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
243       DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
244       DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
245       DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
246       DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
247       DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
248       DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1));
249       DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
250       DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
251       DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
252       DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
253       for (String s : DEFAULT_VALUES.keySet()) {
254         RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
255       }
256     RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION)));
257     RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY)));
258   }
259 
260   private static final int UNINITIALIZED = -1;
261 
262   // Column family name
263   private byte [] name;
264 
265   // Column metadata
266   private final Map<Bytes, Bytes> values =
267       new HashMap<Bytes, Bytes>();
268 
269   /**
270    * A map which holds the configuration specific to the column family.
271    * The keys of the map have the same names as config keys and override the defaults with
272    * cf-specific settings. Example usage may be for compactions, etc.
273    */
274   private final Map<String, String> configuration = new HashMap<String, String>();
275 
276   /*
277    * Cache the max versions rather than calculate it every time.
278    */
279   private int cachedMaxVersions = UNINITIALIZED;
280 
281   /**
282    * Default constructor. Must be present for Writable.
283    * @deprecated Used by Writables and Writables are going away.
284    */
285   @Deprecated
286   // Make this private rather than remove after deprecation period elapses.  Its needed by pb
287   // deserializations.
288   public HColumnDescriptor() {
289     this.name = null;
290   }
291 
292   /**
293    * Construct a column descriptor specifying only the family name
294    * The other attributes are defaulted.
295    *
296    * @param familyName Column family name. Must be 'printable' -- digit or
297    * letter -- and may not contain a <code>:<code>
298    */
299   public HColumnDescriptor(final String familyName) {
300     this(Bytes.toBytes(familyName));
301   }
302 
303   /**
304    * Construct a column descriptor specifying only the family name
305    * The other attributes are defaulted.
306    *
307    * @param familyName Column family name. Must be 'printable' -- digit or
308    * letter -- and may not contain a <code>:<code>
309    */
310   public HColumnDescriptor(final byte [] familyName) {
311     this (familyName == null || familyName.length <= 0?
312       HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
313       DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
314       DEFAULT_TTL, DEFAULT_BLOOMFILTER);
315   }
316 
317   /**
318    * Constructor.
319    * Makes a deep copy of the supplied descriptor.
320    * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
321    * @param desc The descriptor.
322    */
323   public HColumnDescriptor(HColumnDescriptor desc) {
324     super();
325     this.name = desc.name.clone();
326     for (Map.Entry<Bytes, Bytes> e :
327         desc.values.entrySet()) {
328       this.values.put(e.getKey(), e.getValue());
329     }
330     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
331       this.configuration.put(e.getKey(), e.getValue());
332     }
333     setMaxVersions(desc.getMaxVersions());
334   }
335 
336   /**
337    * Constructor
338    * @param familyName Column family name. Must be 'printable' -- digit or
339    * letter -- and may not contain a <code>:<code>
340    * @param maxVersions Maximum number of versions to keep
341    * @param compression Compression type
342    * @param inMemory If true, column data should be kept in an HRegionServer's
343    * cache
344    * @param blockCacheEnabled If true, MapFile blocks should be cached
345    * @param timeToLive Time-to-live of cell contents, in seconds
346    * (use HConstants.FOREVER for unlimited TTL)
347    * @param bloomFilter Bloom filter type for this column
348    *
349    * @throws IllegalArgumentException if passed a family name that is made of
350    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
351    * a <code>:</code>
352    * @throws IllegalArgumentException if the number of versions is &lt;= 0
353    * @deprecated use {@link #HColumnDescriptor(String)} and setters
354    */
355   @Deprecated
356   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
357       final String compression, final boolean inMemory,
358       final boolean blockCacheEnabled,
359       final int timeToLive, final String bloomFilter) {
360     this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
361       DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
362   }
363 
364   /**
365    * Constructor
366    * @param familyName Column family name. Must be 'printable' -- digit or
367    * letter -- and may not contain a <code>:<code>
368    * @param maxVersions Maximum number of versions to keep
369    * @param compression Compression type
370    * @param inMemory If true, column data should be kept in an HRegionServer's
371    * cache
372    * @param blockCacheEnabled If true, MapFile blocks should be cached
373    * @param blocksize Block size to use when writing out storefiles.  Use
374    * smaller block sizes for faster random-access at expense of larger indices
375    * (more memory consumption).  Default is usually 64k.
376    * @param timeToLive Time-to-live of cell contents, in seconds
377    * (use HConstants.FOREVER for unlimited TTL)
378    * @param bloomFilter Bloom filter type for this column
379    * @param scope The scope tag for this column
380    *
381    * @throws IllegalArgumentException if passed a family name that is made of
382    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
383    * a <code>:</code>
384    * @throws IllegalArgumentException if the number of versions is &lt;= 0
385    * @deprecated use {@link #HColumnDescriptor(String)} and setters
386    */
387   @Deprecated
388   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
389       final String compression, final boolean inMemory,
390       final boolean blockCacheEnabled, final int blocksize,
391       final int timeToLive, final String bloomFilter, final int scope) {
392     this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED,
393         compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
394         inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
395         scope);
396   }
397 
398   /**
399    * Constructor
400    * @param familyName Column family name. Must be 'printable' -- digit or
401    * letter -- and may not contain a <code>:<code>
402    * @param minVersions Minimum number of versions to keep
403    * @param maxVersions Maximum number of versions to keep
404    * @param keepDeletedCells Whether to retain deleted cells until they expire
405    *        up to maxVersions versions.
406    * @param compression Compression type
407    * @param encodeOnDisk whether to use the specified data block encoding
408    *        on disk. If false, the encoding will be used in cache only.
409    * @param dataBlockEncoding data block encoding
410    * @param inMemory If true, column data should be kept in an HRegionServer's
411    * cache
412    * @param blockCacheEnabled If true, MapFile blocks should be cached
413    * @param blocksize Block size to use when writing out storefiles.  Use
414    * smaller blocksizes for faster random-access at expense of larger indices
415    * (more memory consumption).  Default is usually 64k.
416    * @param timeToLive Time-to-live of cell contents, in seconds
417    * (use HConstants.FOREVER for unlimited TTL)
418    * @param bloomFilter Bloom filter type for this column
419    * @param scope The scope tag for this column
420    *
421    * @throws IllegalArgumentException if passed a family name that is made of
422    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
423    * a <code>:</code>
424    * @throws IllegalArgumentException if the number of versions is &lt;= 0
425    * @deprecated use {@link #HColumnDescriptor(String)} and setters
426    */
427   @Deprecated
428   public HColumnDescriptor(final byte[] familyName, final int minVersions,
429       final int maxVersions, final boolean keepDeletedCells,
430       final String compression, final boolean encodeOnDisk,
431       final String dataBlockEncoding, final boolean inMemory,
432       final boolean blockCacheEnabled, final int blocksize,
433       final int timeToLive, final String bloomFilter, final int scope) {
434     isLegalFamilyName(familyName);
435     this.name = familyName;
436 
437     if (maxVersions <= 0) {
438       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
439       // Until there is support, consider 0 or < 0 -- a configuration error.
440       throw new IllegalArgumentException("Maximum versions must be positive");
441     }
442 
443     if (minVersions > 0) {
444       if (timeToLive == HConstants.FOREVER) {
445         throw new IllegalArgumentException("Minimum versions requires TTL.");
446       }
447       if (minVersions >= maxVersions) {
448         throw new IllegalArgumentException("Minimum versions must be < "
449             + "maximum versions.");
450       }
451     }
452 
453     setMaxVersions(maxVersions);
454     setMinVersions(minVersions);
455     setKeepDeletedCells(keepDeletedCells);
456     setInMemory(inMemory);
457     setBlockCacheEnabled(blockCacheEnabled);
458     setTimeToLive(timeToLive);
459     setCompressionType(Compression.Algorithm.
460       valueOf(compression.toUpperCase()));
461     setDataBlockEncoding(DataBlockEncoding.
462         valueOf(dataBlockEncoding.toUpperCase()));
463     setBloomFilterType(BloomType.
464       valueOf(bloomFilter.toUpperCase()));
465     setBlocksize(blocksize);
466     setScope(scope);
467   }
468 
469   /**
470    * @param b Family name.
471    * @return <code>b</code>
472    * @throws IllegalArgumentException If not null and not a legitimate family
473    * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
474    * <code>b</code> can be null when deserializing).  Cannot start with a '.'
475    * either. Also Family can not be an empty value or equal "recovered.edits".
476    */
477   public static byte [] isLegalFamilyName(final byte [] b) {
478     if (b == null) {
479       return b;
480     }
481     Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
482     if (b[0] == '.') {
483       throw new IllegalArgumentException("Family names cannot start with a " +
484         "period: " + Bytes.toString(b));
485     }
486     for (int i = 0; i < b.length; i++) {
487       if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
488         throw new IllegalArgumentException("Illegal character <" + b[i] +
489           ">. Family names cannot contain control characters or colons: " +
490           Bytes.toString(b));
491       }
492     }
493     byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
494     if (Bytes.equals(recoveredEdit, b)) {
495       throw new IllegalArgumentException("Family name cannot be: " +
496           HConstants.RECOVERED_EDITS_DIR);
497     }
498     return b;
499   }
500 
501   /**
502    * @return Name of this column family
503    */
504   public byte [] getName() {
505     return name;
506   }
507 
508   /**
509    * @return Name of this column family
510    */
511   public String getNameAsString() {
512     return Bytes.toString(this.name);
513   }
514 
515   /**
516    * @param key The key.
517    * @return The value.
518    */
519   public byte[] getValue(byte[] key) {
520     Bytes ibw = values.get(new Bytes(key));
521     if (ibw == null)
522       return null;
523     return ibw.get();
524   }
525 
526   /**
527    * @param key The key.
528    * @return The value as a string.
529    */
530   public String getValue(String key) {
531     byte[] value = getValue(Bytes.toBytes(key));
532     if (value == null)
533       return null;
534     return Bytes.toString(value);
535   }
536 
537   /**
538    * @return All values.
539    */
540   public Map<Bytes, Bytes> getValues() {
541     // shallow pointer copy
542     return Collections.unmodifiableMap(values);
543   }
544 
545   /**
546    * @param key The key.
547    * @param value The value.
548    * @return this (for chained invocation)
549    */
550   public HColumnDescriptor setValue(byte[] key, byte[] value) {
551     values.put(new Bytes(key),
552         new Bytes(value));
553     return this;
554   }
555 
556   /**
557    * @param key Key whose key and value we're to remove from HCD parameters.
558    */
559   public void remove(final byte [] key) {
560     values.remove(new Bytes(key));
561   }
562 
563   /**
564    * @param key The key.
565    * @param value The value.
566    * @return this (for chained invocation)
567    */
568   public HColumnDescriptor setValue(String key, String value) {
569     if (value == null) {
570       remove(Bytes.toBytes(key));
571     } else {
572       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
573     }
574     return this;
575   }
576 
577   /** @return compression type being used for the column family */
578   public Compression.Algorithm getCompression() {
579     String n = getValue(COMPRESSION);
580     if (n == null) {
581       return Compression.Algorithm.NONE;
582     }
583     return Compression.Algorithm.valueOf(n.toUpperCase());
584   }
585 
586   /** @return compression type being used for the column family for major
587       compression */
588   public Compression.Algorithm getCompactionCompression() {
589     String n = getValue(COMPRESSION_COMPACT);
590     if (n == null) {
591       return getCompression();
592     }
593     return Compression.Algorithm.valueOf(n.toUpperCase());
594   }
595 
596   /** @return maximum number of versions */
597   public int getMaxVersions() {
598     if (this.cachedMaxVersions == UNINITIALIZED) {
599       String v = getValue(HConstants.VERSIONS);
600       this.cachedMaxVersions = Integer.parseInt(v);
601     }
602     return this.cachedMaxVersions;
603   }
604 
605   /**
606    * @param maxVersions maximum number of versions
607    * @return this (for chained invocation)
608    */
609   public HColumnDescriptor setMaxVersions(int maxVersions) {
610     if (maxVersions <= 0) {
611       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
612       // Until there is support, consider 0 or < 0 -- a configuration error.
613       throw new IllegalArgumentException("Maximum versions must be positive");
614     }
615     if (maxVersions < this.getMinVersions()) {
616         throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
617             + " while minVersion is " + this.getMinVersions()
618             + ". Maximum versions must be >= minimum versions ");
619     }
620     setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
621     cachedMaxVersions = maxVersions;
622     return this;
623   }
624 
625   /**
626    * @return The storefile/hfile blocksize for this column family.
627    */
628   public synchronized int getBlocksize() {
629     if (this.blocksize == null) {
630       String value = getValue(BLOCKSIZE);
631       this.blocksize = (value != null)?
632         Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
633     }
634     return this.blocksize.intValue();
635 
636   }
637 
638   /**
639    * @param s Blocksize to use when writing out storefiles/hfiles on this
640    * column family.
641    * @return this (for chained invocation)
642    */
643   public HColumnDescriptor setBlocksize(int s) {
644     setValue(BLOCKSIZE, Integer.toString(s));
645     this.blocksize = null;
646     return this;
647   }
648 
649   /**
650    * @return Compression type setting.
651    */
652   public Compression.Algorithm getCompressionType() {
653     return getCompression();
654   }
655 
656   /**
657    * Compression types supported in hbase.
658    * LZO is not bundled as part of the hbase distribution.
659    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
660    * for how to enable it.
661    * @param type Compression type setting.
662    * @return this (for chained invocation)
663    */
664   public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
665     return setValue(COMPRESSION, type.getName().toUpperCase());
666   }
667 
668   /**
669    * @return data block encoding algorithm used on disk
670    * @deprecated See getDataBlockEncoding()
671    */
672   @Deprecated
673   public DataBlockEncoding getDataBlockEncodingOnDisk() {
674     return getDataBlockEncoding();
675   }
676 
677   /**
678    * This method does nothing now. Flag ENCODE_ON_DISK is not used
679    * any more. Data blocks have the same encoding in cache as on disk.
680    * @return this (for chained invocation)
681    * @deprecated This does nothing now.
682    */
683   @Deprecated
684   public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
685     return this;
686   }
687 
688   /**
689    * @return the data block encoding algorithm used in block cache and
690    *         optionally on disk
691    */
692   public DataBlockEncoding getDataBlockEncoding() {
693     String type = getValue(DATA_BLOCK_ENCODING);
694     if (type == null) {
695       type = DEFAULT_DATA_BLOCK_ENCODING;
696     }
697     return DataBlockEncoding.valueOf(type);
698   }
699 
700   /**
701    * Set data block encoding algorithm used in block cache.
702    * @param type What kind of data block encoding will be used.
703    * @return this (for chained invocation)
704    */
705   public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
706     String name;
707     if (type != null) {
708       name = type.toString();
709     } else {
710       name = DataBlockEncoding.NONE.toString();
711     }
712     return setValue(DATA_BLOCK_ENCODING, name);
713   }
714 
715   /**
716    * Set whether the tags should be compressed along with DataBlockEncoding. When no
717    * DataBlockEncoding is been used, this is having no effect.
718    *
719    * @param compressTags
720    * @return this (for chained invocation)
721    */
722   public HColumnDescriptor setCompressTags(boolean compressTags) {
723     return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
724   }
725 
726   /**
727    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
728    *         DataBlockEncoding is been used, this is having no effect.
729    */
730   public boolean shouldCompressTags() {
731     String compressTagsStr = getValue(COMPRESS_TAGS);
732     boolean compressTags = DEFAULT_COMPRESS_TAGS;
733     if (compressTagsStr != null) {
734       compressTags = Boolean.valueOf(compressTagsStr);
735     }
736     return compressTags;
737   }
738 
739   /**
740    * @return Compression type setting.
741    */
742   public Compression.Algorithm getCompactionCompressionType() {
743     return getCompactionCompression();
744   }
745 
746   /**
747    * Compression types supported in hbase.
748    * LZO is not bundled as part of the hbase distribution.
749    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
750    * for how to enable it.
751    * @param type Compression type setting.
752    * @return this (for chained invocation)
753    */
754   public HColumnDescriptor setCompactionCompressionType(
755       Compression.Algorithm type) {
756     return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase());
757   }
758 
759   /**
760    * @return True if we are to favor keeping all values for this column family in the
761    * HRegionServer cache.
762    */
763   public boolean isInMemory() {
764     String value = getValue(HConstants.IN_MEMORY);
765     if (value != null)
766       return Boolean.valueOf(value).booleanValue();
767     return DEFAULT_IN_MEMORY;
768   }
769 
770   /**
771    * @param inMemory True if we are to favor keeping all values for this column family in the
772    * HRegionServer cache
773    * @return this (for chained invocation)
774    */
775   public HColumnDescriptor setInMemory(boolean inMemory) {
776     return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
777   }
778 
779   public boolean getKeepDeletedCells() {
780     String value = getValue(KEEP_DELETED_CELLS);
781     if (value != null) {
782       return Boolean.valueOf(value).booleanValue();
783     }
784     return DEFAULT_KEEP_DELETED;
785   }
786 
787   /**
788    * @param keepDeletedCells True if deleted rows should not be collected
789    * immediately.
790    * @return this (for chained invocation)
791    */
792   public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
793     return setValue(KEEP_DELETED_CELLS, Boolean.toString(keepDeletedCells));
794   }
795 
796   /**
797    * @return Time-to-live of cell contents, in seconds.
798    */
799   public int getTimeToLive() {
800     String value = getValue(TTL);
801     return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL;
802   }
803 
804   /**
805    * @param timeToLive Time-to-live of cell contents, in seconds.
806    * @return this (for chained invocation)
807    */
808   public HColumnDescriptor setTimeToLive(int timeToLive) {
809     return setValue(TTL, Integer.toString(timeToLive));
810   }
811 
812   /**
813    * @return The minimum number of versions to keep.
814    */
815   public int getMinVersions() {
816     String value = getValue(MIN_VERSIONS);
817     return (value != null)? Integer.valueOf(value).intValue(): 0;
818   }
819 
820   /**
821    * @param minVersions The minimum number of versions to keep.
822    * (used when timeToLive is set)
823    * @return this (for chained invocation)
824    */
825   public HColumnDescriptor setMinVersions(int minVersions) {
826     return setValue(MIN_VERSIONS, Integer.toString(minVersions));
827   }
828 
829   /**
830    * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX
831    * and BLOOM type blocks).
832    */
833   public boolean isBlockCacheEnabled() {
834     String value = getValue(BLOCKCACHE);
835     if (value != null)
836       return Boolean.valueOf(value).booleanValue();
837     return DEFAULT_BLOCKCACHE;
838   }
839 
840   /**
841    * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache
842    * INDEX and BLOOM blocks; you cannot turn this off).
843    * @return this (for chained invocation)
844    */
845   public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
846     return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
847   }
848 
849   /**
850    * @return bloom filter type used for new StoreFiles in ColumnFamily
851    */
852   public BloomType getBloomFilterType() {
853     String n = getValue(BLOOMFILTER);
854     if (n == null) {
855       n = DEFAULT_BLOOMFILTER;
856     }
857     return BloomType.valueOf(n.toUpperCase());
858   }
859 
860   /**
861    * @param bt bloom filter type
862    * @return this (for chained invocation)
863    */
864   public HColumnDescriptor setBloomFilterType(final BloomType bt) {
865     return setValue(BLOOMFILTER, bt.toString());
866   }
867 
868    /**
869     * @return the scope tag
870     */
871   public int getScope() {
872     byte[] value = getValue(REPLICATION_SCOPE_BYTES);
873     if (value != null) {
874       return Integer.valueOf(Bytes.toString(value));
875     }
876     return DEFAULT_REPLICATION_SCOPE;
877   }
878 
879  /**
880   * @param scope the scope tag
881   * @return this (for chained invocation)
882   */
883   public HColumnDescriptor setScope(int scope) {
884     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
885   }
886 
  /**
   * @return true if we should cache data blocks on write
   */
  public boolean shouldCacheDataOnWrite() {
    // Falls back to DEFAULT_CACHE_DATA_ON_WRITE when the attribute is unset.
    return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
  }
893 
894   /**
895    * @param value true if we should cache data blocks on write
896    * @return this (for chained invocation)
897    */
898   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
899     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
900   }
901 
  /**
   * @return true if we should cache data blocks in the L1 cache (if block cache deploy
   * has more than one tier; e.g. we are using CombinedBlockCache).
   */
  public boolean shouldCacheDataInL1() {
    // Falls back to DEFAULT_CACHE_DATA_IN_L1 when the attribute is unset.
    return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
  }
909 
910   /**
911    * @param value true if we should cache data blocks in the L1 cache (if block cache deploy
912    * has more than one tier; e.g. we are using CombinedBlockCache).
913    * @return this (for chained invocation)
914    */
915   public HColumnDescriptor setCacheDataInL1(boolean value) {
916     return setValue(CACHE_DATA_IN_L1, Boolean.toString(value));
917   }
918 
919   private boolean setAndGetBoolean(final String key, final boolean defaultSetting) {
920     String value = getValue(key);
921     if (value != null) return Boolean.valueOf(value).booleanValue();
922     return defaultSetting;
923   }
924 
  /**
   * @return true if we should cache index blocks on write
   */
  public boolean shouldCacheIndexesOnWrite() {
    // Falls back to DEFAULT_CACHE_INDEX_ON_WRITE when the attribute is unset.
    return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
  }
931 
932   /**
933    * @param value true if we should cache index blocks on write
934    * @return this (for chained invocation)
935    */
936   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
937     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
938   }
939 
  /**
   * @return true if we should cache bloomfilter blocks on write
   */
  public boolean shouldCacheBloomsOnWrite() {
    // Falls back to DEFAULT_CACHE_BLOOMS_ON_WRITE when the attribute is unset.
    return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
  }
946 
947   /**
948    * @param value true if we should cache bloomfilter blocks on write
949    * @return this (for chained invocation)
950    */
951   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
952     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
953   }
954 
  /**
   * @return true if we should evict cached blocks from the blockcache on
   * close
   */
  public boolean shouldEvictBlocksOnClose() {
    // Falls back to DEFAULT_EVICT_BLOCKS_ON_CLOSE when the attribute is unset.
    return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
  }
962 
963   /**
964    * @param value true if we should evict cached blocks from the blockcache on
965    * close
966    * @return this (for chained invocation)
967    */
968   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
969     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
970   }
971 
  /**
   * @return true if we should prefetch blocks into the blockcache on open
   */
  public boolean shouldPrefetchBlocksOnOpen() {
    // Falls back to DEFAULT_PREFETCH_BLOCKS_ON_OPEN when the attribute is unset.
    return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
  }
978 
979   /**
980    * @param value true if we should prefetch blocks into the blockcache on open
981    * @return this (for chained invocation)
982    */
983   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
984     return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
985   }
986 
987   /**
988    * @see java.lang.Object#toString()
989    */
990   @Override
991   public String toString() {
992     StringBuilder s = new StringBuilder();
993 
994     s.append('{');
995     s.append(HConstants.NAME);
996     s.append(" => '");
997     s.append(Bytes.toString(name));
998     s.append("'");
999     s.append(getValues(true));
1000     s.append('}');
1001     return s.toString();
1002   }
1003 
1004   /**
1005    * @return Column family descriptor with only the customized attributes.
1006    */
1007   public String toStringCustomizedValues() {
1008     StringBuilder s = new StringBuilder();
1009     s.append('{');
1010     s.append(HConstants.NAME);
1011     s.append(" => '");
1012     s.append(Bytes.toString(name));
1013     s.append("'");
1014     s.append(getValues(false));
1015     s.append('}');
1016     return s.toString();
1017   }
1018 
1019   private StringBuilder getValues(boolean printDefaults) {
1020     StringBuilder s = new StringBuilder();
1021 
1022     boolean hasConfigKeys = false;
1023 
1024     // print all reserved keys first
1025     for (Bytes k : values.keySet()) {
1026       if (!RESERVED_KEYWORDS.contains(k)) {
1027         hasConfigKeys = true;
1028         continue;
1029       }
1030       String key = Bytes.toString(k.get());
1031       String value = Bytes.toStringBinary(values.get(k).get());
1032       if (printDefaults
1033           || !DEFAULT_VALUES.containsKey(key)
1034           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
1035         s.append(", ");
1036         s.append(key);
1037         s.append(" => ");
1038         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
1039       }
1040     }
1041 
1042     // print all non-reserved, advanced config keys as a separate subset
1043     if (hasConfigKeys) {
1044       s.append(", ");
1045       s.append(HConstants.METADATA).append(" => ");
1046       s.append('{');
1047       boolean printComma = false;
1048       for (Bytes k : values.keySet()) {
1049         if (RESERVED_KEYWORDS.contains(k)) {
1050           continue;
1051         }
1052         String key = Bytes.toString(k.get());
1053         String value = Bytes.toStringBinary(values.get(k).get());
1054         if (printComma) {
1055           s.append(", ");
1056         }
1057         printComma = true;
1058         s.append('\'').append(key).append('\'');
1059         s.append(" => ");
1060         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
1061       }
1062       s.append('}');
1063     }
1064 
1065     if (!configuration.isEmpty()) {
1066       s.append(", ");
1067       s.append(HConstants.CONFIGURATION).append(" => ");
1068       s.append('{');
1069       boolean printCommaForConfiguration = false;
1070       for (Map.Entry<String, String> e : configuration.entrySet()) {
1071         if (printCommaForConfiguration) s.append(", ");
1072         printCommaForConfiguration = true;
1073         s.append('\'').append(e.getKey()).append('\'');
1074         s.append(" => ");
1075         s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
1076       }
1077       s.append("}");
1078     }
1079     return s;
1080   }
1081 
1082   public static Unit getUnit(String key) {
1083     Unit unit;
1084       /* TTL for now, we can add more as we neeed */
1085     if (key.equals(HColumnDescriptor.TTL)) {
1086       unit = Unit.TIME_INTERVAL;
1087     } else {
1088       unit = Unit.NONE;
1089     }
1090     return unit;
1091   }
1092 
  /**
   * @return an unmodifiable view of the default attribute values for a column family
   */
  public static Map<String, String> getDefaultValues() {
    return Collections.unmodifiableMap(DEFAULT_VALUES);
  }
1096 
1097   /**
1098    * @see java.lang.Object#equals(java.lang.Object)
1099    */
1100   @Override
1101   public boolean equals(Object obj) {
1102     if (this == obj) {
1103       return true;
1104     }
1105     if (obj == null) {
1106       return false;
1107     }
1108     if (!(obj instanceof HColumnDescriptor)) {
1109       return false;
1110     }
1111     return compareTo((HColumnDescriptor)obj) == 0;
1112   }
1113 
1114   /**
1115    * @see java.lang.Object#hashCode()
1116    */
1117   @Override
1118   public int hashCode() {
1119     int result = Bytes.hashCode(this.name);
1120     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
1121     result ^= values.hashCode();
1122     result ^= configuration.hashCode();
1123     return result;
1124   }
1125 
1126   // Comparable
1127   @Override
1128   public int compareTo(HColumnDescriptor o) {
1129     int result = Bytes.compareTo(this.name, o.getName());
1130     if (result == 0) {
1131       // punt on comparison for ordering, just calculate difference
1132       result = this.values.hashCode() - o.values.hashCode();
1133       if (result < 0)
1134         result = -1;
1135       else if (result > 0)
1136         result = 1;
1137     }
1138     if (result == 0) {
1139       result = this.configuration.hashCode() - o.configuration.hashCode();
1140       if (result < 0)
1141         result = -1;
1142       else if (result > 0)
1143         result = 1;
1144     }
1145     return result;
1146   }
1147 
  /**
   * @return This instance serialized with pb with pb magic prefix
   * @see #parseFrom(byte[])
   */
  public byte [] toByteArray() {
    // The magic prefix lets readers distinguish pb content from legacy formats.
    return ProtobufUtil.prependPBMagic(convert().toByteArray());
  }
1155 
1156   /**
1157    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
1158    * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
1159    * @throws DeserializationException
1160    * @see #toByteArray()
1161    */
1162   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
1163     if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
1164     int pblen = ProtobufUtil.lengthOfPBMagic();
1165     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1166     ColumnFamilySchema cfs = null;
1167     try {
1168       cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1169     } catch (InvalidProtocolBufferException e) {
1170       throw new DeserializationException(e);
1171     }
1172     return convert(cfs);
1173   }
1174 
1175   /**
1176    * @param cfs
1177    * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
1178    */
1179   public static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
1180     // Use the empty constructor so we preserve the initial values set on construction for things
1181     // like maxVersion.  Otherwise, we pick up wrong values on deserialization which makes for
1182     // unrelated-looking test failures that are hard to trace back to here.
1183     HColumnDescriptor hcd = new HColumnDescriptor();
1184     hcd.name = cfs.getName().toByteArray();
1185     for (BytesBytesPair a: cfs.getAttributesList()) {
1186       hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1187     }
1188     for (NameStringPair a: cfs.getConfigurationList()) {
1189       hcd.setConfiguration(a.getName(), a.getValue());
1190     }
1191     return hcd;
1192   }
1193 
1194   /**
1195    * @return Convert this instance to a the pb column family type
1196    */
1197   public ColumnFamilySchema convert() {
1198     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1199     builder.setName(ByteStringer.wrap(getName()));
1200     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1201       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1202       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1203       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1204       builder.addAttributes(aBuilder.build());
1205     }
1206     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1207       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1208       aBuilder.setName(e.getKey());
1209       aBuilder.setValue(e.getValue());
1210       builder.addConfiguration(aBuilder.build());
1211     }
1212     return builder.build();
1213   }
1214 
  /**
   * Getter for accessing the configuration value by key.
   * @param key configuration key
   * @return the configured value, or null if the key is not present
   */
  public String getConfigurationValue(String key) {
    return configuration.get(key);
  }
1221 
  /**
   * Getter for fetching an unmodifiable {@link #configuration} map.
   * @return read-only view backed by the live map (not a snapshot)
   */
  public Map<String, String> getConfiguration() {
    // shallow pointer copy
    return Collections.unmodifiableMap(configuration);
  }
1229 
1230   /**
1231    * Setter for storing a configuration setting in {@link #configuration} map.
1232    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1233    * @param value String value. If null, removes the configuration.
1234    */
1235   public HColumnDescriptor setConfiguration(String key, String value) {
1236     if (value == null) {
1237       removeConfiguration(key);
1238     } else {
1239       configuration.put(key, value);
1240     }
1241     return this;
1242   }
1243 
  /**
   * Remove a configuration setting represented by the key from the {@link #configuration} map.
   * @param key the configuration key to remove; no-op if absent
   */
  public void removeConfiguration(final String key) {
    configuration.remove(key);
  }
1250 
  /**
   * Return the encryption algorithm in use by this family
   * @return the configured cipher name, or null if encryption is not configured
   */
  public String getEncryptionType() {
    return getValue(ENCRYPTION);
  }
1257 
1258   /**
1259    * Set the encryption algorithm for use with this family
1260    * @param algorithm
1261    */
1262   public HColumnDescriptor setEncryptionType(String algorithm) {
1263     setValue(ENCRYPTION, algorithm);
1264     return this;
1265   }
1266 
  /** Return the raw crypto key attribute for the family, or null if not set  */
  public byte[] getEncryptionKey() {
    // Stored as a raw byte attribute under the ENCRYPTION_KEY key.
    return getValue(Bytes.toBytes(ENCRYPTION_KEY));
  }
1271 
1272   /** Set the raw crypto key attribute for the family */
1273   public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
1274     setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
1275     return this;
1276   }
1277 }