View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PrettyPrinter;
import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;

import com.google.common.base.Preconditions;
import com.google.protobuf.InvalidProtocolBufferException;
44  
45  /**
46   * An HColumnDescriptor contains information about a column family such as the
47   * number of versions, compression settings, etc.
48   *
49   * It is used as input when creating a table or adding a column.
50   */
51  @InterfaceAudience.Public
52  @InterfaceStability.Evolving
53  public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
  // For future backward compatibility

  // Serialization version history (Writable era):
  // Version  3 was when column names become byte arrays and when we picked up
  // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
  // Version  5 was when bloom filter descriptors were removed.
  // Version  6 adds metadata as a map where keys and values are byte[].
  // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
  // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
  // Version  9 -- add data block encoding
  // Version 10 -- change metadata to standard type.
  // Version 11 -- add column family level configuration.
  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;

  // These constants are used as FileInfo keys
  public static final String COMPRESSION = "COMPRESSION";
  // Compression to use for compacted files; falls back to COMPRESSION when unset.
  public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
  public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
      "ENCODE_ON_DISK";
  public static final String DATA_BLOCK_ENCODING =
      "DATA_BLOCK_ENCODING";
  /**
   * Key for the BLOCKCACHE attribute.
   * A more exact name would be CACHE_DATA_ON_READ because this flag sets whether or not we
   * cache DATA blocks.  We always cache INDEX and BLOOM blocks; caching these blocks cannot be
   * disabled.
   */
  public static final String BLOCKCACHE = "BLOCKCACHE";
  public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
  public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
  public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
  public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
  /**
   * Key for cache data into L1 if cache is set up with more than one tier.
   * To set in the shell, do something like this:
   * <code>hbase(main):003:0> create 't',
   *    {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}}</code>
   */
  public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";

  /**
   * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
   * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
   * family will be loaded into the cache as soon as the file is opened. These
   * loads will not count as cache misses.
   */
  public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";

  /**
   * Size of storefile/hfile 'blocks'.  Default is {@link #DEFAULT_BLOCKSIZE}.
   * Use smaller block sizes for faster random-access at expense of larger
   * indices (more memory consumption).
   */
  public static final String BLOCKSIZE = "BLOCKSIZE";

  public static final String LENGTH = "LENGTH";
  // Time-to-live of cell contents, in seconds.
  public static final String TTL = "TTL";
  public static final String BLOOMFILTER = "BLOOMFILTER";
  public static final String FOREVER = "FOREVER";
  // Replication scope attribute and its pre-computed byte[] form.
  public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
  public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
  public static final String MIN_VERSIONS = "MIN_VERSIONS";
  public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
  public static final String COMPRESS_TAGS = "COMPRESS_TAGS";

  // Encryption algorithm and wrapped key material for this family.
  public static final String ENCRYPTION = "ENCRYPTION";
  public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";

  /**
   * Default compression type.
   */
  public static final String DEFAULT_COMPRESSION =
    Compression.Algorithm.NONE.getName();

  /**
   * Default value of the flag that enables data block encoding on disk, as
   * opposed to encoding in cache only. We encode blocks everywhere by default,
   * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
   */
  public static final boolean DEFAULT_ENCODE_ON_DISK = true;

  /** Default data block encoding algorithm. */
  public static final String DEFAULT_DATA_BLOCK_ENCODING =
      DataBlockEncoding.NONE.toString();

  /**
   * Default number of versions of a record to keep.
   * Read once from configuration at class-load time ("hbase.column.max.version").
   */
  public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
    "hbase.column.max.version", 1);

  /**
   * Default is not to keep a minimum of versions.
   */
  public static final int DEFAULT_MIN_VERSIONS = 0;

  /*
   * Cache here the HCD value.
   * Question: its OK to cache since when we're reenable, we create a new HCD?
   * NOTE(review): this is an instance field oddly placed among the static
   * defaults; lazily populated by getBlocksize().
   */
  private volatile Integer blocksize = null;

  /**
   * Default setting for whether to try and serve this column family from memory or not.
   */
  public static final boolean DEFAULT_IN_MEMORY = false;

  /**
   * Default setting for preventing deleted from being collected immediately.
   */
  public static final KeepDeletedCells DEFAULT_KEEP_DELETED = KeepDeletedCells.FALSE;

  /**
   * Default setting for whether to use a block cache or not.
   */
  public static final boolean DEFAULT_BLOCKCACHE = true;

  /**
   * Default setting for whether to cache data blocks on write if block caching
   * is enabled.
   */
  public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;

  /**
   * Default setting for whether to cache data blocks in L1 tier.  Only makes sense if more than
   * one tier in operations: i.e. if we have an L1 and a L2.  This will be the cases if we are
   * using BucketCache.
   */
  public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false;

  /**
   * Default setting for whether to cache index blocks on write if block
   * caching is enabled.
   */
  public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;

  /**
   * Default size of blocks in files stored to the filesytem (hfiles).
   */
  public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;

  /**
   * Default setting for whether or not to use bloomfilters.
   */
  public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();

  /**
   * Default setting for whether to cache bloom filter blocks on write if block
   * caching is enabled.
   */
  public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;

  /**
   * Default time to live of cell contents.
   */
  public static final int DEFAULT_TTL = HConstants.FOREVER;

  /**
   * Default scope.
   */
  public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;

  /**
   * Default setting for whether to evict cached blocks from the blockcache on
   * close.
   */
  public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;

  /**
   * Default compress tags along with any type of DataBlockEncoding.
   */
  public static final boolean DEFAULT_COMPRESS_TAGS = true;

  /*
   * Default setting for whether to prefetch blocks into the blockcache on open.
   */
  public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;

  // Attribute-name -> default-value map, populated in the static initializer below.
  private final static Map<String, String> DEFAULT_VALUES
    = new HashMap<String, String>();
  // Attribute keys that user code may not use for arbitrary values.
  private final static Set<Bytes> RESERVED_KEYWORDS
      = new HashSet<Bytes>();
235 
  static {
    // Record the default for every standard attribute so callers can tell
    // whether a stored value differs from the default.
    DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
    DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
    DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
    DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
    DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
    DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
    DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
    DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
    DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
    DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
    DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
    DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
    DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1));
    DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
    DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
    DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
    DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
    // Every attribute with a default is a reserved key; the encryption keys
    // are also reserved even though they carry no default value.
    for (String s : DEFAULT_VALUES.keySet()) {
      RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
    }
    RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION)));
    RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY)));
  }
260 
  // Sentinel meaning cachedMaxVersions has not been computed yet.
  private static final int UNINITIALIZED = -1;

  // Column family name
  private byte [] name;

  // Column metadata: attribute key -> value, both as byte wrappers.
  private final Map<Bytes, Bytes> values =
      new HashMap<Bytes, Bytes>();

  /**
   * A map which holds the configuration specific to the column family.
   * The keys of the map have the same names as config keys and override the defaults with
   * cf-specific settings. Example usage may be for compactions, etc.
   */
  private final Map<String, String> configuration = new HashMap<String, String>();

  /*
   * Cache the max versions rather than calculate it every time.
   */
  private int cachedMaxVersions = UNINITIALIZED;
281 
  /**
   * Default constructor. Must be present for Writable.
   * @deprecated Used by Writables and Writables are going away.
   */
  @Deprecated
  // Make this private rather than remove after deprecation period elapses.  Its needed by pb
  // deserializations.
  public HColumnDescriptor() {
    this.name = null;
  }

  /**
   * Construct a column descriptor specifying only the family name
   * The other attributes are defaulted.
   *
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   */
  public HColumnDescriptor(final String familyName) {
    this(Bytes.toBytes(familyName));
  }

  /**
   * Construct a column descriptor specifying only the family name
   * The other attributes are defaulted.
   *
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>. A null or empty name is
   * replaced by {@link HConstants#EMPTY_BYTE_ARRAY}.
   */
  public HColumnDescriptor(final byte [] familyName) {
    this (familyName == null || familyName.length <= 0?
      HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
      DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
      DEFAULT_TTL, DEFAULT_BLOOMFILTER);
  }
317 
318   /**
319    * Constructor.
320    * Makes a deep copy of the supplied descriptor.
321    * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
322    * @param desc The descriptor.
323    */
324   public HColumnDescriptor(HColumnDescriptor desc) {
325     super();
326     this.name = desc.name.clone();
327     for (Map.Entry<Bytes, Bytes> e :
328         desc.values.entrySet()) {
329       this.values.put(e.getKey(), e.getValue());
330     }
331     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
332       this.configuration.put(e.getKey(), e.getValue());
333     }
334     setMaxVersions(desc.getMaxVersions());
335   }
336 
  /**
   * Constructor
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   * @param maxVersions Maximum number of versions to keep
   * @param compression Compression type
   * @param inMemory If true, column data should be kept in an HRegionServer's
   * cache
   * @param blockCacheEnabled If true, MapFile blocks should be cached
   * @param timeToLive Time-to-live of cell contents, in seconds
   * (use HConstants.FOREVER for unlimited TTL)
   * @param bloomFilter Bloom filter type for this column
   *
   * @throws IllegalArgumentException if passed a family name that is made of
   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
   * a <code>:</code>
   * @throws IllegalArgumentException if the number of versions is &lt;= 0
   * @deprecated use {@link #HColumnDescriptor(String)} and setters
   */
  @Deprecated
  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
      final String compression, final boolean inMemory,
      final boolean blockCacheEnabled,
      final int timeToLive, final String bloomFilter) {
    // Delegate with default blocksize and replication scope.
    this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
      DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
  }

  /**
   * Constructor
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   * @param maxVersions Maximum number of versions to keep
   * @param compression Compression type
   * @param inMemory If true, column data should be kept in an HRegionServer's
   * cache
   * @param blockCacheEnabled If true, MapFile blocks should be cached
   * @param blocksize Block size to use when writing out storefiles.  Use
   * smaller block sizes for faster random-access at expense of larger indices
   * (more memory consumption).  Default is usually 64k.
   * @param timeToLive Time-to-live of cell contents, in seconds
   * (use HConstants.FOREVER for unlimited TTL)
   * @param bloomFilter Bloom filter type for this column
   * @param scope The scope tag for this column
   *
   * @throws IllegalArgumentException if passed a family name that is made of
   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
   * a <code>:</code>
   * @throws IllegalArgumentException if the number of versions is &lt;= 0
   * @deprecated use {@link #HColumnDescriptor(String)} and setters
   */
  @Deprecated
  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
      final String compression, final boolean inMemory,
      final boolean blockCacheEnabled, final int blocksize,
      final int timeToLive, final String bloomFilter, final int scope) {
    // Delegate with defaults for min versions, keep-deleted-cells,
    // encode-on-disk and data block encoding.
    this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED,
        compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
        inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
        scope);
  }
398 
399   /**
400    * Constructor
401    * @param familyName Column family name. Must be 'printable' -- digit or
402    * letter -- and may not contain a <code>:<code>
403    * @param minVersions Minimum number of versions to keep
404    * @param maxVersions Maximum number of versions to keep
405    * @param keepDeletedCells Whether to retain deleted cells until they expire
406    *        up to maxVersions versions.
407    * @param compression Compression type
408    * @param encodeOnDisk whether to use the specified data block encoding
409    *        on disk. If false, the encoding will be used in cache only.
410    * @param dataBlockEncoding data block encoding
411    * @param inMemory If true, column data should be kept in an HRegionServer's
412    * cache
413    * @param blockCacheEnabled If true, MapFile blocks should be cached
414    * @param blocksize Block size to use when writing out storefiles.  Use
415    * smaller blocksizes for faster random-access at expense of larger indices
416    * (more memory consumption).  Default is usually 64k.
417    * @param timeToLive Time-to-live of cell contents, in seconds
418    * (use HConstants.FOREVER for unlimited TTL)
419    * @param bloomFilter Bloom filter type for this column
420    * @param scope The scope tag for this column
421    *
422    * @throws IllegalArgumentException if passed a family name that is made of
423    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
424    * a <code>:</code>
425    * @throws IllegalArgumentException if the number of versions is &lt;= 0
426    * @deprecated use {@link #HColumnDescriptor(String)} and setters
427    */
428   @Deprecated
429   public HColumnDescriptor(final byte[] familyName, final int minVersions,
430       final int maxVersions, final KeepDeletedCells keepDeletedCells,
431       final String compression, final boolean encodeOnDisk,
432       final String dataBlockEncoding, final boolean inMemory,
433       final boolean blockCacheEnabled, final int blocksize,
434       final int timeToLive, final String bloomFilter, final int scope) {
435     isLegalFamilyName(familyName);
436     this.name = familyName;
437 
438     if (maxVersions <= 0) {
439       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
440       // Until there is support, consider 0 or < 0 -- a configuration error.
441       throw new IllegalArgumentException("Maximum versions must be positive");
442     }
443 
444     if (minVersions > 0) {
445       if (timeToLive == HConstants.FOREVER) {
446         throw new IllegalArgumentException("Minimum versions requires TTL.");
447       }
448       if (minVersions >= maxVersions) {
449         throw new IllegalArgumentException("Minimum versions must be < "
450             + "maximum versions.");
451       }
452     }
453 
454     setMaxVersions(maxVersions);
455     setMinVersions(minVersions);
456     setKeepDeletedCells(keepDeletedCells);
457     setInMemory(inMemory);
458     setBlockCacheEnabled(blockCacheEnabled);
459     setTimeToLive(timeToLive);
460     setCompressionType(Compression.Algorithm.
461       valueOf(compression.toUpperCase()));
462     setDataBlockEncoding(DataBlockEncoding.
463         valueOf(dataBlockEncoding.toUpperCase()));
464     setBloomFilterType(BloomType.
465       valueOf(bloomFilter.toUpperCase()));
466     setBlocksize(blocksize);
467     setScope(scope);
468   }
469 
470   /**
471    * @param b Family name.
472    * @return <code>b</code>
473    * @throws IllegalArgumentException If not null and not a legitimate family
474    * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
475    * <code>b</code> can be null when deserializing).  Cannot start with a '.'
476    * either. Also Family can not be an empty value or equal "recovered.edits".
477    */
478   public static byte [] isLegalFamilyName(final byte [] b) {
479     if (b == null) {
480       return b;
481     }
482     Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
483     if (b[0] == '.') {
484       throw new IllegalArgumentException("Family names cannot start with a " +
485         "period: " + Bytes.toString(b));
486     }
487     for (int i = 0; i < b.length; i++) {
488       if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
489         throw new IllegalArgumentException("Illegal character <" + b[i] +
490           ">. Family names cannot contain control characters or colons: " +
491           Bytes.toString(b));
492       }
493     }
494     byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
495     if (Bytes.equals(recoveredEdit, b)) {
496       throw new IllegalArgumentException("Family name cannot be: " +
497           HConstants.RECOVERED_EDITS_DIR);
498     }
499     return b;
500   }
501 
  /**
   * @return Name of this column family.
   * NOTE(review): returns the internal array, not a copy — callers must not
   * mutate it.
   */
  public byte [] getName() {
    return name;
  }

  /**
   * @return Name of this column family, decoded as a String.
   */
  public String getNameAsString() {
    return Bytes.toString(this.name);
  }
515 
516   /**
517    * @param key The key.
518    * @return The value.
519    */
520   public byte[] getValue(byte[] key) {
521     Bytes ibw = values.get(new Bytes(key));
522     if (ibw == null)
523       return null;
524     return ibw.get();
525   }
526 
527   /**
528    * @param key The key.
529    * @return The value as a string.
530    */
531   public String getValue(String key) {
532     byte[] value = getValue(Bytes.toBytes(key));
533     if (value == null)
534       return null;
535     return Bytes.toString(value);
536   }
537 
  /**
   * @return All values, as an unmodifiable view over the live map
   * (entries are shared, not copied).
   */
  public Map<Bytes, Bytes> getValues() {
    // shallow pointer copy
    return Collections.unmodifiableMap(values);
  }
545 
546   /**
547    * @param key The key.
548    * @param value The value.
549    * @return this (for chained invocation)
550    */
551   public HColumnDescriptor setValue(byte[] key, byte[] value) {
552     values.put(new Bytes(key),
553         new Bytes(value));
554     return this;
555   }
556 
  /**
   * Removes an attribute entirely.
   * @param key Key whose key and value we're to remove from HCD parameters.
   */
  public void remove(final byte [] key) {
    values.remove(new Bytes(key));
  }
563 
564   /**
565    * @param key The key.
566    * @param value The value.
567    * @return this (for chained invocation)
568    */
569   public HColumnDescriptor setValue(String key, String value) {
570     if (value == null) {
571       remove(Bytes.toBytes(key));
572     } else {
573       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
574     }
575     return this;
576   }
577 
578   /** @return compression type being used for the column family */
579   public Compression.Algorithm getCompression() {
580     String n = getValue(COMPRESSION);
581     if (n == null) {
582       return Compression.Algorithm.NONE;
583     }
584     return Compression.Algorithm.valueOf(n.toUpperCase());
585   }
586 
587   /** @return compression type being used for the column family for major
588       compression */
589   public Compression.Algorithm getCompactionCompression() {
590     String n = getValue(COMPRESSION_COMPACT);
591     if (n == null) {
592       return getCompression();
593     }
594     return Compression.Algorithm.valueOf(n.toUpperCase());
595   }
596 
  /**
   * @return maximum number of versions, cached after the first lookup.
   * NOTE(review): throws NumberFormatException (NPE-like) if the VERSIONS
   * attribute was never set; constructors always set it — confirm no other
   * code path creates a descriptor without it.
   */
  public int getMaxVersions() {
    if (this.cachedMaxVersions == UNINITIALIZED) {
      String v = getValue(HConstants.VERSIONS);
      this.cachedMaxVersions = Integer.parseInt(v);
    }
    return this.cachedMaxVersions;
  }
605 
606   /**
607    * @param maxVersions maximum number of versions
608    * @return this (for chained invocation)
609    */
610   public HColumnDescriptor setMaxVersions(int maxVersions) {
611     if (maxVersions <= 0) {
612       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
613       // Until there is support, consider 0 or < 0 -- a configuration error.
614       throw new IllegalArgumentException("Maximum versions must be positive");
615     }
616     if (maxVersions < this.getMinVersions()) {
617         throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
618             + " while minVersion is " + this.getMinVersions()
619             + ". Maximum versions must be >= minimum versions ");
620     }
621     setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
622     cachedMaxVersions = maxVersions;
623     return this;
624   }
625 
626   /**
627    * @return The storefile/hfile blocksize for this column family.
628    */
629   public synchronized int getBlocksize() {
630     if (this.blocksize == null) {
631       String value = getValue(BLOCKSIZE);
632       this.blocksize = (value != null)?
633         Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
634     }
635     return this.blocksize.intValue();
636 
637   }
638 
  /**
   * @param s Blocksize to use when writing out storefiles/hfiles on this
   * column family.
   * @return this (for chained invocation)
   */
  public HColumnDescriptor setBlocksize(int s) {
    setValue(BLOCKSIZE, Integer.toString(s));
    // Invalidate the cached value; getBlocksize() re-parses lazily.
    this.blocksize = null;
    return this;
  }
649 
  /**
   * @return Compression type setting. Alias for {@link #getCompression()}.
   */
  public Compression.Algorithm getCompressionType() {
    return getCompression();
  }
656 
657   /**
658    * Compression types supported in hbase.
659    * LZO is not bundled as part of the hbase distribution.
660    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
661    * for how to enable it.
662    * @param type Compression type setting.
663    * @return this (for chained invocation)
664    */
665   public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
666     return setValue(COMPRESSION, type.getName().toUpperCase());
667   }
668 
  /**
   * @return data block encoding algorithm used on disk
   * @deprecated See getDataBlockEncoding()
   */
  @Deprecated
  public DataBlockEncoding getDataBlockEncodingOnDisk() {
    return getDataBlockEncoding();
  }

  /**
   * This method does nothing now. Flag ENCODE_ON_DISK is not used
   * any more. Data blocks have the same encoding in cache as on disk.
   * @return this (for chained invocation)
   * @deprecated This does nothing now.
   */
  @Deprecated
  public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
    // Intentional no-op; kept only for source compatibility.
    return this;
  }
688 
689   /**
690    * @return the data block encoding algorithm used in block cache and
691    *         optionally on disk
692    */
693   public DataBlockEncoding getDataBlockEncoding() {
694     String type = getValue(DATA_BLOCK_ENCODING);
695     if (type == null) {
696       type = DEFAULT_DATA_BLOCK_ENCODING;
697     }
698     return DataBlockEncoding.valueOf(type);
699   }
700 
701   /**
702    * Set data block encoding algorithm used in block cache.
703    * @param type What kind of data block encoding will be used.
704    * @return this (for chained invocation)
705    */
706   public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
707     String name;
708     if (type != null) {
709       name = type.toString();
710     } else {
711       name = DataBlockEncoding.NONE.toString();
712     }
713     return setValue(DATA_BLOCK_ENCODING, name);
714   }
715 
716   /**
717    * Set whether the tags should be compressed along with DataBlockEncoding. When no
718    * DataBlockEncoding is been used, this is having no effect.
719    *
720    * @param compressTags
721    * @return this (for chained invocation)
722    */
723   public HColumnDescriptor setCompressTags(boolean compressTags) {
724     return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
725   }
726 
727   /**
728    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
729    *         DataBlockEncoding is been used, this is having no effect.
730    * @deprecated Use {@link #isCompressTags()} instead
731    */
732   @Deprecated
733   public boolean shouldCompressTags() {
734     String compressTagsStr = getValue(COMPRESS_TAGS);
735     boolean compressTags = DEFAULT_COMPRESS_TAGS;
736     if (compressTagsStr != null) {
737       compressTags = Boolean.valueOf(compressTagsStr);
738     }
739     return compressTags;
740   }
741 
742   /**
743    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
744    *         DataBlockEncoding is been used, this is having no effect.
745    */
746   public boolean isCompressTags() {
747     String compressTagsStr = getValue(COMPRESS_TAGS);
748     boolean compressTags = DEFAULT_COMPRESS_TAGS;
749     if (compressTagsStr != null) {
750       compressTags = Boolean.valueOf(compressTagsStr);
751     }
752     return compressTags;
753   }
754 
  /**
   * @return Compression type setting for compactions.
   * Alias for {@link #getCompactionCompression()}.
   */
  public Compression.Algorithm getCompactionCompressionType() {
    return getCompactionCompression();
  }
761 
762   /**
763    * Compression types supported in hbase.
764    * LZO is not bundled as part of the hbase distribution.
765    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
766    * for how to enable it.
767    * @param type Compression type setting.
768    * @return this (for chained invocation)
769    */
770   public HColumnDescriptor setCompactionCompressionType(
771       Compression.Algorithm type) {
772     return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase());
773   }
774 
775   /**
776    * @return True if we are to favor keeping all values for this column family in the
777    * HRegionServer cache.
778    */
779   public boolean isInMemory() {
780     String value = getValue(HConstants.IN_MEMORY);
781     if (value != null)
782       return Boolean.valueOf(value).booleanValue();
783     return DEFAULT_IN_MEMORY;
784   }
785 
786   /**
787    * @param inMemory True if we are to favor keeping all values for this column family in the
788    * HRegionServer cache
789    * @return this (for chained invocation)
790    */
791   public HColumnDescriptor setInMemory(boolean inMemory) {
792     return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
793   }
794 
795   public KeepDeletedCells getKeepDeletedCells() {
796     String value = getValue(KEEP_DELETED_CELLS);
797     if (value != null) {
798       // toUpperCase for backwards compatibility
799       return KeepDeletedCells.valueOf(value.toUpperCase());
800     }
801     return DEFAULT_KEEP_DELETED;
802   }
803 
804   /**
805    * @param keepDeletedCells True if deleted rows should not be collected
806    * immediately.
807    * @return this (for chained invocation)
808    * @deprecated use {@link #setKeepDeletedCells(KeepDeletedCells)}
809    */
810   @Deprecated
811   public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
812     return setValue(KEEP_DELETED_CELLS, (keepDeletedCells ? KeepDeletedCells.TRUE
813         : KeepDeletedCells.FALSE).toString());
814   }
815 
816   /**
817    * @param keepDeletedCells True if deleted rows should not be collected
818    * immediately.
819    * @return this (for chained invocation)
820    */
821   public HColumnDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) {
822     return setValue(KEEP_DELETED_CELLS, keepDeletedCells.toString());
823   }
824 
825   /**
826    * @return Time-to-live of cell contents, in seconds.
827    */
828   public int getTimeToLive() {
829     String value = getValue(TTL);
830     return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL;
831   }
832 
833   /**
834    * @param timeToLive Time-to-live of cell contents, in seconds.
835    * @return this (for chained invocation)
836    */
837   public HColumnDescriptor setTimeToLive(int timeToLive) {
838     return setValue(TTL, Integer.toString(timeToLive));
839   }
840 
841   /**
842    * @return The minimum number of versions to keep.
843    */
844   public int getMinVersions() {
845     String value = getValue(MIN_VERSIONS);
846     return (value != null)? Integer.valueOf(value).intValue(): 0;
847   }
848 
849   /**
850    * @param minVersions The minimum number of versions to keep.
851    * (used when timeToLive is set)
852    * @return this (for chained invocation)
853    */
854   public HColumnDescriptor setMinVersions(int minVersions) {
855     return setValue(MIN_VERSIONS, Integer.toString(minVersions));
856   }
857 
858   /**
859    * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX
860    * and BLOOM type blocks).
861    */
862   public boolean isBlockCacheEnabled() {
863     String value = getValue(BLOCKCACHE);
864     if (value != null)
865       return Boolean.valueOf(value).booleanValue();
866     return DEFAULT_BLOCKCACHE;
867   }
868 
869   /**
870    * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache
871    * INDEX and BLOOM blocks; you cannot turn this off).
872    * @return this (for chained invocation)
873    */
874   public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
875     return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
876   }
877 
878   /**
879    * @return bloom filter type used for new StoreFiles in ColumnFamily
880    */
881   public BloomType getBloomFilterType() {
882     String n = getValue(BLOOMFILTER);
883     if (n == null) {
884       n = DEFAULT_BLOOMFILTER;
885     }
886     return BloomType.valueOf(n.toUpperCase());
887   }
888 
889   /**
890    * @param bt bloom filter type
891    * @return this (for chained invocation)
892    */
893   public HColumnDescriptor setBloomFilterType(final BloomType bt) {
894     return setValue(BLOOMFILTER, bt.toString());
895   }
896 
897    /**
898     * @return the scope tag
899     */
900   public int getScope() {
901     byte[] value = getValue(REPLICATION_SCOPE_BYTES);
902     if (value != null) {
903       return Integer.valueOf(Bytes.toString(value));
904     }
905     return DEFAULT_REPLICATION_SCOPE;
906   }
907 
908  /**
909   * @param scope the scope tag
910   * @return this (for chained invocation)
911   */
912   public HColumnDescriptor setScope(int scope) {
913     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
914   }
915 
916   /**
917    * @return true if we should cache data blocks on write
918    * @deprecated Use {@link #isCacheDataOnWrite()} instead
919    */
920   @Deprecated
921   public boolean shouldCacheDataOnWrite() {
922     return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
923   }
924 
925   /**
926    * @return true if we should cache data blocks on write
927    */
928   public boolean isCacheDataOnWrite() {
929     return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
930   }
931 
932   /**
933    * @param value true if we should cache data blocks on write
934    * @return this (for chained invocation)
935    */
936   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
937     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
938   }
939 
940   /**
941    * @return true if we should cache data blocks in the L1 cache (if block cache deploy
942    * has more than one tier; e.g. we are using CombinedBlockCache).
943    * @deprecated Use {@link #isCacheDataInL1()} instead
944    */
945   @Deprecated
946   public boolean shouldCacheDataInL1() {
947     return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
948   }
949 
950   /**
951    * @return true if we should cache data blocks in the L1 cache (if block cache deploy has more
952    *         than one tier; e.g. we are using CombinedBlockCache).
953    */
954   public boolean isCacheDataInL1() {
955     return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
956   }
957 
958   /**
959    * @param value true if we should cache data blocks in the L1 cache (if block cache deploy
960    * has more than one tier; e.g. we are using CombinedBlockCache).
961    * @return this (for chained invocation)
962    */
963   public HColumnDescriptor setCacheDataInL1(boolean value) {
964     return setValue(CACHE_DATA_IN_L1, Boolean.toString(value));
965   }
966 
967   private boolean setAndGetBoolean(final String key, final boolean defaultSetting) {
968     String value = getValue(key);
969     if (value != null) return Boolean.valueOf(value).booleanValue();
970     return defaultSetting;
971   }
972 
973   /**
974    * @return true if we should cache index blocks on write
975    * @deprecated Use {@link #isCacheIndexesOnWrite()} instead
976    */
977   @Deprecated
978   public boolean shouldCacheIndexesOnWrite() {
979     return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
980   }
981 
982   /**
983    * @return true if we should cache index blocks on write
984    */
985   public boolean isCacheIndexesOnWrite() {
986     return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
987   }
988 
989   /**
990    * @param value true if we should cache index blocks on write
991    * @return this (for chained invocation)
992    */
993   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
994     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
995   }
996 
997   /**
998    * @return true if we should cache bloomfilter blocks on write
999    * @deprecated Use {@link #isCacheBloomsOnWrite()} instead
1000    */
1001   @Deprecated
1002   public boolean shouldCacheBloomsOnWrite() {
1003     return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
1004   }
1005 
1006   /**
1007    * @return true if we should cache bloomfilter blocks on write
1008    */
1009   public boolean isCacheBloomsOnWrite() {
1010     return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
1011   }
1012 
1013   /**
1014    * @param value true if we should cache bloomfilter blocks on write
1015    * @return this (for chained invocation)
1016    */
1017   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
1018     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
1019   }
1020 
1021   /**
1022    * @return true if we should evict cached blocks from the blockcache on
1023    * close
1024    * @deprecated {@link #isEvictBlocksOnClose()} instead
1025    */
1026   @Deprecated
1027   public boolean shouldEvictBlocksOnClose() {
1028     return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
1029   }
1030 
1031   /**
1032    * @return true if we should evict cached blocks from the blockcache on close
1033    */
1034   public boolean isEvictBlocksOnClose() {
1035     return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
1036   }
1037 
1038   /**
1039    * @param value true if we should evict cached blocks from the blockcache on
1040    * close
1041    * @return this (for chained invocation)
1042    */
1043   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
1044     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
1045   }
1046 
1047   /**
1048    * @return true if we should prefetch blocks into the blockcache on open
1049    * @deprecated Use {@link #isPrefetchBlocksOnOpen()} instead
1050    */
1051   @Deprecated
1052   public boolean shouldPrefetchBlocksOnOpen() {
1053     return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
1054   }
1055 
1056   /**
1057    * @return true if we should prefetch blocks into the blockcache on open
1058    */
1059   public boolean isPrefetchBlocksOnOpen() {
1060     return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
1061   }
1062 
1063   /**
1064    * @param value true if we should prefetch blocks into the blockcache on open
1065    * @return this (for chained invocation)
1066    */
1067   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
1068     return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
1069   }
1070 
1071   /**
1072    * @see java.lang.Object#toString()
1073    */
1074   @Override
1075   public String toString() {
1076     StringBuilder s = new StringBuilder();
1077 
1078     s.append('{');
1079     s.append(HConstants.NAME);
1080     s.append(" => '");
1081     s.append(Bytes.toString(name));
1082     s.append("'");
1083     s.append(getValues(true));
1084     s.append('}');
1085     return s.toString();
1086   }
1087 
1088   /**
1089    * @return Column family descriptor with only the customized attributes.
1090    */
1091   public String toStringCustomizedValues() {
1092     StringBuilder s = new StringBuilder();
1093     s.append('{');
1094     s.append(HConstants.NAME);
1095     s.append(" => '");
1096     s.append(Bytes.toString(name));
1097     s.append("'");
1098     s.append(getValues(false));
1099     s.append('}');
1100     return s.toString();
1101   }
1102 
  /**
   * Renders this family's attribute map as a sequence of ", KEY => 'value'" pairs,
   * used by the toString variants. Output order: reserved keys first, then any
   * non-reserved keys grouped under a METADATA subset, then the CONFIGURATION map.
   * @param printDefaults if false, reserved keys whose value matches the known
   *          default (case-insensitive) are omitted
   * @return the rendered fragment (starts with ", " when non-empty)
   */
  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    boolean hasConfigKeys = false;

    // print all reserved keys first
    for (Bytes k : values.keySet()) {
      if (!RESERVED_KEYWORDS.contains(k)) {
        // Remember that a non-reserved key exists; it is printed in the
        // METADATA subset below, not here.
        hasConfigKeys = true;
        continue;
      }
      String key = Bytes.toString(k.get());
      String value = Bytes.toStringBinary(values.get(k).get());
      // Skip values still at their default unless defaults were requested.
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        s.append(", ");
        s.append(key);
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
      }
    }

    // print all non-reserved, advanced config keys as a separate subset
    if (hasConfigKeys) {
      s.append(", ");
      s.append(HConstants.METADATA).append(" => ");
      s.append('{');
      boolean printComma = false;
      for (Bytes k : values.keySet()) {
        if (RESERVED_KEYWORDS.contains(k)) {
          continue;
        }
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printComma) {
          s.append(", ");
        }
        printComma = true;
        s.append('\'').append(key).append('\'');
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
      }
      s.append('}');
    }

    // Finally, the per-family configuration overrides, if any.
    if (!configuration.isEmpty()) {
      s.append(", ");
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfiguration = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfiguration) s.append(", ");
        printCommaForConfiguration = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
      }
      s.append("}");
    }
    return s;
  }
1165 
1166   public static Unit getUnit(String key) {
1167     Unit unit;
1168       /* TTL for now, we can add more as we neeed */
1169     if (key.equals(HColumnDescriptor.TTL)) {
1170       unit = Unit.TIME_INTERVAL;
1171     } else {
1172       unit = Unit.NONE;
1173     }
1174     return unit;
1175   }
1176 
1177   public static Map<String, String> getDefaultValues() {
1178     return Collections.unmodifiableMap(DEFAULT_VALUES);
1179   }
1180 
1181   /**
1182    * @see java.lang.Object#equals(java.lang.Object)
1183    */
1184   @Override
1185   public boolean equals(Object obj) {
1186     if (this == obj) {
1187       return true;
1188     }
1189     if (obj == null) {
1190       return false;
1191     }
1192     if (!(obj instanceof HColumnDescriptor)) {
1193       return false;
1194     }
1195     return compareTo((HColumnDescriptor)obj) == 0;
1196   }
1197 
1198   /**
1199    * @see java.lang.Object#hashCode()
1200    */
1201   @Override
1202   public int hashCode() {
1203     int result = Bytes.hashCode(this.name);
1204     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
1205     result ^= values.hashCode();
1206     result ^= configuration.hashCode();
1207     return result;
1208   }
1209 
1210   // Comparable
1211   @Override
1212   public int compareTo(HColumnDescriptor o) {
1213     int result = Bytes.compareTo(this.name, o.getName());
1214     if (result == 0) {
1215       // punt on comparison for ordering, just calculate difference
1216       result = this.values.hashCode() - o.values.hashCode();
1217       if (result < 0)
1218         result = -1;
1219       else if (result > 0)
1220         result = 1;
1221     }
1222     if (result == 0) {
1223       result = this.configuration.hashCode() - o.configuration.hashCode();
1224       if (result < 0)
1225         result = -1;
1226       else if (result > 0)
1227         result = 1;
1228     }
1229     return result;
1230   }
1231 
1232   /**
1233    * @return This instance serialized with pb with pb magic prefix
1234    * @see #parseFrom(byte[])
1235    */
1236   public byte [] toByteArray() {
1237     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1238   }
1239 
1240   /**
1241    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
1242    * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
1243    * @throws DeserializationException
1244    * @see #toByteArray()
1245    */
1246   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
1247     if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
1248     int pblen = ProtobufUtil.lengthOfPBMagic();
1249     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1250     ColumnFamilySchema cfs = null;
1251     try {
1252       cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1253     } catch (InvalidProtocolBufferException e) {
1254       throw new DeserializationException(e);
1255     }
1256     return convert(cfs);
1257   }
1258 
1259   /**
1260    * @param cfs
1261    * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
1262    */
1263   public static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
1264     // Use the empty constructor so we preserve the initial values set on construction for things
1265     // like maxVersion.  Otherwise, we pick up wrong values on deserialization which makes for
1266     // unrelated-looking test failures that are hard to trace back to here.
1267     HColumnDescriptor hcd = new HColumnDescriptor();
1268     hcd.name = cfs.getName().toByteArray();
1269     for (BytesBytesPair a: cfs.getAttributesList()) {
1270       hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1271     }
1272     for (NameStringPair a: cfs.getConfigurationList()) {
1273       hcd.setConfiguration(a.getName(), a.getValue());
1274     }
1275     return hcd;
1276   }
1277 
1278   /**
1279    * @return Convert this instance to a the pb column family type
1280    */
1281   public ColumnFamilySchema convert() {
1282     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1283     builder.setName(ByteStringer.wrap(getName()));
1284     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1285       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1286       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1287       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1288       builder.addAttributes(aBuilder.build());
1289     }
1290     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1291       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1292       aBuilder.setName(e.getKey());
1293       aBuilder.setValue(e.getValue());
1294       builder.addConfiguration(aBuilder.build());
1295     }
1296     return builder.build();
1297   }
1298 
1299   /**
1300    * Getter for accessing the configuration value by key.
1301    */
1302   public String getConfigurationValue(String key) {
1303     return configuration.get(key);
1304   }
1305 
1306   /**
1307    * Getter for fetching an unmodifiable {@link #configuration} map.
1308    */
1309   public Map<String, String> getConfiguration() {
1310     // shallow pointer copy
1311     return Collections.unmodifiableMap(configuration);
1312   }
1313 
1314   /**
1315    * Setter for storing a configuration setting in {@link #configuration} map.
1316    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1317    * @param value String value. If null, removes the configuration.
1318    */
1319   public HColumnDescriptor setConfiguration(String key, String value) {
1320     if (value == null) {
1321       removeConfiguration(key);
1322     } else {
1323       configuration.put(key, value);
1324     }
1325     return this;
1326   }
1327 
1328   /**
1329    * Remove a configuration setting represented by the key from the {@link #configuration} map.
1330    */
1331   public void removeConfiguration(final String key) {
1332     configuration.remove(key);
1333   }
1334 
1335   /**
1336    * Return the encryption algorithm in use by this family
1337    */
1338   public String getEncryptionType() {
1339     return getValue(ENCRYPTION);
1340   }
1341 
1342   /**
1343    * Set the encryption algorithm for use with this family
1344    * @param algorithm
1345    */
1346   public HColumnDescriptor setEncryptionType(String algorithm) {
1347     setValue(ENCRYPTION, algorithm);
1348     return this;
1349   }
1350 
1351   /** Return the raw crypto key attribute for the family, or null if not set  */
1352   public byte[] getEncryptionKey() {
1353     return getValue(Bytes.toBytes(ENCRYPTION_KEY));
1354   }
1355 
1356   /** Set the raw crypto key attribute for the family */
1357   public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
1358     setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
1359     return this;
1360   }
1361 }