View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PrettyPrinter;
import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

import com.google.common.base.Preconditions;
import com.google.protobuf.HBaseZeroCopyByteString;
import com.google.protobuf.InvalidProtocolBufferException;
50  
51  /**
52   * An HColumnDescriptor contains information about a column family such as the
53   * number of versions, compression settings, etc.
54   *
55   * It is used as input when creating a table or adding a column.
56   */
57  @InterfaceAudience.Public
58  @InterfaceStability.Evolving
59  public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> {
60    // For future backward compatibility
61  
62    // Version  3 was when column names become byte arrays and when we picked up
63    // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
64    // Version  5 was when bloom filter descriptors were removed.
65    // Version  6 adds metadata as a map where keys and values are byte[].
66    // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
67    // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
68    // Version  9 -- add data block encoding
69    // Version 10 -- change metadata to standard type.
70    // Version 11 -- add column family level configuration.
71    private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;
72  
73    // These constants are used as FileInfo keys
74    public static final String COMPRESSION = "COMPRESSION";
75    public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
76    public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
77        "ENCODE_ON_DISK";
78    public static final String DATA_BLOCK_ENCODING =
79        "DATA_BLOCK_ENCODING";
80    /**
81     * Key for the BLOCKCACHE attribute.
82     * A more exact name would be CACHE_DATA_ON_READ because this flag sets whether or not we
83     * cache DATA blocks.  We always cache INDEX and BLOOM blocks; caching these blocks cannot be
84     * disabled.
85     */
86    public static final String BLOCKCACHE = "BLOCKCACHE";
87    public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
88    public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
89    public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
90    public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
91    /**
92     * Key for cache data into L1 if cache is set up with more than one tier.
93     * To set in the shell, do something like this:
94     * <code>hbase(main):003:0> create 't', {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}}</code>
95     */
96    public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
97  
98    /**
99     * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
100    * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
101    * family will be loaded into the cache as soon as the file is opened. These
102    * loads will not count as cache misses.
103    */
104   public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";
105 
106   /**
107    * Size of storefile/hfile 'blocks'.  Default is {@link #DEFAULT_BLOCKSIZE}.
108    * Use smaller block sizes for faster random-access at expense of larger
109    * indices (more memory consumption).
110    */
111   public static final String BLOCKSIZE = "BLOCKSIZE";
112 
113   public static final String LENGTH = "LENGTH";
114   public static final String TTL = "TTL";
115   public static final String BLOOMFILTER = "BLOOMFILTER";
116   public static final String FOREVER = "FOREVER";
117   public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
118   public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
119   public static final String MIN_VERSIONS = "MIN_VERSIONS";
120   public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
121   public static final String COMPRESS_TAGS = "COMPRESS_TAGS";
122 
123   public static final String ENCRYPTION = "ENCRYPTION";
124   public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";
125 
126   /**
127    * Default compression type.
128    */
129   public static final String DEFAULT_COMPRESSION =
130     Compression.Algorithm.NONE.getName();
131 
132   /**
133    * Default value of the flag that enables data block encoding on disk, as
134    * opposed to encoding in cache only. We encode blocks everywhere by default,
135    * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
136    */
137   public static final boolean DEFAULT_ENCODE_ON_DISK = true;
138 
139   /** Default data block encoding algorithm. */
140   public static final String DEFAULT_DATA_BLOCK_ENCODING =
141       DataBlockEncoding.NONE.toString();
142 
143   /**
144    * Default number of versions of a record to keep.
145    */
146   public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
147     "hbase.column.max.version", 1);
148 
149   /**
150    * Default is not to keep a minimum of versions.
151    */
152   public static final int DEFAULT_MIN_VERSIONS = 0;
153 
154   /*
155    * Cache here the HCD value.
156    * Question: its OK to cache since when we're reenable, we create a new HCD?
157    */
158   private volatile Integer blocksize = null;
159 
160   /**
161    * Default setting for whether to try and serve this column family from memory or not.
162    */
163   public static final boolean DEFAULT_IN_MEMORY = false;
164 
165   /**
166    * Default setting for preventing deleted from being collected immediately.
167    */
168   public static final boolean DEFAULT_KEEP_DELETED = false;
169 
170   /**
171    * Default setting for whether to use a block cache or not.
172    */
173   public static final boolean DEFAULT_BLOCKCACHE = true;
174 
175   /**
176    * Default setting for whether to cache data blocks on write if block caching
177    * is enabled.
178    */
179   public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
180 
181   /**
182    * Default setting for whether to cache data blocks in L1 tier.  Only makes sense if more than
183    * one tier in operations: i.e. if we have an L1 and a L2.  This will be the cases if we are
184    * using BucketCache.
185    */
186   public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false;
187 
188   /**
189    * Default setting for whether to cache index blocks on write if block
190    * caching is enabled.
191    */
192   public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;
193 
194   /**
195    * Default size of blocks in files stored to the filesytem (hfiles).
196    */
197   public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;
198 
199   /**
200    * Default setting for whether or not to use bloomfilters.
201    */
202   public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();
203 
204   /**
205    * Default setting for whether to cache bloom filter blocks on write if block
206    * caching is enabled.
207    */
208   public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;
209 
210   /**
211    * Default time to live of cell contents.
212    */
213   public static final int DEFAULT_TTL = HConstants.FOREVER;
214 
215   /**
216    * Default scope.
217    */
218   public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;
219 
220   /**
221    * Default setting for whether to evict cached blocks from the blockcache on
222    * close.
223    */
224   public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;
225 
226   /**
227    * Default compress tags along with any type of DataBlockEncoding.
228    */
229   public static final boolean DEFAULT_COMPRESS_TAGS = true;
230 
231   /*
232    * Default setting for whether to prefetch blocks into the blockcache on open.
233    */
234   public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;
235 
236   private final static Map<String, String> DEFAULT_VALUES
237     = new HashMap<String, String>();
238   private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
239     = new HashSet<ImmutableBytesWritable>();
240   static {
241       DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
242       DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
243       DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
244       DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
245       DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
246       DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
247       DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
248       DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
249       DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
250       DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
251       DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
252       DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
253       DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1));
254       DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
255       DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
256       DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
257       DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
258       for (String s : DEFAULT_VALUES.keySet()) {
259         RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
260       }
261       RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION)));
262       RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION_KEY)));
263   }
264 
265   private static final int UNINITIALIZED = -1;
266 
267   // Column family name
268   private byte [] name;
269 
270   // Column metadata
271   private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
272     new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();
273 
274   /**
275    * A map which holds the configuration specific to the column family.
276    * The keys of the map have the same names as config keys and override the defaults with
277    * cf-specific settings. Example usage may be for compactions, etc.
278    */
279   private final Map<String, String> configuration = new HashMap<String, String>();
280 
281   /*
282    * Cache the max versions rather than calculate it every time.
283    */
284   private int cachedMaxVersions = UNINITIALIZED;
285 
286   /**
287    * Default constructor. Must be present for Writable.
288    * @deprecated Used by Writables and Writables are going away.
289    */
290   @Deprecated
291   // Make this private rather than remove after deprecation period elapses.  Its needed by pb
292   // deserializations.
293   public HColumnDescriptor() {
294     this.name = null;
295   }
296 
297   /**
298    * Construct a column descriptor specifying only the family name
299    * The other attributes are defaulted.
300    *
301    * @param familyName Column family name. Must be 'printable' -- digit or
302    * letter -- and may not contain a <code>:<code>
303    */
304   public HColumnDescriptor(final String familyName) {
305     this(Bytes.toBytes(familyName));
306   }
307 
308   /**
309    * Construct a column descriptor specifying only the family name
310    * The other attributes are defaulted.
311    *
312    * @param familyName Column family name. Must be 'printable' -- digit or
313    * letter -- and may not contain a <code>:<code>
314    */
315   public HColumnDescriptor(final byte [] familyName) {
316     this (familyName == null || familyName.length <= 0?
317       HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
318       DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
319       DEFAULT_TTL, DEFAULT_BLOOMFILTER);
320   }
321 
322   /**
323    * Constructor.
324    * Makes a deep copy of the supplied descriptor.
325    * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
326    * @param desc The descriptor.
327    */
328   public HColumnDescriptor(HColumnDescriptor desc) {
329     super();
330     this.name = desc.name.clone();
331     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
332         desc.values.entrySet()) {
333       this.values.put(e.getKey(), e.getValue());
334     }
335     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
336       this.configuration.put(e.getKey(), e.getValue());
337     }
338     setMaxVersions(desc.getMaxVersions());
339   }
340 
341   /**
342    * Constructor
343    * @param familyName Column family name. Must be 'printable' -- digit or
344    * letter -- and may not contain a <code>:<code>
345    * @param maxVersions Maximum number of versions to keep
346    * @param compression Compression type
347    * @param inMemory If true, column data should be kept in an HRegionServer's
348    * cache
349    * @param blockCacheEnabled If true, MapFile blocks should be cached
350    * @param timeToLive Time-to-live of cell contents, in seconds
351    * (use HConstants.FOREVER for unlimited TTL)
352    * @param bloomFilter Bloom filter type for this column
353    *
354    * @throws IllegalArgumentException if passed a family name that is made of
355    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
356    * a <code>:</code>
357    * @throws IllegalArgumentException if the number of versions is &lt;= 0
358    * @deprecated use {@link #HColumnDescriptor(String)} and setters
359    */
360   @Deprecated
361   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
362       final String compression, final boolean inMemory,
363       final boolean blockCacheEnabled,
364       final int timeToLive, final String bloomFilter) {
365     this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
366       DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
367   }
368 
369   /**
370    * Constructor
371    * @param familyName Column family name. Must be 'printable' -- digit or
372    * letter -- and may not contain a <code>:<code>
373    * @param maxVersions Maximum number of versions to keep
374    * @param compression Compression type
375    * @param inMemory If true, column data should be kept in an HRegionServer's
376    * cache
377    * @param blockCacheEnabled If true, MapFile blocks should be cached
378    * @param blocksize Block size to use when writing out storefiles.  Use
379    * smaller block sizes for faster random-access at expense of larger indices
380    * (more memory consumption).  Default is usually 64k.
381    * @param timeToLive Time-to-live of cell contents, in seconds
382    * (use HConstants.FOREVER for unlimited TTL)
383    * @param bloomFilter Bloom filter type for this column
384    * @param scope The scope tag for this column
385    *
386    * @throws IllegalArgumentException if passed a family name that is made of
387    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
388    * a <code>:</code>
389    * @throws IllegalArgumentException if the number of versions is &lt;= 0
390    * @deprecated use {@link #HColumnDescriptor(String)} and setters
391    */
392   @Deprecated
393   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
394       final String compression, final boolean inMemory,
395       final boolean blockCacheEnabled, final int blocksize,
396       final int timeToLive, final String bloomFilter, final int scope) {
397     this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED,
398         compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
399         inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
400         scope);
401   }
402 
403   /**
404    * Constructor
405    * @param familyName Column family name. Must be 'printable' -- digit or
406    * letter -- and may not contain a <code>:<code>
407    * @param minVersions Minimum number of versions to keep
408    * @param maxVersions Maximum number of versions to keep
409    * @param keepDeletedCells Whether to retain deleted cells until they expire
410    *        up to maxVersions versions.
411    * @param compression Compression type
412    * @param encodeOnDisk whether to use the specified data block encoding
413    *        on disk. If false, the encoding will be used in cache only.
414    * @param dataBlockEncoding data block encoding
415    * @param inMemory If true, column data should be kept in an HRegionServer's
416    * cache
417    * @param blockCacheEnabled If true, MapFile blocks should be cached
418    * @param blocksize Block size to use when writing out storefiles.  Use
419    * smaller blocksizes for faster random-access at expense of larger indices
420    * (more memory consumption).  Default is usually 64k.
421    * @param timeToLive Time-to-live of cell contents, in seconds
422    * (use HConstants.FOREVER for unlimited TTL)
423    * @param bloomFilter Bloom filter type for this column
424    * @param scope The scope tag for this column
425    *
426    * @throws IllegalArgumentException if passed a family name that is made of
427    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
428    * a <code>:</code>
429    * @throws IllegalArgumentException if the number of versions is &lt;= 0
430    * @deprecated use {@link #HColumnDescriptor(String)} and setters
431    */
432   @Deprecated
433   public HColumnDescriptor(final byte[] familyName, final int minVersions,
434       final int maxVersions, final boolean keepDeletedCells,
435       final String compression, final boolean encodeOnDisk,
436       final String dataBlockEncoding, final boolean inMemory,
437       final boolean blockCacheEnabled, final int blocksize,
438       final int timeToLive, final String bloomFilter, final int scope) {
439     isLegalFamilyName(familyName);
440     this.name = familyName;
441 
442     if (maxVersions <= 0) {
443       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
444       // Until there is support, consider 0 or < 0 -- a configuration error.
445       throw new IllegalArgumentException("Maximum versions must be positive");
446     }
447 
448     if (minVersions > 0) {
449       if (timeToLive == HConstants.FOREVER) {
450         throw new IllegalArgumentException("Minimum versions requires TTL.");
451       }
452       if (minVersions >= maxVersions) {
453         throw new IllegalArgumentException("Minimum versions must be < "
454             + "maximum versions.");
455       }
456     }
457 
458     setMaxVersions(maxVersions);
459     setMinVersions(minVersions);
460     setKeepDeletedCells(keepDeletedCells);
461     setInMemory(inMemory);
462     setBlockCacheEnabled(blockCacheEnabled);
463     setTimeToLive(timeToLive);
464     setCompressionType(Compression.Algorithm.
465       valueOf(compression.toUpperCase()));
466     setDataBlockEncoding(DataBlockEncoding.
467         valueOf(dataBlockEncoding.toUpperCase()));
468     setBloomFilterType(BloomType.
469       valueOf(bloomFilter.toUpperCase()));
470     setBlocksize(blocksize);
471     setScope(scope);
472   }
473 
474   /**
475    * @param b Family name.
476    * @return <code>b</code>
477    * @throws IllegalArgumentException If not null and not a legitimate family
478    * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
479    * <code>b</code> can be null when deserializing).  Cannot start with a '.'
480    * either. Also Family can not be an empty value or equal "recovered.edits".
481    */
482   public static byte [] isLegalFamilyName(final byte [] b) {
483     if (b == null) {
484       return b;
485     }
486     Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
487     if (b[0] == '.') {
488       throw new IllegalArgumentException("Family names cannot start with a " +
489         "period: " + Bytes.toString(b));
490     }
491     for (int i = 0; i < b.length; i++) {
492       if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
493         throw new IllegalArgumentException("Illegal character <" + b[i] +
494           ">. Family names cannot contain control characters or colons: " +
495           Bytes.toString(b));
496       }
497     }
498     byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
499     if (Bytes.equals(recoveredEdit, b)) {
500       throw new IllegalArgumentException("Family name cannot be: " +
501           HConstants.RECOVERED_EDITS_DIR);
502     }
503     return b;
504   }
505 
506   /**
507    * @return Name of this column family
508    */
509   public byte [] getName() {
510     return name;
511   }
512 
513   /**
514    * @return Name of this column family
515    */
516   public String getNameAsString() {
517     return Bytes.toString(this.name);
518   }
519 
520   /**
521    * @param key The key.
522    * @return The value.
523    */
524   public byte[] getValue(byte[] key) {
525     ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key));
526     if (ibw == null)
527       return null;
528     return ibw.get();
529   }
530 
531   /**
532    * @param key The key.
533    * @return The value as a string.
534    */
535   public String getValue(String key) {
536     byte[] value = getValue(Bytes.toBytes(key));
537     if (value == null)
538       return null;
539     return Bytes.toString(value);
540   }
541 
542   /**
543    * @return All values.
544    */
545   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
546     // shallow pointer copy
547     return Collections.unmodifiableMap(values);
548   }
549 
550   /**
551    * @param key The key.
552    * @param value The value.
553    * @return this (for chained invocation)
554    */
555   public HColumnDescriptor setValue(byte[] key, byte[] value) {
556     values.put(new ImmutableBytesWritable(key),
557       new ImmutableBytesWritable(value));
558     return this;
559   }
560 
561   /**
562    * @param key Key whose key and value we're to remove from HCD parameters.
563    */
564   public void remove(final byte [] key) {
565     values.remove(new ImmutableBytesWritable(key));
566   }
567 
568   /**
569    * @param key The key.
570    * @param value The value.
571    * @return this (for chained invocation)
572    */
573   public HColumnDescriptor setValue(String key, String value) {
574     if (value == null) {
575       remove(Bytes.toBytes(key));
576     } else {
577       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
578     }
579     return this;
580   }
581 
582   /** @return compression type being used for the column family */
583   public Compression.Algorithm getCompression() {
584     String n = getValue(COMPRESSION);
585     if (n == null) {
586       return Compression.Algorithm.NONE;
587     }
588     return Compression.Algorithm.valueOf(n.toUpperCase());
589   }
590 
591   /** @return compression type being used for the column family for major
592       compression */
593   public Compression.Algorithm getCompactionCompression() {
594     String n = getValue(COMPRESSION_COMPACT);
595     if (n == null) {
596       return getCompression();
597     }
598     return Compression.Algorithm.valueOf(n.toUpperCase());
599   }
600 
601   /** @return maximum number of versions */
602   public int getMaxVersions() {
603     if (this.cachedMaxVersions == UNINITIALIZED) {
604       String v = getValue(HConstants.VERSIONS);
605       this.cachedMaxVersions = Integer.parseInt(v);
606     }
607     return this.cachedMaxVersions;
608   }
609 
610   /**
611    * @param maxVersions maximum number of versions
612    * @return this (for chained invocation)
613    */
614   public HColumnDescriptor setMaxVersions(int maxVersions) {
615     if (maxVersions <= 0) {
616       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
617       // Until there is support, consider 0 or < 0 -- a configuration error.
618       throw new IllegalArgumentException("Maximum versions must be positive");
619     }    
620     if (maxVersions < this.getMinVersions()) {      
621         throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
622             + " while minVersion is " + this.getMinVersions()
623             + ". Maximum versions must be >= minimum versions ");      
624     }
625     setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
626     cachedMaxVersions = maxVersions;
627     return this;
628   }
629 
630   /**
631    * @return The storefile/hfile blocksize for this column family.
632    */
633   public synchronized int getBlocksize() {
634     if (this.blocksize == null) {
635       String value = getValue(BLOCKSIZE);
636       this.blocksize = (value != null)?
637         Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
638     }
639     return this.blocksize.intValue();
640   }
641 
642   /**
643    * @param s Blocksize to use when writing out storefiles/hfiles on this
644    * column family.
645    * @return this (for chained invocation)
646    */
647   public HColumnDescriptor setBlocksize(int s) {
648     setValue(BLOCKSIZE, Integer.toString(s));
649     this.blocksize = null;
650     return this;
651   }
652 
653   /**
654    * @return Compression type setting.
655    */
656   public Compression.Algorithm getCompressionType() {
657     return getCompression();
658   }
659 
660   /**
661    * Compression types supported in hbase.
662    * LZO is not bundled as part of the hbase distribution.
663    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
664    * for how to enable it.
665    * @param type Compression type setting.
666    * @return this (for chained invocation)
667    */
668   public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
669     return setValue(COMPRESSION, type.getName().toUpperCase());
670   }
671 
672   /** @return data block encoding algorithm used on disk */
673   @Deprecated
674   public DataBlockEncoding getDataBlockEncodingOnDisk() {
675     return getDataBlockEncoding();
676   }
677 
678   /**
679    * This method does nothing now. Flag ENCODE_ON_DISK is not used
680    * any more. Data blocks have the same encoding in cache as on disk.
681    * @return this (for chained invocation)
682    */
683   @Deprecated
684   public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
685     return this;
686   }
687 
688   /**
689    * @return the data block encoding algorithm used in block cache and
690    *         optionally on disk
691    */
692   public DataBlockEncoding getDataBlockEncoding() {
693     String type = getValue(DATA_BLOCK_ENCODING);
694     if (type == null) {
695       type = DEFAULT_DATA_BLOCK_ENCODING;
696     }
697     return DataBlockEncoding.valueOf(type);
698   }
699 
700   /**
701    * Set data block encoding algorithm used in block cache.
702    * @param type What kind of data block encoding will be used.
703    * @return this (for chained invocation)
704    */
705   public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
706     String name;
707     if (type != null) {
708       name = type.toString();
709     } else {
710       name = DataBlockEncoding.NONE.toString();
711     }
712     return setValue(DATA_BLOCK_ENCODING, name);
713   }
714 
715   /**
716    * Set whether the tags should be compressed along with DataBlockEncoding. When no
717    * DataBlockEncoding is been used, this is having no effect.
718    * 
719    * @param compressTags
720    * @return this (for chained invocation)
721    */
722   public HColumnDescriptor setCompressTags(boolean compressTags) {
723     return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
724   }
725 
726   /**
727    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
728    *         DataBlockEncoding is been used, this is having no effect.
729    */
730   public boolean shouldCompressTags() {
731     String compressTagsStr = getValue(COMPRESS_TAGS);
732     boolean compressTags = DEFAULT_COMPRESS_TAGS;
733     if (compressTagsStr != null) {
734       compressTags = Boolean.valueOf(compressTagsStr);
735     }
736     return compressTags;
737   }
738 
739   /**
740    * @return Compression type setting.
741    */
742   public Compression.Algorithm getCompactionCompressionType() {
743     return getCompactionCompression();
744   }
745 
746   /**
747    * Compression types supported in hbase.
748    * LZO is not bundled as part of the hbase distribution.
749    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
750    * for how to enable it.
751    * @param type Compression type setting.
752    * @return this (for chained invocation)
753    */
754   public HColumnDescriptor setCompactionCompressionType(
755       Compression.Algorithm type) {
756     return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase());
757   }
758 
759   /**
760    * @return True if we are to favor keeping all values for this column family in the 
761    * HRegionServer cache.
762    */
763   public boolean isInMemory() {
764     String value = getValue(HConstants.IN_MEMORY);
765     if (value != null)
766       return Boolean.valueOf(value).booleanValue();
767     return DEFAULT_IN_MEMORY;
768   }
769 
770   /**
771    * @param inMemory True if we are to favor keeping all values for this column family in the
772    * HRegionServer cache
773    * @return this (for chained invocation)
774    */
775   public HColumnDescriptor setInMemory(boolean inMemory) {
776     return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
777   }
778 
779   public boolean getKeepDeletedCells() {
780     String value = getValue(KEEP_DELETED_CELLS);
781     if (value != null) {
782       return Boolean.valueOf(value).booleanValue();
783     }
784     return DEFAULT_KEEP_DELETED;
785   }
786 
787   /**
788    * @param keepDeletedCells True if deleted rows should not be collected
789    * immediately.
790    * @return this (for chained invocation)
791    */
792   public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
793     return setValue(KEEP_DELETED_CELLS, Boolean.toString(keepDeletedCells));
794   }
795 
796   /**
797    * @return Time-to-live of cell contents, in seconds.
798    */
799   public int getTimeToLive() {
800     String value = getValue(TTL);
801     return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL;
802   }
803 
804   /**
805    * @param timeToLive Time-to-live of cell contents, in seconds.
806    * @return this (for chained invocation)
807    */
808   public HColumnDescriptor setTimeToLive(int timeToLive) {
809     return setValue(TTL, Integer.toString(timeToLive));
810   }
811 
812   /**
813    * @return The minimum number of versions to keep.
814    */
815   public int getMinVersions() {
816     String value = getValue(MIN_VERSIONS);
817     return (value != null)? Integer.valueOf(value).intValue(): 0;
818   }
819 
820   /**
821    * @param minVersions The minimum number of versions to keep.
822    * (used when timeToLive is set)
823    * @return this (for chained invocation)
824    */
825   public HColumnDescriptor setMinVersions(int minVersions) {
826     return setValue(MIN_VERSIONS, Integer.toString(minVersions));
827   }
828 
829   /**
830    * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX
831    * and BLOOM type blocks).
832    */
833   public boolean isBlockCacheEnabled() {
834     String value = getValue(BLOCKCACHE);
835     if (value != null)
836       return Boolean.valueOf(value).booleanValue();
837     return DEFAULT_BLOCKCACHE;
838   }
839 
840   /**
841    * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache
842    * INDEX and BLOOM blocks; you cannot turn this off).
843    * @return this (for chained invocation)
844    */
845   public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
846     return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
847   }
848 
849   /**
850    * @return bloom filter type used for new StoreFiles in ColumnFamily
851    */
852   public BloomType getBloomFilterType() {
853     String n = getValue(BLOOMFILTER);
854     if (n == null) {
855       n = DEFAULT_BLOOMFILTER;
856     }
857     return BloomType.valueOf(n.toUpperCase());
858   }
859 
860   /**
861    * @param bt bloom filter type
862    * @return this (for chained invocation)
863    */
864   public HColumnDescriptor setBloomFilterType(final BloomType bt) {
865     return setValue(BLOOMFILTER, bt.toString());
866   }
867 
868    /**
869     * @return the scope tag
870     */
871   public int getScope() {
872     byte[] value = getValue(REPLICATION_SCOPE_BYTES);
873     if (value != null) {
874       return Integer.valueOf(Bytes.toString(value));
875     }
876     return DEFAULT_REPLICATION_SCOPE;
877   }
878 
879  /**
880   * @param scope the scope tag
881   * @return this (for chained invocation)
882   */
883   public HColumnDescriptor setScope(int scope) {
884     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
885   }
886 
  /**
   * @return true if data blocks should be added to the block cache as they are written
   *         (reads the {@code CACHE_DATA_ON_WRITE} attribute, defaulting when unset)
   */
  public boolean shouldCacheDataOnWrite() {
    return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
  }
893 
894   /**
895    * @param value true if we should cache data blocks on write
896    * @return this (for chained invocation)
897    */
898   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
899     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
900   }
901 
  /**
   * @return true if we should cache data blocks in the L1 cache (if block cache deploy
   * has more than one tier; e.g. we are using CombinedBlockCache).
   */
  public boolean shouldCacheDataInL1() {
    return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
  }
909 
910   /**
911    * @param value true if we should cache data blocks in the L1 cache (if block cache deploy
912    * has more than one tier; e.g. we are using CombinedBlockCache).
913    * @return this (for chained invocation)
914    */
915   public HColumnDescriptor setCacheDataInL1(boolean value) {
916     return setValue(CACHE_DATA_IN_L1, Boolean.toString(value));
917   }
918 
  /**
   * Read the named attribute as a boolean, returning {@code defaultSetting} when the
   * attribute is absent.
   * NOTE(review): the name is misleading — this method only reads; it never sets
   * anything. Renaming would touch every boolean getter, so it is left as-is here.
   * @param key attribute name to look up via {@code getValue}
   * @param defaultSetting value returned when the attribute is not present
   */
  private boolean setAndGetBoolean(final String key, final boolean defaultSetting) {
    String value = getValue(key);
    if (value != null) return Boolean.valueOf(value).booleanValue();
    return defaultSetting;
  }
924 
  /**
   * @return true if index blocks should be added to the block cache as they are written
   *         (reads the {@code CACHE_INDEX_ON_WRITE} attribute, defaulting when unset)
   */
  public boolean shouldCacheIndexesOnWrite() {
    return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
  }
931 
932   /**
933    * @param value true if we should cache index blocks on write
934    * @return this (for chained invocation)
935    */
936   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
937     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
938   }
939 
  /**
   * @return true if bloomfilter blocks should be added to the block cache as they are
   *         written (reads the {@code CACHE_BLOOMS_ON_WRITE} attribute, defaulting when unset)
   */
  public boolean shouldCacheBloomsOnWrite() {
    return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
  }
946 
947   /**
948    * @param value true if we should cache bloomfilter blocks on write
949    * @return this (for chained invocation)
950    */
951   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
952     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
953   }
954 
  /**
   * @return true if cached blocks should be evicted from the blockcache on close
   *         (reads the {@code EVICT_BLOCKS_ON_CLOSE} attribute, defaulting when unset)
   */
  public boolean shouldEvictBlocksOnClose() {
    return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
  }
962 
963   /**
964    * @param value true if we should evict cached blocks from the blockcache on
965    * close
966    * @return this (for chained invocation)
967    */
968   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
969     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
970   }
971 
  /**
   * @return true if blocks should be prefetched into the blockcache on open
   *         (reads the {@code PREFETCH_BLOCKS_ON_OPEN} attribute, defaulting when unset)
   */
  public boolean shouldPrefetchBlocksOnOpen() {
    return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
  }
978 
979   /**
980    * @param value true if we should prefetch blocks into the blockcache on open
981    * @return this (for chained invocation)
982    */
983   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
984     return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
985   }
986 
987   /**
988    * @see java.lang.Object#toString()
989    */
990   @Override
991   public String toString() {
992     StringBuilder s = new StringBuilder();
993 
994     s.append('{');
995     s.append(HConstants.NAME);
996     s.append(" => '");
997     s.append(Bytes.toString(name));
998     s.append("'");
999     s.append(getValues(true));
1000     s.append('}');
1001     return s.toString();
1002   }
1003 
1004   /**
1005    * @return Column family descriptor with only the customized attributes.
1006    */
1007   public String toStringCustomizedValues() {
1008     StringBuilder s = new StringBuilder();
1009     s.append('{');
1010     s.append(HConstants.NAME);
1011     s.append(" => '");
1012     s.append(Bytes.toString(name));
1013     s.append("'");
1014     s.append(getValues(false));
1015     s.append('}');
1016     return s.toString();
1017   }
1018 
  /**
   * Build the attribute portion of {@link #toString()} /
   * {@link #toStringCustomizedValues()}: first the reserved (well-known) attributes,
   * then non-reserved keys under a METADATA sub-map, then the per-family
   * configuration under a CONFIGURATION sub-map.
   * @param printDefaults when false, reserved attributes whose value equals the
   *        default are omitted
   * @return a StringBuilder beginning with ", " when non-empty, suitable for
   *         appending directly after the family name
   */
  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    boolean hasConfigKeys = false;

    // print all reserved keys first
    for (ImmutableBytesWritable k : values.keySet()) {
      if (!RESERVED_KEYWORDS.contains(k)) {
        // remember that a second pass is needed for non-reserved keys
        hasConfigKeys = true;
        continue;
      }
      String key = Bytes.toString(k.get());
      String value = Bytes.toStringBinary(values.get(k).get());
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        s.append(", ");
        s.append(key);
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
      }
    }

    // print all non-reserved, advanced config keys as a separate subset
    if (hasConfigKeys) {
      s.append(", ");
      s.append(HConstants.METADATA).append(" => ");
      s.append('{');
      boolean printComma = false;
      for (ImmutableBytesWritable k : values.keySet()) {
        if (RESERVED_KEYWORDS.contains(k)) {
          continue;
        }
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printComma) {
          s.append(", ");
        }
        printComma = true;
        s.append('\'').append(key).append('\'');
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
      }
      s.append('}');
    }

    // finally the per-family configuration map, if any entries exist
    if (!configuration.isEmpty()) {
      s.append(", ");
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfiguration = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfiguration) s.append(", ");
        printCommaForConfiguration = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
      }
      s.append("}");
    }
    return s;
  }
1081 
1082   public static Unit getUnit(String key) {
1083     Unit unit;
1084       /* TTL for now, we can add more as we neeed */
1085     if (key.equals(HColumnDescriptor.TTL)) {
1086       unit = Unit.TIME_INTERVAL;
1087     } else {
1088       unit = Unit.NONE;
1089     }
1090     return unit;
1091   }
1092 
  /**
   * @return an unmodifiable view of the default attribute values for a column family
   */
  public static Map<String, String> getDefaultValues() {
    return Collections.unmodifiableMap(DEFAULT_VALUES);
  }
1096 
1097   /**
1098    * @see java.lang.Object#equals(java.lang.Object)
1099    */
1100   @Override
1101   public boolean equals(Object obj) {
1102     if (this == obj) {
1103       return true;
1104     }
1105     if (obj == null) {
1106       return false;
1107     }
1108     if (!(obj instanceof HColumnDescriptor)) {
1109       return false;
1110     }
1111     return compareTo((HColumnDescriptor)obj) == 0;
1112   }
1113 
1114   /**
1115    * @see java.lang.Object#hashCode()
1116    */
1117   @Override
1118   public int hashCode() {
1119     int result = Bytes.hashCode(this.name);
1120     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
1121     result ^= values.hashCode();
1122     result ^= configuration.hashCode();
1123     return result;
1124   }
1125 
1126   /**
1127    * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
1128    */
1129   @Deprecated
1130   public void readFields(DataInput in) throws IOException {
1131     int version = in.readByte();
1132     if (version < 6) {
1133       if (version <= 2) {
1134         Text t = new Text();
1135         t.readFields(in);
1136         this.name = t.getBytes();
1137 //        if(KeyValue.getFamilyDelimiterIndex(this.name, 0, this.name.length)
1138 //            > 0) {
1139 //          this.name = stripColon(this.name);
1140 //        }
1141       } else {
1142         this.name = Bytes.readByteArray(in);
1143       }
1144       this.values.clear();
1145       setMaxVersions(in.readInt());
1146       int ordinal = in.readInt();
1147       setCompressionType(Compression.Algorithm.values()[ordinal]);
1148       setInMemory(in.readBoolean());
1149       setBloomFilterType(in.readBoolean() ? BloomType.ROW : BloomType.NONE);
1150       if (getBloomFilterType() != BloomType.NONE && version < 5) {
1151         // If a bloomFilter is enabled and the column descriptor is less than
1152         // version 5, we need to skip over it to read the rest of the column
1153         // descriptor. There are no BloomFilterDescriptors written to disk for
1154         // column descriptors with a version number >= 5
1155         throw new UnsupportedClassVersionError(this.getClass().getName() +
1156             " does not support backward compatibility with versions older " +
1157             "than version 5");
1158       }
1159       if (version > 1) {
1160         setBlockCacheEnabled(in.readBoolean());
1161       }
1162       if (version > 2) {
1163        setTimeToLive(in.readInt());
1164       }
1165     } else {
1166       // version 6+
1167       this.name = Bytes.readByteArray(in);
1168       this.values.clear();
1169       int numValues = in.readInt();
1170       for (int i = 0; i < numValues; i++) {
1171         ImmutableBytesWritable key = new ImmutableBytesWritable();
1172         ImmutableBytesWritable value = new ImmutableBytesWritable();
1173         key.readFields(in);
1174         value.readFields(in);
1175 
1176         // in version 8, the BloomFilter setting changed from bool to enum
1177         if (version < 8 && Bytes.toString(key.get()).equals(BLOOMFILTER)) {
1178           value.set(Bytes.toBytes(
1179               Boolean.getBoolean(Bytes.toString(value.get()))
1180                 ? BloomType.ROW.toString()
1181                 : BloomType.NONE.toString()));
1182         }
1183 
1184         values.put(key, value);
1185       }
1186       if (version == 6) {
1187         // Convert old values.
1188         setValue(COMPRESSION, Compression.Algorithm.NONE.getName());
1189       }
1190       String value = getValue(HConstants.VERSIONS);
1191       this.cachedMaxVersions = (value != null)?
1192           Integer.valueOf(value).intValue(): DEFAULT_VERSIONS;
1193       if (version > 10) {
1194         configuration.clear();
1195         int numConfigs = in.readInt();
1196         for (int i = 0; i < numConfigs; i++) {
1197           ImmutableBytesWritable key = new ImmutableBytesWritable();
1198           ImmutableBytesWritable val = new ImmutableBytesWritable();
1199           key.readFields(in);
1200           val.readFields(in);
1201           configuration.put(
1202             Bytes.toString(key.get(), key.getOffset(), key.getLength()),
1203             Bytes.toString(val.get(), val.getOffset(), val.getLength()));
1204         }
1205       }
1206     }
1207   }
1208 
1209   /**
1210    * @deprecated Writables are going away.  Use {@link #toByteArray()} instead.
1211    */
1212   @Deprecated
1213   public void write(DataOutput out) throws IOException {
1214     out.writeByte(COLUMN_DESCRIPTOR_VERSION);
1215     Bytes.writeByteArray(out, this.name);
1216     out.writeInt(values.size());
1217     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1218         values.entrySet()) {
1219       e.getKey().write(out);
1220       e.getValue().write(out);
1221     }
1222     out.writeInt(configuration.size());
1223     for (Map.Entry<String, String> e : configuration.entrySet()) {
1224       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1225       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1226     }
1227   }
1228 
1229   // Comparable
1230 
1231   public int compareTo(HColumnDescriptor o) {
1232     int result = Bytes.compareTo(this.name, o.getName());
1233     if (result == 0) {
1234       // punt on comparison for ordering, just calculate difference
1235       result = this.values.hashCode() - o.values.hashCode();
1236       if (result < 0)
1237         result = -1;
1238       else if (result > 0)
1239         result = 1;
1240     }
1241     if (result == 0) {
1242       result = this.configuration.hashCode() - o.configuration.hashCode();
1243       if (result < 0)
1244         result = -1;
1245       else if (result > 0)
1246         result = 1;
1247     }
1248     return result;
1249   }
1250 
1251   /**
1252    * @return This instance serialized with pb with pb magic prefix
1253    * @see #parseFrom(byte[])
1254    */
1255   public byte [] toByteArray() {
1256     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1257   }
1258 
1259   /**
1260    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
1261    * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
1262    * @throws DeserializationException
1263    * @see #toByteArray()
1264    */
1265   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
1266     if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
1267     int pblen = ProtobufUtil.lengthOfPBMagic();
1268     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1269     ColumnFamilySchema cfs = null;
1270     try {
1271       cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1272     } catch (InvalidProtocolBufferException e) {
1273       throw new DeserializationException(e);
1274     }
1275     return convert(cfs);
1276   }
1277 
1278   /**
1279    * @param cfs
1280    * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
1281    */
1282   public static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
1283     // Use the empty constructor so we preserve the initial values set on construction for things
1284     // like maxVersion.  Otherwise, we pick up wrong values on deserialization which makes for
1285     // unrelated-looking test failures that are hard to trace back to here.
1286     HColumnDescriptor hcd = new HColumnDescriptor();
1287     hcd.name = cfs.getName().toByteArray();
1288     for (BytesBytesPair a: cfs.getAttributesList()) {
1289       hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1290     }
1291     for (NameStringPair a: cfs.getConfigurationList()) {
1292       hcd.setConfiguration(a.getName(), a.getValue());
1293     }
1294     return hcd;
1295   }
1296 
1297   /**
1298    * @return Convert this instance to a the pb column family type
1299    */
1300   public ColumnFamilySchema convert() {
1301     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1302     builder.setName(HBaseZeroCopyByteString.wrap(getName()));
1303     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1304       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1305       aBuilder.setFirst(HBaseZeroCopyByteString.wrap(e.getKey().get()));
1306       aBuilder.setSecond(HBaseZeroCopyByteString.wrap(e.getValue().get()));
1307       builder.addAttributes(aBuilder.build());
1308     }
1309     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1310       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1311       aBuilder.setName(e.getKey());
1312       aBuilder.setValue(e.getValue());
1313       builder.addConfiguration(aBuilder.build());
1314     }
1315     return builder.build();
1316   }
1317 
  /**
   * Getter for accessing the configuration value by key.
   * @param key configuration key
   * @return the configured value, or null when the key is not present
   */
  public String getConfigurationValue(String key) {
    return configuration.get(key);
  }
1324 
1325   /**
1326    * Getter for fetching an unmodifiable {@link #configuration} map.
1327    */
1328   public Map<String, String> getConfiguration() {
1329     // shallow pointer copy
1330     return Collections.unmodifiableMap(configuration);
1331   }
1332 
1333   /**
1334    * Setter for storing a configuration setting in {@link #configuration} map.
1335    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1336    * @param value String value. If null, removes the configuration.
1337    */
1338   public void setConfiguration(String key, String value) {
1339     if (value == null) {
1340       removeConfiguration(key);
1341     } else {
1342       configuration.put(key, value);
1343     }
1344   }
1345 
  /**
   * Remove a configuration setting represented by the key from the
   * {@link #configuration} map. No-op when the key is absent.
   * @param key configuration key to remove
   */
  public void removeConfiguration(final String key) {
    configuration.remove(key);
  }
1352 
  /**
   * Return the encryption algorithm in use by this family, or null when encryption
   * is not configured.
   */
  public String getEncryptionType() {
    return getValue(ENCRYPTION);
  }
1359 
1360   /**
1361    * Set the encryption algorithm for use with this family
1362    * @param algorithm
1363    */
1364   public HColumnDescriptor setEncryptionType(String algorithm) {
1365     setValue(ENCRYPTION, algorithm);
1366     return this;
1367   }
1368 
1369   /** Return the raw crypto key attribute for the family, or null if not set  */
1370   public byte[] getEncryptionKey() {
1371     return getValue(Bytes.toBytes(ENCRYPTION_KEY));
1372   }
1373 
  /**
   * Set the raw crypto key attribute for the family.
   * @param keyBytes raw key material stored under the ENCRYPTION_KEY attribute
   * @return this (for chained invocation)
   */
  public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
    setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
    return this;
  }
1379 }