View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.IOException;
22  import java.util.Collections;
23  import java.util.HashMap;
24  import java.util.HashSet;
25  import java.util.Map;
26  import java.util.Set;
27  
28  import org.apache.hadoop.hbase.classification.InterfaceAudience;
29  import org.apache.hadoop.hbase.classification.InterfaceStability;
30  import org.apache.hadoop.hbase.exceptions.DeserializationException;
31  import org.apache.hadoop.hbase.exceptions.HBaseException;
32  import org.apache.hadoop.hbase.io.compress.Compression;
33  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
34  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
35  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
36  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
37  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
38  import org.apache.hadoop.hbase.regionserver.BloomType;
39  import org.apache.hadoop.hbase.util.ByteStringer;
40  import org.apache.hadoop.hbase.util.Bytes;
41  import org.apache.hadoop.hbase.util.PrettyPrinter;
42  import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
43  
44  import com.google.common.base.Preconditions;
45  
46  /**
47   * An HColumnDescriptor contains information about a column family such as the
48   * number of versions, compression settings, etc.
49   *
50   * It is used as input when creating a table or adding a column.
51   */
52  @InterfaceAudience.Public
53  @InterfaceStability.Evolving
54  public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
  // For future backward compatibility

  // Version  3 was when column names become byte arrays and when we picked up
  // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
  // Version  5 was when bloom filter descriptors were removed.
  // Version  6 adds metadata as a map where keys and values are byte[].
  // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
  // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
  // Version  9 -- add data block encoding
  // Version 10 -- change metadata to standard type.
  // Version 11 -- add column family level configuration.
  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;

  // These constants are used as FileInfo keys
  public static final String COMPRESSION = "COMPRESSION";
  // Compression used only during compactions; readers fall back to COMPRESSION when unset.
  public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
  public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
      "ENCODE_ON_DISK";
  public static final String DATA_BLOCK_ENCODING =
      "DATA_BLOCK_ENCODING";
  /**
   * Key for the BLOCKCACHE attribute.
   * A more exact name would be CACHE_DATA_ON_READ because this flag sets whether or not we
   * cache DATA blocks.  We always cache INDEX and BLOOM blocks; caching these blocks cannot be
   * disabled.
   */
  public static final String BLOCKCACHE = "BLOCKCACHE";
  public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
  public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
  public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
  public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
  /**
   * Key for cache data into L1 if cache is set up with more than one tier.
   * To set in the shell, do something like this:
   * <code>hbase(main):003:0&gt; create 't',
   *    {NAME =&gt; 't', CONFIGURATION =&gt; {CACHE_DATA_IN_L1 =&gt; 'true'}}</code>
   */
  public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";

  /**
   * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
   * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
   * family will be loaded into the cache as soon as the file is opened. These
   * loads will not count as cache misses.
   */
  public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";

  /**
   * Size of storefile/hfile 'blocks'.  Default is {@link #DEFAULT_BLOCKSIZE}.
   * Use smaller block sizes for faster random-access at expense of larger
   * indices (more memory consumption).
   */
  public static final String BLOCKSIZE = "BLOCKSIZE";

  public static final String LENGTH = "LENGTH";
  /** Time-to-live of cell contents, in seconds; see {@link #DEFAULT_TTL}. */
  public static final String TTL = "TTL";
  public static final String BLOOMFILTER = "BLOOMFILTER";
  public static final String FOREVER = "FOREVER";
  public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
  public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
  public static final String MIN_VERSIONS = "MIN_VERSIONS";
  /**
   * Retain all cells across flushes and compactions even if they fall behind
   * a delete tombstone. To see all retained cells, do a 'raw' scan; see
   * Scan#setRaw or pass RAW =&gt; true attribute in the shell.
   */
  public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
  public static final String COMPRESS_TAGS = "COMPRESS_TAGS";

  public static final String ENCRYPTION = "ENCRYPTION";
  public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";

  public static final String IS_MOB = "IS_MOB";
  public static final byte[] IS_MOB_BYTES = Bytes.toBytes(IS_MOB);
  public static final String MOB_THRESHOLD = "MOB_THRESHOLD";
  public static final byte[] MOB_THRESHOLD_BYTES = Bytes.toBytes(MOB_THRESHOLD);
  public static final long DEFAULT_MOB_THRESHOLD = 100 * 1024; // 100k

  public static final String DFS_REPLICATION = "DFS_REPLICATION";
  // 0 means "use the filesystem's default replication factor".
  public static final short DEFAULT_DFS_REPLICATION = 0;

  /**
   * Default compression type.
   */
  public static final String DEFAULT_COMPRESSION =
    Compression.Algorithm.NONE.getName();

  /**
   * Default value of the flag that enables data block encoding on disk, as
   * opposed to encoding in cache only. We encode blocks everywhere by default,
   * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
   */
  public static final boolean DEFAULT_ENCODE_ON_DISK = true;

  /** Default data block encoding algorithm. */
  public static final String DEFAULT_DATA_BLOCK_ENCODING =
      DataBlockEncoding.NONE.toString();

  /**
   * Default number of versions of a record to keep.
   * Read once from configuration at class-load time ("hbase.column.max.version", default 1).
   */
  public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
    "hbase.column.max.version", 1);

  /**
   * Default is not to keep a minimum of versions.
   */
  public static final int DEFAULT_MIN_VERSIONS = 0;

  /**
   * Cache here the HCD value.
   * Question: its OK to cache since when we're reenable, we create a new HCD?
   * Lazily populated by getBlocksize(); invalidated (nulled) by setBlocksize().
   */
  private volatile Integer blocksize = null;

  /**
   * Default setting for whether to try and serve this column family from memory or not.
   */
  public static final boolean DEFAULT_IN_MEMORY = false;

  /**
   * Default setting for preventing deleted from being collected immediately.
   */
  public static final KeepDeletedCells DEFAULT_KEEP_DELETED = KeepDeletedCells.FALSE;

  /**
   * Default setting for whether to use a block cache or not.
   */
  public static final boolean DEFAULT_BLOCKCACHE = true;

  /**
   * Default setting for whether to cache data blocks on write if block caching
   * is enabled.
   */
  public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;

  /**
   * Default setting for whether to cache data blocks in L1 tier.  Only makes sense if more than
   * one tier in operations: i.e. if we have an L1 and a L2.  This will be the cases if we are
   * using BucketCache.
   */
  public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false;

  /**
   * Default setting for whether to cache index blocks on write if block
   * caching is enabled.
   */
  public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;

  /**
   * Default size of blocks in files stored to the filesytem (hfiles).
   */
  public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;

  /**
   * Default setting for whether or not to use bloomfilters.
   */
  public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();

  /**
   * Default setting for whether to cache bloom filter blocks on write if block
   * caching is enabled.
   */
  public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;

  /**
   * Default time to live of cell contents.
   */
  public static final int DEFAULT_TTL = HConstants.FOREVER;

  /**
   * Default scope.
   */
  public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;

  /**
   * Default setting for whether to evict cached blocks from the blockcache on
   * close.
   */
  public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;

  /**
   * Default compress tags along with any type of DataBlockEncoding.
   */
  public static final boolean DEFAULT_COMPRESS_TAGS = true;

  /**
   * Default setting for whether to prefetch blocks into the blockcache on open.
   */
  public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;

  // Attribute-name -> default-value map, used e.g. to detect non-default settings.
  private final static Map<String, String> DEFAULT_VALUES
    = new HashMap<String, String>();
  // Attribute keys users may not set directly; populated in the static initializer below.
  private final static Set<Bytes> RESERVED_KEYWORDS
      = new HashSet<Bytes>();
250 
  static {
      // Register the default value for every well-known attribute ...
      DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
      DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
      DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
      DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
      DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
      DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
      DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
      DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
      DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
      DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
      DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
      DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
      DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1));
      DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
      DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
      DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
      DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
      // ... and every key with a default is also a reserved keyword.
      for (String s : DEFAULT_VALUES.keySet()) {
        RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
      }
      // These keys have no default value but are still reserved.
      RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION)));
      RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY)));
      RESERVED_KEYWORDS.add(new Bytes(IS_MOB_BYTES));
      RESERVED_KEYWORDS.add(new Bytes(MOB_THRESHOLD_BYTES));
  }
277 
  // Sentinel meaning "cachedMaxVersions has not been computed yet".
  private static final int UNINITIALIZED = -1;

  // Column family name
  private byte [] name;

  // Column metadata: attribute key/value pairs (keys such as COMPRESSION, TTL, ...).
  private final Map<Bytes, Bytes> values =
      new HashMap<Bytes, Bytes>();

  /**
   * A map which holds the configuration specific to the column family.
   * The keys of the map have the same names as config keys and override the defaults with
   * cf-specific settings. Example usage may be for compactions, etc.
   */
  private final Map<String, String> configuration = new HashMap<String, String>();

  /*
   * Cache the max versions rather than calculate it every time.
   * Invalidated back to UNINITIALIZED whenever the VERSIONS attribute is rewritten.
   */
  private int cachedMaxVersions = UNINITIALIZED;
298 
299   /**
300    * Default constructor. Must be present for PB deserializations.
301    */
302   private HColumnDescriptor() {
303     this.name = null;
304   }
305 
306   /**
307    * Construct a column descriptor specifying only the family name
308    * The other attributes are defaulted.
309    *
310    * @param familyName Column family name. Must be 'printable' -- digit or
311    * letter -- and may not contain a <code>:</code>
312    */
313   public HColumnDescriptor(final String familyName) {
314     this(Bytes.toBytes(familyName));
315   }
316 
317   /**
318    * Construct a column descriptor specifying only the family name
319    * The other attributes are defaulted.
320    *
321    * @param familyName Column family name. Must be 'printable' -- digit or
322    * letter -- and may not contain a <code>:</code>
323    */
324   public HColumnDescriptor(final byte [] familyName) {
325     isLegalFamilyName(familyName);
326     this.name = familyName;
327 
328     setMaxVersions(DEFAULT_VERSIONS);
329     setMinVersions(DEFAULT_MIN_VERSIONS);
330     setKeepDeletedCells(DEFAULT_KEEP_DELETED);
331     setInMemory(DEFAULT_IN_MEMORY);
332     setBlockCacheEnabled(DEFAULT_BLOCKCACHE);
333     setTimeToLive(DEFAULT_TTL);
334     setCompressionType(Compression.Algorithm.valueOf(DEFAULT_COMPRESSION.toUpperCase()));
335     setDataBlockEncoding(DataBlockEncoding.valueOf(DEFAULT_DATA_BLOCK_ENCODING.toUpperCase()));
336     setBloomFilterType(BloomType.valueOf(DEFAULT_BLOOMFILTER.toUpperCase()));
337     setBlocksize(DEFAULT_BLOCKSIZE);
338     setScope(DEFAULT_REPLICATION_SCOPE);
339   }
340 
341   /**
342    * Constructor.
343    * Makes a deep copy of the supplied descriptor.
344    * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
345    * @param desc The descriptor.
346    */
347   public HColumnDescriptor(HColumnDescriptor desc) {
348     super();
349     this.name = desc.name.clone();
350     for (Map.Entry<Bytes, Bytes> e :
351         desc.values.entrySet()) {
352       this.values.put(e.getKey(), e.getValue());
353     }
354     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
355       this.configuration.put(e.getKey(), e.getValue());
356     }
357     setMaxVersions(desc.getMaxVersions());
358   }
359 
360   /**
361    * @param b Family name.
362    * @return <code>b</code>
363    * @throws IllegalArgumentException If not null and not a legitimate family
364    * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
365    * <code>b</code> can be null when deserializing).  Cannot start with a '.'
366    * either. Also Family can not be an empty value or equal "recovered.edits".
367    */
368   public static byte [] isLegalFamilyName(final byte [] b) {
369     if (b == null) {
370       return b;
371     }
372     Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
373     if (b[0] == '.') {
374       throw new IllegalArgumentException("Family names cannot start with a " +
375         "period: " + Bytes.toString(b));
376     }
377     for (int i = 0; i < b.length; i++) {
378       if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
379         throw new IllegalArgumentException("Illegal character <" + b[i] +
380           ">. Family names cannot contain control characters or colons: " +
381           Bytes.toString(b));
382       }
383     }
384     byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
385     if (Bytes.equals(recoveredEdit, b)) {
386       throw new IllegalArgumentException("Family name cannot be: " +
387           HConstants.RECOVERED_EDITS_DIR);
388     }
389     return b;
390   }
391 
392   /**
393    * @return Name of this column family
394    */
395   public byte [] getName() {
396     return name;
397   }
398 
399   /**
400    * @return Name of this column family
401    */
402   public String getNameAsString() {
403     return Bytes.toString(this.name);
404   }
405 
406   /**
407    * @param key The key.
408    * @return The value.
409    */
410   public byte[] getValue(byte[] key) {
411     Bytes ibw = values.get(new Bytes(key));
412     if (ibw == null)
413       return null;
414     return ibw.get();
415   }
416 
417   /**
418    * @param key The key.
419    * @return The value as a string.
420    */
421   public String getValue(String key) {
422     byte[] value = getValue(Bytes.toBytes(key));
423     if (value == null)
424       return null;
425     return Bytes.toString(value);
426   }
427 
428   /**
429    * @return All values.
430    */
431   public Map<Bytes, Bytes> getValues() {
432     // shallow pointer copy
433     return Collections.unmodifiableMap(values);
434   }
435 
436   /**
437    * @param key The key.
438    * @param value The value.
439    * @return this (for chained invocation)
440    */
441   public HColumnDescriptor setValue(byte[] key, byte[] value) {
442     if (Bytes.compareTo(Bytes.toBytes(HConstants.VERSIONS), key) == 0) {
443       cachedMaxVersions = UNINITIALIZED;
444     }
445     values.put(new Bytes(key),
446         new Bytes(value));
447     return this;
448   }
449 
450   /**
451    * @param key Key whose key and value we're to remove from HCD parameters.
452    */
453   public void remove(final byte [] key) {
454     values.remove(new Bytes(key));
455   }
456 
457   /**
458    * @param key The key.
459    * @param value The value.
460    * @return this (for chained invocation)
461    */
462   public HColumnDescriptor setValue(String key, String value) {
463     if (value == null) {
464       remove(Bytes.toBytes(key));
465     } else {
466       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
467     }
468     return this;
469   }
470 
471   /**
472    * @return compression type being used for the column family
473    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
474    *             (<a href="https://issues.apache.org/jira/browse/HBASE-13655">HBASE-13655</a>).
475    *             Use {@link #getCompressionType()}.
476    */
477   @Deprecated
478   public Compression.Algorithm getCompression() {
479     return getCompressionType();
480   }
481 
482   /**
483    *  @return compression type being used for the column family for major compaction
484    *  @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
485    *             (<a href="https://issues.apache.org/jira/browse/HBASE-13655">HBASE-13655</a>).
486    *             Use {@link #getCompactionCompressionType()}.
487    */
488   @Deprecated
489   public Compression.Algorithm getCompactionCompression() {
490     return getCompactionCompressionType();
491   }
492 
  /**
   * @return maximum number of versions
   * Lazily parses and memoizes the VERSIONS attribute; the cache is invalidated
   * by {@link #setValue(byte[], byte[])} when VERSIONS is rewritten.
   * NOTE(review): throws NumberFormatException (on null, a NPE-like failure) if
   * VERSIONS was never set -- only possible via the private PB-deserialization
   * constructor; confirm deserializers always populate it.
   */
  public int getMaxVersions() {
    if (this.cachedMaxVersions == UNINITIALIZED) {
      String v = getValue(HConstants.VERSIONS);
      this.cachedMaxVersions = Integer.parseInt(v);
    }
    return this.cachedMaxVersions;
  }
501 
502   /**
503    * @param maxVersions maximum number of versions
504    * @return this (for chained invocation)
505    */
506   public HColumnDescriptor setMaxVersions(int maxVersions) {
507     if (maxVersions <= 0) {
508       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
509       // Until there is support, consider 0 or < 0 -- a configuration error.
510       throw new IllegalArgumentException("Maximum versions must be positive");
511     }
512     if (maxVersions < this.getMinVersions()) {
513         throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
514             + " while minVersion is " + this.getMinVersions()
515             + ". Maximum versions must be >= minimum versions ");
516     }
517     setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
518     cachedMaxVersions = maxVersions;
519     return this;
520   }
521 
522   /**
523    * Set minimum and maximum versions to keep
524    *
525    * @param minVersions minimal number of versions
526    * @param maxVersions maximum number of versions
527    * @return this (for chained invocation)
528    */
529   public HColumnDescriptor setVersions(int minVersions, int maxVersions) {
530     if (minVersions <= 0) {
531       // TODO: Allow minVersion and maxVersion of 0 to be the way you say "Keep all versions".
532       // Until there is support, consider 0 or < 0 -- a configuration error.
533       throw new IllegalArgumentException("Minimum versions must be positive");
534     }
535 
536     if (maxVersions < minVersions) {
537       throw new IllegalArgumentException("Unable to set MaxVersion to " + maxVersions
538         + " and set MinVersion to " + minVersions
539         + ", as maximum versions must be >= minimum versions.");
540     }
541     setMinVersions(minVersions);
542     setMaxVersions(maxVersions);
543     return this;
544   }
545 
546   /**
547    * @return The storefile/hfile blocksize for this column family.
548    */
549   public synchronized int getBlocksize() {
550     if (this.blocksize == null) {
551       String value = getValue(BLOCKSIZE);
552       this.blocksize = (value != null)?
553         Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
554     }
555     return this.blocksize.intValue();
556 
557   }
558 
559   /**
560    * @param s Blocksize to use when writing out storefiles/hfiles on this
561    * column family.
562    * @return this (for chained invocation)
563    */
564   public HColumnDescriptor setBlocksize(int s) {
565     setValue(BLOCKSIZE, Integer.toString(s));
566     this.blocksize = null;
567     return this;
568   }
569 
570   /**
571    * @return Compression type setting.
572    */
573   public Compression.Algorithm getCompressionType() {
574     String n = getValue(COMPRESSION);
575     if (n == null) {
576       return Compression.Algorithm.NONE;
577     }
578     return Compression.Algorithm.valueOf(n.toUpperCase());
579   }
580 
581   /**
582    * Compression types supported in hbase.
583    * LZO is not bundled as part of the hbase distribution.
584    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
585    * for how to enable it.
586    * @param type Compression type setting.
587    * @return this (for chained invocation)
588    */
589   public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
590     return setValue(COMPRESSION, type.getName().toUpperCase());
591   }
592 
593   /**
594    * @return the data block encoding algorithm used in block cache and
595    *         optionally on disk
596    */
597   public DataBlockEncoding getDataBlockEncoding() {
598     String type = getValue(DATA_BLOCK_ENCODING);
599     if (type == null) {
600       type = DEFAULT_DATA_BLOCK_ENCODING;
601     }
602     return DataBlockEncoding.valueOf(type);
603   }
604 
605   /**
606    * Set data block encoding algorithm used in block cache.
607    * @param type What kind of data block encoding will be used.
608    * @return this (for chained invocation)
609    */
610   public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
611     String name;
612     if (type != null) {
613       name = type.toString();
614     } else {
615       name = DataBlockEncoding.NONE.toString();
616     }
617     return setValue(DATA_BLOCK_ENCODING, name);
618   }
619 
620   /**
621    * Set whether the tags should be compressed along with DataBlockEncoding. When no
622    * DataBlockEncoding is been used, this is having no effect.
623    *
624    * @param compressTags
625    * @return this (for chained invocation)
626    */
627   public HColumnDescriptor setCompressTags(boolean compressTags) {
628     return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
629   }
630 
631   /**
632    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
633    *         DataBlockEncoding is been used, this is having no effect.
634    */
635   public boolean isCompressTags() {
636     String compressTagsStr = getValue(COMPRESS_TAGS);
637     boolean compressTags = DEFAULT_COMPRESS_TAGS;
638     if (compressTagsStr != null) {
639       compressTags = Boolean.parseBoolean(compressTagsStr);
640     }
641     return compressTags;
642   }
643 
644   /**
645    * @return Compression type setting.
646    */
647   public Compression.Algorithm getCompactionCompressionType() {
648     String n = getValue(COMPRESSION_COMPACT);
649     if (n == null) {
650       return getCompressionType();
651     }
652     return Compression.Algorithm.valueOf(n.toUpperCase());
653   }
654 
655   /**
656    * Compression types supported in hbase.
657    * LZO is not bundled as part of the hbase distribution.
658    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
659    * for how to enable it.
660    * @param type Compression type setting.
661    * @return this (for chained invocation)
662    */
663   public HColumnDescriptor setCompactionCompressionType(
664       Compression.Algorithm type) {
665     return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase());
666   }
667 
668   /**
669    * @return True if we are to favor keeping all values for this column family in the
670    * HRegionServer cache.
671    */
672   public boolean isInMemory() {
673     String value = getValue(HConstants.IN_MEMORY);
674     if (value != null) {
675       return Boolean.parseBoolean(value);
676     }
677     return DEFAULT_IN_MEMORY;
678   }
679 
680   /**
681    * @param inMemory True if we are to favor keeping all values for this column family in the
682    * HRegionServer cache
683    * @return this (for chained invocation)
684    */
685   public HColumnDescriptor setInMemory(boolean inMemory) {
686     return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
687   }
688 
689   public KeepDeletedCells getKeepDeletedCells() {
690     String value = getValue(KEEP_DELETED_CELLS);
691     if (value != null) {
692       // toUpperCase for backwards compatibility
693       return KeepDeletedCells.valueOf(value.toUpperCase());
694     }
695     return DEFAULT_KEEP_DELETED;
696   }
697 
698   /**
699    * @param keepDeletedCells True if deleted rows should not be collected
700    * immediately.
701    * @return this (for chained invocation)
702    */
703   public HColumnDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) {
704     return setValue(KEEP_DELETED_CELLS, keepDeletedCells.toString());
705   }
706 
707   /**
708    * @return Time-to-live of cell contents, in seconds.
709    */
710   public int getTimeToLive() {
711     String value = getValue(TTL);
712     return (value != null)? Integer.parseInt(value) : DEFAULT_TTL;
713   }
714 
715   /**
716    * @param timeToLive Time-to-live of cell contents, in seconds.
717    * @return this (for chained invocation)
718    */
719   public HColumnDescriptor setTimeToLive(int timeToLive) {
720     return setValue(TTL, Integer.toString(timeToLive));
721   }
722 
723   /**
724    * @param timeToLive Time to live of cell contents, in human readable format
725    *                   @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit)
726    * @return this (for chained invocation)
727    */
728   public HColumnDescriptor setTimeToLive(String timeToLive) throws HBaseException {
729     return setValue(TTL, PrettyPrinter.valueOf(timeToLive, Unit.TIME_INTERVAL));
730   }
731 
732   /**
733    * @return The minimum number of versions to keep.
734    */
735   public int getMinVersions() {
736     String value = getValue(MIN_VERSIONS);
737     return (value != null)? Integer.parseInt(value) : 0;
738   }
739 
740   /**
741    * @param minVersions The minimum number of versions to keep.
742    * (used when timeToLive is set)
743    * @return this (for chained invocation)
744    */
745   public HColumnDescriptor setMinVersions(int minVersions) {
746     return setValue(MIN_VERSIONS, Integer.toString(minVersions));
747   }
748 
749   /**
750    * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX
751    * and BLOOM type blocks).
752    */
753   public boolean isBlockCacheEnabled() {
754     String value = getValue(BLOCKCACHE);
755     if (value != null) {
756       return Boolean.parseBoolean(value);
757     }
758     return DEFAULT_BLOCKCACHE;
759   }
760 
761   /**
762    * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache
763    * INDEX and BLOOM blocks; you cannot turn this off).
764    * @return this (for chained invocation)
765    */
766   public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
767     return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
768   }
769 
770   /**
771    * @return bloom filter type used for new StoreFiles in ColumnFamily
772    */
773   public BloomType getBloomFilterType() {
774     String n = getValue(BLOOMFILTER);
775     if (n == null) {
776       n = DEFAULT_BLOOMFILTER;
777     }
778     return BloomType.valueOf(n.toUpperCase());
779   }
780 
781   /**
782    * @param bt bloom filter type
783    * @return this (for chained invocation)
784    */
785   public HColumnDescriptor setBloomFilterType(final BloomType bt) {
786     return setValue(BLOOMFILTER, bt.toString());
787   }
788 
789    /**
790     * @return the scope tag
791     */
792   public int getScope() {
793     byte[] value = getValue(REPLICATION_SCOPE_BYTES);
794     if (value != null) {
795       return Integer.parseInt(Bytes.toString(value));
796     }
797     return DEFAULT_REPLICATION_SCOPE;
798   }
799 
800  /**
801   * @param scope the scope tag
802   * @return this (for chained invocation)
803   */
804   public HColumnDescriptor setScope(int scope) {
805     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
806   }
807 
808   /**
809    * @return true if we should cache data blocks on write
810    */
811   public boolean isCacheDataOnWrite() {
812     return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
813   }
814 
815   /**
816    * @param value true if we should cache data blocks on write
817    * @return this (for chained invocation)
818    */
819   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
820     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
821   }
822 
823   /**
824    * @return true if we should cache data blocks in the L1 cache (if block cache deploy has more
825    *         than one tier; e.g. we are using CombinedBlockCache).
826    */
827   public boolean isCacheDataInL1() {
828     return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
829   }
830 
831   /**
832    * @param value true if we should cache data blocks in the L1 cache (if block cache deploy
833    * has more than one tier; e.g. we are using CombinedBlockCache).
834    * @return this (for chained invocation)
835    */
836   public HColumnDescriptor setCacheDataInL1(boolean value) {
837     return setValue(CACHE_DATA_IN_L1, Boolean.toString(value));
838   }
839 
840   private boolean setAndGetBoolean(final String key, final boolean defaultSetting) {
841     String value = getValue(key);
842     if (value != null) {
843       return Boolean.parseBoolean(value);
844     }
845     return defaultSetting;
846   }
847 
848   /**
849    * @return true if we should cache index blocks on write
850    */
851   public boolean isCacheIndexesOnWrite() {
852     return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
853   }
854 
855   /**
856    * @param value true if we should cache index blocks on write
857    * @return this (for chained invocation)
858    */
859   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
860     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
861   }
862 
863   /**
864    * @return true if we should cache bloomfilter blocks on write
865    */
866   public boolean isCacheBloomsOnWrite() {
867     return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
868   }
869 
870   /**
871    * @param value true if we should cache bloomfilter blocks on write
872    * @return this (for chained invocation)
873    */
874   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
875     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
876   }
877 
878   /**
879    * @return true if we should evict cached blocks from the blockcache on close
880    */
881   public boolean isEvictBlocksOnClose() {
882     return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
883   }
884 
885   /**
886    * @param value true if we should evict cached blocks from the blockcache on
887    * close
888    * @return this (for chained invocation)
889    */
890   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
891     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
892   }
893 
894   /**
895    * @return true if we should prefetch blocks into the blockcache on open
896    */
897   public boolean isPrefetchBlocksOnOpen() {
898     return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
899   }
900 
901   /**
902    * @param value true if we should prefetch blocks into the blockcache on open
903    * @return this (for chained invocation)
904    */
905   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
906     return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
907   }
908 
909   /**
910    * @see java.lang.Object#toString()
911    */
912   @Override
913   public String toString() {
914     StringBuilder s = new StringBuilder();
915 
916     s.append('{');
917     s.append(HConstants.NAME);
918     s.append(" => '");
919     s.append(Bytes.toString(name));
920     s.append("'");
921     s.append(getValues(true));
922     s.append('}');
923     return s.toString();
924   }
925 
926   /**
927    * @return Column family descriptor with only the customized attributes.
928    */
929   public String toStringCustomizedValues() {
930     StringBuilder s = new StringBuilder();
931     s.append('{');
932     s.append(HConstants.NAME);
933     s.append(" => '");
934     s.append(Bytes.toString(name));
935     s.append("'");
936     s.append(getValues(false));
937     s.append('}');
938     return s.toString();
939   }
940 
941   private StringBuilder getValues(boolean printDefaults) {
942     StringBuilder s = new StringBuilder();
943 
944     boolean hasConfigKeys = false;
945 
946     // print all reserved keys first
947     for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
948       if (!RESERVED_KEYWORDS.contains(entry.getKey())) {
949         hasConfigKeys = true;
950         continue;
951       }
952       String key = Bytes.toString(entry.getKey().get());
953       String value = Bytes.toStringBinary(entry.getValue().get());
954       if (printDefaults
955           || !DEFAULT_VALUES.containsKey(key)
956           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
957         s.append(", ");
958         s.append(key);
959         s.append(" => ");
960         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
961       }
962     }
963 
964     // print all non-reserved, advanced config keys as a separate subset
965     if (hasConfigKeys) {
966       s.append(", ");
967       s.append(HConstants.METADATA).append(" => ");
968       s.append('{');
969       boolean printComma = false;
970       for (Bytes k : values.keySet()) {
971         if (RESERVED_KEYWORDS.contains(k)) {
972           continue;
973         }
974         String key = Bytes.toString(k.get());
975         String value = Bytes.toStringBinary(values.get(k).get());
976         if (printComma) {
977           s.append(", ");
978         }
979         printComma = true;
980         s.append('\'').append(key).append('\'');
981         s.append(" => ");
982         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
983       }
984       s.append('}');
985     }
986 
987     if (!configuration.isEmpty()) {
988       s.append(", ");
989       s.append(HConstants.CONFIGURATION).append(" => ");
990       s.append('{');
991       boolean printCommaForConfiguration = false;
992       for (Map.Entry<String, String> e : configuration.entrySet()) {
993         if (printCommaForConfiguration) s.append(", ");
994         printCommaForConfiguration = true;
995         s.append('\'').append(e.getKey()).append('\'');
996         s.append(" => ");
997         s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
998       }
999       s.append("}");
1000     }
1001     return s;
1002   }
1003 
1004   public static Unit getUnit(String key) {
1005     Unit unit;
1006       /* TTL for now, we can add more as we neeed */
1007     if (key.equals(HColumnDescriptor.TTL)) {
1008       unit = Unit.TIME_INTERVAL;
1009     } else if (key.equals(HColumnDescriptor.MOB_THRESHOLD)) {
1010       unit = Unit.LONG;
1011     } else if (key.equals(HColumnDescriptor.IS_MOB)) {
1012       unit = Unit.BOOLEAN;
1013     } else {
1014       unit = Unit.NONE;
1015     }
1016     return unit;
1017   }
1018 
1019   public static Map<String, String> getDefaultValues() {
1020     return Collections.unmodifiableMap(DEFAULT_VALUES);
1021   }
1022 
1023   /**
1024    * @see java.lang.Object#equals(java.lang.Object)
1025    */
1026   @Override
1027   public boolean equals(Object obj) {
1028     if (this == obj) {
1029       return true;
1030     }
1031     if (obj == null) {
1032       return false;
1033     }
1034     if (!(obj instanceof HColumnDescriptor)) {
1035       return false;
1036     }
1037     return compareTo((HColumnDescriptor)obj) == 0;
1038   }
1039 
1040   /**
1041    * @see java.lang.Object#hashCode()
1042    */
1043   @Override
1044   public int hashCode() {
1045     int result = Bytes.hashCode(this.name);
1046     result ^= (int) COLUMN_DESCRIPTOR_VERSION;
1047     result ^= values.hashCode();
1048     result ^= configuration.hashCode();
1049     return result;
1050   }
1051 
1052   // Comparable
1053   @Override
1054   public int compareTo(HColumnDescriptor o) {
1055     int result = Bytes.compareTo(this.name, o.getName());
1056     if (result == 0) {
1057       // punt on comparison for ordering, just calculate difference
1058       result = this.values.hashCode() - o.values.hashCode();
1059       if (result < 0)
1060         result = -1;
1061       else if (result > 0)
1062         result = 1;
1063     }
1064     if (result == 0) {
1065       result = this.configuration.hashCode() - o.configuration.hashCode();
1066       if (result < 0)
1067         result = -1;
1068       else if (result > 0)
1069         result = 1;
1070     }
1071     return result;
1072   }
1073 
1074   /**
1075    * @return This instance serialized with pb with pb magic prefix
1076    * @see #parseFrom(byte[])
1077    */
1078   public byte [] toByteArray() {
1079     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1080   }
1081 
1082   /**
1083    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
1084    * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
1085    * @throws DeserializationException
1086    * @see #toByteArray()
1087    */
1088   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
1089     if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
1090     int pblen = ProtobufUtil.lengthOfPBMagic();
1091     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1092     ColumnFamilySchema cfs = null;
1093     try {
1094       ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
1095       cfs = builder.build();
1096     } catch (IOException e) {
1097       throw new DeserializationException(e);
1098     }
1099     return convert(cfs);
1100   }
1101 
1102   /**
1103    * @param cfs
1104    * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
1105    */
1106   public static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
1107     // Use the empty constructor so we preserve the initial values set on construction for things
1108     // like maxVersion.  Otherwise, we pick up wrong values on deserialization which makes for
1109     // unrelated-looking test failures that are hard to trace back to here.
1110     HColumnDescriptor hcd = new HColumnDescriptor();
1111     hcd.name = cfs.getName().toByteArray();
1112     for (BytesBytesPair a: cfs.getAttributesList()) {
1113       hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1114     }
1115     for (NameStringPair a: cfs.getConfigurationList()) {
1116       hcd.setConfiguration(a.getName(), a.getValue());
1117     }
1118     return hcd;
1119   }
1120 
1121   /**
1122    * @return Convert this instance to a the pb column family type
1123    */
1124   public ColumnFamilySchema convert() {
1125     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1126     builder.setName(ByteStringer.wrap(getName()));
1127     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
1128       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1129       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1130       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1131       builder.addAttributes(aBuilder.build());
1132     }
1133     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1134       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1135       aBuilder.setName(e.getKey());
1136       aBuilder.setValue(e.getValue());
1137       builder.addConfiguration(aBuilder.build());
1138     }
1139     return builder.build();
1140   }
1141 
1142   /**
1143    * Getter for accessing the configuration value by key.
1144    */
1145   public String getConfigurationValue(String key) {
1146     return configuration.get(key);
1147   }
1148 
1149   /**
1150    * Getter for fetching an unmodifiable {@link #configuration} map.
1151    */
1152   public Map<String, String> getConfiguration() {
1153     // shallow pointer copy
1154     return Collections.unmodifiableMap(configuration);
1155   }
1156 
1157   /**
1158    * Setter for storing a configuration setting in {@link #configuration} map.
1159    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1160    * @param value String value. If null, removes the configuration.
1161    */
1162   public HColumnDescriptor setConfiguration(String key, String value) {
1163     if (value == null) {
1164       removeConfiguration(key);
1165     } else {
1166       configuration.put(key, value);
1167     }
1168     return this;
1169   }
1170 
1171   /**
1172    * Remove a configuration setting represented by the key from the {@link #configuration} map.
1173    */
1174   public void removeConfiguration(final String key) {
1175     configuration.remove(key);
1176   }
1177 
1178   /**
1179    * Return the encryption algorithm in use by this family
1180    */
1181   public String getEncryptionType() {
1182     return getValue(ENCRYPTION);
1183   }
1184 
1185   /**
1186    * Set the encryption algorithm for use with this family
1187    * @param algorithm
1188    */
1189   public HColumnDescriptor setEncryptionType(String algorithm) {
1190     setValue(ENCRYPTION, algorithm);
1191     return this;
1192   }
1193 
1194   /** Return the raw crypto key attribute for the family, or null if not set  */
1195   public byte[] getEncryptionKey() {
1196     return getValue(Bytes.toBytes(ENCRYPTION_KEY));
1197   }
1198 
1199   /** Set the raw crypto key attribute for the family */
1200   public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
1201     setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
1202     return this;
1203   }
1204 
1205   /**
1206    * Gets the mob threshold of the family.
1207    * If the size of a cell value is larger than this threshold, it's regarded as a mob.
1208    * The default threshold is 1024*100(100K)B.
1209    * @return The mob threshold.
1210    */
1211   public long getMobThreshold() {
1212     byte[] threshold = getValue(MOB_THRESHOLD_BYTES);
1213     return threshold != null && threshold.length == Bytes.SIZEOF_LONG ? Bytes.toLong(threshold)
1214         : DEFAULT_MOB_THRESHOLD;
1215   }
1216 
1217   /**
1218    * Sets the mob threshold of the family.
1219    * @param threshold The mob threshold.
1220    * @return this (for chained invocation)
1221    */
1222   public HColumnDescriptor setMobThreshold(long threshold) {
1223     setValue(MOB_THRESHOLD_BYTES, Bytes.toBytes(threshold));
1224     return this;
1225   }
1226 
1227   /**
1228    * Gets whether the mob is enabled for the family.
1229    * @return True if the mob is enabled for the family.
1230    */
1231   public boolean isMobEnabled() {
1232     byte[] isMobEnabled = getValue(IS_MOB_BYTES);
1233     return isMobEnabled != null && isMobEnabled.length == Bytes.SIZEOF_BOOLEAN
1234         && Bytes.toBoolean(isMobEnabled);
1235   }
1236 
1237   /**
1238    * Enables the mob for the family.
1239    * @param isMobEnabled Whether to enable the mob for the family.
1240    * @return this (for chained invocation)
1241    */
1242   public HColumnDescriptor setMobEnabled(boolean isMobEnabled) {
1243     setValue(IS_MOB_BYTES, Bytes.toBytes(isMobEnabled));
1244     return this;
1245   }
1246 
1247   /**
1248    * @return replication factor set for this CF or {@link #DEFAULT_DFS_REPLICATION} if not set.
1249    *         <p>
1250    *         {@link #DEFAULT_DFS_REPLICATION} value indicates that user has explicitly not set any
1251    *         block replication factor for this CF, hence use the default replication factor set in
1252    *         the file system.
1253    */
1254   public short getDFSReplication() {
1255     String rf = getValue(DFS_REPLICATION);
1256     return rf == null ? DEFAULT_DFS_REPLICATION : Short.valueOf(rf);
1257   }
1258 
1259   /**
1260    * Set the replication factor to hfile(s) belonging to this family
1261    * @param replication number of replicas the blocks(s) belonging to this CF should have, or
1262    *          {@link #DEFAULT_DFS_REPLICATION} for the default replication factor set in the
1263    *          filesystem
1264    * @return this (for chained invocation)
1265    */
1266   public HColumnDescriptor setDFSReplication(short replication) {
1267     if (replication < 1 && replication != DEFAULT_DFS_REPLICATION) {
1268       throw new IllegalArgumentException(
1269           "DFS replication factor cannot be less than 1 if explictly set.");
1270     }
1271     setValue(DFS_REPLICATION, Short.toString(replication));
1272     return this;
1273   }
1274 }