1   /*
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.client;
21  
22  import java.io.IOException;
23  import java.util.ArrayList;
24  import java.util.Arrays;
25  import java.util.HashMap;
26  import java.util.List;
27  import java.util.Map;
28  import java.util.NavigableSet;
29  import java.util.TreeMap;
30  import java.util.TreeSet;
31  
32  import org.apache.commons.logging.Log;
33  import org.apache.commons.logging.LogFactory;
34  import org.apache.hadoop.hbase.HConstants;
35  import org.apache.hadoop.hbase.classification.InterfaceAudience;
36  import org.apache.hadoop.hbase.classification.InterfaceStability;
37  import org.apache.hadoop.hbase.filter.Filter;
38  import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
39  import org.apache.hadoop.hbase.io.TimeRange;
40  import org.apache.hadoop.hbase.security.access.Permission;
41  import org.apache.hadoop.hbase.security.visibility.Authorizations;
42  import org.apache.hadoop.hbase.util.Bytes;
43  
44  /**
45   * Used to perform Scan operations.
46   * <p>
47   * All operations are identical to {@link Get} with the exception of
48   * instantiation.  Rather than specifying a single row, an optional startRow
49   * and stopRow may be defined.  If rows are not specified, the Scanner will
50   * iterate over all rows.
51   * <p>
52   * To scan everything for each row, instantiate a Scan object.
53   * <p>
54   * To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}.
55   * If caching is NOT set, we will use the caching value of the hosting {@link Table}.
56   * In addition to row caching, it is possible to specify a
57   * maximum result size, using {@link #setMaxResultSize(long)}. When both are used,
58   * single server requests are limited by either number of rows or maximum result size, whichever
59   * limit comes first.
60   * <p>
61   * To further define the scope of what to get when scanning, perform additional
62   * methods as outlined below.
63   * <p>
64   * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily}
65   * for each family to retrieve.
66   * <p>
67   * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn}
68   * for each column to retrieve.
69   * <p>
70   * To only retrieve columns within a specific range of version timestamps,
71   * execute {@link #setTimeRange(long, long) setTimeRange}.
72   * <p>
73   * To only retrieve columns with a specific timestamp, execute
74   * {@link #setTimeStamp(long) setTimeStamp}.
75   * <p>
76   * To limit the number of versions of each column to be returned, execute
77   * {@link #setMaxVersions(int) setMaxVersions}.
78   * <p>
79   * To limit the maximum number of values returned for each call to next(),
80   * execute {@link #setBatch(int) setBatch}.
81   * <p>
82   * To add a filter, execute {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}.
83   * <p>
84   * Expert: To explicitly disable server-side block caching for this scan,
85   * execute {@link #setCacheBlocks(boolean)}.
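     * <p>
     * For example, a minimal sketch of a typical scan (the row keys, family, and qualifier
     * below are illustrative, and {@code table} is assumed to be an open {@link Table}):
     * <pre>{@code
     * Scan scan = new Scan(Bytes.toBytes("row-0100"), Bytes.toBytes("row-0200"));
     * scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual"));
     * scan.setCaching(100);
     * ResultScanner scanner = table.getScanner(scan);
     * try {
     *   for (Result result : scanner) {
     *     // process each result
     *   }
     * } finally {
     *   scanner.close();
     * }
     * }</pre>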
86   */
87  @InterfaceAudience.Public
88  @InterfaceStability.Stable
89  public class Scan extends Query {
90    private static final Log LOG = LogFactory.getLog(Scan.class);
91  
92    private static final String RAW_ATTR = "_raw_";
93  
94    /**
95     * EXPERT ONLY.
96     * An integer (not long) indicating to the scanner logic how many times we attempt to retrieve the
97     * next KV before we schedule a reseek.
98     * The right value depends on the size of the average KV. A reseek is more efficient when
99     * it can skip 5-10 KVs or 512B-1KB, or when the next KV is likely found in another HFile block.
100    * Setting this only has an effect when columns were added with
101    * {@link #addColumn(byte[], byte[])}.
102    * <pre>{@code
103    * Scan s = new Scan(...);
104    * s.addColumn(...);
105    * s.setAttribute(Scan.HINT_LOOKAHEAD, Bytes.toBytes(2));
106    * }</pre>
107    * Default is 0 (always reseek).
108    */
109   public static final String HINT_LOOKAHEAD = "_look_ahead_";
110 
111   private byte [] startRow = HConstants.EMPTY_START_ROW;
112   private byte [] stopRow  = HConstants.EMPTY_END_ROW;
113   private int maxVersions = 1;
114   private int batch = -1;
115 
116   private int storeLimit = -1;
117   private int storeOffset = 0;
118   private boolean getScan;
119 
120   // If an application wants to collect scan metrics, it needs to
121   // call scan.setAttribute(SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE))
122   static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
123   static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data";
124 
125   // If an application wants to use multiple scans over different tables each scan must
126   // define this attribute with the appropriate table name by calling
127   // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
128   static public final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";
129 
130   /*
131    * -1 means no caching
132    */
133   private int caching = -1;
134   private long maxResultSize = -1;
135   private boolean cacheBlocks = true;
136   private boolean reversed = false;
137   private TimeRange tr = new TimeRange();
138   private Map<byte [], NavigableSet<byte []>> familyMap =
139     new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
140   private Boolean loadColumnFamiliesOnDemand = null;
141 
142   /**
143    * Set this to true for a small scan to get better performance.
144    *
145    * A small scan should use pread, while a big scan can use seek + read.
146    *
147    * seek + read is fast but can cause two problems: (1) resource contention and
148    * (2) too much network IO.
149    *
150    * [89-fb] Using pread for non-compaction read request
151    * https://issues.apache.org/jira/browse/HBASE-7266
152    *
153    * On the other hand, if this is set to true, openScanner, next, and closeScanner
154    * are done in a single RPC call, which gives better performance for small scans
155    * [HBASE-9488].
156    *
157    * Generally, if the scan range is within one data block (64KB), it can be
158    * considered a small scan.
159   */
160   private boolean small = false;
161 
162   /**
163    * Create a Scan operation across all rows.
164    */
165   public Scan() {}
166 
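      /**
       * Create a Scan operation starting at the specified row, with the given filter applied.
       * <p>
       * If the specified row does not exist, the Scanner will start from the
       * next closest row after the specified row.
       * @param startRow row to start scanner at or after
       * @param filter filter to run against the scanned rows
       */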
167   public Scan(byte [] startRow, Filter filter) {
168     this(startRow);
169     this.filter = filter;
170   }
171 
172   /**
173    * Create a Scan operation starting at the specified row.
174    * <p>
175    * If the specified row does not exist, the Scanner will start from the
176    * next closest row after the specified row.
177    * @param startRow row to start scanner at or after
178    */
179   public Scan(byte [] startRow) {
180     this.startRow = startRow;
181   }
182 
183   /**
184    * Create a Scan operation for the range of rows specified.
185    * @param startRow row to start scanner at or after (inclusive)
186    * @param stopRow row to stop scanner before (exclusive)
187    */
188   public Scan(byte [] startRow, byte [] stopRow) {
189     this.startRow = startRow;
190     this.stopRow = stopRow;
191     // treat as a Get only when a non-empty startRow equals stopRow (both empty means full scan, not a Get)
192     this.getScan = isStartRowAndEqualsStopRow();
193   }
194 
195   /**
196    * Creates a new instance of this class while copying all values.
197    *
198    * @param scan  The scan instance to copy from.
199    * @throws IOException When copying the values fails.
200    */
201   public Scan(Scan scan) throws IOException {
202     startRow = scan.getStartRow();
203     stopRow  = scan.getStopRow();
204     maxVersions = scan.getMaxVersions();
205     batch = scan.getBatch();
206     storeLimit = scan.getMaxResultsPerColumnFamily();
207     storeOffset = scan.getRowOffsetPerColumnFamily();
208     caching = scan.getCaching();
209     maxResultSize = scan.getMaxResultSize();
210     cacheBlocks = scan.getCacheBlocks();
211     getScan = scan.isGetScan();
212     filter = scan.getFilter(); // clone?
213     loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
214     consistency = scan.getConsistency();
215     reversed = scan.isReversed();
216     small = scan.isSmall();
217     TimeRange ctr = scan.getTimeRange();
218     tr = new TimeRange(ctr.getMin(), ctr.getMax());
219     Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
220     for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
221       byte [] fam = entry.getKey();
222       NavigableSet<byte[]> cols = entry.getValue();
223       if (cols != null && cols.size() > 0) {
224         for (byte[] col : cols) {
225           addColumn(fam, col);
226         }
227       } else {
228         addFamily(fam);
229       }
230     }
231     for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
232       setAttribute(attr.getKey(), attr.getValue());
233     }
234   }
235 
236   /**
237    * Builds a scan object with the same specs as get.
238    * @param get get to model scan after
239    */
240   public Scan(Get get) {
241     this.startRow = get.getRow();
242     this.stopRow = get.getRow();
243     this.filter = get.getFilter();
244     this.cacheBlocks = get.getCacheBlocks();
245     this.maxVersions = get.getMaxVersions();
246     this.storeLimit = get.getMaxResultsPerColumnFamily();
247     this.storeOffset = get.getRowOffsetPerColumnFamily();
248     this.tr = get.getTimeRange();
249     this.familyMap = get.getFamilyMap();
250     this.getScan = true;
251     this.consistency = get.getConsistency();
252     for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
253       setAttribute(attr.getKey(), attr.getValue());
254     }
255   }
256 
257   public boolean isGetScan() {
258     return this.getScan || isStartRowAndEqualsStopRow();
259   }
260 
261   private boolean isStartRowAndEqualsStopRow() {
262     return this.startRow != null && this.startRow.length > 0 &&
263         Bytes.equals(this.startRow, this.stopRow);
264   }
265   /**
266    * Get all columns from the specified family.
267    * <p>
268    * Overrides previous calls to addColumn for this family.
269    * @param family family name
270    * @return this
271    */
272   public Scan addFamily(byte [] family) {
273     familyMap.remove(family);
274     familyMap.put(family, null);
275     return this;
276   }
277 
278   /**
279    * Get the column from the specified family with the specified qualifier.
280    * <p>
281    * Overrides previous calls to addFamily for this family.
282    * @param family family name
283    * @param qualifier column qualifier
284    * @return this
285    */
286   public Scan addColumn(byte [] family, byte [] qualifier) {
287     NavigableSet<byte []> set = familyMap.get(family);
288     if(set == null) {
289       set = new TreeSet<byte []>(Bytes.BYTES_COMPARATOR);
290     }
291     if (qualifier == null) {
292       qualifier = HConstants.EMPTY_BYTE_ARRAY;
293     }
294     set.add(qualifier);
295     familyMap.put(family, set);
296     return this;
297   }
298 
299   /**
300    * Get versions of columns only within the specified timestamp range,
301    * [minStamp, maxStamp).  Note, default maximum versions to return is 1.  If
302    * your time range spans more than one version and you want all versions
303    * returned, up the number of versions beyond the default.
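       * <p>
       * For example, a minimal sketch of fetching every version written in a given
       * window (the timestamps are illustrative):
       * <pre>{@code
       * Scan s = new Scan();
       * s.setTimeRange(1000L, 2000L); // [1000, 2000)
       * s.setMaxVersions();           // otherwise only the newest matching version per column is returned
       * }</pre>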
304    * @param minStamp minimum timestamp value, inclusive
305    * @param maxStamp maximum timestamp value, exclusive
306    * @throws IOException if invalid time range
307    * @see #setMaxVersions()
308    * @see #setMaxVersions(int)
309    * @return this
310    */
311   public Scan setTimeRange(long minStamp, long maxStamp)
312   throws IOException {
313     tr = new TimeRange(minStamp, maxStamp);
314     return this;
315   }
316 
317   /**
318    * Get versions of columns with the specified timestamp. Note, default maximum
319    * versions to return is 1.  If your time range spans more than one version
320    * and you want all versions returned, up the number of versions beyond the
321    * default.
322    * @param timestamp version timestamp
323    * @see #setMaxVersions()
324    * @see #setMaxVersions(int)
325    * @return this
326    */
327   public Scan setTimeStamp(long timestamp)
328   throws IOException {
329     try {
330       tr = new TimeRange(timestamp, timestamp+1);
331     } catch(IOException e) {
332       // This should never happen, unless integer overflow or something extremely wrong...
333       LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
334       throw e;
335     }
336     return this;
337   }
338 
339   /**
340    * Set the start row of the scan.
341    * <p>
342    * If the specified row does not exist, the Scanner will start from the
343    * next closest row after the specified row.
344    * @param startRow row to start scanner at or after
345    * @return this
346    */
347   public Scan setStartRow(byte [] startRow) {
348     this.startRow = startRow;
349     return this;
350   }
351 
352   /**
353    * Set the stop row of the scan.
354    * <p>
355    * The scan will include rows that are lexicographically less than
356    * the provided stopRow.
357    * <p><b>Note:</b> When doing a filter for a rowKey <u>Prefix</u>
358    * use {@link #setRowPrefixFilter(byte[])}.
359    * The 'trailing 0' will not yield the desired result.</p>
360    * @param stopRow row to end at (exclusive)
361    * @return this
362    */
363   public Scan setStopRow(byte [] stopRow) {
364     this.stopRow = stopRow;
365     return this;
366   }
367 
368   /**
369    * <p>Set a filter (using stopRow and startRow) so the result set only contains rows where the
370    * rowKey starts with the specified prefix.</p>
371    * <p>This is a utility method that converts the desired rowPrefix into the appropriate values
372    * for the startRow and stopRow to achieve the desired result.</p>
373    * <p>This can safely be used in combination with setFilter.</p>
374    * <p><b>NOTE: Doing a {@link #setStartRow(byte[])} and/or {@link #setStopRow(byte[])}
375    * after this method will yield undefined results.</b></p>
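       * <p>For example, a minimal sketch of scanning only rows whose key starts with
       * a given prefix (the prefix value is illustrative):</p>
       * <pre>{@code
       * Scan scan = new Scan();
       * scan.setRowPrefixFilter(Bytes.toBytes("abc"));
       * }</pre>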
376    * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
377    * @return this
378    */
379   public Scan setRowPrefixFilter(byte[] rowPrefix) {
380     if (rowPrefix == null) {
381       setStartRow(HConstants.EMPTY_START_ROW);
382       setStopRow(HConstants.EMPTY_END_ROW);
383     } else {
384       this.setStartRow(rowPrefix);
385       this.setStopRow(calculateTheClosestNextRowKeyForPrefix(rowPrefix));
386     }
387     return this;
388   }
389 
390   /**
391    * <p>When scanning for a prefix the scan should stop immediately after the last row that
392    * has the specified prefix. This method calculates the closest next rowKey immediately following
393    * the given rowKeyPrefix.</p>
394    * <p><b>IMPORTANT: This converts a rowKey<u>Prefix</u> into a rowKey</b>.</p>
395    * <p>If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can
396    * simply increment the last byte of the array.
397    * But if your application uses real binary rowids you may run into the scenario that your
398    * prefix is something like:</p>
399    * &nbsp;&nbsp;&nbsp;<b>{ 0x12, 0x23, 0xFF, 0xFF }</b><br/>
400    * Then this stopRow needs to be fed into the actual scan<br/>
401    * &nbsp;&nbsp;&nbsp;<b>{ 0x12, 0x24 }</b> (Notice that it is shorter now)<br/>
402    * This method calculates the correct stop row value for this use case.
403    *
404    * @param rowKeyPrefix the rowKey<u>Prefix</u>.
405    * @return the closest next rowKey immediately following the given rowKeyPrefix.
406    */
407   private byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) {
408     // Essentially we are treating it like an 'unsigned very very long' and doing +1 manually.
409     // Search for the place where the trailing 0xFFs start
410     int offset = rowKeyPrefix.length;
411     while (offset > 0) {
412       if (rowKeyPrefix[offset - 1] != (byte) 0xFF) {
413         break;
414       }
415       offset--;
416     }
417 
418     if (offset == 0) {
419       // We got a 0xFFFF... (only FFs) stopRow value which is
420       // the last possible prefix before the end of the table.
421       // So set it to stop at the 'end of the table'
422       return HConstants.EMPTY_END_ROW;
423     }
424 
425     // Copy the right length of the original
426     byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
427     // And increment the last one
428     newStopRow[newStopRow.length - 1]++;
429     return newStopRow;
430   }
431 
432   /**
433    * Get all available versions.
434    * @return this
435    */
436   public Scan setMaxVersions() {
437     this.maxVersions = Integer.MAX_VALUE;
438     return this;
439   }
440 
441   /**
442    * Get up to the specified number of versions of each column.
443    * @param maxVersions maximum versions for each column
444    * @return this
445    */
446   public Scan setMaxVersions(int maxVersions) {
447     this.maxVersions = maxVersions;
448     return this;
449   }
450 
451   /**
452    * Set the maximum number of values to return for each call to next()
453    * @param batch the maximum number of values
454    */
455   public Scan setBatch(int batch) {
456     if (this.hasFilter() && this.filter.hasFilterRow()) {
457       throw new IncompatibleFilterException(
458         "Cannot set batch on a scan using a filter" +
459         " that returns true for filter.hasFilterRow");
460     }
461     this.batch = batch;
462     return this;
463   }
464 
465   /**
466    * Set the maximum number of values to return per row per Column Family
467    * @param limit the maximum number of values returned / row / CF
468    */
469   public Scan setMaxResultsPerColumnFamily(int limit) {
470     this.storeLimit = limit;
471     return this;
472   }
473 
474   /**
475    * Set offset for the row per Column Family.
476    * @param offset is the number of kvs that will be skipped.
477    */
478   public Scan setRowOffsetPerColumnFamily(int offset) {
479     this.storeOffset = offset;
480     return this;
481   }
482 
483   /**
484    * Set the number of rows for caching that will be passed to scanners.
485    * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
486    * apply.
487    * Higher caching values will enable faster scanners but will use more memory.
488    * @param caching the number of rows for caching
489    */
490   public Scan setCaching(int caching) {
491     this.caching = caching;
492     return this;
493   }
494 
495   /**
496    * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)}
497    */
498   public long getMaxResultSize() {
499     return maxResultSize;
500   }
501 
502   /**
503    * Set the maximum result size. The default is -1; this means that no specific
504    * maximum result size will be set for this scan, and the global configured
505    * value will be used instead. (Defaults to unlimited).
506    *
507    * @param maxResultSize The maximum result size in bytes.
508    */
509   public Scan setMaxResultSize(long maxResultSize) {
510     this.maxResultSize = maxResultSize;
511     return this;
512   }
513 
514   @Override
515   public Scan setFilter(Filter filter) {
516     super.setFilter(filter);
517     return this;
518   }
519 
520   /**
521    * Set the familyMap.
522    * @param familyMap map of family to qualifiers
523    * @return this
524    */
525   public Scan setFamilyMap(Map<byte [], NavigableSet<byte []>> familyMap) {
526     this.familyMap = familyMap;
527     return this;
528   }
529 
530   /**
531    * Get the familyMap.
532    * @return familyMap
533    */
534   public Map<byte [], NavigableSet<byte []>> getFamilyMap() {
535     return this.familyMap;
536   }
537 
538   /**
539    * @return the number of families in familyMap
540    */
541   public int numFamilies() {
542     if(hasFamilies()) {
543       return this.familyMap.size();
544     }
545     return 0;
546   }
547 
548   /**
549    * @return true if familyMap is non-empty, false otherwise
550    */
551   public boolean hasFamilies() {
552     return !this.familyMap.isEmpty();
553   }
554 
555   /**
556    * @return the keys of the familyMap
557    */
558   public byte[][] getFamilies() {
559     if(hasFamilies()) {
560       return this.familyMap.keySet().toArray(new byte[0][0]);
561     }
562     return null;
563   }
564 
565   /**
566    * @return the startrow
567    */
568   public byte [] getStartRow() {
569     return this.startRow;
570   }
571 
572   /**
573    * @return the stoprow
574    */
575   public byte [] getStopRow() {
576     return this.stopRow;
577   }
578 
579   /**
580    * @return the max number of versions to fetch
581    */
582   public int getMaxVersions() {
583     return this.maxVersions;
584   }
585 
586   /**
587    * @return maximum number of values to return for a single call to next()
588    */
589   public int getBatch() {
590     return this.batch;
591   }
592 
593   /**
594    * @return maximum number of values to return per row per CF
595    */
596   public int getMaxResultsPerColumnFamily() {
597     return this.storeLimit;
598   }
599 
600   /**
601    * Method for retrieving the scan's offset per row per column
602    * family (#kvs to be skipped)
603    * @return row offset
604    */
605   public int getRowOffsetPerColumnFamily() {
606     return this.storeOffset;
607   }
608 
609   /**
610    * @return caching the number of rows fetched when calling next on a scanner
611    */
612   public int getCaching() {
613     return this.caching;
614   }
615 
616   /**
617    * @return TimeRange
618    */
619   public TimeRange getTimeRange() {
620     return this.tr;
621   }
622 
623   /**
624    * @return the filter set on this Scan, or null if none has been set
625    */
626   @Override
627   public Filter getFilter() {
628     return filter;
629   }
630 
631   /**
632    * @return true if a filter has been specified, false if not
633    */
634   public boolean hasFilter() {
635     return filter != null;
636   }
637 
638   /**
639    * Set whether blocks should be cached for this Scan.
640    * <p>
641    * This is true by default.  When true, default settings of the table and
642    * family are used (this will never override caching blocks if the block
643    * cache is disabled for that family or entirely).
644    *
645    * @param cacheBlocks if false, default settings are overridden and blocks
646    * will not be cached
647    */
648   public Scan setCacheBlocks(boolean cacheBlocks) {
649     this.cacheBlocks = cacheBlocks;
650     return this;
651   }
652 
653   /**
654    * Get whether blocks should be cached for this Scan.
655    * @return true if default caching should be used, false if blocks should not
656    * be cached
657    */
658   public boolean getCacheBlocks() {
659     return cacheBlocks;
660   }
661 
662   /**
663    * Set whether this scan is a reversed one
664    * <p>
665    * This is false by default which means forward (normal) scan.
666    *
667    * @param reversed if true, the scan will be in backward order
668    * @return this
669    */
670   public Scan setReversed(boolean reversed) {
671     this.reversed = reversed;
672     return this;
673   }
674 
675   /**
676    * Get whether this scan is a reversed one.
677    * @return true if backward scan, false if forward(default) scan
678    */
679   public boolean isReversed() {
680     return reversed;
681   }
682 
683   /**
684    * Set the value indicating whether loading CFs on demand should be allowed (cluster
685    * default is false). On-demand CF loading doesn't load column families until necessary, e.g.
686    * if you filter on one column, the other column family data will be loaded only for the rows
687    * that are included in the result, not for all rows as in the normal case.
688    * With column-specific filters, like SingleColumnValueFilter with filterIfMissing == true,
689    * this can deliver huge performance gains when there is a CF with lots of data; however, it can
690    * also lead to some inconsistent results, as follows:
691    * - if someone does a concurrent update to both column families in question you may get a row
692    *   that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } }
693    *   someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent scan
694    *   filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 },
695    *   { video => "my dog" } }.
696    * - if there's a concurrent split and you have more than 2 column families, some rows may be
697    *   missing some column families.
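       * <p>
       * A minimal sketch of combining on-demand CF loading with such a filter (the family,
       * qualifier, and value below are illustrative):
       * <pre>{@code
       * Scan scan = new Scan();
       * SingleColumnValueFilter f = new SingleColumnValueFilter(
       *     Bytes.toBytes("meta"), Bytes.toBytes("flag"),
       *     CompareFilter.CompareOp.EQUAL, Bytes.toBytes(1));
       * f.setFilterIfMissing(true);
       * scan.setFilter(f);
       * scan.setLoadColumnFamiliesOnDemand(true);
       * }</pre>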
698    */
699   public Scan setLoadColumnFamiliesOnDemand(boolean value) {
700     this.loadColumnFamiliesOnDemand = value;
701     return this;
702   }
703 
704   /**
705    * Get the raw loadColumnFamiliesOnDemand setting; if it's not set, can be null.
706    */
707   public Boolean getLoadColumnFamiliesOnDemandValue() {
708     return this.loadColumnFamiliesOnDemand;
709   }
710 
711   /**
712    * Get the logical value indicating whether on-demand CF loading should be allowed.
713    */
714   public boolean doLoadColumnFamiliesOnDemand() {
715     return (this.loadColumnFamiliesOnDemand != null)
716       && this.loadColumnFamiliesOnDemand.booleanValue();
717   }
718 
719   /**
720    * Compile the table and column family (i.e. schema) information
721    * into a String. Useful for parsing and aggregation by debugging,
722    * logging, and administration tools.
723    * @return Map
724    */
725   @Override
726   public Map<String, Object> getFingerprint() {
727     Map<String, Object> map = new HashMap<String, Object>();
728     List<String> families = new ArrayList<String>();
729     if(this.familyMap.size() == 0) {
730       map.put("families", "ALL");
731       return map;
732     } else {
733       map.put("families", families);
734     }
735     for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
736         this.familyMap.entrySet()) {
737       families.add(Bytes.toStringBinary(entry.getKey()));
738     }
739     return map;
740   }
741 
742   /**
743    * Compile the details beyond the scope of getFingerprint (row, columns,
744    * timestamps, etc.) into a Map along with the fingerprinted information.
745    * Useful for debugging, logging, and administration tools.
746    * @param maxCols a limit on the number of columns output prior to truncation
747    * @return Map
748    */
749   @Override
750   public Map<String, Object> toMap(int maxCols) {
751     // start with the fingerprint map and build on top of it
752     Map<String, Object> map = getFingerprint();
753     // map from families to column list replaces fingerprint's list of families
754     Map<String, List<String>> familyColumns =
755       new HashMap<String, List<String>>();
756     map.put("families", familyColumns);
757     // add scalar information first
758     map.put("startRow", Bytes.toStringBinary(this.startRow));
759     map.put("stopRow", Bytes.toStringBinary(this.stopRow));
760     map.put("maxVersions", this.maxVersions);
761     map.put("batch", this.batch);
762     map.put("caching", this.caching);
763     map.put("maxResultSize", this.maxResultSize);
764     map.put("cacheBlocks", this.cacheBlocks);
765     map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
766     List<Long> timeRange = new ArrayList<Long>();
767     timeRange.add(this.tr.getMin());
768     timeRange.add(this.tr.getMax());
769     map.put("timeRange", timeRange);
770     int colCount = 0;
771     // iterate through affected families and list out up to maxCols columns
772     for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
773       this.familyMap.entrySet()) {
774       List<String> columns = new ArrayList<String>();
775       familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
776       if(entry.getValue() == null) {
777         colCount++;
778         --maxCols;
779         columns.add("ALL");
780       } else {
781         colCount += entry.getValue().size();
782         if (maxCols <= 0) {
783           continue;
784         }
785         for (byte [] column : entry.getValue()) {
786           if (--maxCols <= 0) {
787             continue;
788           }
789           columns.add(Bytes.toStringBinary(column));
790         }
791       }
792     }
793     map.put("totalColumns", colCount);
794     if (this.filter != null) {
795       map.put("filter", this.filter.toString());
796     }
797     // add the id if set
798     if (getId() != null) {
799       map.put("id", getId());
800     }
801     return map;
802   }
803 
804   /**
805    * Enable/disable "raw" mode for this scan.
806    * If "raw" is enabled the scan will return all
807    * delete markers and deleted cells that have not
808    * been garbage collected yet.
809    * This is mostly useful for Scans on column families
810    * that have KEEP_DELETED_CELLS enabled.
811    * It is an error to specify any column when "raw" is set.
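       * <p>
       * For example, a minimal sketch of a raw scan (note that no columns are added):
       * <pre>{@code
       * Scan scan = new Scan();
       * scan.setRaw(true);
       * scan.setMaxVersions();
       * }</pre>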
812    * @param raw True/False to enable/disable "raw" mode.
813    */
814   public Scan setRaw(boolean raw) {
815     setAttribute(RAW_ATTR, Bytes.toBytes(raw));
816     return this;
817   }
818 
819   /**
820    * @return True if this Scan is in "raw" mode.
821    */
822   public boolean isRaw() {
823     byte[] attr = getAttribute(RAW_ATTR);
824     return attr == null ? false : Bytes.toBoolean(attr);
825   }
826 
829   /**
830    * Set whether this scan is a small scan.
831    * <p>
832    * A small scan should use pread, while a big scan can use seek + read.
833    *
834    * seek + read is fast but can cause two problems: (1) resource contention and
835    * (2) too much network IO.
836    *
837    * [89-fb] Using pread for non-compaction read request
838    * https://issues.apache.org/jira/browse/HBASE-7266
839    *
840    * On the other hand, if this is set to true, openScanner, next, and closeScanner
841    * are done in a single RPC call, which gives better performance for small scans
842    * [HBASE-9488].
843    *
844    * Generally, if the scan range is within one data block (64KB), it can be
845    * considered a small scan.
846    *
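       * For example, a sketch of a small scan over a narrow row range (the assumption
       * is that startRow and stopRow fall within a single block):
       * <pre>{@code
       * Scan scan = new Scan(startRow, stopRow);
       * scan.setSmall(true);
       * scan.setCaching(1);
       * }</pre>
       *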
847    * @param small set to true to enable a small scan
848    */
849   public Scan setSmall(boolean small) {
850     this.small = small;
851     return this;
852   }
853 
854   /**
855    * Get whether this scan is a small scan
856    * @return true if small scan
857    */
858   public boolean isSmall() {
859     return small;
860   }
861 
862   @Override
863   public Scan setAttribute(String name, byte[] value) {
864     return (Scan) super.setAttribute(name, value);
865   }
866 
867   @Override
868   public Scan setId(String id) {
869     return (Scan) super.setId(id);
870   }
871 
872   @Override
873   public Scan setAuthorizations(Authorizations authorizations) {
874     return (Scan) super.setAuthorizations(authorizations);
875   }
876 
877   @Override
878   public Scan setACL(Map<String, Permission> perms) {
879     return (Scan) super.setACL(perms);
880   }
881 
882   @Override
883   public Scan setACL(String user, Permission perms) {
884     return (Scan) super.setACL(user, perms);
885   }
886 
887   @Override
888   public Scan setConsistency(Consistency consistency) {
889     return (Scan) super.setConsistency(consistency);
890   }
891 
892   @Override
893   public Scan setReplicaId(int Id) {
894     return (Scan) super.setReplicaId(Id);
895   }
896 
897   @Override
898   public Scan setIsolationLevel(IsolationLevel level) {
899     return (Scan) super.setIsolationLevel(level);
900   }
901 
902   /**
903    * Utility that creates a Scan that will do a small scan in reverse from the passed row,
904    * looking for the next closest row.
905    * @param row the row to start the reverse scan from
906    * @return An instance of Scan primed with the passed <code>row</code> to
907    * scan in reverse for one row only. The returned Scan is small, reversed, and
908    * caches a single row.
909    */
910   static Scan createGetClosestRowOrBeforeReverseScan(byte[] row) {
911     // Below does not work if you add in family; need to add the family qualifier that is highest
912     // possible family qualifier.  Do we have such a notion?  Would have to be magic.
913     Scan scan = new Scan(row);
914     scan.setSmall(true);
915     scan.setReversed(true);
916     scan.setCaching(1);
917     return scan;
918   }
919 }