1   /*
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.client;
21  
22  import java.io.IOException;
23  import java.util.ArrayList;
24  import java.util.Arrays;
25  import java.util.HashMap;
26  import java.util.List;
27  import java.util.Map;
28  import java.util.NavigableSet;
29  import java.util.TreeMap;
30  import java.util.TreeSet;
31  
32  import org.apache.commons.logging.Log;
33  import org.apache.commons.logging.LogFactory;
34  import org.apache.hadoop.hbase.HConstants;
35  import org.apache.hadoop.hbase.classification.InterfaceAudience;
36  import org.apache.hadoop.hbase.classification.InterfaceStability;
37  import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
38  import org.apache.hadoop.hbase.filter.Filter;
39  import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
40  import org.apache.hadoop.hbase.io.TimeRange;
41  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
42  import org.apache.hadoop.hbase.security.access.Permission;
43  import org.apache.hadoop.hbase.security.visibility.Authorizations;
44  import org.apache.hadoop.hbase.util.Bytes;
45  
46  /**
47   * Used to perform Scan operations.
48   * <p>
49   * All operations are identical to {@link Get} with the exception of
50   * instantiation.  Rather than specifying a single row, an optional startRow
51   * and stopRow may be defined.  If rows are not specified, the Scanner will
52   * iterate over all rows.
53   * <p>
54   * To get all columns from all rows of a Table, create an instance with no constraints; use the
55   * {@link #Scan()} constructor. To constrain the scan to specific column families,
56   * call {@link #addFamily(byte[]) addFamily} for each family to retrieve on your Scan instance.
57   * <p>
58   * To get specific columns, call {@link #addColumn(byte[], byte[]) addColumn}
59   * for each column to retrieve.
60   * <p>
61   * To only retrieve columns within a specific range of version timestamps,
62   * call {@link #setTimeRange(long, long) setTimeRange}.
63   * <p>
64   * To only retrieve columns with a specific timestamp, call
65   * {@link #setTimeStamp(long) setTimestamp}.
66   * <p>
67   * To limit the number of versions of each column to be returned, call
68   * {@link #setMaxVersions(int) setMaxVersions}.
69   * <p>
70   * To limit the maximum number of values returned for each call to next(),
71   * call {@link #setBatch(int) setBatch}.
72   * <p>
73   * To add a filter, call {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}.
74   * <p>
75   * Expert: To explicitly disable server-side block caching for this scan,
76   * execute {@link #setCacheBlocks(boolean)}.
77   * <p><em>Note:</em> Usage alters Scan instances. Internally, attributes are updated as the Scan
78   * runs and, if enabled, metrics accumulate in the Scan instance. Be aware of this when cloning a
79   * Scan instance or when reusing an existing one; it is safer to create a new Scan instance per
80   * usage.
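 * <p>
 * For illustration only, a minimal usage sketch (it assumes an open {@code Connection} named
 * {@code connection}; the table, family and qualifier names are made up):
 * <pre>
 *   Table table = connection.getTable(TableName.valueOf("testtable"));
 *   Scan scan = new Scan(Bytes.toBytes("row-0"), Bytes.toBytes("row-9"));
 *   scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 *   scan.setCaching(100);
 *   ResultScanner scanner = table.getScanner(scan);
 *   try {
 *     for (Result result : scanner) {
 *       // process each row
 *     }
 *   } finally {
 *     scanner.close();
 *     table.close();
 *   }
 * </pre>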
81   */
82  @InterfaceAudience.Public
83  @InterfaceStability.Stable
84  public class Scan extends Query {
85    private static final Log LOG = LogFactory.getLog(Scan.class);
86  
87    private static final String RAW_ATTR = "_raw_";
88  
89    private byte [] startRow = HConstants.EMPTY_START_ROW;
90    private byte [] stopRow  = HConstants.EMPTY_END_ROW;
91    private int maxVersions = 1;
92    private int batch = -1;
93  
94    /**
95    * Partial {@link Result}s are {@link Result}s that must be combined to form a complete {@link Result}.
96     * The {@link Result}s had to be returned in fragments (i.e. as partials) because the size of the
97     * cells in the row exceeded max result size on the server. Typically partial results will be
98     * combined client side into complete results before being delivered to the caller. However, if
99     * this flag is set, the caller is indicating that they do not mind seeing partial results (i.e.
100    * they understand that the results returned from the Scanner may only represent part of a
101    * particular row). In such a case, any attempt to combine the partials into a complete result on
102    * the client side will be skipped, and the caller will be able to see the exact results returned
103    * from the server.
104    */
105   private boolean allowPartialResults = false;
106 
107   private int storeLimit = -1;
108   private int storeOffset = 0;
109   private boolean getScan;
110 
111   /**
112    * @deprecated since 1.0.0. Use {@link #setScanMetricsEnabled(boolean)}
113    */
114   // Make private or remove.
115   @Deprecated
116   static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
117 
118   /**
119    * @deprecated since 1.0.0. Use {@link #getScanMetrics()}
120    */
121   // Make this private or remove.
122   @Deprecated
123   static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data";
124 
125   // If an application wants to use multiple scans over different tables each scan must
126   // define this attribute with the appropriate table name by calling
127   // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
128   static public final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";
129 
130   /*
131    * -1 means no caching
132    */
133   private int caching = -1;
134   private long maxResultSize = -1;
135   private boolean cacheBlocks = true;
136   private boolean reversed = false;
137   private TimeRange tr = new TimeRange();
138   private Map<byte [], NavigableSet<byte []>> familyMap =
139     new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
140   private Boolean loadColumnFamiliesOnDemand = null;
141   private Boolean asyncPrefetch = null;
142 
143   /**
144    * Parameter name for client scanner sync/async prefetch toggle.
145    * When using the async scanner, data is prefetched from the server in the background.
146    * The parameter currently has no effect if the user has called
147    * Scan#setSmall or Scan#setReversed.
148    */
149   public static final String HBASE_CLIENT_SCANNER_ASYNC_PREFETCH =
150       "hbase.client.scanner.async.prefetch";
151 
152   /**
153    * Default value of {@link #HBASE_CLIENT_SCANNER_ASYNC_PREFETCH}.
154    */
155   public static final boolean DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = false;
156 
157   /**
158    * Set it true for a small scan to get better performance.
159    *
160    * A small scan should use pread while a big scan can use seek + read.
161    *
162    * seek + read is fast but can cause two problems: (1) resource contention and (2)
163    * too much network io.
164    *
165    * [89-fb] Using pread for non-compaction read request
166    * https://issues.apache.org/jira/browse/HBASE-7266
167    *
168    * On the other hand, if set to true, we do
169    * openScanner, next, closeScanner in one RPC call, which means better
170    * performance for a small scan. [HBASE-9488].
171    *
172    * Generally, if the scan range is within one data block (64KB), it could be
173    * considered a small scan.
174    */
175   private boolean small = false;
176 
177   /**
178    * Create a Scan operation across all rows.
179    */
180   public Scan() {}
181 
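  /**
   * Create a Scan operation starting at the specified row and applying the given filter.
   * <p>
   * If the specified row does not exist, the Scanner will start from the
   * next closest row after the specified row.
   * @param startRow row to start scanner at or after
   * @param filter filter to apply
   */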
182   public Scan(byte [] startRow, Filter filter) {
183     this(startRow);
184     this.filter = filter;
185   }
186 
187   /**
188    * Create a Scan operation starting at the specified row.
189    * <p>
190    * If the specified row does not exist, the Scanner will start from the
191    * next closest row after the specified row.
192    * @param startRow row to start scanner at or after
193    */
194   public Scan(byte [] startRow) {
195     this.startRow = startRow;
196   }
197 
198   /**
199    * Create a Scan operation for the range of rows specified.
200    * @param startRow row to start scanner at or after (inclusive)
201    * @param stopRow row to stop scanner before (exclusive)
202    */
203   public Scan(byte [] startRow, byte [] stopRow) {
204     this.startRow = startRow;
205     this.stopRow = stopRow;
206     // a Get-style scan is one where the non-empty startRow equals the stopRow; if both are empty it is a full scan, not a Get
207     this.getScan = isStartRowAndEqualsStopRow();
208   }
209 
210   /**
211    * Creates a new instance of this class while copying all values.
212    *
213    * @param scan  The scan instance to copy from.
214    * @throws IOException When copying the values fails.
215    */
216   public Scan(Scan scan) throws IOException {
217     startRow = scan.getStartRow();
218     stopRow  = scan.getStopRow();
219     maxVersions = scan.getMaxVersions();
220     batch = scan.getBatch();
221     storeLimit = scan.getMaxResultsPerColumnFamily();
222     storeOffset = scan.getRowOffsetPerColumnFamily();
223     caching = scan.getCaching();
224     maxResultSize = scan.getMaxResultSize();
225     cacheBlocks = scan.getCacheBlocks();
226     getScan = scan.isGetScan();
227     filter = scan.getFilter(); // clone?
228     loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
229     consistency = scan.getConsistency();
230     this.setIsolationLevel(scan.getIsolationLevel());
231     reversed = scan.isReversed();
232     asyncPrefetch = scan.isAsyncPrefetch();
233     small = scan.isSmall();
234     allowPartialResults = scan.getAllowPartialResults();
235     TimeRange ctr = scan.getTimeRange();
236     tr = new TimeRange(ctr.getMin(), ctr.getMax());
237     Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
238     for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
239       byte [] fam = entry.getKey();
240       NavigableSet<byte[]> cols = entry.getValue();
241       if (cols != null && cols.size() > 0) {
242         for (byte[] col : cols) {
243           addColumn(fam, col);
244         }
245       } else {
246         addFamily(fam);
247       }
248     }
249     for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
250       setAttribute(attr.getKey(), attr.getValue());
251     }
252     for (Map.Entry<byte[], TimeRange> entry : scan.getColumnFamilyTimeRange().entrySet()) {
253       TimeRange tr = entry.getValue();
254       setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
255     }
256   }
257 
258   /**
259    * Builds a scan object with the same specs as get.
260    * @param get get to model scan after
261    */
262   public Scan(Get get) {
263     this.startRow = get.getRow();
264     this.stopRow = get.getRow();
265     this.filter = get.getFilter();
266     this.cacheBlocks = get.getCacheBlocks();
267     this.maxVersions = get.getMaxVersions();
268     this.storeLimit = get.getMaxResultsPerColumnFamily();
269     this.storeOffset = get.getRowOffsetPerColumnFamily();
270     this.tr = get.getTimeRange();
271     this.familyMap = get.getFamilyMap();
272     this.getScan = true;
273     this.asyncPrefetch = false;
274     this.consistency = get.getConsistency();
275     this.setIsolationLevel(get.getIsolationLevel());
276     for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
277       setAttribute(attr.getKey(), attr.getValue());
278     }
279     for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) {
280       TimeRange tr = entry.getValue();
281       setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
282     }
283   }
284 
285   public boolean isGetScan() {
286     return this.getScan || isStartRowAndEqualsStopRow();
287   }
288 
289   private boolean isStartRowAndEqualsStopRow() {
290     return this.startRow != null && this.startRow.length > 0 &&
291         Bytes.equals(this.startRow, this.stopRow);
292   }
293   /**
294    * Get all columns from the specified family.
295    * <p>
296    * Overrides previous calls to addColumn for this family.
297    * @param family family name
298    * @return this
299    */
300   public Scan addFamily(byte [] family) {
301     familyMap.remove(family);
302     familyMap.put(family, null);
303     return this;
304   }
305 
306   /**
307    * Get the column from the specified family with the specified qualifier.
308    * <p>
309    * Overrides previous calls to addFamily for this family.
310    * @param family family name
311    * @param qualifier column qualifier
312    * @return this
313    */
314   public Scan addColumn(byte [] family, byte [] qualifier) {
315     NavigableSet<byte []> set = familyMap.get(family);
316     if(set == null) {
317       set = new TreeSet<byte []>(Bytes.BYTES_COMPARATOR);
318     }
319     if (qualifier == null) {
320       qualifier = HConstants.EMPTY_BYTE_ARRAY;
321     }
322     set.add(qualifier);
323     familyMap.put(family, set);
324     return this;
325   }
326 
327   /**
328    * Get versions of columns only within the specified timestamp range,
329    * [minStamp, maxStamp).  Note, default maximum versions to return is 1.  If
330    * your time range spans more than one version and you want all versions
331    * returned, up the number of versions beyond the default.
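   * <p>
   * A minimal sketch (the timestamps are arbitrary example values):
   * <pre>
   *   Scan scan = new Scan();
   *   scan.setTimeRange(1400000000000L, 1500000000000L); // may throw IOException
   *   scan.setMaxVersions(Integer.MAX_VALUE); // return every version inside the range
   * </pre>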
332    * @param minStamp minimum timestamp value, inclusive
333    * @param maxStamp maximum timestamp value, exclusive
334    * @see #setMaxVersions()
335    * @see #setMaxVersions(int)
336    * @return this
337    */
338   public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
339     tr = new TimeRange(minStamp, maxStamp);
340     return this;
341   }
342 
343   /**
344    * Get versions of columns with the specified timestamp. Note, default maximum
345    * versions to return is 1.  If your time range spans more than one version
346    * and you want all versions returned, up the number of versions beyond the
347    * default.
348    * @param timestamp version timestamp
349    * @see #setMaxVersions()
350    * @see #setMaxVersions(int)
351    * @return this
352    */
353   public Scan setTimeStamp(long timestamp)
354   throws IOException {
355     try {
356       tr = new TimeRange(timestamp, timestamp+1);
357     } catch(Exception e) {
358       // This should never happen, unless integer overflow or something extremely wrong...
359       LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
360       throw e;
361     }
362     return this;
363   }
364 
365   @Override public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
366     return (Scan) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
367   }
368 
369   /**
370    * Set the start row of the scan.
371    * <p>
372    * If the specified row does not exist, the Scanner will start from the
373    * next closest row after the specified row.
374    * @param startRow row to start scanner at or after
375    * @return this
376    * @throws IllegalArgumentException if startRow does not meet criteria
377    * for a row key (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
378    */
379   public Scan setStartRow(byte [] startRow) {
380     if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
381       throw new IllegalArgumentException(
382         "startRow's length must be less than or equal to " +
383         HConstants.MAX_ROW_LENGTH + " to meet the criteria" +
384         " for a row key.");
385     }
386     this.startRow = startRow;
387     return this;
388   }
389
390   /**
391    * Set the stop row of the scan.
392    * <p>
393    * The scan will include rows that are lexicographically less than
394    * the provided stopRow (the stop row itself is exclusive).
395    * <p><b>Note:</b> When doing a filter for a rowKey <u>Prefix</u>
396    * use {@link #setRowPrefixFilter(byte[])}.
397    * Appending a 'trailing 0' to the prefix will not yield the desired result.</p>
398    * @param stopRow row to end at (exclusive)
399    * @return this
400    * @throws IllegalArgumentException if stopRow does not meet criteria
401    * for a row key (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
402    */
403   public Scan setStopRow(byte [] stopRow) {
404     if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
405       throw new IllegalArgumentException(
406         "stopRow's length must be less than or equal to " +
407         HConstants.MAX_ROW_LENGTH + " to meet the criteria" +
408         " for a row key.");
409     }
410     this.stopRow = stopRow;
411     return this;
412   }
413
414   /**
415    * <p>Set a filter (using stopRow and startRow) so the result set only contains rows where the
416    * rowKey starts with the specified prefix.</p>
417    * <p>This is a utility method that converts the desired rowPrefix into the appropriate values
418    * for the startRow and stopRow to achieve the desired result.</p>
419    * <p>This can safely be used in combination with setFilter.</p>
420    * <p><b>NOTE: Doing a {@link #setStartRow(byte[])} and/or {@link #setStopRow(byte[])}
421    * after this method will yield undefined results.</b></p>
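   * <p>An illustrative sketch (the prefix value is made up):</p>
   * <pre>
   *   Scan scan = new Scan();
   *   scan.setRowPrefixFilter(Bytes.toBytes("row-2017"));
   *   // Equivalent to startRow = "row-2017" and stopRow = "row-2018" (exclusive):
   *   // the last byte of the prefix is incremented to form the stop row.
   * </pre>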
422    * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
423    * @return this
424    */
425   public Scan setRowPrefixFilter(byte[] rowPrefix) {
426     if (rowPrefix == null) {
427       setStartRow(HConstants.EMPTY_START_ROW);
428       setStopRow(HConstants.EMPTY_END_ROW);
429     } else {
430       this.setStartRow(rowPrefix);
431       this.setStopRow(calculateTheClosestNextRowKeyForPrefix(rowPrefix));
432     }
433     return this;
434   }
435
436   /**
437    * <p>When scanning for a prefix the scan should stop immediately after the last row that
438    * has the specified prefix. This method calculates the closest next rowKey immediately following
439    * the given rowKeyPrefix.</p>
440    * <p><b>IMPORTANT: This converts a rowKey<u>Prefix</u> into a rowKey</b>.</p>
441    * <p>If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can
442    * simply increment the last byte of the array.
443    * But if your application uses real binary rowids you may run into the scenario that your
444    * prefix is something like:</p>
445    * &nbsp;&nbsp;&nbsp;<b>{ 0x12, 0x23, 0xFF, 0xFF }</b><br/>
446    * Then this stopRow needs to be fed into the actual scan<br/>
447    * &nbsp;&nbsp;&nbsp;<b>{ 0x12, 0x24 }</b> (Notice that it is shorter now)<br/>
448    * This method calculates the correct stop row value for this usecase.
449    *
450    * @param rowKeyPrefix the rowKey<u>Prefix</u>.
451    * @return the closest next rowKey immediately following the given rowKeyPrefix.
452    */
453   private byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) {
454     // Essentially we are treating it like an 'unsigned very very long' and doing +1 manually.
455     // Search for the place where the trailing 0xFFs start
456     int offset = rowKeyPrefix.length;
457     while (offset > 0) {
458       if (rowKeyPrefix[offset - 1] != (byte) 0xFF) {
459         break;
460       }
461       offset--;
462     }
463
464     if (offset == 0) {
465       // We got an 0xFFFF... (only FFs) stopRow value which is
466       // the last possible prefix before the end of the table.
467       // So set it to stop at the 'end of the table'
468       return HConstants.EMPTY_END_ROW;
469     }
470 
471     // Copy the right length of the original
472     byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
473     // And increment the last one
474     newStopRow[newStopRow.length - 1]++;
475     return newStopRow;
476   }
477
478   /**
479    * Get all available versions.
480    * @return this
481    */
482   public Scan setMaxVersions() {
483     this.maxVersions = Integer.MAX_VALUE;
484     return this;
485   }
486
487   /**
488    * Get up to the specified number of versions of each column.
489    * @param maxVersions maximum versions for each column
490    * @return this
491    */
492   public Scan setMaxVersions(int maxVersions) {
493     this.maxVersions = maxVersions;
494     return this;
495   }
496
497   /**
498    * Set the maximum number of values to return for each call to next().
499    * Callers should be aware that invoking this method with any value
500    * is equivalent to calling {@link #setAllowPartialResults(boolean)}
501    * with a value of {@code true}; partial results may be returned if
502    * this method is called. Use {@link #setMaxResultSize(long)} to
503    * limit the size of a Scan's Results instead.
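   * <p>
   * A brief sketch of the implication (the value is arbitrary):
   * <pre>
   *   Scan scan = new Scan();
   *   scan.setBatch(5); // at most 5 cells per Result; a wide row may span several Results
   * </pre>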
504    *
505    * @param batch the maximum number of values
506    */
507   public Scan setBatch(int batch) {
508     if (this.hasFilter() && this.filter.hasFilterRow()) {
509       throw new IncompatibleFilterException(
510         "Cannot set batch on a scan using a filter" +
511         " that returns true for filter.hasFilterRow");
512     }
513     this.batch = batch;
514     return this;
515   }
516
517   /**
518    * Set the maximum number of values to return per row per Column Family
519    * @param limit the maximum number of values returned / row / CF
520    */
521   public Scan setMaxResultsPerColumnFamily(int limit) {
522     this.storeLimit = limit;
523     return this;
524   }
525
526   /**
527    * Set offset for the row per Column Family.
528    * @param offset is the number of kvs that will be skipped.
529    */
530   public Scan setRowOffsetPerColumnFamily(int offset) {
531     this.storeOffset = offset;
532     return this;
533   }
534
535   /**
536    * Set the number of rows for caching that will be passed to scanners.
537    * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
538    * apply.
539    * Higher caching values will enable faster scanners but will use more memory.
540    * @param caching the number of rows for caching
541    */
542   public Scan setCaching(int caching) {
543     this.caching = caching;
544     return this;
545   }
546
547   /**
548    * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)}
549    */
550   public long getMaxResultSize() {
551     return maxResultSize;
552   }
553
554   /**
555    * Set the maximum result size. The default is -1; this means that no specific
556    * maximum result size will be set for this scan, and the global configured
557    * value will be used instead. (Defaults to unlimited).
558    *
559    * @param maxResultSize The maximum result size in bytes.
560    */
561   public Scan setMaxResultSize(long maxResultSize) {
562     this.maxResultSize = maxResultSize;
563     return this;
564   }
565
566   @Override
567   public Scan setFilter(Filter filter) {
568     super.setFilter(filter);
569     return this;
570   }
571
572   /**
573    * Setting the familyMap
574    * @param familyMap map of family to qualifier
575    * @return this
576    */
577   public Scan setFamilyMap(Map<byte [], NavigableSet<byte []>> familyMap) {
578     this.familyMap = familyMap;
579     return this;
580   }
581
582   /**
583    * Getting the familyMap
584    * @return familyMap
585    */
586   public Map<byte [], NavigableSet<byte []>> getFamilyMap() {
587     return this.familyMap;
588   }
589
590   /**
591    * @return the number of families in familyMap
592    */
593   public int numFamilies() {
594     if(hasFamilies()) {
595       return this.familyMap.size();
596     }
597     return 0;
598   }
599
600   /**
601    * @return true if familyMap is non-empty, false otherwise
602    */
603   public boolean hasFamilies() {
604     return !this.familyMap.isEmpty();
605   }
606
607   /**
608    * @return the keys of the familyMap
609    */
610   public byte[][] getFamilies() {
611     if(hasFamilies()) {
612       return this.familyMap.keySet().toArray(new byte[0][0]);
613     }
614     return null;
615   }
616
617   /**
618    * @return the startrow
619    */
620   public byte [] getStartRow() {
621     return this.startRow;
622   }
623
624   /**
625    * @return the stoprow
626    */
627   public byte [] getStopRow() {
628     return this.stopRow;
629   }
630
631   /**
632    * @return the max number of versions to fetch
633    */
634   public int getMaxVersions() {
635     return this.maxVersions;
636   }
637
638   /**
639    * @return maximum number of values to return for a single call to next()
640    */
641   public int getBatch() {
642     return this.batch;
643   }
644
645   /**
646    * @return maximum number of values to return per row per CF
647    */
648   public int getMaxResultsPerColumnFamily() {
649     return this.storeLimit;
650   }
651
652   /**
653    * Method for retrieving the scan's offset per row per column
654    * family (#kvs to be skipped)
655    * @return row offset
656    */
657   public int getRowOffsetPerColumnFamily() {
658     return this.storeOffset;
659   }
660
661   /**
662    * @return caching the number of rows fetched when calling next on a scanner
663    */
664   public int getCaching() {
665     return this.caching;
666   }
667
668   /**
669    * @return TimeRange
670    */
671   public TimeRange getTimeRange() {
672     return this.tr;
673   }
674
675   /**
676    * @return RowFilter
677    */
678   @Override
679   public Filter getFilter() {
680     return filter;
681   }
682
683   /**
684    * @return true if a filter has been specified, false if not
685    */
686   public boolean hasFilter() {
687     return filter != null;
688   }
689
690   /**
691    * Set whether blocks should be cached for this Scan.
692    * <p>
693    * This is true by default.  When true, default settings of the table and
694    * family are used (this will never override caching blocks if the block
695    * cache is disabled for that family or entirely).
696    *
697    * @param cacheBlocks if false, default settings are overridden and blocks
698    * will not be cached
699    */
700   public Scan setCacheBlocks(boolean cacheBlocks) {
701     this.cacheBlocks = cacheBlocks;
702     return this;
703   }
704
705   /**
706    * Get whether blocks should be cached for this Scan.
707    * @return true if default caching should be used, false if blocks should not
708    * be cached
709    */
710   public boolean getCacheBlocks() {
711     return cacheBlocks;
712   }
713
714   /**
715    * Set whether this scan is a reversed one
716    * <p>
717    * This is false by default, which means a forward (normal) scan.
718    *
719    * @param reversed if true, scan will be backward order
720    * @return this
721    */
722   public Scan setReversed(boolean reversed) {
723     this.reversed = reversed;
724     return this;
725   }
726
727   /**
728    * Get whether this scan is a reversed one.
729    * @return true if backward scan, false if forward(default) scan
730    */
731   public boolean isReversed() {
732     return reversed;
733   }
734
735   /**
736    * Setting whether the caller wants to see the partial results that may be returned from the
737    * server. By default this value is false and the complete results will be assembled client side
738    * before being delivered to the caller.
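   * <p>
   * A minimal sketch (it assumes this client version exposes {@code Result#isPartial()}):
   * <pre>
   *   Scan scan = new Scan();
   *   scan.setAllowPartialResults(true);
   *   // Each Result handed back may now hold only part of a row;
   *   // Result#isPartial() indicates whether more cells of the same row may follow.
   * </pre>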
739    * @param allowPartialResults true to allow partial results to be seen by the caller
740    * @return this
741    */
742   public Scan setAllowPartialResults(final boolean allowPartialResults) {
743     this.allowPartialResults = allowPartialResults;
744     return this;
745   }
746
747   /**
748    * @return true when the caller of this scan understands that the results they will see may
749    *         only represent a partial portion of a row. The entire row would be retrieved by
750    *         subsequent calls to {@link ResultScanner#next()}
751    */
752   public boolean getAllowPartialResults() {
753     return allowPartialResults;
754   }
755
756   /**
757    * Set the value indicating whether loading CFs on demand should be allowed (cluster
758    * default is false). On-demand CF loading doesn't load column families until necessary, e.g.
759    * if you filter on one column, the other column family data will be loaded only for the rows
760    * that are included in the result, not for all rows as in the normal case.
761    * With column-specific filters, like SingleColumnValueFilter w/filterIfMissing == true,
762    * this can deliver huge perf gains when there's a cf with lots of data; however, it can
763    * also lead to some inconsistent results, as follows:
764    * - if someone does a concurrent update to both column families in question you may get a row
765    *   that never existed, e.g. for { rowKey = 5, { cat_videos =&gt; 1 }, { video =&gt; "my cat" } }
766    *   someone puts rowKey 5 with { cat_videos =&gt; 0 }, { video =&gt; "my dog" }, concurrent scan
767    *   filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos =&gt; 1 },
768    *   { video =&gt; "my dog" } }.
769    * - if there's a concurrent split and you have more than 2 column families, some rows may be
770    *   missing some column families.
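   * <p>
   * A sketch of the intended use (the family, qualifier and value are made up):
   * <pre>
   *   SingleColumnValueFilter filter = new SingleColumnValueFilter(
   *       Bytes.toBytes("meta"), Bytes.toBytes("flag"),
   *       CompareFilter.CompareOp.EQUAL, Bytes.toBytes("1"));
   *   filter.setFilterIfMissing(true);
   *   Scan scan = new Scan();
   *   scan.setFilter(filter);
   *   scan.setLoadColumnFamiliesOnDemand(true); // load the other families only for matching rows
   * </pre>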
771    */
772   public Scan setLoadColumnFamiliesOnDemand(boolean value) {
773     this.loadColumnFamiliesOnDemand = value;
774     return this;
775   }
776
777   /**
778    * Get the raw loadColumnFamiliesOnDemand setting; if it's not set, can be null.
779    */
780   public Boolean getLoadColumnFamiliesOnDemandValue() {
781     return this.loadColumnFamiliesOnDemand;
782   }
783
784   /**
785    * Get the logical value indicating whether on-demand CF loading should be allowed.
786    */
787   public boolean doLoadColumnFamiliesOnDemand() {
788     return (this.loadColumnFamiliesOnDemand != null)
789       && this.loadColumnFamiliesOnDemand.booleanValue();
790   }
791
792   /**
793    * Compile the table and column family (i.e. schema) information
794    * into a String. Useful for parsing and aggregation by debugging,
795    * logging, and administration tools.
796    * @return Map
797    */
798   @Override
799   public Map<String, Object> getFingerprint() {
800     Map<String, Object> map = new HashMap<String, Object>();
801     List<String> families = new ArrayList<String>();
802     if(this.familyMap.size() == 0) {
803       map.put("families", "ALL");
804       return map;
805     } else {
806       map.put("families", families);
807     }
808     for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
809         this.familyMap.entrySet()) {
810       families.add(Bytes.toStringBinary(entry.getKey()));
811     }
812     return map;
813   }
814
815   /**
816    * Compile the details beyond the scope of getFingerprint (row, columns,
817    * timestamps, etc.) into a Map along with the fingerprinted information.
818    * Useful for debugging, logging, and administration tools.
819    * @param maxCols a limit on the number of columns output prior to truncation
820    * @return Map
821    */
822   @Override
823   public Map<String, Object> toMap(int maxCols) {
824     // start with the fingerprint map and build on top of it
825     Map<String, Object> map = getFingerprint();
826     // map from families to column list replaces fingerprint's list of families
827     Map<String, List<String>> familyColumns =
828       new HashMap<String, List<String>>();
829     map.put("families", familyColumns);
830     // add scalar information first
831     map.put("startRow", Bytes.toStringBinary(this.startRow));
832     map.put("stopRow", Bytes.toStringBinary(this.stopRow));
833     map.put("maxVersions", this.maxVersions);
834     map.put("batch", this.batch);
835     map.put("caching", this.caching);
836     map.put("maxResultSize", this.maxResultSize);
837     map.put("cacheBlocks", this.cacheBlocks);
838     map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
839     List<Long> timeRange = new ArrayList<Long>();
840     timeRange.add(this.tr.getMin());
841     timeRange.add(this.tr.getMax());
842     map.put("timeRange", timeRange);
843     int colCount = 0;
844     // iterate through affected families and list out up to maxCols columns
845     for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
846       this.familyMap.entrySet()) {
847       List<String> columns = new ArrayList<String>();
848       familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
849       if(entry.getValue() == null) {
850         colCount++;
851         --maxCols;
852         columns.add("ALL");
853       } else {
854         colCount += entry.getValue().size();
855         if (maxCols <= 0) {
856           continue;
857         }
858         for (byte [] column : entry.getValue()) {
859           if (--maxCols <= 0) {
860             continue;
861           }
862           columns.add(Bytes.toStringBinary(column));
863         }
864       }
865     }
866     map.put("totalColumns", colCount);
867     if (this.filter != null) {
868       map.put("filter", this.filter.toString());
869     }
870     // add the id if set
871     if (getId() != null) {
872       map.put("id", getId());
873     }
874     return map;
875   }
876
877   /**
878    * Enable/disable "raw" mode for this scan.
879    * If "raw" is enabled the scan will return all
880    * delete markers and deleted cells that have not
881    * yet been collected.
882    * This is mostly useful for Scans on column families
883    * that have KEEP_DELETED_CELLS enabled.
884    * It is an error to specify any column when "raw" is set.
885    * @param raw True/False to enable/disable "raw" mode.
886    */
887   public Scan setRaw(boolean raw) {
888     setAttribute(RAW_ATTR, Bytes.toBytes(raw));
889     return this;
890   }
891
892   /**
893    * @return True if this Scan is in "raw" mode.
894    */
895   public boolean isRaw() {
896     byte[] attr = getAttribute(RAW_ATTR);
897     return attr == null ? false : Bytes.toBoolean(attr);
898   }
899
900
901
902   /**
903    * Set whether this scan is a small scan.
904    * <p>
905    * A small scan should use pread while a big scan can use seek + read.
906    *
907    * seek + read is fast but can cause two problems: (1) resource contention and (2)
908    * too much network io.
909    *
910    * [89-fb] Using pread for non-compaction read request
911    * https://issues.apache.org/jira/browse/HBASE-7266
912    *
913    * On the other hand, if set to true, we do
914    * openScanner, next, closeScanner in one RPC call, which means better
915    * performance for a small scan. [HBASE-9488].
916    *
917    * Generally, if the scan range is within one data block (64KB), it could be
918    * considered a small scan.
919    *
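   * <p>
   * A minimal sketch (the row keys are arbitrary):
   * <pre>
   *   Scan scan = new Scan(Bytes.toBytes("row-100"), Bytes.toBytes("row-200"));
   *   scan.setSmall(true);
   * </pre>
   *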
920    * @param small true to mark this scan as a small scan
921    */
922   public Scan setSmall(boolean small) {
923     this.small = small;
924     return this;
925   }
926
927   /**
928    * Get whether this scan is a small scan
929    * @return true if small scan
930    */
931   public boolean isSmall() {
932     return small;
933   }
934
935   @Override
936   public Scan setAttribute(String name, byte[] value) {
937     return (Scan) super.setAttribute(name, value);
938   }
939
940   @Override
941   public Scan setId(String id) {
942     return (Scan) super.setId(id);
943   }
944
945   @Override
946   public Scan setAuthorizations(Authorizations authorizations) {
947     return (Scan) super.setAuthorizations(authorizations);
948   }
949
950   @Override
951   public Scan setACL(Map<String, Permission> perms) {
952     return (Scan) super.setACL(perms);
953   }
954
955   @Override
956   public Scan setACL(String user, Permission perms) {
957     return (Scan) super.setACL(user, perms);
958   }
959
960   @Override
961   public Scan setConsistency(Consistency consistency) {
962     return (Scan) super.setConsistency(consistency);
963   }
964
965   @Override
966   public Scan setReplicaId(int Id) {
967     return (Scan) super.setReplicaId(Id);
968   }
969
970   @Override
971   public Scan setIsolationLevel(IsolationLevel level) {
972     return (Scan) super.setIsolationLevel(level);
973   }
974
975   /**
976    * Enable collection of {@link ScanMetrics}. For advanced users.
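   * <p>
   * A minimal sketch (it assumes an existing {@code Table} named {@code table}):
   * <pre>
   *   Scan scan = new Scan();
   *   scan.setScanMetricsEnabled(true);
   *   ResultScanner scanner = table.getScanner(scan);
   *   for (Result r : scanner) {
   *     // consume the results
   *   }
   *   scanner.close();
   *   ScanMetrics metrics = scan.getScanMetrics(); // metrics accumulate in the Scan instance
   * </pre>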
977    * @param enabled Set to true to enable accumulating scan metrics
978    */
979   public Scan setScanMetricsEnabled(final boolean enabled) {
980     setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
981     return this;
982   }
983
984   /**
985    * @return True if collection of scan metrics is enabled. For advanced users.
986    */
987   public boolean isScanMetricsEnabled() {
988     byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
989     return attr == null ? false : Bytes.toBoolean(attr);
990   }
991
992   /**
993    * @return Metrics on this Scan, if metrics were enabled.
994    * @see #setScanMetricsEnabled(boolean)
995    */
996   public ScanMetrics getScanMetrics() {
997     byte [] bytes = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
998     if (bytes == null) return null;
999     return ProtobufUtil.toScanMetrics(bytes);
1000   }
1001
1002   public Boolean isAsyncPrefetch() {
1003     return asyncPrefetch;
1004   }
1005
1006   public Scan setAsyncPrefetch(boolean asyncPrefetch) {
1007     this.asyncPrefetch = asyncPrefetch;
1008     return this;
1009   }
1010 }