001/*
002 *
003 * Licensed to the Apache Software Foundation (ASF) under one
004 * or more contributor license agreements.  See the NOTICE file
005 * distributed with this work for additional information
006 * regarding copyright ownership.  The ASF licenses this file
007 * to you under the Apache License, Version 2.0 (the
008 * "License"); you may not use this file except in compliance
009 * with the License.  You may obtain a copy of the License at
010 *
011 *     http://www.apache.org/licenses/LICENSE-2.0
012 *
013 * Unless required by applicable law or agreed to in writing, software
014 * distributed under the License is distributed on an "AS IS" BASIS,
015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
016 * See the License for the specific language governing permissions and
017 * limitations under the License.
018 */
019
020package org.apache.hadoop.hbase.client;
021
022import java.io.IOException;
023import java.util.ArrayList;
024import java.util.HashMap;
025import java.util.List;
026import java.util.Map;
027import java.util.NavigableSet;
028import java.util.TreeMap;
029import java.util.TreeSet;
030import org.apache.hadoop.hbase.HConstants;
031import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
032import org.apache.hadoop.hbase.filter.Filter;
033import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
034import org.apache.hadoop.hbase.io.TimeRange;
035import org.apache.hadoop.hbase.security.access.Permission;
036import org.apache.hadoop.hbase.security.visibility.Authorizations;
037import org.apache.hadoop.hbase.util.Bytes;
038import org.apache.yetus.audience.InterfaceAudience;
039import org.slf4j.Logger;
040import org.slf4j.LoggerFactory;
041
042/**
043 * Used to perform Scan operations.
044 * <p>
045 * All operations are identical to {@link Get} with the exception of instantiation. Rather than
046 * specifying a single row, an optional startRow and stopRow may be defined. If rows are not
047 * specified, the Scanner will iterate over all rows.
048 * <p>
049 * To get all columns from all rows of a Table, create an instance with no constraints; use the
050 * {@link #Scan()} constructor. To constrain the scan to specific column families, call
051 * {@link #addFamily(byte[]) addFamily} for each family to retrieve on your Scan instance.
052 * <p>
053 * To get specific columns, call {@link #addColumn(byte[], byte[]) addColumn} for each column to
054 * retrieve.
055 * <p>
056 * To only retrieve columns within a specific range of version timestamps, call
057 * {@link #setTimeRange(long, long) setTimeRange}.
058 * <p>
 * To only retrieve columns with a specific timestamp, call
 * {@link #setTimestamp(long) setTimestamp}.
061 * <p>
062 * To limit the number of versions of each column to be returned, call {@link #readVersions(int)}.
063 * <p>
064 * To limit the maximum number of values returned for each call to next(), call
065 * {@link #setBatch(int) setBatch}.
066 * <p>
067 * To add a filter, call {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}.
068 * <p>
 * Small scans are deprecated as of 2.0.0; instead, use the {@link #setLimit(int)} method to tell
 * the RegionServer how many rows are wanted. If the number of returned rows reaches the limit, the
 * RegionServer will close the RegionScanner automatically. The new implementation also fetches
 * data when opening the scanner, which means a scan operation can finish in a single RPC call. A
 * {@link #setReadType(ReadType)} method has also been introduced; you can use it to tell the
 * RegionServer to use pread explicitly.
075 * <p>
076 * Expert: To explicitly disable server-side block caching for this scan, execute
077 * {@link #setCacheBlocks(boolean)}.
078 * <p>
 * <em>Note:</em> Usage alters Scan instances. Internally, attributes are updated as the Scan runs
 * and, if enabled, metrics accumulate in the Scan instance. Be aware of this when cloning a Scan
 * instance or reusing a created Scan instance; it is safer to create a Scan instance per usage.
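 * <p>
 * A minimal usage sketch (the {@code table} handle and the family, qualifier and row-key names
 * below are illustrative, not part of this API):
 *
 * <pre>
 * Scan scan = new Scan()
 *     .withStartRow(Bytes.toBytes("row-0000"))   // inclusive by default
 *     .withStopRow(Bytes.toBytes("row-0100"))    // exclusive by default
 *     .addFamily(Bytes.toBytes("cf"))
 *     .readVersions(1);
 * try (ResultScanner scanner = table.getScanner(scan)) {
 *   for (Result result : scanner) {
 *     byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 *     // process value ...
 *   }
 * }
 * </pre>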
083 */
084@InterfaceAudience.Public
085public class Scan extends Query {
086  private static final Logger LOG = LoggerFactory.getLogger(Scan.class);
087
088  private static final String RAW_ATTR = "_raw_";
089
090  private byte[] startRow = HConstants.EMPTY_START_ROW;
091  private boolean includeStartRow = true;
092  private byte[] stopRow  = HConstants.EMPTY_END_ROW;
093  private boolean includeStopRow = false;
094  private int maxVersions = 1;
095  private int batch = -1;
096
097  /**
   * Partial {@link Result}s are {@link Result}s that must be combined to form a complete
   * {@link Result}. The {@link Result}s had to be returned in fragments (i.e. as partials) because
   * the size of the
100   * cells in the row exceeded max result size on the server. Typically partial results will be
101   * combined client side into complete results before being delivered to the caller. However, if
102   * this flag is set, the caller is indicating that they do not mind seeing partial results (i.e.
103   * they understand that the results returned from the Scanner may only represent part of a
104   * particular row). In such a case, any attempt to combine the partials into a complete result on
105   * the client side will be skipped, and the caller will be able to see the exact results returned
106   * from the server.
107   */
108  private boolean allowPartialResults = false;
109
110  private int storeLimit = -1;
111  private int storeOffset = 0;
112
113  private static final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
114
115  // If an application wants to use multiple scans over different tables each scan must
116  // define this attribute with the appropriate table name by calling
117  // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
  public static final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";
119
120  /**
121   * -1 means no caching specified and the value of {@link HConstants#HBASE_CLIENT_SCANNER_CACHING}
122   * (default to {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_CACHING}) will be used
123   */
124  private int caching = -1;
125  private long maxResultSize = -1;
126  private boolean cacheBlocks = true;
127  private boolean reversed = false;
128  private TimeRange tr = TimeRange.allTime();
129  private Map<byte [], NavigableSet<byte []>> familyMap =
130    new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
131  private Boolean asyncPrefetch = null;
132
133  /**
   * Parameter name for the client scanner sync/async prefetch toggle. When using the async
   * scanner, prefetching data from the server is done in the background. The parameter currently
   * has no effect if the user has set Scan#setSmall or Scan#setReversed.
138   */
139  public static final String HBASE_CLIENT_SCANNER_ASYNC_PREFETCH =
140      "hbase.client.scanner.async.prefetch";
141
142  /**
143   * Default value of {@link #HBASE_CLIENT_SCANNER_ASYNC_PREFETCH}.
144   */
145  public static final boolean DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = false;
146
147  /**
   * Set it to true for a small scan to get better performance. A small scan should use pread,
   * while a big scan can use seek + read. Seek + read is fast but can cause two problems:
   * (1) resource contention and (2) too much network io. [89-fb] Using pread for non-compaction
   * read request https://issues.apache.org/jira/browse/HBASE-7266. On the other hand, if this is
   * set to true, we do openScanner, next, and closeScanner in one RPC call, which means better
   * performance for a small scan. [HBASE-9488]. Generally, if the scan range is within one data
   * block (64KB), it can be considered a small scan.
155   */
156  private boolean small = false;
157
158  /**
   * The mvcc read point to use when opening a scanner. Remember to clear it after switching regions as
160   * the mvcc is only valid within region scope.
161   */
162  private long mvccReadPoint = -1L;
163
164  /**
   * The number of rows we want for this scan. We will terminate the scan if the number of returned
166   * rows reaches this value.
167   */
168  private int limit = -1;
169
170  /**
171   * Control whether to use pread at server side.
172   */
173  private ReadType readType = ReadType.DEFAULT;
174
175  private boolean needCursorResult = false;
176
177  /**
178   * Create a Scan operation across all rows.
179   */
180  public Scan() {}
181
182  /**
183   * Creates a new instance of this class while copying all values.
184   *
185   * @param scan  The scan instance to copy from.
186   * @throws IOException When copying the values fails.
187   */
188  public Scan(Scan scan) throws IOException {
189    startRow = scan.getStartRow();
190    includeStartRow = scan.includeStartRow();
191    stopRow  = scan.getStopRow();
192    includeStopRow = scan.includeStopRow();
193    maxVersions = scan.getMaxVersions();
194    batch = scan.getBatch();
195    storeLimit = scan.getMaxResultsPerColumnFamily();
196    storeOffset = scan.getRowOffsetPerColumnFamily();
197    caching = scan.getCaching();
198    maxResultSize = scan.getMaxResultSize();
199    cacheBlocks = scan.getCacheBlocks();
200    filter = scan.getFilter(); // clone?
201    loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
202    consistency = scan.getConsistency();
203    this.setIsolationLevel(scan.getIsolationLevel());
204    reversed = scan.isReversed();
205    asyncPrefetch = scan.isAsyncPrefetch();
206    small = scan.isSmall();
207    allowPartialResults = scan.getAllowPartialResults();
208    tr = scan.getTimeRange(); // TimeRange is immutable
209    Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
210    for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
211      byte [] fam = entry.getKey();
212      NavigableSet<byte[]> cols = entry.getValue();
213      if (cols != null && cols.size() > 0) {
214        for (byte[] col : cols) {
215          addColumn(fam, col);
216        }
217      } else {
218        addFamily(fam);
219      }
220    }
221    for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
222      setAttribute(attr.getKey(), attr.getValue());
223    }
224    for (Map.Entry<byte[], TimeRange> entry : scan.getColumnFamilyTimeRange().entrySet()) {
225      TimeRange tr = entry.getValue();
226      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
227    }
228    this.mvccReadPoint = scan.getMvccReadPoint();
229    this.limit = scan.getLimit();
230    this.needCursorResult = scan.isNeedCursorResult();
231    setPriority(scan.getPriority());
232    readType = scan.getReadType();
233    super.setReplicaId(scan.getReplicaId());
234  }
235
236  /**
237   * Builds a scan object with the same specs as get.
238   * @param get get to model scan after
239   */
240  public Scan(Get get) {
241    this.startRow = get.getRow();
242    this.includeStartRow = true;
243    this.stopRow = get.getRow();
244    this.includeStopRow = true;
245    this.filter = get.getFilter();
246    this.cacheBlocks = get.getCacheBlocks();
247    this.maxVersions = get.getMaxVersions();
248    this.storeLimit = get.getMaxResultsPerColumnFamily();
249    this.storeOffset = get.getRowOffsetPerColumnFamily();
250    this.tr = get.getTimeRange();
251    this.familyMap = get.getFamilyMap();
252    this.asyncPrefetch = false;
253    this.consistency = get.getConsistency();
254    this.setIsolationLevel(get.getIsolationLevel());
255    this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
256    for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
257      setAttribute(attr.getKey(), attr.getValue());
258    }
259    for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) {
260      TimeRange tr = entry.getValue();
261      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
262    }
263    this.mvccReadPoint = -1L;
264    setPriority(get.getPriority());
265    super.setReplicaId(get.getReplicaId());
266  }
267
268  public boolean isGetScan() {
269    return includeStartRow && includeStopRow
270        && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow);
271  }
272
273  /**
274   * Get all columns from the specified family.
275   * <p>
276   * Overrides previous calls to addColumn for this family.
277   * @param family family name
278   * @return this
279   */
280  public Scan addFamily(byte [] family) {
281    familyMap.remove(family);
282    familyMap.put(family, null);
283    return this;
284  }
285
286  /**
287   * Get the column from the specified family with the specified qualifier.
288   * <p>
289   * Overrides previous calls to addFamily for this family.
290   * @param family family name
291   * @param qualifier column qualifier
292   * @return this
293   */
294  public Scan addColumn(byte [] family, byte [] qualifier) {
295    NavigableSet<byte []> set = familyMap.get(family);
296    if(set == null) {
297      set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
298      familyMap.put(family, set);
299    }
300    if (qualifier == null) {
301      qualifier = HConstants.EMPTY_BYTE_ARRAY;
302    }
303    set.add(qualifier);
304    return this;
305  }
306
307  /**
308   * Get versions of columns only within the specified timestamp range,
309   * [minStamp, maxStamp).  Note, default maximum versions to return is 1.  If
310   * your time range spans more than one version and you want all versions
311   * returned, up the number of versions beyond the default.
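   * <p>
   * For example, to fetch every version written during the last hour (the window is illustrative):
   * <pre>
   * long now = System.currentTimeMillis();
   * Scan scan = new Scan()
   *     .setTimeRange(now - 3600 * 1000L, now)  // [min, max), may throw IOException
   *     .readAllVersions();
   * </pre>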
312   * @param minStamp minimum timestamp value, inclusive
313   * @param maxStamp maximum timestamp value, exclusive
314   * @see #readAllVersions()
315   * @see #readVersions(int)
316   * @return this
317   */
318  public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
319    tr = new TimeRange(minStamp, maxStamp);
320    return this;
321  }
322
323  /**
324   * Get versions of columns with the specified timestamp. Note, default maximum
325   * versions to return is 1.  If your time range spans more than one version
326   * and you want all versions returned, up the number of versions beyond the
   * default.
328   * @param timestamp version timestamp
329   * @see #readAllVersions()
330   * @see #readVersions(int)
331   * @return this
332   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
333   *             Use {@link #setTimestamp(long)} instead
334   */
335  @Deprecated
336  public Scan setTimeStamp(long timestamp)
337  throws IOException {
338    return this.setTimestamp(timestamp);
339  }
340
341  /**
342   * Get versions of columns with the specified timestamp. Note, default maximum
343   * versions to return is 1.  If your time range spans more than one version
344   * and you want all versions returned, up the number of versions beyond the
   * default.
346   * @param timestamp version timestamp
347   * @see #readAllVersions()
348   * @see #readVersions(int)
349   * @return this
350   */
351  public Scan setTimestamp(long timestamp) {
352    try {
353      tr = new TimeRange(timestamp, timestamp + 1);
354    } catch(Exception e) {
355      // This should never happen, unless integer overflow or something extremely wrong...
356      LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
357      throw e;
358    }
359
360    return this;
361  }
362
363  @Override public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
364    return (Scan) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
365  }
366
367  /**
368   * Set the start row of the scan.
369   * <p>
370   * If the specified row does not exist, the Scanner will start from the next closest row after the
371   * specified row.
372   * @param startRow row to start scanner at or after
373   * @return this
374   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
375   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
376   */
377  public Scan withStartRow(byte[] startRow) {
378    return withStartRow(startRow, true);
379  }
380
381  /**
382   * Set the start row of the scan.
383   * <p>
384   * If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner
385   * will start from the next closest row after the specified row.
386   * @param startRow row to start scanner at or after
   * @param inclusive whether we should include the start row when scanning
388   * @return this
389   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
390   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
391   */
392  public Scan withStartRow(byte[] startRow, boolean inclusive) {
393    if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
394      throw new IllegalArgumentException("startRow's length must be less than or equal to "
395          + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
396    }
397    this.startRow = startRow;
398    this.includeStartRow = inclusive;
399    return this;
400  }
401
402  /**
403   * Set the stop row of the scan.
404   * <p>
405   * The scan will include rows that are lexicographically less than the provided stopRow.
406   * <p>
   * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u>, use
   * {@link #setRowPrefixFilter(byte[])}; simply appending a trailing 0 byte to the prefix as the
   * stop row will not yield the desired result.
409   * </p>
410   * @param stopRow row to end at (exclusive)
411   * @return this
412   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
413   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
414   */
415  public Scan withStopRow(byte[] stopRow) {
416    return withStopRow(stopRow, false);
417  }
418
419  /**
420   * Set the stop row of the scan.
421   * <p>
422   * The scan will include rows that are lexicographically less than (or equal to if
423   * {@code inclusive} is {@code true}) the provided stopRow.
424   * @param stopRow row to end at
   * @param inclusive whether we should include the stop row when scanning
426   * @return this
427   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
428   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
429   */
430  public Scan withStopRow(byte[] stopRow, boolean inclusive) {
431    if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
432      throw new IllegalArgumentException("stopRow's length must be less than or equal to "
433          + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
434    }
435    this.stopRow = stopRow;
436    this.includeStopRow = inclusive;
437    return this;
438  }
439
440  /**
441   * <p>Set a filter (using stopRow and startRow) so the result set only contains rows where the
442   * rowKey starts with the specified prefix.</p>
443   * <p>This is a utility method that converts the desired rowPrefix into the appropriate values
444   * for the startRow and stopRow to achieve the desired result.</p>
445   * <p>This can safely be used in combination with setFilter.</p>
446   * <p><b>NOTE: Doing a {@link #withStartRow(byte[])} and/or {@link #withStopRow(byte[])}
447   * after this method will yield undefined results.</b></p>
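   * <p>A short sketch, assuming row keys that share a known prefix (the names here are
   * illustrative):</p>
   * <pre>
   * Scan scan = new Scan().setRowPrefixFilter(Bytes.toBytes("user-42|"));
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // every returned row key starts with "user-42|"
   *   }
   * }
   * </pre>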
448   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
449   * @return this
450   */
451  public Scan setRowPrefixFilter(byte[] rowPrefix) {
452    if (rowPrefix == null) {
453      withStartRow(HConstants.EMPTY_START_ROW);
454      withStopRow(HConstants.EMPTY_END_ROW);
455    } else {
456      this.withStartRow(rowPrefix);
457      this.withStopRow(ClientUtil.calculateTheClosestNextRowKeyForPrefix(rowPrefix));
458    }
459    return this;
460  }
461
462  /**
463   * Get all available versions.
464   * @return this
465   */
466  public Scan readAllVersions() {
467    this.maxVersions = Integer.MAX_VALUE;
468    return this;
469  }
470
471  /**
472   * Get up to the specified number of versions of each column.
473   * @param versions specified number of versions for each column
474   * @return this
475   */
476  public Scan readVersions(int versions) {
477    this.maxVersions = versions;
478    return this;
479  }
480
481  /**
482   * Set the maximum number of cells to return for each call to next(). Callers should be aware
483   * that this is not equivalent to calling {@link #setAllowPartialResults(boolean)}.
   * If you don't allow partial results, the number of cells in each Result must be equal to your
   * batch setting unless it is the last Result for the current row. So this method is helpful in
   * paging queries. If you just want to prevent OOM at the client, it is better to use
   * setAllowPartialResults(true).
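   * <p>
   * A paging-style sketch for a very wide row (the family name and batch size are illustrative):
   * <pre>
   * Scan scan = new Scan().addFamily(Bytes.toBytes("cf")).setBatch(100);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // each Result carries at most 100 cells of the current row; use
   *     // Result#mayHaveMoreCellsInRow() to tell whether the row continues in the next Result
   *   }
   * }
   * </pre>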
487   * @param batch the maximum number of values
488   * @see Result#mayHaveMoreCellsInRow()
489   */
490  public Scan setBatch(int batch) {
491    if (this.hasFilter() && this.filter.hasFilterRow()) {
492      throw new IncompatibleFilterException(
493        "Cannot set batch on a scan using a filter" +
494        " that returns true for filter.hasFilterRow");
495    }
496    this.batch = batch;
497    return this;
498  }
499
500  /**
501   * Set the maximum number of values to return per row per Column Family
502   * @param limit the maximum number of values returned / row / CF
503   */
504  public Scan setMaxResultsPerColumnFamily(int limit) {
505    this.storeLimit = limit;
506    return this;
507  }
508
509  /**
510   * Set offset for the row per Column Family.
511   * @param offset is the number of kvs that will be skipped.
512   */
513  public Scan setRowOffsetPerColumnFamily(int offset) {
514    this.storeOffset = offset;
515    return this;
516  }
517
518  /**
519   * Set the number of rows for caching that will be passed to scanners.
520   * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
521   * apply.
522   * Higher caching values will enable faster scanners but will use more memory.
523   * @param caching the number of rows for caching
524   */
525  public Scan setCaching(int caching) {
526    this.caching = caching;
527    return this;
528  }
529
530  /**
531   * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)}
532   */
533  public long getMaxResultSize() {
534    return maxResultSize;
535  }
536
537  /**
538   * Set the maximum result size. The default is -1; this means that no specific
539   * maximum result size will be set for this scan, and the global configured
540   * value will be used instead. (Defaults to unlimited).
541   *
542   * @param maxResultSize The maximum result size in bytes.
543   */
544  public Scan setMaxResultSize(long maxResultSize) {
545    this.maxResultSize = maxResultSize;
546    return this;
547  }
548
549  @Override
550  public Scan setFilter(Filter filter) {
551    super.setFilter(filter);
552    return this;
553  }
554
555  /**
   * Set the familyMap.
557   * @param familyMap map of family to qualifier
558   * @return this
559   */
560  public Scan setFamilyMap(Map<byte [], NavigableSet<byte []>> familyMap) {
561    this.familyMap = familyMap;
562    return this;
563  }
564
565  /**
   * Get the familyMap.
567   * @return familyMap
568   */
569  public Map<byte [], NavigableSet<byte []>> getFamilyMap() {
570    return this.familyMap;
571  }
572
573  /**
574   * @return the number of families in familyMap
575   */
576  public int numFamilies() {
577    if(hasFamilies()) {
578      return this.familyMap.size();
579    }
580    return 0;
581  }
582
583  /**
584   * @return true if familyMap is non empty, false otherwise
585   */
586  public boolean hasFamilies() {
587    return !this.familyMap.isEmpty();
588  }
589
590  /**
591   * @return the keys of the familyMap
592   */
593  public byte[][] getFamilies() {
594    if(hasFamilies()) {
595      return this.familyMap.keySet().toArray(new byte[0][0]);
596    }
597    return null;
598  }
599
600  /**
601   * @return the startrow
602   */
603  public byte [] getStartRow() {
604    return this.startRow;
605  }
606
607  /**
   * @return true if we should include the start row when scanning
609   */
610  public boolean includeStartRow() {
611    return includeStartRow;
612  }
613
614  /**
615   * @return the stoprow
616   */
617  public byte[] getStopRow() {
618    return this.stopRow;
619  }
620
621  /**
   * @return true if we should include the stop row when scanning
623   */
624  public boolean includeStopRow() {
625    return includeStopRow;
626  }
627
628  /**
629   * @return the max number of versions to fetch
630   */
631  public int getMaxVersions() {
632    return this.maxVersions;
633  }
634
635  /**
636   * @return maximum number of values to return for a single call to next()
637   */
638  public int getBatch() {
639    return this.batch;
640  }
641
642  /**
643   * @return maximum number of values to return per row per CF
644   */
645  public int getMaxResultsPerColumnFamily() {
646    return this.storeLimit;
647  }
648
649  /**
650   * Method for retrieving the scan's offset per row per column
651   * family (#kvs to be skipped)
652   * @return row offset
653   */
654  public int getRowOffsetPerColumnFamily() {
655    return this.storeOffset;
656  }
657
658  /**
659   * @return caching the number of rows fetched when calling next on a scanner
660   */
661  public int getCaching() {
662    return this.caching;
663  }
664
665  /**
666   * @return TimeRange
667   */
668  public TimeRange getTimeRange() {
669    return this.tr;
670  }
671
672  /**
673   * @return RowFilter
674   */
675  @Override
676  public Filter getFilter() {
677    return filter;
678  }
679
680  /**
   * @return true if a filter has been specified, false if not
682   */
683  public boolean hasFilter() {
684    return filter != null;
685  }
686
687  /**
688   * Set whether blocks should be cached for this Scan.
689   * <p>
690   * This is true by default.  When true, default settings of the table and
691   * family are used (this will never override caching blocks if the block
692   * cache is disabled for that family or entirely).
693   *
694   * @param cacheBlocks if false, default settings are overridden and blocks
695   * will not be cached
696   */
697  public Scan setCacheBlocks(boolean cacheBlocks) {
698    this.cacheBlocks = cacheBlocks;
699    return this;
700  }
701
702  /**
703   * Get whether blocks should be cached for this Scan.
704   * @return true if default caching should be used, false if blocks should not
705   * be cached
706   */
707  public boolean getCacheBlocks() {
708    return cacheBlocks;
709  }
710
711  /**
712   * Set whether this scan is a reversed one
713   * <p>
   * This is false by default, which means a forward (normal) scan.
   *
   * @param reversed if true, the scan will be in backward order
717   * @return this
718   */
719  public Scan setReversed(boolean reversed) {
720    this.reversed = reversed;
721    return this;
722  }
723
724  /**
725   * Get whether this scan is a reversed one.
726   * @return true if backward scan, false if forward(default) scan
727   */
728  public boolean isReversed() {
729    return reversed;
730  }
731
732  /**
   * Set whether the caller wants to see the partial results when the server returns
   * less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at the client.
735   * By default this value is false and the complete results will be assembled client side
736   * before being delivered to the caller.
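   * <p>
   * A sketch of consuming partial results (the family name is illustrative):
   * <pre>
   * Scan scan = new Scan().addFamily(Bytes.toBytes("cf")).setAllowPartialResults(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result partial : scanner) {
   *     // a single row may be spread over several consecutive Results here
   *     boolean rowContinues = partial.mayHaveMoreCellsInRow();
   *     // process partial.rawCells() ...
   *   }
   * }
   * </pre>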
737   * @param allowPartialResults
738   * @return this
739   * @see Result#mayHaveMoreCellsInRow()
740   * @see #setBatch(int)
741   */
742  public Scan setAllowPartialResults(final boolean allowPartialResults) {
743    this.allowPartialResults = allowPartialResults;
744    return this;
745  }
746
747  /**
   * @return true when the caller of this scan understands that the results they will see may
749   *         only represent a partial portion of a row. The entire row would be retrieved by
750   *         subsequent calls to {@link ResultScanner#next()}
751   */
752  public boolean getAllowPartialResults() {
753    return allowPartialResults;
754  }
755
756  @Override
757  public Scan setLoadColumnFamiliesOnDemand(boolean value) {
758    return (Scan) super.setLoadColumnFamiliesOnDemand(value);
759  }
760
761  /**
762   * Compile the table and column family (i.e. schema) information
763   * into a String. Useful for parsing and aggregation by debugging,
764   * logging, and administration tools.
765   * @return Map
766   */
767  @Override
768  public Map<String, Object> getFingerprint() {
769    Map<String, Object> map = new HashMap<>();
770    List<String> families = new ArrayList<>();
771    if(this.familyMap.isEmpty()) {
772      map.put("families", "ALL");
773      return map;
774    } else {
775      map.put("families", families);
776    }
777    for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
778        this.familyMap.entrySet()) {
779      families.add(Bytes.toStringBinary(entry.getKey()));
780    }
781    return map;
782  }
783
784  /**
785   * Compile the details beyond the scope of getFingerprint (row, columns,
786   * timestamps, etc.) into a Map along with the fingerprinted information.
787   * Useful for debugging, logging, and administration tools.
788   * @param maxCols a limit on the number of columns output prior to truncation
789   * @return Map
790   */
791  @Override
792  public Map<String, Object> toMap(int maxCols) {
    // start with the fingerprint map and build on top of it
794    Map<String, Object> map = getFingerprint();
795    // map from families to column list replaces fingerprint's list of families
796    Map<String, List<String>> familyColumns = new HashMap<>();
797    map.put("families", familyColumns);
798    // add scalar information first
799    map.put("startRow", Bytes.toStringBinary(this.startRow));
800    map.put("stopRow", Bytes.toStringBinary(this.stopRow));
801    map.put("maxVersions", this.maxVersions);
802    map.put("batch", this.batch);
803    map.put("caching", this.caching);
804    map.put("maxResultSize", this.maxResultSize);
805    map.put("cacheBlocks", this.cacheBlocks);
806    map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
807    List<Long> timeRange = new ArrayList<>(2);
808    timeRange.add(this.tr.getMin());
809    timeRange.add(this.tr.getMax());
810    map.put("timeRange", timeRange);
811    int colCount = 0;
812    // iterate through affected families and list out up to maxCols columns
813    for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
814      this.familyMap.entrySet()) {
815      List<String> columns = new ArrayList<>();
816      familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
817      if(entry.getValue() == null) {
818        colCount++;
819        --maxCols;
820        columns.add("ALL");
821      } else {
822        colCount += entry.getValue().size();
823        if (maxCols <= 0) {
824          continue;
825        }
826        for (byte [] column : entry.getValue()) {
827          if (--maxCols <= 0) {
828            continue;
829          }
830          columns.add(Bytes.toStringBinary(column));
831        }
832      }
833    }
834    map.put("totalColumns", colCount);
835    if (this.filter != null) {
836      map.put("filter", this.filter.toString());
837    }
838    // add the id if set
839    if (getId() != null) {
840      map.put("id", getId());
841    }
842    return map;
843  }
844
845  /**
846   * Enable/disable "raw" mode for this scan.
   * If "raw" is enabled the scan will return all delete markers and deleted rows that have not
   * yet been collected. This is mostly useful for Scans on column families that have
   * KEEP_DELETED_CELLS enabled.
852   * It is an error to specify any column when "raw" is set.
853   * @param raw True/False to enable/disable "raw" mode.
854   */
855  public Scan setRaw(boolean raw) {
856    setAttribute(RAW_ATTR, Bytes.toBytes(raw));
857    return this;
858  }
859
860  /**
861   * @return True if this Scan is in "raw" mode.
862   */
863  public boolean isRaw() {
864    byte[] attr = getAttribute(RAW_ATTR);
865    return attr == null ? false : Bytes.toBoolean(attr);
866  }
867
868  /**
869   * Set whether this scan is a small scan
870   * <p>
   * A small scan should use pread, while a big scan can use seek + read. Seek + read is fast but
   * can cause two problems: (1) resource contention and (2) too much network io. [89-fb] Using
   * pread for non-compaction read request https://issues.apache.org/jira/browse/HBASE-7266. On the
   * other hand, if this is set to true, we do openScanner, next, and closeScanner in one RPC call,
   * which means better performance for a small scan. [HBASE-9488]. Generally, if the scan range is
   * within one data block (64KB), it can be considered a small scan.
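   * <p>
   * A sketch of the suggested replacement (the limit value is illustrative):
   * <pre>
   * Scan scan = new Scan()
   *     .setLimit(10)
   *     .setReadType(ReadType.PREAD);
   * </pre>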
877   * @param small
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #setLimit(int)} and
   *   {@link #setReadType(ReadType)} instead. As for the one-rpc optimization, we now also fetch
   *   data when opening the scanner, and if the number of rows reaches the limit we close the
   *   scanner automatically, which means we fall back to one rpc.
882   * @see #setLimit(int)
883   * @see #setReadType(ReadType)
884   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17045">HBASE-17045</a>
885   */
886  @Deprecated
887  public Scan setSmall(boolean small) {
888    this.small = small;
889    this.readType = ReadType.PREAD;
890    return this;
891  }
892
893  /**
894   * Get whether this scan is a small scan
895   * @return true if small scan
896   * @deprecated since 2.0.0 and will be removed in 3.0.0. See the comment of
897   *   {@link #setSmall(boolean)}
898   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17045">HBASE-17045</a>
899   */
900  @Deprecated
901  public boolean isSmall() {
902    return small;
903  }
904
905  @Override
906  public Scan setAttribute(String name, byte[] value) {
907    return (Scan) super.setAttribute(name, value);
908  }
909
910  @Override
911  public Scan setId(String id) {
912    return (Scan) super.setId(id);
913  }
914
915  @Override
916  public Scan setAuthorizations(Authorizations authorizations) {
917    return (Scan) super.setAuthorizations(authorizations);
918  }
919
920  @Override
921  public Scan setACL(Map<String, Permission> perms) {
922    return (Scan) super.setACL(perms);
923  }
924
925  @Override
926  public Scan setACL(String user, Permission perms) {
927    return (Scan) super.setACL(user, perms);
928  }
929
930  @Override
931  public Scan setConsistency(Consistency consistency) {
932    return (Scan) super.setConsistency(consistency);
933  }
934
935  @Override
  public Scan setReplicaId(int id) {
    return (Scan) super.setReplicaId(id);
938  }
939
940  @Override
941  public Scan setIsolationLevel(IsolationLevel level) {
942    return (Scan) super.setIsolationLevel(level);
943  }
944
945  @Override
946  public Scan setPriority(int priority) {
947    return (Scan) super.setPriority(priority);
948  }
949
950  /**
951   * Enable collection of {@link ScanMetrics}. For advanced users.
952   * @param enabled Set to true to enable accumulating scan metrics
953   */
954  public Scan setScanMetricsEnabled(final boolean enabled) {
955    setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
956    return this;
957  }
958
959  /**
960   * @return True if collection of scan metrics is enabled. For advanced users.
961   */
962  public boolean isScanMetricsEnabled() {
963    byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
964    return attr == null ? false : Bytes.toBoolean(attr);
965  }
966
967  public Boolean isAsyncPrefetch() {
968    return asyncPrefetch;
969  }
970
971  /**
972   * @deprecated Since 3.0.0, will be removed in 4.0.0. After building sync client upon async
973   *             client, the implementation is always 'async prefetch', so this flag is useless now.
974   */
975  @Deprecated
976  public Scan setAsyncPrefetch(boolean asyncPrefetch) {
977    this.asyncPrefetch = asyncPrefetch;
978    return this;
979  }
980
981  /**
982   * @return the limit of rows for this scan
983   */
984  public int getLimit() {
985    return limit;
986  }
987
988  /**
989   * Set the limit of rows for this scan. We will terminate the scan if the number of returned rows
990   * reaches this value.
991   * <p>
   * This condition will be tested last, after all other conditions such as stopRow, filter, etc.
993   * @param limit the limit of rows for this scan
994   * @return this
995   */
996  public Scan setLimit(int limit) {
997    this.limit = limit;
998    return this;
999  }
1000
1001  /**
1002   * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
1003   * set {@code readType} to {@link ReadType#PREAD}.
1004   * @return this
1005   */
1006  public Scan setOneRowLimit() {
1007    return setLimit(1).setReadType(ReadType.PREAD);
1008  }
1009
1010  @InterfaceAudience.Public
1011  public enum ReadType {
1012    DEFAULT, STREAM, PREAD
1013  }
1014
1015  /**
1016   * @return the read type for this scan
1017   */
1018  public ReadType getReadType() {
1019    return readType;
1020  }
1021
1022  /**
1023   * Set the read type for this scan.
1024   * <p>
   * Notice that we may choose to use pread even if you specify {@link ReadType#STREAM} here. For
1026   * example, we will always use pread if this is a get scan.
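   * <p>
   * A sketch that hints streaming reads for a long scan (whether stream or pread is actually used
   * is decided server side):
   * <pre>
   * Scan scan = new Scan().setReadType(ReadType.STREAM);
   * </pre>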
1027   * @return this
1028   */
1029  public Scan setReadType(ReadType readType) {
1030    this.readType = readType;
1031    return this;
1032  }
1033
1034  /**
1035   * Get the mvcc read point used to open a scanner.
1036   */
1037  long getMvccReadPoint() {
1038    return mvccReadPoint;
1039  }
1040
1041  /**
1042   * Set the mvcc read point used to open a scanner.
1043   */
1044  Scan setMvccReadPoint(long mvccReadPoint) {
1045    this.mvccReadPoint = mvccReadPoint;
1046    return this;
1047  }
1048
1049  /**
1050   * Set the mvcc read point to -1 which means do not use it.
1051   */
1052  Scan resetMvccReadPoint() {
1053    return setMvccReadPoint(-1L);
1054  }
1055
1056  /**
   * When the server is slow, or we scan a table with many deleted rows, or we use a sparse filter,
   * the server will respond with heartbeat messages to prevent a timeout. However, the scanner can
   * only return a Result to the client when it has real data, so if there are many heartbeats the
   * blocking time of ResultScanner#next() may be very long, which is not friendly to online
   * services.
   *
   * Set this to true and you can get a special Result whose #isCursor() returns true and which
   * does not contain any real data. It only tells you where the server has scanned so far. You can
   * call next to continue scanning, or open a new scanner with this row key as the start row
   * whenever you want.
   *
   * Users get a cursor when and only when there is a response from the server but we cannot return
   * a Result to users, for example, when the response is a heartbeat or when there are partial
   * cells but users do not allow partial results.
   *
   * Currently the cursor is at row level, which means the special Result will only contain a row
   * key.
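   *
   * <p>
   * A sketch of consuming cursor results (the resume handling is illustrative):
   * <pre>
   * Scan scan = new Scan().setNeedCursorResult(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   Result result;
   *   while ((result = scanner.next()) != null) {
   *     if (result.isCursor()) {
   *       // no real cells; the cursor only tells where the server has scanned to
   *       Cursor cursor = result.getCursor();
   *       // e.g. remember cursor.getRow() so a later Scan.createScanFromCursor(cursor) can resume
   *       continue;
   *     }
   *     // a normal Result with real cells
   *   }
   * }
   * </pre>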
1071   * {@link Result#isCursor()}
1072   * {@link Result#getCursor()}
1073   * {@link Cursor}
1074   */
1075  public Scan setNeedCursorResult(boolean needCursorResult) {
1076    this.needCursorResult = needCursorResult;
1077    return this;
1078  }
1079
1080  public boolean isNeedCursorResult() {
1081    return needCursorResult;
1082  }
1083
1084  /**
   * Create a new Scan with a cursor. It only sets the position information like the start row key.
1086   * The others (like cfs, stop row, limit) should still be filled in by the user.
1087   * {@link Result#isCursor()}
1088   * {@link Result#getCursor()}
1089   * {@link Cursor}
1090   */
1091  public static Scan createScanFromCursor(Cursor cursor) {
1092    return new Scan().withStartRow(cursor.getRow());
1093  }
1094}