/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Used to perform Scan operations.
 * <p>
 * All operations are identical to {@link Get} with the exception of instantiation. Rather than
 * specifying a single row, an optional startRow and stopRow may be defined. If rows are not
 * specified, the Scanner will iterate over all rows.
 * <p>
 * To get all columns from all rows of a Table, create an instance with no constraints; use the
 * {@link #Scan()} constructor. To constrain the scan to specific column families, call
 * {@link #addFamily(byte[]) addFamily} for each family to retrieve on your Scan instance.
 * <p>
 * To get specific columns, call {@link #addColumn(byte[], byte[]) addColumn} for each column to
 * retrieve.
 * <p>
 * To only retrieve columns within a specific range of version timestamps, call
 * {@link #setTimeRange(long, long) setTimeRange}.
 * <p>
 * To only retrieve columns with a specific timestamp, call
 * {@link #setTimestamp(long) setTimestamp}.
 * <p>
 * To limit the number of versions of each column to be returned, call {@link #readVersions(int)}.
 * <p>
 * To limit the maximum number of values returned for each call to next(), call
 * {@link #setBatch(int) setBatch}.
 * <p>
 * To add a filter, call {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}.
 * <p>
 * Small scans were deprecated in 2.0.0 and replaced by {@link #setLimit(int)}, which tells the
 * RegionServer how many rows we want. If the number of returned rows reaches the limit, the
 * RegionServer will close the RegionScanner automatically. The new implementation also fetches
 * data when opening the scanner, which means a scan operation can finish in a single RPC call. A
 * {@link #setReadType(ReadType)} method has also been introduced; you can use it to tell the
 * RegionServer to use pread explicitly.
 * <p>
 * Expert: To explicitly disable server-side block caching for this scan, execute
 * {@link #setCacheBlocks(boolean)}.
 * <p>
 * <em>Note:</em> Usage alters Scan instances. Internally, attributes are updated as the Scan runs
 * and, if enabled, metrics accumulate in the Scan instance. Be aware of this when you clone a Scan
 * instance or reuse a created Scan instance; it is safer to create a new Scan instance per usage.
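 * <p>
 * For example, a minimal usage sketch (assuming a {@code Table} handle named {@code table} has
 * already been obtained from a {@code Connection}; the family, qualifier, and row keys are
 * illustrative):
 *
 * <pre>{@code
 * Scan scan = new Scan()
 *   .withStartRow(Bytes.toBytes("row-0100"))
 *   .withStopRow(Bytes.toBytes("row-0200"))
 *   .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"))
 *   .setLimit(50);
 * try (ResultScanner scanner = table.getScanner(scan)) {
 *   for (Result result : scanner) {
 *     // process each row here
 *   }
 * }
 * }</pre>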
 */
@InterfaceAudience.Public
public class Scan extends Query {
  private static final Logger LOG = LoggerFactory.getLogger(Scan.class);

  private static final String RAW_ATTR = "_raw_";

  private byte[] startRow = HConstants.EMPTY_START_ROW;
  private boolean includeStartRow = true;
  private byte[] stopRow = HConstants.EMPTY_END_ROW;
  private boolean includeStopRow = false;
  private int maxVersions = 1;
  private int batch = -1;

  /**
   * Partial {@link Result}s are {@link Result}s that must be combined to form a complete
   * {@link Result}. The {@link Result}s had to be returned in fragments (i.e. as partials) because
   * the size of the cells in the row exceeded max result size on the server. Typically partial
   * results will be combined client side into complete results before being delivered to the
   * caller. However, if this flag is set, the caller is indicating that they do not mind seeing
   * partial results (i.e. they understand that the results returned from the Scanner may only
   * represent part of a particular row). In such a case, any attempt to combine the partials into
   * a complete result on the client side will be skipped, and the caller will be able to see the
   * exact results returned from the server.
   */
  private boolean allowPartialResults = false;

  private int storeLimit = -1;
  private int storeOffset = 0;

  private static final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";

  // If an application wants to use multiple scans over different tables, each scan must
  // define this attribute with the appropriate table name by calling
  // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
  public static final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";

  /**
   * -1 means no caching specified and the value of {@link HConstants#HBASE_CLIENT_SCANNER_CACHING}
   * (defaulting to {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_CACHING}) will be used
   */
  private int caching = -1;
  private long maxResultSize = -1;
  private boolean cacheBlocks = true;
  private boolean reversed = false;
  private TimeRange tr = TimeRange.allTime();
  private Map<byte[], NavigableSet<byte[]>> familyMap =
    new TreeMap<byte[], NavigableSet<byte[]>>(Bytes.BYTES_COMPARATOR);
  private Boolean asyncPrefetch = null;

  /**
   * Parameter name for client scanner sync/async prefetch toggle. When using the async scanner,
   * prefetching data from the server is done in the background. The parameter currently has no
   * effect if the user has set Scan#setSmall or Scan#setReversed.
   */
  public static final String HBASE_CLIENT_SCANNER_ASYNC_PREFETCH =
    "hbase.client.scanner.async.prefetch";

  /**
   * Default value of {@link #HBASE_CLIENT_SCANNER_ASYNC_PREFETCH}.
   */
  public static final boolean DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = false;

  /**
   * The mvcc read point to use when opening a scanner. Remember to clear it after switching
   * regions, as the mvcc is only valid within region scope.
   */
  private long mvccReadPoint = -1L;

  /**
   * The number of rows we want for this scan. We will terminate the scan if the number of returned
   * rows reaches this value.
   */
  private int limit = -1;

  /**
   * Control whether to use pread on the server side.
   */
  private ReadType readType = ReadType.DEFAULT;

  private boolean needCursorResult = false;

  /**
   * Create a Scan operation across all rows.
   */
  public Scan() {
  }

  /**
   * Creates a new instance of this class while copying all values.
   * @param scan The scan instance to copy from.
   * @throws IOException When copying the values fails.
   */
  public Scan(Scan scan) throws IOException {
    startRow = scan.getStartRow();
    includeStartRow = scan.includeStartRow();
    stopRow = scan.getStopRow();
    includeStopRow = scan.includeStopRow();
    maxVersions = scan.getMaxVersions();
    batch = scan.getBatch();
    storeLimit = scan.getMaxResultsPerColumnFamily();
    storeOffset = scan.getRowOffsetPerColumnFamily();
    caching = scan.getCaching();
    maxResultSize = scan.getMaxResultSize();
    cacheBlocks = scan.getCacheBlocks();
    filter = scan.getFilter(); // clone?
    loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
    consistency = scan.getConsistency();
    this.setIsolationLevel(scan.getIsolationLevel());
    reversed = scan.isReversed();
    asyncPrefetch = scan.isAsyncPrefetch();
    allowPartialResults = scan.getAllowPartialResults();
    tr = scan.getTimeRange(); // TimeRange is immutable
    Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : fams.entrySet()) {
      byte[] fam = entry.getKey();
      NavigableSet<byte[]> cols = entry.getValue();
      if (cols != null && cols.size() > 0) {
        for (byte[] col : cols) {
          addColumn(fam, col);
        }
      } else {
        addFamily(fam);
      }
    }
    for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
      setAttribute(attr.getKey(), attr.getValue());
    }
    for (Map.Entry<byte[], TimeRange> entry : scan.getColumnFamilyTimeRange().entrySet()) {
      TimeRange tr = entry.getValue();
      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
    }
    this.mvccReadPoint = scan.getMvccReadPoint();
    this.limit = scan.getLimit();
    this.needCursorResult = scan.isNeedCursorResult();
    setPriority(scan.getPriority());
    readType = scan.getReadType();
    super.setReplicaId(scan.getReplicaId());
  }

  /**
   * Builds a scan object with the same specs as get.
   * @param get get to model scan after
   */
  public Scan(Get get) {
    this.startRow = get.getRow();
    this.includeStartRow = true;
    this.stopRow = get.getRow();
    this.includeStopRow = true;
    this.filter = get.getFilter();
    this.cacheBlocks = get.getCacheBlocks();
    this.maxVersions = get.getMaxVersions();
    this.storeLimit = get.getMaxResultsPerColumnFamily();
    this.storeOffset = get.getRowOffsetPerColumnFamily();
    this.tr = get.getTimeRange();
    this.familyMap = get.getFamilyMap();
    this.asyncPrefetch = false;
    this.consistency = get.getConsistency();
    this.setIsolationLevel(get.getIsolationLevel());
    this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
    for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
      setAttribute(attr.getKey(), attr.getValue());
    }
    for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) {
      TimeRange tr = entry.getValue();
      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
    }
    this.mvccReadPoint = -1L;
    setPriority(get.getPriority());
    super.setReplicaId(get.getReplicaId());
  }

  public boolean isGetScan() {
    return includeStartRow && includeStopRow
      && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow);
  }

  /**
   * Get all columns from the specified family.
   * <p>
   * Overrides previous calls to addColumn for this family.
   * @param family family name
   */
  public Scan addFamily(byte[] family) {
    familyMap.remove(family);
    familyMap.put(family, null);
    return this;
  }

  /**
   * Get the column from the specified family with the specified qualifier.
   * <p>
   * Overrides previous calls to addFamily for this family.
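   * <p>
   * For example, a sketch of selecting two columns from one family and everything from another
   * (the family and qualifier names here are purely illustrative):
   *
   * <pre>{@code
   * Scan scan = new Scan()
   *   .addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"))
   *   .addColumn(Bytes.toBytes("info"), Bytes.toBytes("age"))
   *   .addFamily(Bytes.toBytes("metrics")); // all columns of "metrics"
   * }</pre>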
   * @param family    family name
   * @param qualifier column qualifier
   */
  public Scan addColumn(byte[] family, byte[] qualifier) {
    NavigableSet<byte[]> set = familyMap.get(family);
    if (set == null) {
      set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
      familyMap.put(family, set);
    }
    if (qualifier == null) {
      qualifier = HConstants.EMPTY_BYTE_ARRAY;
    }
    set.add(qualifier);
    return this;
  }

  /**
   * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp). Note,
   * the default maximum number of versions to return is 1. If your time range spans more than one
   * version and you want all versions returned, up the number of versions beyond the default.
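   * <p>
   * For example, a sketch of fetching every version written within an hour-long window (the
   * timestamps are illustrative epoch-millisecond values):
   *
   * <pre>{@code
   * long start = 1600000000000L;
   * long end = start + 3_600_000L; // one hour later
   * Scan scan = new Scan()
   *   .setTimeRange(start, end) // note: setTimeRange declares IOException
   *   .readAllVersions();
   * }</pre>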
   * @param minStamp minimum timestamp value, inclusive
   * @param maxStamp maximum timestamp value, exclusive
   * @see #readAllVersions()
   * @see #readVersions(int)
   */
  public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
    tr = TimeRange.between(minStamp, maxStamp);
    return this;
  }

  /**
   * Get versions of columns with the specified timestamp. Note, the default maximum number of
   * versions to return is 1. If your time range spans more than one version and you want all
   * versions returned, up the number of versions beyond the default.
   * @param timestamp version timestamp
   * @see #readAllVersions()
   * @see #readVersions(int)
   */
  public Scan setTimestamp(long timestamp) {
    try {
      tr = TimeRange.at(timestamp);
    } catch (Exception e) {
      // This should never happen, unless integer overflow or something extremely wrong...
      LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
      throw e;
    }

    return this;
  }

  @Override
  public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
    super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
    return this;
  }

  /**
   * Set the start row of the scan.
   * <p>
   * If the specified row does not exist, the Scanner will start from the next closest row after the
   * specified row.
   * <p>
   * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
   * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
   * unexpected or even undefined.
   * </p>
   * @param startRow row to start scanner at or after
   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStartRow(byte[] startRow) {
    return withStartRow(startRow, true);
  }

  /**
   * Set the start row of the scan.
   * <p>
   * If the specified row does not exist, or {@code inclusive} is {@code false}, the Scanner will
   * start from the next closest row after the specified row.
   * <p>
   * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
   * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
   * unexpected or even undefined.
   * </p>
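   * <p>
   * For example, a sketch of scanning the half-open key range {@code ["row-0100", "row-0200")}
   * using explicit inclusiveness flags (the row keys are illustrative):
   *
   * <pre>{@code
   * Scan scan = new Scan()
   *   .withStartRow(Bytes.toBytes("row-0100"), true)  // include the start row
   *   .withStopRow(Bytes.toBytes("row-0200"), false); // exclude the stop row
   * }</pre>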
   * @param startRow  row to start scanner at or after
   * @param inclusive whether we should include the start row when scanning
   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStartRow(byte[] startRow, boolean inclusive) {
    if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
      throw new IllegalArgumentException("startRow's length must be less than or equal to "
        + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
    }
    this.startRow = startRow;
    this.includeStartRow = inclusive;
    return this;
  }

  /**
   * Set the stop row of the scan.
   * <p>
   * The scan will include rows that are lexicographically less than the provided stopRow.
   * <p>
   * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
   * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
   * unexpected or even undefined.
   * </p>
   * @param stopRow row to end at (exclusive)
   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStopRow(byte[] stopRow) {
    return withStopRow(stopRow, false);
  }

  /**
   * Set the stop row of the scan.
   * <p>
   * The scan will include rows that are lexicographically less than (or equal to if
   * {@code inclusive} is {@code true}) the provided stopRow.
   * <p>
   * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
   * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
   * unexpected or even undefined.
   * </p>
   * @param stopRow   row to end at
   * @param inclusive whether we should include the stop row when scanning
   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStopRow(byte[] stopRow, boolean inclusive) {
    if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
      throw new IllegalArgumentException("stopRow's length must be less than or equal to "
        + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
    }
    this.stopRow = stopRow;
    this.includeStopRow = inclusive;
    return this;
  }

  /**
   * <p>
   * Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey
   * starts with the specified prefix.
   * </p>
   * <p>
   * This is a utility method that converts the desired rowPrefix into the appropriate values for
   * the startRow and stopRow to achieve the desired result.
   * </p>
   * <p>
   * This can safely be used in combination with setFilter.
   * </p>
   * <p>
   * <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such
   * a combination will yield unexpected and even undefined results.
   * </p>
   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
   * @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method is considered to be
   *             confusing as it does not use a {@link Filter} but instead sets the startRow and
   *             stopRow. Use {@link #setStartStopRowForPrefixScan(byte[])} instead.
   */
  @Deprecated
  public Scan setRowPrefixFilter(byte[] rowPrefix) {
    return setStartStopRowForPrefixScan(rowPrefix);
  }

  /**
   * <p>
   * Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey
   * starts with the specified prefix.
   * </p>
   * <p>
   * This is a utility method that converts the desired rowPrefix into the appropriate values for
   * the startRow and stopRow to achieve the desired result.
   * </p>
   * <p>
   * This can safely be used in combination with setFilter.
   * </p>
   * <p>
   * <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such
   * a combination will yield unexpected and even undefined results.
   * </p>
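   * <p>
   * For example, a sketch of scanning every row whose key starts with a given prefix (the prefix
   * value is illustrative):
   *
   * <pre>{@code
   * Scan scan = new Scan().setStartStopRowForPrefixScan(Bytes.toBytes("user123|"));
   * // equivalent to a start row of "user123|" plus a stop row computed just past the prefix
   * }</pre>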
   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
   */
  public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) {
    if (rowPrefix == null) {
      withStartRow(HConstants.EMPTY_START_ROW);
      withStopRow(HConstants.EMPTY_END_ROW);
    } else {
      this.withStartRow(rowPrefix);
      this.withStopRow(ClientUtil.calculateTheClosestNextRowKeyForPrefix(rowPrefix));
    }
    return this;
  }

  /**
   * Get all available versions.
   */
  public Scan readAllVersions() {
    this.maxVersions = Integer.MAX_VALUE;
    return this;
  }

  /**
   * Get up to the specified number of versions of each column.
   * @param versions specified number of versions for each column
   */
  public Scan readVersions(int versions) {
    this.maxVersions = versions;
    return this;
  }

  /**
   * Set the maximum number of cells to return for each call to next(). Callers should be aware that
   * this is not equivalent to calling {@link #setAllowPartialResults(boolean)}. If you don't allow
   * partial results, the number of cells in each Result must equal your batch setting unless it is
   * the last Result for the current row, so this method is helpful for paging queries. If you just
   * want to prevent OOM at the client, using setAllowPartialResults(true) is better.
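   * <p>
   * For example, a sketch of paging through a very wide row 100 cells at a time (the family name
   * and the {@code table} handle are illustrative):
   *
   * <pre>{@code
   * Scan scan = new Scan().addFamily(Bytes.toBytes("cf")).setBatch(100);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // each Result holds at most 100 cells of the current row
   *     boolean rowContinues = result.mayHaveMoreCellsInRow();
   *   }
   * }
   * }</pre>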
   * @param batch the maximum number of values
   * @see Result#mayHaveMoreCellsInRow()
   */
  public Scan setBatch(int batch) {
    if (this.hasFilter() && this.filter.hasFilterRow()) {
      throw new IncompatibleFilterException(
        "Cannot set batch on a scan using a filter" + " that returns true for filter.hasFilterRow");
    }
    this.batch = batch;
    return this;
  }

  /**
   * Set the maximum number of values to return per row per Column Family.
   * @param limit the maximum number of values returned / row / CF
   */
  public Scan setMaxResultsPerColumnFamily(int limit) {
    this.storeLimit = limit;
    return this;
  }

  /**
   * Set offset for the row per Column Family.
   * @param offset is the number of kvs that will be skipped.
   */
  public Scan setRowOffsetPerColumnFamily(int offset) {
    this.storeOffset = offset;
    return this;
  }

  /**
   * Set the number of rows for caching that will be passed to scanners. If not set, the
   * Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will apply. Higher
   * caching values will enable faster scanners but will use more memory.
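   * <p>
   * For example, a sketch of tuning a long sequential scan (the specific values are illustrative,
   * not recommendations):
   *
   * <pre>{@code
   * Scan scan = new Scan()
   *   .setCaching(500)                   // fetch up to 500 rows per RPC
   *   .setMaxResultSize(2 * 1024 * 1024) // but never more than ~2 MB per RPC
   *   .setCacheBlocks(false);            // avoid churning the server-side block cache
   * }</pre>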
   * @param caching the number of rows for caching
   */
  public Scan setCaching(int caching) {
    this.caching = caching;
    return this;
  }

  /** Returns the maximum result size in bytes. See {@link #setMaxResultSize(long)} */
  public long getMaxResultSize() {
    return maxResultSize;
  }

  /**
   * Set the maximum result size. The default is -1; this means that no specific maximum result size
   * will be set for this scan, and the globally configured value will be used instead (defaults to
   * unlimited).
   * @param maxResultSize The maximum result size in bytes.
   */
  public Scan setMaxResultSize(long maxResultSize) {
    this.maxResultSize = maxResultSize;
    return this;
  }

  @Override
  public Scan setFilter(Filter filter) {
    super.setFilter(filter);
    return this;
  }

  /**
   * Set the familyMap.
   * @param familyMap map of family to qualifier
   */
  public Scan setFamilyMap(Map<byte[], NavigableSet<byte[]>> familyMap) {
    this.familyMap = familyMap;
    return this;
  }

  /**
   * Get the familyMap.
   */
  public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
    return this.familyMap;
  }

  /** Returns the number of families in familyMap */
  public int numFamilies() {
    if (hasFamilies()) {
      return this.familyMap.size();
    }
    return 0;
  }

  /** Returns true if familyMap is non-empty, false otherwise */
  public boolean hasFamilies() {
    return !this.familyMap.isEmpty();
  }

  /** Returns the keys of the familyMap */
  public byte[][] getFamilies() {
    if (hasFamilies()) {
      return this.familyMap.keySet().toArray(new byte[0][0]);
    }
    return null;
  }

  /** Returns the start row */
  public byte[] getStartRow() {
    return this.startRow;
  }

  /** Returns whether we should include the start row when scanning */
  public boolean includeStartRow() {
    return includeStartRow;
  }

  /** Returns the stop row */
  public byte[] getStopRow() {
    return this.stopRow;
  }

  /** Returns whether we should include the stop row when scanning */
  public boolean includeStopRow() {
    return includeStopRow;
  }

  /** Returns the max number of versions to fetch */
  public int getMaxVersions() {
    return this.maxVersions;
  }

  /** Returns maximum number of values to return for a single call to next() */
  public int getBatch() {
    return this.batch;
  }

  /** Returns maximum number of values to return per row per CF */
  public int getMaxResultsPerColumnFamily() {
    return this.storeLimit;
  }

  /**
   * Method for retrieving the scan's offset per row per column family (#kvs to be skipped)
   * @return row offset
   */
  public int getRowOffsetPerColumnFamily() {
    return this.storeOffset;
  }

  /** Returns the caching setting, i.e. the number of rows fetched when calling next on a scanner */
  public int getCaching() {
    return this.caching;
  }

  /** Returns TimeRange */
  public TimeRange getTimeRange() {
    return this.tr;
  }

  /** Returns the filter */
  @Override
  public Filter getFilter() {
    return filter;
  }

  /** Returns true if a filter has been specified, false if not */
  public boolean hasFilter() {
    return filter != null;
  }

  /**
   * Set whether blocks should be cached for this Scan.
   * <p>
   * This is true by default. When true, default settings of the table and family are used (this
   * will never override caching blocks if the block cache is disabled for that family or entirely).
   * @param cacheBlocks if false, default settings are overridden and blocks will not be cached
   */
  public Scan setCacheBlocks(boolean cacheBlocks) {
    this.cacheBlocks = cacheBlocks;
    return this;
  }

  /**
   * Get whether blocks should be cached for this Scan.
   * @return true if default caching should be used, false if blocks should not be cached
   */
  public boolean getCacheBlocks() {
    return cacheBlocks;
  }

  /**
   * Set whether this scan is a reversed one.
   * <p>
   * This is false by default, which means a forward (normal) scan.
   * @param reversed if true, the scan will be in backward order
   */
  public Scan setReversed(boolean reversed) {
    this.reversed = reversed;
    return this;
  }

  /**
   * Get whether this scan is a reversed one.
   * @return true if backward scan, false if forward (default) scan
   */
  public boolean isReversed() {
    return reversed;
  }

  /**
   * Set whether the caller wants to see partial results when the server returns fewer cells than
   * expected. This is helpful while scanning a very wide row, to prevent OOM at the client. By
   * default this value is false and complete results will be assembled client side before being
   * delivered to the caller.
   * @see Result#mayHaveMoreCellsInRow()
   * @see #setBatch(int)
   */
  public Scan setAllowPartialResults(final boolean allowPartialResults) {
    this.allowPartialResults = allowPartialResults;
    return this;
  }

  /**
   * Returns true when the caller of this scan accepts that the results they see may only represent
   * a partial portion of a row. The entire row would be retrieved by subsequent calls to
   * {@link ResultScanner#next()}.
   */
  public boolean getAllowPartialResults() {
    return allowPartialResults;
  }

  @Override
  public Scan setLoadColumnFamiliesOnDemand(boolean value) {
    super.setLoadColumnFamiliesOnDemand(value);
    return this;
  }

  /**
   * Compile the table and column family (i.e. schema) information into a Map. Useful for parsing
   * and aggregation by debugging, logging, and administration tools.
   */
  @Override
  public Map<String, Object> getFingerprint() {
    Map<String, Object> map = new HashMap<>();
    List<String> families = new ArrayList<>();
    if (this.familyMap.isEmpty()) {
      map.put("families", "ALL");
      return map;
    } else {
      map.put("families", families);
    }
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
      families.add(Bytes.toStringBinary(entry.getKey()));
    }
    return map;
  }

  /**
   * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
   * Map along with the fingerprinted information. Useful for debugging, logging, and administration
   * tools.
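   * <p>
   * For example, a sketch of logging a compact description of a scan (the logger is assumed to be
   * whatever SLF4J logger the caller already has):
   *
   * <pre>{@code
   * Scan scan = new Scan().addFamily(Bytes.toBytes("cf")).setLimit(10);
   * log.debug("Executing scan: {}", scan.toMap(5)); // list at most 5 columns before truncating
   * }</pre>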
   * @param maxCols a limit on the number of columns output prior to truncation
   */
  @Override
  public Map<String, Object> toMap(int maxCols) {
    // start with the fingerprint map and build on top of it
    Map<String, Object> map = getFingerprint();
    // map from families to column list replaces fingerprint's list of families
    Map<String, List<String>> familyColumns = new HashMap<>();
    map.put("families", familyColumns);
    // add scalar information first
    map.put("startRow", Bytes.toStringBinary(this.startRow));
    map.put("stopRow", Bytes.toStringBinary(this.stopRow));
    map.put("maxVersions", this.maxVersions);
    map.put("batch", this.batch);
    map.put("caching", this.caching);
    map.put("maxResultSize", this.maxResultSize);
    map.put("cacheBlocks", this.cacheBlocks);
    map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
    List<Long> timeRange = new ArrayList<>(2);
    timeRange.add(this.tr.getMin());
    timeRange.add(this.tr.getMax());
    map.put("timeRange", timeRange);
    int colCount = 0;
    // iterate through affected families and list out up to maxCols columns
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
      List<String> columns = new ArrayList<>();
      familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
      if (entry.getValue() == null) {
        colCount++;
        --maxCols;
        columns.add("ALL");
      } else {
        colCount += entry.getValue().size();
        if (maxCols <= 0) {
          continue;
        }
        for (byte[] column : entry.getValue()) {
          if (--maxCols <= 0) {
            continue;
          }
          columns.add(Bytes.toStringBinary(column));
        }
      }
    }
    map.put("totalColumns", colCount);
    if (this.filter != null) {
      map.put("filter", this.filter.toString());
    }
    // add the id if set
    if (getId() != null) {
      map.put("id", getId());
    }
    map.put("includeStartRow", includeStartRow);
    map.put("includeStopRow", includeStopRow);
    map.put("allowPartialResults", allowPartialResults);
    map.put("storeLimit", storeLimit);
    map.put("storeOffset", storeOffset);
    map.put("reversed", reversed);
    if (null != asyncPrefetch) {
      map.put("asyncPrefetch", asyncPrefetch);
    }
    map.put("mvccReadPoint", mvccReadPoint);
    map.put("limit", limit);
    map.put("readType", readType);
    map.put("needCursorResult", needCursorResult);
    map.put("targetReplicaId", targetReplicaId);
    map.put("consistency", consistency);
    if (!colFamTimeRangeMap.isEmpty()) {
      Map<String, List<Long>> colFamTimeRangeMapStr = colFamTimeRangeMap.entrySet().stream()
        .collect(Collectors.toMap((e) -> Bytes.toStringBinary(e.getKey()), e -> {
          TimeRange value = e.getValue();
          List<Long> rangeList = new ArrayList<>();
          rangeList.add(value.getMin());
          rangeList.add(value.getMax());
          return rangeList;
        }));

      map.put("colFamTimeRangeMap", colFamTimeRangeMapStr);
    }
    map.put("priority", getPriority());
    return map;
  }

  /**
   * Enable/disable "raw" mode for this scan. If "raw" is enabled the scan will return all delete
   * markers and deleted rows that have not yet been collected. This is mostly useful for Scan on
   * column families that have KEEP_DELETED_CELLS enabled. It is an error to specify any column
   * when "raw" is set.
   * @param raw True/False to enable/disable "raw" mode.
   */
  public Scan setRaw(boolean raw) {
    setAttribute(RAW_ATTR, Bytes.toBytes(raw));
    return this;
  }

  /** Returns True if this Scan is in "raw" mode. */
  public boolean isRaw() {
    byte[] attr = getAttribute(RAW_ATTR);
    return attr == null ? false : Bytes.toBoolean(attr);
  }

  @Override
  public Scan setAttribute(String name, byte[] value) {
    super.setAttribute(name, value);
    return this;
  }

  @Override
  public Scan setId(String id) {
    super.setId(id);
    return this;
  }

  @Override
  public Scan setAuthorizations(Authorizations authorizations) {
    super.setAuthorizations(authorizations);
    return this;
  }

  @Override
  public Scan setACL(Map<String, Permission> perms) {
    super.setACL(perms);
    return this;
  }

  @Override
  public Scan setACL(String user, Permission perms) {
    super.setACL(user, perms);
    return this;
  }

  @Override
  public Scan setConsistency(Consistency consistency) {
    super.setConsistency(consistency);
    return this;
  }

  @Override
  public Scan setReplicaId(int Id) {
    super.setReplicaId(Id);
    return this;
  }

  @Override
  public Scan setIsolationLevel(IsolationLevel level) {
    super.setIsolationLevel(level);
    return this;
  }

  @Override
  public Scan setPriority(int priority) {
    super.setPriority(priority);
    return this;
  }

  /**
   * Enable collection of {@link ScanMetrics}. For advanced users.
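   * <p>
   * For example, a sketch of collecting metrics for a scan (assuming {@code table} is a
   * {@code Table} handle obtained elsewhere, and that the {@code ResultScanner} in use exposes the
   * accumulated {@link ScanMetrics} via {@code getScanMetrics()}):
   *
   * <pre>{@code
   * Scan scan = new Scan().setScanMetricsEnabled(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // consume results
   *   }
   *   ScanMetrics metrics = scanner.getScanMetrics();
   *   // metrics.getMetricsMap() exposes counters such as RPC call counts
   * }
   * }</pre>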
   * @param enabled Set to true to enable accumulating scan metrics
   */
  public Scan setScanMetricsEnabled(final boolean enabled) {
    setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
    return this;
  }

  /** Returns True if collection of scan metrics is enabled. For advanced users. */
  public boolean isScanMetricsEnabled() {
    byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
    return attr == null ? false : Bytes.toBoolean(attr);
  }

  public Boolean isAsyncPrefetch() {
    return asyncPrefetch;
  }

  /**
   * @deprecated Since 3.0.0, will be removed in 4.0.0. After building sync client upon async
   *             client, the implementation is always 'async prefetch', so this flag is useless now.
   */
  @Deprecated
  public Scan setAsyncPrefetch(boolean asyncPrefetch) {
    this.asyncPrefetch = asyncPrefetch;
    return this;
  }

  /** Returns the limit of rows for this scan */
  public int getLimit() {
    return limit;
  }

  /**
   * Set the limit of rows for this scan. We will terminate the scan if the number of returned rows
   * reaches this value.
   * <p>
   * This condition is tested last, after all other conditions such as stopRow, filter, etc.
   * @param limit the limit of rows for this scan
   */
  public Scan setLimit(int limit) {
    this.limit = limit;
    return this;
  }

  /**
   * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
   * set {@code readType} to {@link ReadType#PREAD}.
   */
  public Scan setOneRowLimit() {
    return setLimit(1).setReadType(ReadType.PREAD);
  }

  @InterfaceAudience.Public
  public enum ReadType {
    DEFAULT,
    STREAM,
    PREAD
  }

  /** Returns the read type for this scan */
  public ReadType getReadType() {
    return readType;
  }

  /**
   * Set the read type for this scan.
   * <p>
   * Notice that we may choose to use pread even if you specify {@link ReadType#STREAM} here. For
   * example, we will always use pread if this is a get scan.
   */
  public Scan setReadType(ReadType readType) {
    this.readType = readType;
    return this;
  }

  /**
   * Get the mvcc read point used to open a scanner.
   */
  long getMvccReadPoint() {
    return mvccReadPoint;
  }

  /**
   * Set the mvcc read point used to open a scanner.
   */
  Scan setMvccReadPoint(long mvccReadPoint) {
    this.mvccReadPoint = mvccReadPoint;
    return this;
  }

  /**
   * Set the mvcc read point to -1 which means do not use it.
   */
  Scan resetMvccReadPoint() {
    return setMvccReadPoint(-1L);
  }

  /**
   * When the server is slow, or we scan a table with much deleted data, or we use a sparse filter,
   * the server will respond with heartbeats to prevent the scan from timing out. However, the
   * scanner only returns a Result when the client can actually use it, so if there are many
   * heartbeats the blocking time on ResultScanner#next() may be very long, which is not friendly
   * to online services. Set this to true and you can get a special Result whose #isCursor() returns
   * true and which does not contain any real data. It only tells you where the server has scanned
   * so far. You can call next to continue scanning, or open a new scanner with this row key as the
   * start row whenever you want. Users get a cursor when and only when there is a response from the
   * server but we cannot return a Result to users, for example when the response is a heartbeat or
   * when there are partial cells but the user does not allow partial results. For now the cursor is
   * at row level, which means the special Result will only contain a row key.
   * {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor}
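   * <p>
   * For example, a sketch of handling cursor results (assuming {@code table} is a {@code Table}
   * handle obtained elsewhere):
   *
   * <pre>{@code
   * Scan scan = new Scan().setNeedCursorResult(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     if (result.isCursor()) {
   *       // no real data; remember how far the server has scanned
   *       byte[] lastScannedRow = result.getCursor().getRow();
   *     } else {
   *       // a normal Result carrying cells
   *     }
   *   }
   * }
   * }</pre>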
   */
  public Scan setNeedCursorResult(boolean needCursorResult) {
    this.needCursorResult = needCursorResult;
    return this;
  }

  public boolean isNeedCursorResult() {
    return needCursorResult;
  }

  /**
   * Create a new Scan with a cursor. It only sets the position information, i.e. the start row
   * key. The others (like column families, stop row, limit) should still be filled in by the user.
   * {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor}
   */
  public static Scan createScanFromCursor(Cursor cursor) {
    return new Scan().withStartRow(cursor.getRow());
  }
}