001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.client;
019
020import java.io.IOException;
021import java.util.ArrayList;
022import java.util.HashMap;
023import java.util.List;
024import java.util.Map;
025import java.util.NavigableSet;
026import java.util.TreeMap;
027import java.util.TreeSet;
028import java.util.stream.Collectors;
029import org.apache.hadoop.hbase.HConstants;
030import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
031import org.apache.hadoop.hbase.filter.Filter;
032import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
033import org.apache.hadoop.hbase.io.TimeRange;
034import org.apache.hadoop.hbase.security.access.Permission;
035import org.apache.hadoop.hbase.security.visibility.Authorizations;
036import org.apache.hadoop.hbase.util.Bytes;
037import org.apache.yetus.audience.InterfaceAudience;
038import org.slf4j.Logger;
039import org.slf4j.LoggerFactory;
040
041/**
042 * Used to perform Scan operations.
043 * <p>
044 * All operations are identical to {@link Get} with the exception of instantiation. Rather than
045 * specifying a single row, an optional startRow and stopRow may be defined. If rows are not
046 * specified, the Scanner will iterate over all rows.
047 * <p>
048 * To get all columns from all rows of a Table, create an instance with no constraints; use the
049 * {@link #Scan()} constructor. To constrain the scan to specific column families, call
050 * {@link #addFamily(byte[]) addFamily} for each family to retrieve on your Scan instance.
051 * <p>
052 * To get specific columns, call {@link #addColumn(byte[], byte[]) addColumn} for each column to
053 * retrieve.
054 * <p>
055 * To only retrieve columns within a specific range of version timestamps, call
056 * {@link #setTimeRange(long, long) setTimeRange}.
057 * <p>
 * To only retrieve columns with a specific timestamp, call
 * {@link #setTimestamp(long) setTimestamp}.
060 * <p>
061 * To limit the number of versions of each column to be returned, call {@link #readVersions(int)}.
062 * <p>
063 * To limit the maximum number of values returned for each call to next(), call
064 * {@link #setBatch(int) setBatch}.
065 * <p>
066 * To add a filter, call {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}.
067 * <p>
 * Small scans were deprecated in 2.0.0. Instead, use {@link #setLimit(int)} to tell the
 * RegionServer how many rows you want; once the number of returned rows reaches the limit, the
 * RegionServer closes the RegionScanner automatically. The new implementation also fetches data
 * when opening the scanner, so a scan operation can complete in a single RPC call. In addition,
 * {@link #setReadType(ReadType)} can be used to tell the RegionServer to use pread explicitly.
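 * <p>
 * For example, a bounded scan over a single column family might look like the following sketch;
 * {@code table} is assumed to be an already opened {@link Table} instance:
 *
 * <pre>{@code
 * Scan scan = new Scan()
 *     .withStartRow(Bytes.toBytes("row-0000"))   // start key (inclusive by default)
 *     .withStopRow(Bytes.toBytes("row-9999"))    // stop key (exclusive by default)
 *     .addFamily(Bytes.toBytes("cf"))            // restrict to one column family
 *     .setLimit(100)                             // return at most 100 rows
 *     .setReadType(Scan.ReadType.PREAD);         // short scan, use pread
 * try (ResultScanner scanner = table.getScanner(scan)) {
 *   for (Result result : scanner) {
 *     // process each row's Result here
 *   }
 * }
 * }</pre>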
074 * <p>
075 * Expert: To explicitly disable server-side block caching for this scan, execute
076 * {@link #setCacheBlocks(boolean)}.
077 * <p>
 * <em>Note:</em> Usage alters Scan instances. Internally, attributes are updated as the Scan runs
 * and, if enabled, metrics accumulate in the Scan instance. Keep this in mind when cloning or
 * reusing a Scan instance; it is safer to create a new Scan instance per usage.
082 */
083@InterfaceAudience.Public
084public class Scan extends Query {
085  private static final Logger LOG = LoggerFactory.getLogger(Scan.class);
086
087  private static final String RAW_ATTR = "_raw_";
088
089  private byte[] startRow = HConstants.EMPTY_START_ROW;
090  private boolean includeStartRow = true;
091  private byte[] stopRow = HConstants.EMPTY_END_ROW;
092  private boolean includeStopRow = false;
093  private int maxVersions = 1;
094  private int batch = -1;
095
096  /**
   * Partial {@link Result}s are {@link Result}s that must be combined to form a complete
   * {@link Result}. The {@link Result}s had to be returned in fragments (i.e. as partials) because
   * the size of the cells in the row exceeded the max result size on the server. Typically partial
   * results will be
100   * combined client side into complete results before being delivered to the caller. However, if
101   * this flag is set, the caller is indicating that they do not mind seeing partial results (i.e.
102   * they understand that the results returned from the Scanner may only represent part of a
103   * particular row). In such a case, any attempt to combine the partials into a complete result on
104   * the client side will be skipped, and the caller will be able to see the exact results returned
105   * from the server.
106   */
107  private boolean allowPartialResults = false;
108
109  private int storeLimit = -1;
110  private int storeOffset = 0;
111
112  private static final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
113
  // If an application wants to use multiple scans over different tables, each scan must
  // define this attribute with the appropriate table name by calling
  // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
  public static final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";
  private static final String SCAN_ATTRIBUTES_METRICS_BY_REGION_ENABLE =
    "scan.attributes.metrics.byregion.enable";
120
121  /**
   * -1 means no caching is specified and the value of
   * {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} (defaulting to
   * {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_CACHING}) will be used.
124   */
125  private int caching = -1;
126  private long maxResultSize = -1;
127  private boolean cacheBlocks = true;
128  private boolean reversed = false;
129  private TimeRange tr = TimeRange.allTime();
130  private Map<byte[], NavigableSet<byte[]>> familyMap =
131    new TreeMap<byte[], NavigableSet<byte[]>>(Bytes.BYTES_COMPARATOR);
132  private Boolean asyncPrefetch = null;
133
134  /**
   * Parameter name for the client scanner sync/async prefetch toggle. When using the async scanner,
   * data is prefetched from the server in the background. The parameter currently has no effect if
   * the user has set Scan#setSmall or Scan#setReversed.
138   */
139  public static final String HBASE_CLIENT_SCANNER_ASYNC_PREFETCH =
140    "hbase.client.scanner.async.prefetch";
141
142  /**
143   * Default value of {@link #HBASE_CLIENT_SCANNER_ASYNC_PREFETCH}.
144   */
145  public static final boolean DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = false;
146
147  /**
   * The mvcc read point to use when opening a scanner. Remember to clear it after switching regions
   * as the mvcc read point is only valid within region scope.
150   */
151  private long mvccReadPoint = -1L;
152
153  /**
   * The number of rows we want for this scan. We will terminate the scan if the number of returned
   * rows reaches this value.
156   */
157  private int limit = -1;
158
159  /**
160   * Control whether to use pread at server side.
161   */
162  private ReadType readType = ReadType.DEFAULT;
163
164  private boolean needCursorResult = false;
165
166  /**
167   * Create a Scan operation across all rows.
168   */
169  public Scan() {
170  }
171
172  /**
173   * Creates a new instance of this class while copying all values.
174   * @param scan The scan instance to copy from.
175   * @throws IOException When copying the values fails.
176   */
177  public Scan(Scan scan) throws IOException {
178    startRow = scan.getStartRow();
179    includeStartRow = scan.includeStartRow();
180    stopRow = scan.getStopRow();
181    includeStopRow = scan.includeStopRow();
182    maxVersions = scan.getMaxVersions();
183    batch = scan.getBatch();
184    storeLimit = scan.getMaxResultsPerColumnFamily();
185    storeOffset = scan.getRowOffsetPerColumnFamily();
186    caching = scan.getCaching();
187    maxResultSize = scan.getMaxResultSize();
188    cacheBlocks = scan.getCacheBlocks();
189    filter = scan.getFilter(); // clone?
190    loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
191    consistency = scan.getConsistency();
192    this.setIsolationLevel(scan.getIsolationLevel());
193    reversed = scan.isReversed();
194    asyncPrefetch = scan.isAsyncPrefetch();
195    allowPartialResults = scan.getAllowPartialResults();
196    tr = scan.getTimeRange(); // TimeRange is immutable
197    Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
198    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : fams.entrySet()) {
199      byte[] fam = entry.getKey();
200      NavigableSet<byte[]> cols = entry.getValue();
201      if (cols != null && cols.size() > 0) {
202        for (byte[] col : cols) {
203          addColumn(fam, col);
204        }
205      } else {
206        addFamily(fam);
207      }
208    }
209    for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
210      setAttribute(attr.getKey(), attr.getValue());
211    }
212    for (Map.Entry<byte[], TimeRange> entry : scan.getColumnFamilyTimeRange().entrySet()) {
213      TimeRange tr = entry.getValue();
214      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
215    }
216    this.mvccReadPoint = scan.getMvccReadPoint();
217    this.limit = scan.getLimit();
218    this.needCursorResult = scan.isNeedCursorResult();
219    setPriority(scan.getPriority());
220    readType = scan.getReadType();
221    super.setReplicaId(scan.getReplicaId());
222    super.setQueryMetricsEnabled(scan.isQueryMetricsEnabled());
223  }
224
225  /**
226   * Builds a scan object with the same specs as get.
227   * @param get get to model scan after
228   */
229  public Scan(Get get) {
230    this.startRow = get.getRow();
231    this.includeStartRow = true;
232    this.stopRow = get.getRow();
233    this.includeStopRow = true;
234    this.filter = get.getFilter();
235    this.cacheBlocks = get.getCacheBlocks();
236    this.maxVersions = get.getMaxVersions();
237    this.storeLimit = get.getMaxResultsPerColumnFamily();
238    this.storeOffset = get.getRowOffsetPerColumnFamily();
239    this.tr = get.getTimeRange();
240    this.familyMap = get.getFamilyMap();
241    this.asyncPrefetch = false;
242    this.consistency = get.getConsistency();
243    this.setIsolationLevel(get.getIsolationLevel());
244    this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
245    for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
246      setAttribute(attr.getKey(), attr.getValue());
247    }
248    for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) {
249      TimeRange tr = entry.getValue();
250      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
251    }
252    this.mvccReadPoint = -1L;
253    setPriority(get.getPriority());
254    super.setReplicaId(get.getReplicaId());
255    super.setQueryMetricsEnabled(get.isQueryMetricsEnabled());
256  }
257
258  public boolean isGetScan() {
259    return includeStartRow && includeStopRow
260      && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow);
261  }
262
263  /**
264   * Get all columns from the specified family.
265   * <p>
266   * Overrides previous calls to addColumn for this family.
267   * @param family family name
268   */
269  public Scan addFamily(byte[] family) {
270    familyMap.remove(family);
271    familyMap.put(family, null);
272    return this;
273  }
274
275  /**
276   * Get the column from the specified family with the specified qualifier.
277   * <p>
278   * Overrides previous calls to addFamily for this family.
279   * @param family    family name
280   * @param qualifier column qualifier
281   */
282  public Scan addColumn(byte[] family, byte[] qualifier) {
283    NavigableSet<byte[]> set = familyMap.get(family);
284    if (set == null) {
285      set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
286      familyMap.put(family, set);
287    }
288    if (qualifier == null) {
289      qualifier = HConstants.EMPTY_BYTE_ARRAY;
290    }
291    set.add(qualifier);
292    return this;
293  }
294
295  /**
   * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp). Note,
   * the default maximum number of versions to return is 1. If your time range spans more than one
   * version and you want all versions returned, raise the number of versions beyond the default.
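   * <p>
   * For example, a sketch that scans a 24-hour window and keeps every version within it (the
   * window boundaries are placeholders):
   *
   * <pre>{@code
   * long end = System.currentTimeMillis();
   * long start = end - 24L * 60 * 60 * 1000;   // 24 hours ago
   * Scan scan = new Scan()
   *     .setTimeRange(start, end)              // [start, end); may throw IOException
   *     .readAllVersions();                    // otherwise only one version is returned
   * }</pre>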
299   * @param minStamp minimum timestamp value, inclusive
300   * @param maxStamp maximum timestamp value, exclusive
301   * @see #readAllVersions()
302   * @see #readVersions(int)
303   */
304  public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
305    tr = TimeRange.between(minStamp, maxStamp);
306    return this;
307  }
308
309  /**
   * Get versions of columns with the specified timestamp. Note, the default maximum number of
   * versions to return is 1. If your time range spans more than one version and you want all
   * versions returned, raise the number of versions beyond the default.
313   * @param timestamp version timestamp
314   * @see #readAllVersions()
315   * @see #readVersions(int)
316   */
317  public Scan setTimestamp(long timestamp) {
318    try {
319      tr = TimeRange.at(timestamp);
320    } catch (Exception e) {
321      // This should never happen, unless integer overflow or something extremely wrong...
322      LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
323      throw e;
324    }
325
326    return this;
327  }
328
329  @Override
330  public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
331    super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
332    return this;
333  }
334
335  /**
336   * Set the start row of the scan.
337   * <p>
338   * If the specified row does not exist, the Scanner will start from the next closest row after the
339   * specified row.
340   * <p>
341   * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
342   * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
343   * unexpected or even undefined.
344   * </p>
345   * @param startRow row to start scanner at or after
346   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
347   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
348   */
349  public Scan withStartRow(byte[] startRow) {
350    return withStartRow(startRow, true);
351  }
352
353  /**
354   * Set the start row of the scan.
355   * <p>
   * If the specified row does not exist, or if {@code inclusive} is {@code false}, the Scanner
   * will start from the next closest row after the specified row.
358   * <p>
359   * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
360   * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
361   * unexpected or even undefined.
362   * </p>
363   * @param startRow  row to start scanner at or after
   * @param inclusive whether we should include the start row when scanning
365   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
366   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
367   */
368  public Scan withStartRow(byte[] startRow, boolean inclusive) {
369    if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
370      throw new IllegalArgumentException("startRow's length must be less than or equal to "
371        + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
372    }
373    this.startRow = startRow;
374    this.includeStartRow = inclusive;
375    return this;
376  }
377
378  /**
379   * Set the stop row of the scan.
380   * <p>
381   * The scan will include rows that are lexicographically less than the provided stopRow.
382   * <p>
383   * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
384   * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
385   * unexpected or even undefined.
386   * </p>
387   * @param stopRow row to end at (exclusive)
388   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
389   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
390   */
391  public Scan withStopRow(byte[] stopRow) {
392    return withStopRow(stopRow, false);
393  }
394
395  /**
396   * Set the stop row of the scan.
397   * <p>
398   * The scan will include rows that are lexicographically less than (or equal to if
399   * {@code inclusive} is {@code true}) the provided stopRow.
400   * <p>
401   * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
402   * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
403   * unexpected or even undefined.
404   * </p>
405   * @param stopRow   row to end at
   * @param inclusive whether we should include the stop row when scanning
407   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
408   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
409   */
410  public Scan withStopRow(byte[] stopRow, boolean inclusive) {
411    if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
412      throw new IllegalArgumentException("stopRow's length must be less than or equal to "
413        + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
414    }
415    this.stopRow = stopRow;
416    this.includeStopRow = inclusive;
417    return this;
418  }
419
420  /**
421   * <p>
422   * Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey
423   * starts with the specified prefix.
424   * </p>
425   * <p>
426   * This is a utility method that converts the desired rowPrefix into the appropriate values for
427   * the startRow and stopRow to achieve the desired result.
428   * </p>
429   * <p>
430   * This can safely be used in combination with setFilter.
431   * </p>
432   * <p>
433   * <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such
434   * a combination will yield unexpected and even undefined results.
435   * </p>
436   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
437   * @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method is considered to be
438   *             confusing as it does not use a {@link Filter} but uses setting the startRow and
439   *             stopRow instead. Use {@link #setStartStopRowForPrefixScan(byte[])} instead.
440   */
441  @Deprecated
442  public Scan setRowPrefixFilter(byte[] rowPrefix) {
443    return setStartStopRowForPrefixScan(rowPrefix);
444  }
445
446  /**
447   * <p>
448   * Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey
449   * starts with the specified prefix.
450   * </p>
451   * <p>
452   * This is a utility method that converts the desired rowPrefix into the appropriate values for
453   * the startRow and stopRow to achieve the desired result.
454   * </p>
455   * <p>
456   * This can safely be used in combination with setFilter.
457   * </p>
458   * <p>
459   * <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such
460   * a combination will yield unexpected and even undefined results.
461   * </p>
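   * <p>
   * A minimal sketch of a prefix scan, assuming row keys such as {@code "user123-..."}:
   *
   * <pre>{@code
   * Scan scan = new Scan()
   *     .setStartStopRowForPrefixScan(Bytes.toBytes("user123-"))
   *     .addFamily(Bytes.toBytes("cf"));
   * // startRow/stopRow are now set to cover exactly the rows beginning with "user123-"
   * }</pre>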
462   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
463   */
464  public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) {
465    if (rowPrefix == null) {
466      withStartRow(HConstants.EMPTY_START_ROW);
467      withStopRow(HConstants.EMPTY_END_ROW);
468    } else {
469      this.withStartRow(rowPrefix);
470      this.withStopRow(ClientUtil.calculateTheClosestNextRowKeyForPrefix(rowPrefix));
471    }
472    return this;
473  }
474
475  /**
476   * Get all available versions.
477   */
478  public Scan readAllVersions() {
479    this.maxVersions = Integer.MAX_VALUE;
480    return this;
481  }
482
483  /**
484   * Get up to the specified number of versions of each column.
485   * @param versions specified number of versions for each column
486   */
487  public Scan readVersions(int versions) {
488    this.maxVersions = versions;
489    return this;
490  }
491
492  /**
   * Set the maximum number of cells to return for each call to next(). Callers should be aware that
   * this is not equivalent to calling {@link #setAllowPartialResults(boolean)}. If you don't allow
   * partial results, the number of cells in each Result must be equal to your batch setting unless
   * it is the last Result for the current row. So this method is helpful for paging queries. If you
   * just want to prevent OOM at the client, it is better to use setAllowPartialResults(true).
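   * <p>
   * For example, a sketch that pages through wide rows 100 cells at a time (family name and batch
   * size are placeholders):
   *
   * <pre>{@code
   * Scan pagedScan = new Scan()
   *     .addFamily(Bytes.toBytes("cf"))
   *     .setBatch(100);                        // each Result holds at most 100 cells
   * // If the only goal is to avoid client-side OOM, prefer:
   * Scan partialScan = new Scan()
   *     .addFamily(Bytes.toBytes("cf"))
   *     .setAllowPartialResults(true);
   * }</pre>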
498   * @param batch the maximum number of values
499   * @see Result#mayHaveMoreCellsInRow()
500   */
501  public Scan setBatch(int batch) {
502    if (this.hasFilter() && this.filter.hasFilterRow()) {
503      throw new IncompatibleFilterException(
504        "Cannot set batch on a scan using a filter" + " that returns true for filter.hasFilterRow");
505    }
506    this.batch = batch;
507    return this;
508  }
509
510  /**
511   * Set the maximum number of values to return per row per Column Family
512   * @param limit the maximum number of values returned / row / CF
513   */
514  public Scan setMaxResultsPerColumnFamily(int limit) {
515    this.storeLimit = limit;
516    return this;
517  }
518
519  /**
520   * Set offset for the row per Column Family.
521   * @param offset is the number of kvs that will be skipped.
522   */
523  public Scan setRowOffsetPerColumnFamily(int offset) {
524    this.storeOffset = offset;
525    return this;
526  }
527
528  /**
529   * Set the number of rows for caching that will be passed to scanners. If not set, the
530   * Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will apply. Higher
531   * caching values will enable faster scanners but will use more memory.
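   * <p>
   * A sketch combining row caching with a result-size cap (the values are illustrative only):
   *
   * <pre>{@code
   * Scan scan = new Scan()
   *     .setCaching(500)                       // fetch up to 500 rows per scanner call
   *     .setMaxResultSize(2L * 1024 * 1024);   // but cap the data returned at roughly 2 MB
   * }</pre>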
532   * @param caching the number of rows for caching
533   */
534  public Scan setCaching(int caching) {
535    this.caching = caching;
536    return this;
537  }
538
539  /** Returns the maximum result size in bytes. See {@link #setMaxResultSize(long)} */
540  public long getMaxResultSize() {
541    return maxResultSize;
542  }
543
544  /**
545   * Set the maximum result size. The default is -1; this means that no specific maximum result size
546   * will be set for this scan, and the global configured value will be used instead. (Defaults to
547   * unlimited).
548   * @param maxResultSize The maximum result size in bytes.
549   */
550  public Scan setMaxResultSize(long maxResultSize) {
551    this.maxResultSize = maxResultSize;
552    return this;
553  }
554
555  @Override
556  public Scan setFilter(Filter filter) {
557    super.setFilter(filter);
558    return this;
559  }
560
561  /**
   * Set the familyMap.
563   * @param familyMap map of family to qualifier
564   */
565  public Scan setFamilyMap(Map<byte[], NavigableSet<byte[]>> familyMap) {
566    this.familyMap = familyMap;
567    return this;
568  }
569
570  /**
   * Get the familyMap.
572   */
573  public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
574    return this.familyMap;
575  }
576
577  /** Returns the number of families in familyMap */
578  public int numFamilies() {
579    if (hasFamilies()) {
580      return this.familyMap.size();
581    }
582    return 0;
583  }
584
585  /** Returns true if familyMap is non empty, false otherwise */
586  public boolean hasFamilies() {
587    return !this.familyMap.isEmpty();
588  }
589
590  /** Returns the keys of the familyMap */
591  public byte[][] getFamilies() {
592    if (hasFamilies()) {
593      return this.familyMap.keySet().toArray(new byte[0][0]);
594    }
595    return null;
596  }
597
598  /** Returns the startrow */
599  public byte[] getStartRow() {
600    return this.startRow;
601  }
602
  /** Returns true if we should include the start row when scanning */
604  public boolean includeStartRow() {
605    return includeStartRow;
606  }
607
608  /** Returns the stoprow */
609  public byte[] getStopRow() {
610    return this.stopRow;
611  }
612
  /** Returns true if we should include the stop row when scanning */
614  public boolean includeStopRow() {
615    return includeStopRow;
616  }
617
618  /** Returns the max number of versions to fetch */
619  public int getMaxVersions() {
620    return this.maxVersions;
621  }
622
623  /** Returns maximum number of values to return for a single call to next() */
624  public int getBatch() {
625    return this.batch;
626  }
627
628  /** Returns maximum number of values to return per row per CF */
629  public int getMaxResultsPerColumnFamily() {
630    return this.storeLimit;
631  }
632
633  /**
634   * Method for retrieving the scan's offset per row per column family (#kvs to be skipped)
635   * @return row offset
636   */
637  public int getRowOffsetPerColumnFamily() {
638    return this.storeOffset;
639  }
640
  /** Returns the caching setting, i.e. the number of rows fetched when calling next on a scanner */
642  public int getCaching() {
643    return this.caching;
644  }
645
646  /** Returns TimeRange */
647  public TimeRange getTimeRange() {
648    return this.tr;
649  }
650
651  /** Returns RowFilter */
652  @Override
653  public Filter getFilter() {
654    return filter;
655  }
656
  /** Returns true if a filter has been specified, false if not */
658  public boolean hasFilter() {
659    return filter != null;
660  }
661
662  /**
663   * Set whether blocks should be cached for this Scan.
664   * <p>
665   * This is true by default. When true, default settings of the table and family are used (this
666   * will never override caching blocks if the block cache is disabled for that family or entirely).
667   * @param cacheBlocks if false, default settings are overridden and blocks will not be cached
668   */
669  public Scan setCacheBlocks(boolean cacheBlocks) {
670    this.cacheBlocks = cacheBlocks;
671    return this;
672  }
673
674  /**
675   * Get whether blocks should be cached for this Scan.
676   * @return true if default caching should be used, false if blocks should not be cached
677   */
678  public boolean getCacheBlocks() {
679    return cacheBlocks;
680  }
681
682  /**
683   * Set whether this scan is a reversed one
684   * <p>
   * This is false by default, which means a forward (normal) scan.
   * @param reversed if true, the scan will be in backward order
687   */
688  public Scan setReversed(boolean reversed) {
689    this.reversed = reversed;
690    return this;
691  }
692
693  /**
694   * Get whether this scan is a reversed one.
   * @return true if backward scan, false if forward (default) scan
696   */
697  public boolean isReversed() {
698    return reversed;
699  }
700
701  /**
   * Set whether the caller wants to see partial results when the server returns fewer cells than
   * expected. This is helpful when scanning a huge row, to prevent OOM at the client. By default
   * this value is false and the complete results will be assembled client side before being
   * delivered to the caller.
706   * @see Result#mayHaveMoreCellsInRow()
707   * @see #setBatch(int)
708   */
709  public Scan setAllowPartialResults(final boolean allowPartialResults) {
710    this.allowPartialResults = allowPartialResults;
711    return this;
712  }
713
714  /**
   * Returns true when the caller of this scan understands that the results they will see may only
   * represent a partial portion of a row. The entire row would be retrieved by subsequent calls to
   * {@link ResultScanner#next()}.
718   */
719  public boolean getAllowPartialResults() {
720    return allowPartialResults;
721  }
722
723  @Override
724  public Scan setLoadColumnFamiliesOnDemand(boolean value) {
725    super.setLoadColumnFamiliesOnDemand(value);
726    return this;
727  }
728
729  /**
730   * Compile the table and column family (i.e. schema) information into a String. Useful for parsing
731   * and aggregation by debugging, logging, and administration tools.
732   */
733  @Override
734  public Map<String, Object> getFingerprint() {
735    Map<String, Object> map = new HashMap<>();
736    List<String> families = new ArrayList<>();
737    if (this.familyMap.isEmpty()) {
738      map.put("families", "ALL");
739      return map;
740    } else {
741      map.put("families", families);
742    }
743    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
744      families.add(Bytes.toStringBinary(entry.getKey()));
745    }
746    return map;
747  }
748
749  /**
750   * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
751   * Map along with the fingerprinted information. Useful for debugging, logging, and administration
752   * tools.
753   * @param maxCols a limit on the number of columns output prior to truncation
754   */
755  @Override
756  public Map<String, Object> toMap(int maxCols) {
757    // start with the fingerprint map and build on top of it
758    Map<String, Object> map = getFingerprint();
759    // map from families to column list replaces fingerprint's list of families
760    Map<String, List<String>> familyColumns = new HashMap<>();
761    map.put("families", familyColumns);
762    // add scalar information first
763    map.put("startRow", Bytes.toStringBinary(this.startRow));
764    map.put("stopRow", Bytes.toStringBinary(this.stopRow));
765    map.put("maxVersions", this.maxVersions);
766    map.put("batch", this.batch);
767    map.put("caching", this.caching);
768    map.put("maxResultSize", this.maxResultSize);
769    map.put("cacheBlocks", this.cacheBlocks);
770    map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
771    List<Long> timeRange = new ArrayList<>(2);
772    timeRange.add(this.tr.getMin());
773    timeRange.add(this.tr.getMax());
774    map.put("timeRange", timeRange);
775    int colCount = 0;
776    // iterate through affected families and list out up to maxCols columns
777    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
778      List<String> columns = new ArrayList<>();
779      familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
780      if (entry.getValue() == null) {
781        colCount++;
782        --maxCols;
783        columns.add("ALL");
784      } else {
785        colCount += entry.getValue().size();
786        if (maxCols <= 0) {
787          continue;
788        }
789        for (byte[] column : entry.getValue()) {
790          if (--maxCols <= 0) {
791            continue;
792          }
793          columns.add(Bytes.toStringBinary(column));
794        }
795      }
796    }
797    map.put("totalColumns", colCount);
798    if (this.filter != null) {
799      map.put("filter", this.filter.toString());
800    }
801    // add the id if set
802    if (getId() != null) {
803      map.put("id", getId());
804    }
805    map.put("includeStartRow", includeStartRow);
806    map.put("includeStopRow", includeStopRow);
807    map.put("allowPartialResults", allowPartialResults);
808    map.put("storeLimit", storeLimit);
809    map.put("storeOffset", storeOffset);
810    map.put("reversed", reversed);
811    if (null != asyncPrefetch) {
812      map.put("asyncPrefetch", asyncPrefetch);
813    }
814    map.put("mvccReadPoint", mvccReadPoint);
815    map.put("limit", limit);
816    map.put("readType", readType);
817    map.put("needCursorResult", needCursorResult);
818    map.put("targetReplicaId", targetReplicaId);
819    map.put("consistency", consistency);
820    if (!colFamTimeRangeMap.isEmpty()) {
821      Map<String, List<Long>> colFamTimeRangeMapStr = colFamTimeRangeMap.entrySet().stream()
822        .collect(Collectors.toMap((e) -> Bytes.toStringBinary(e.getKey()), e -> {
823          TimeRange value = e.getValue();
824          List<Long> rangeList = new ArrayList<>();
825          rangeList.add(value.getMin());
826          rangeList.add(value.getMax());
827          return rangeList;
828        }));
829
830      map.put("colFamTimeRangeMap", colFamTimeRangeMapStr);
831    }
832    map.put("priority", getPriority());
833    map.put("queryMetricsEnabled", queryMetricsEnabled);
834    return map;
835  }
836
837  /**
   * Enable/disable "raw" mode for this scan. If "raw" is enabled the scan will return all delete
   * markers and deleted rows that have not yet been collected. This is mostly useful for Scans on
   * column families that have KEEP_DELETED_CELLS enabled. It is an error to specify any column when
   * "raw" is set.
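   * <p>
   * A minimal sketch; note that no column is specified, since that would be an error in raw mode:
   *
   * <pre>{@code
   * Scan rawScan = new Scan()
   *     .setRaw(true)            // include delete markers and not-yet-collected deleted data
   *     .readAllVersions();      // raw scans are typically combined with reading all versions
   * }</pre>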
842   * @param raw True/False to enable/disable "raw" mode.
843   */
844  public Scan setRaw(boolean raw) {
845    setAttribute(RAW_ATTR, Bytes.toBytes(raw));
846    return this;
847  }
848
849  /** Returns True if this Scan is in "raw" mode. */
850  public boolean isRaw() {
851    byte[] attr = getAttribute(RAW_ATTR);
852    return attr == null ? false : Bytes.toBoolean(attr);
853  }
854
855  @Override
856  public Scan setAttribute(String name, byte[] value) {
857    super.setAttribute(name, value);
858    return this;
859  }
860
861  @Override
862  public Scan setId(String id) {
863    super.setId(id);
864    return this;
865  }
866
867  @Override
868  public Scan setAuthorizations(Authorizations authorizations) {
869    super.setAuthorizations(authorizations);
870    return this;
871  }
872
873  @Override
874  public Scan setACL(Map<String, Permission> perms) {
875    super.setACL(perms);
876    return this;
877  }
878
879  @Override
880  public Scan setACL(String user, Permission perms) {
881    super.setACL(user, perms);
882    return this;
883  }
884
885  @Override
886  public Scan setConsistency(Consistency consistency) {
887    super.setConsistency(consistency);
888    return this;
889  }
890
891  @Override
892  public Scan setReplicaId(int Id) {
893    super.setReplicaId(Id);
894    return this;
895  }
896
897  @Override
898  public Scan setIsolationLevel(IsolationLevel level) {
899    super.setIsolationLevel(level);
900    return this;
901  }
902
903  @Override
904  public Scan setPriority(int priority) {
905    super.setPriority(priority);
906    return this;
907  }
908
909  /**
   * Enable collection of {@link ScanMetrics}. For advanced users. Disabling scan metrics also
   * disables region level scan metrics.
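   * <p>
   * A sketch of collecting metrics for a scan; it assumes the {@link ResultScanner} implementation
   * exposes the accumulated {@link ScanMetrics} via {@code getScanMetrics()} and that {@code table}
   * is an open Table:
   *
   * <pre>{@code
   * Scan scan = new Scan().setScanMetricsEnabled(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // consume results
   *   }
   *   ScanMetrics metrics = scanner.getScanMetrics();   // assumed accessor
   * }
   * }</pre>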
912   * @param enabled Set to true to enable accumulating scan metrics
913   */
914  public Scan setScanMetricsEnabled(final boolean enabled) {
915    setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
916    if (!enabled) {
917      setEnableScanMetricsByRegion(false);
918    }
919    return this;
920  }
921
922  /** Returns True if collection of scan metrics is enabled. For advanced users. */
923  public boolean isScanMetricsEnabled() {
924    byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
925    return attr == null ? false : Bytes.toBoolean(attr);
926  }
927
928  public Boolean isAsyncPrefetch() {
929    return asyncPrefetch;
930  }
931
932  /**
933   * @deprecated Since 3.0.0, will be removed in 4.0.0. After building sync client upon async
934   *             client, the implementation is always 'async prefetch', so this flag is useless now.
935   */
936  @Deprecated
937  public Scan setAsyncPrefetch(boolean asyncPrefetch) {
938    this.asyncPrefetch = asyncPrefetch;
939    return this;
940  }
941
942  /** Returns the limit of rows for this scan */
943  public int getLimit() {
944    return limit;
945  }
946
947  /**
948   * Set the limit of rows for this scan. We will terminate the scan if the number of returned rows
949   * reaches this value.
950   * <p>
   * This condition is tested last, after all other conditions such as stopRow, filter, etc.
952   * @param limit the limit of rows for this scan
953   */
954  public Scan setLimit(int limit) {
955    this.limit = limit;
956    return this;
957  }
958
959  /**
960   * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
961   * set {@code readType} to {@link ReadType#PREAD}.
962   */
963  public Scan setOneRowLimit() {
964    return setLimit(1).setReadType(ReadType.PREAD);
965  }
966
967  @InterfaceAudience.Public
968  public enum ReadType {
969    DEFAULT,
970    STREAM,
971    PREAD
972  }
973
974  /** Returns the read type for this scan */
975  public ReadType getReadType() {
976    return readType;
977  }
978
979  /**
980   * Set the read type for this scan.
981   * <p>
   * Notice that we may choose to use pread even if you specify {@link ReadType#STREAM} here. For
983   * example, we will always use pread if this is a get scan.
984   */
985  public Scan setReadType(ReadType readType) {
986    this.readType = readType;
987    return this;
988  }
989
990  /**
991   * Get the mvcc read point used to open a scanner.
992   */
993  long getMvccReadPoint() {
994    return mvccReadPoint;
995  }
996
997  /**
998   * Set the mvcc read point used to open a scanner.
999   */
1000  Scan setMvccReadPoint(long mvccReadPoint) {
1001    this.mvccReadPoint = mvccReadPoint;
1002    return this;
1003  }
1004
1005  /**
1006   * Set the mvcc read point to -1 which means do not use it.
1007   */
1008  Scan resetMvccReadPoint() {
1009    return setMvccReadPoint(-1L);
1010  }
1011
1012  /**
   * When the server is slow, or we scan a table with much deleted data, or we use a sparse filter,
   * the server will send heartbeats to prevent timeouts. However, the scanner only returns a Result
   * when the client can assemble one, so if there are many heartbeats the blocking time of
   * ResultScanner#next() may be very long, which is not friendly to online services. Set this to
   * true and you can get a special Result whose #isCursor() returns true and which does not contain
   * any real data. It only tells you where the server has scanned. You can call next to continue
   * scanning, or open a new scanner with this row key as the start row whenever you want. Users get
   * a cursor when, and only when, there is a response from the server but we cannot return a Result
   * to users, for example because the response is a heartbeat or because there are partial cells
   * but users do not allow partial results. Currently the cursor is at row level, which means the
   * special Result will only contain a row key. {@link Result#isCursor()} {@link Result#getCursor()}
   * {@link Cursor}
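   * <p>
   * A sketch of consuming cursor results; {@code scanner} is assumed to be a {@link ResultScanner}
   * opened with this scan (e.g. via {@code table.getScanner(scan)}):
   *
   * <pre>{@code
   * Scan scan = new Scan().setNeedCursorResult(true);
   * for (Result result : scanner) {
   *   if (result.isCursor()) {
   *     // no real data, only the position the server has scanned to
   *     byte[] lastScannedRow = result.getCursor().getRow();
   *     continue;
   *   }
   *   // a normal Result with real cells
   * }
   * }</pre>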
1024   */
1025  public Scan setNeedCursorResult(boolean needCursorResult) {
1026    this.needCursorResult = needCursorResult;
1027    return this;
1028  }
1029
1030  public boolean isNeedCursorResult() {
1031    return needCursorResult;
1032  }
1033
1034  /**
   * Create a new Scan from a cursor. It only sets the position information, such as the start row
   * key. The other settings (like column families, stop row, limit) should still be filled in by
   * the user.
1037   * {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor}
1038   */
1039  public static Scan createScanFromCursor(Cursor cursor) {
1040    return new Scan().withStartRow(cursor.getRow());
1041  }
1042
1043  /**
   * Enables region level scan metrics. If scan metrics are disabled, this first enables scan
   * metrics and then region level scan metrics.
1046   * @param enable Set to true to enable region level scan metrics.
1047   */
1048  public Scan setEnableScanMetricsByRegion(final boolean enable) {
1049    if (enable) {
1050      setScanMetricsEnabled(true);
1051    }
1052    setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_BY_REGION_ENABLE, Bytes.toBytes(enable));
1053    return this;
1054  }
1055
1056  public boolean isScanMetricsByRegionEnabled() {
1057    byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_BY_REGION_ENABLE);
1058    return attr != null && Bytes.toBoolean(attr);
1059  }
1060}