/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Used to perform Scan operations.
 * <p>
 * All operations are identical to {@link Get} with the exception of instantiation. Rather than
 * specifying a single row, an optional startRow and stopRow may be defined. If rows are not
 * specified, the Scanner will iterate over all rows.
 * <p>
 * To get all columns from all rows of a Table, create an instance with no constraints; use the
 * {@link #Scan()} constructor. To constrain the scan to specific column families, call
 * {@link #addFamily(byte[]) addFamily} for each family to retrieve on your Scan instance.
 * <p>
 * To get specific columns, call {@link #addColumn(byte[], byte[]) addColumn} for each column to
 * retrieve.
 * <p>
 * To only retrieve columns within a specific range of version timestamps, call
 * {@link #setTimeRange(long, long) setTimeRange}.
 * <p>
 * To only retrieve columns with a specific timestamp, call
 * {@link #setTimestamp(long) setTimestamp}.
 * <p>
 * To limit the number of versions of each column to be returned, call {@link #readVersions(int)}.
 * <p>
 * To limit the maximum number of values returned for each call to next(), call
 * {@link #setBatch(int) setBatch}.
 * <p>
 * To add a filter, call {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}.
 * <p>
 * Small scans are deprecated since 2.0.0. Instead, use the {@link #setLimit(int)} method to tell
 * the RegionServer how many rows you want: once the number of returned rows reaches the limit, the
 * RegionServer will close the RegionScanner automatically. The new implementation also fetches
 * data when opening the scanner, which means a scan operation can finish in a single RPC call. A
 * {@link #setReadType(ReadType)} method has also been introduced; you can use it to tell the
 * RegionServer to use pread explicitly (see the example below).
 * <p>
 * Expert: To explicitly disable server-side block caching for this scan, execute
 * {@link #setCacheBlocks(boolean)}.
 * <p>
 * <em>Note:</em> Usage alters Scan instances. Internally, attributes are updated as the Scan runs
 * and, if enabled, metrics accumulate in the Scan instance. Be aware of this when cloning a Scan
 * instance or reusing a created Scan instance; it is safer to create a new Scan instance per usage.
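 * <p>
 * A brief usage sketch follows (the table name, column family, qualifier and row keys are
 * illustrative placeholders, {@code connection} is assumed to be an already opened
 * {@link Connection}, and error handling is omitted):
 *
 * <pre>
 * try (Table table = connection.getTable(TableName.valueOf("test_table"));
 *   ResultScanner scanner = table.getScanner(new Scan()
 *     .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"))
 *     .withStartRow(Bytes.toBytes("row-000"))
 *     .withStopRow(Bytes.toBytes("row-100"))
 *     .setLimit(50)
 *     .setReadType(Scan.ReadType.PREAD))) {
 *   for (Result result : scanner) {
 *     // each Result holds the cells of one matching row
 *   }
 * }
 * </pre>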
 */
@InterfaceAudience.Public
public class Scan extends Query {
  private static final Logger LOG = LoggerFactory.getLogger(Scan.class);

  private static final String RAW_ATTR = "_raw_";

  private byte[] startRow = HConstants.EMPTY_START_ROW;
  private boolean includeStartRow = true;
  private byte[] stopRow = HConstants.EMPTY_END_ROW;
  private boolean includeStopRow = false;
  private int maxVersions = 1;
  private int batch = -1;

  /**
   * Partial {@link Result}s are {@link Result}s that must be combined to form a complete
   * {@link Result}. The {@link Result}s had to be returned in fragments (i.e. as partials) because
   * the size of the cells in the row exceeded max result size on the server. Typically partial
   * results will be combined client side into complete results before being delivered to the
   * caller. However, if this flag is set, the caller is indicating that they do not mind seeing
   * partial results (i.e. they understand that the results returned from the Scanner may only
   * represent part of a particular row). In such a case, any attempt to combine the partials into
   * a complete result on the client side will be skipped, and the caller will be able to see the
   * exact results returned from the server.
   */
  private boolean allowPartialResults = false;

  private int storeLimit = -1;
  private int storeOffset = 0;

  private static final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";

  // If an application wants to use multiple scans over different tables, each scan must
  // define this attribute with the appropriate table name by calling
  // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
  public static final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";

  /**
   * -1 means no caching specified and the value of {@link HConstants#HBASE_CLIENT_SCANNER_CACHING}
   * (defaulting to {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_CACHING}) will be used
   */
  private int caching = -1;
  private long maxResultSize = -1;
  private boolean cacheBlocks = true;
  private boolean reversed = false;
  private TimeRange tr = TimeRange.allTime();
  private Map<byte[], NavigableSet<byte[]>> familyMap =
    new TreeMap<byte[], NavigableSet<byte[]>>(Bytes.BYTES_COMPARATOR);
  private Boolean asyncPrefetch = null;

  /**
   * Parameter name for client scanner sync/async prefetch toggle. When using the async scanner,
   * prefetching data from the server is done in the background. The parameter currently has no
   * effect if the user has set Scan#setSmall or Scan#setReversed
   */
  public static final String HBASE_CLIENT_SCANNER_ASYNC_PREFETCH =
    "hbase.client.scanner.async.prefetch";

  /**
   * Default value of {@link #HBASE_CLIENT_SCANNER_ASYNC_PREFETCH}.
   */
  public static final boolean DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = false;

  /**
   * The mvcc read point to use when opening a scanner. Remember to clear it after switching
   * regions as the mvcc is only valid within region scope.
   */
  private long mvccReadPoint = -1L;

  /**
   * The number of rows we want for this scan. We will terminate the scan if the number of returned
   * rows reaches this value.
   */
  private int limit = -1;

  /**
   * Control whether to use pread at server side.
   */
  private ReadType readType = ReadType.DEFAULT;

  private boolean needCursorResult = false;

  /**
   * Create a Scan operation across all rows.
   */
  public Scan() {
  }

  /**
   * Creates a new instance of this class while copying all values.
   * @param scan The scan instance to copy from.
   * @throws IOException When copying the values fails.
   */
  public Scan(Scan scan) throws IOException {
    startRow = scan.getStartRow();
    includeStartRow = scan.includeStartRow();
    stopRow = scan.getStopRow();
    includeStopRow = scan.includeStopRow();
    maxVersions = scan.getMaxVersions();
    batch = scan.getBatch();
    storeLimit = scan.getMaxResultsPerColumnFamily();
    storeOffset = scan.getRowOffsetPerColumnFamily();
    caching = scan.getCaching();
    maxResultSize = scan.getMaxResultSize();
    cacheBlocks = scan.getCacheBlocks();
    filter = scan.getFilter(); // clone?
    loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
    consistency = scan.getConsistency();
    this.setIsolationLevel(scan.getIsolationLevel());
    reversed = scan.isReversed();
    asyncPrefetch = scan.isAsyncPrefetch();
    allowPartialResults = scan.getAllowPartialResults();
    tr = scan.getTimeRange(); // TimeRange is immutable
    Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : fams.entrySet()) {
      byte[] fam = entry.getKey();
      NavigableSet<byte[]> cols = entry.getValue();
      if (cols != null && cols.size() > 0) {
        for (byte[] col : cols) {
          addColumn(fam, col);
        }
      } else {
        addFamily(fam);
      }
    }
    for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
      setAttribute(attr.getKey(), attr.getValue());
    }
    for (Map.Entry<byte[], TimeRange> entry : scan.getColumnFamilyTimeRange().entrySet()) {
      TimeRange tr = entry.getValue();
      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
    }
    this.mvccReadPoint = scan.getMvccReadPoint();
    this.limit = scan.getLimit();
    this.needCursorResult = scan.isNeedCursorResult();
    setPriority(scan.getPriority());
    readType = scan.getReadType();
    super.setReplicaId(scan.getReplicaId());
  }

  /**
   * Builds a scan object with the same specs as get.
   * @param get get to model scan after
   */
  public Scan(Get get) {
    this.startRow = get.getRow();
    this.includeStartRow = true;
    this.stopRow = get.getRow();
    this.includeStopRow = true;
    this.filter = get.getFilter();
    this.cacheBlocks = get.getCacheBlocks();
    this.maxVersions = get.getMaxVersions();
    this.storeLimit = get.getMaxResultsPerColumnFamily();
    this.storeOffset = get.getRowOffsetPerColumnFamily();
    this.tr = get.getTimeRange();
    this.familyMap = get.getFamilyMap();
    this.asyncPrefetch = false;
    this.consistency = get.getConsistency();
    this.setIsolationLevel(get.getIsolationLevel());
    this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
    for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
      setAttribute(attr.getKey(), attr.getValue());
    }
    for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) {
      TimeRange tr = entry.getValue();
      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
    }
    this.mvccReadPoint = -1L;
    setPriority(get.getPriority());
    super.setReplicaId(get.getReplicaId());
  }

  public boolean isGetScan() {
    return includeStartRow && includeStopRow
      && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow);
  }

  /**
   * Get all columns from the specified family.
   * <p>
   * Overrides previous calls to addColumn for this family.
   * @param family family name
   */
  public Scan addFamily(byte[] family) {
    familyMap.remove(family);
    familyMap.put(family, null);
    return this;
  }

  /**
   * Get the column from the specified family with the specified qualifier.
   * <p>
   * Overrides previous calls to addFamily for this family.
   * @param family    family name
   * @param qualifier column qualifier
   */
  public Scan addColumn(byte[] family, byte[] qualifier) {
    NavigableSet<byte[]> set = familyMap.get(family);
    if (set == null) {
      set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
      familyMap.put(family, set);
    }
    if (qualifier == null) {
      qualifier = HConstants.EMPTY_BYTE_ARRAY;
    }
    set.add(qualifier);
    return this;
  }

  /**
   * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp). Note,
   * default maximum versions to return is 1. If your time range spans more than one version and you
   * want all versions returned, up the number of versions beyond the default.
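   * <p>
   * For example, a minimal sketch that reads every version written inside an assumed time window
   * (the family name and timestamps are placeholders, and the checked IOException is left to the
   * caller):
   *
   * <pre>
   * Scan scan =
   *   new Scan().addFamily(Bytes.toBytes("cf")).readAllVersions().setTimeRange(1000L, 2000L);
   * </pre>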
   * @param minStamp minimum timestamp value, inclusive
   * @param maxStamp maximum timestamp value, exclusive
   * @see #readAllVersions()
   * @see #readVersions(int)
   */
  public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
    tr = TimeRange.between(minStamp, maxStamp);
    return this;
  }

  /**
   * Get versions of columns with the specified timestamp. Note, default maximum versions to return
   * is 1. If your time range spans more than one version and you want all versions returned, up the
   * number of versions beyond the default.
   * @param timestamp version timestamp
   * @see #readAllVersions()
   * @see #readVersions(int)
   */
  public Scan setTimestamp(long timestamp) {
    try {
      tr = TimeRange.at(timestamp);
    } catch (Exception e) {
      // This should never happen, unless integer overflow or something extremely wrong...
      LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
      throw e;
    }

    return this;
  }

  @Override
  public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
    return (Scan) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
  }

  /**
   * Set the start row of the scan.
   * <p>
   * If the specified row does not exist, the Scanner will start from the next closest row after the
   * specified row.
   * <p>
   * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
   * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
   * unexpected or even undefined.
   * </p>
   * @param startRow row to start scanner at or after
   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStartRow(byte[] startRow) {
    return withStartRow(startRow, true);
  }

  /**
   * Set the start row of the scan.
   * <p>
   * If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner
   * will start from the next closest row after the specified row.
   * <p>
   * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
   * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
   * unexpected or even undefined.
   * </p>
   * @param startRow  row to start scanner at or after
   * @param inclusive whether we should include the start row when scanning
   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStartRow(byte[] startRow, boolean inclusive) {
    if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
      throw new IllegalArgumentException("startRow's length must be less than or equal to "
        + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
    }
    this.startRow = startRow;
    this.includeStartRow = inclusive;
    return this;
  }

  /**
   * Set the stop row of the scan.
   * <p>
   * The scan will include rows that are lexicographically less than the provided stopRow.
   * <p>
   * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
   * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
   * unexpected or even undefined.
   * </p>
   * @param stopRow row to end at (exclusive)
   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStopRow(byte[] stopRow) {
    return withStopRow(stopRow, false);
  }

  /**
   * Set the stop row of the scan.
   * <p>
   * The scan will include rows that are lexicographically less than (or equal to if
   * {@code inclusive} is {@code true}) the provided stopRow.
   * <p>
   * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
   * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
   * unexpected or even undefined.
   * </p>
   * @param stopRow   row to end at
   * @param inclusive whether we should include the stop row when scanning
   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStopRow(byte[] stopRow, boolean inclusive) {
    if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
      throw new IllegalArgumentException("stopRow's length must be less than or equal to "
        + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
    }
    this.stopRow = stopRow;
    this.includeStopRow = inclusive;
    return this;
  }

  /**
   * <p>
   * Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey
   * starts with the specified prefix.
   * </p>
   * <p>
   * This is a utility method that converts the desired rowPrefix into the appropriate values for
   * the startRow and stopRow to achieve the desired result.
   * </p>
   * <p>
   * This can safely be used in combination with setFilter.
   * </p>
   * <p>
   * <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such
   * a combination will yield unexpected and even undefined results.
   * </p>
   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
   * @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method is considered to be
   *             confusing as it does not use a {@link Filter} but uses setting the startRow and
   *             stopRow instead. Use {@link #setStartStopRowForPrefixScan(byte[])} instead.
   */
  @Deprecated
  public Scan setRowPrefixFilter(byte[] rowPrefix) {
    return setStartStopRowForPrefixScan(rowPrefix);
  }

  /**
   * <p>
   * Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey
   * starts with the specified prefix.
   * </p>
   * <p>
   * This is a utility method that converts the desired rowPrefix into the appropriate values for
   * the startRow and stopRow to achieve the desired result.
   * </p>
   * <p>
   * This can safely be used in combination with setFilter.
   * </p>
   * <p>
   * <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such
   * a combination will yield unexpected and even undefined results.
   * </p>
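   * <p>
   * A short sketch, assuming rows are keyed with a user id prefix such as {@code "user123|"}:
   *
   * <pre>
   * Scan scan = new Scan().setStartStopRowForPrefixScan(Bytes.toBytes("user123|"));
   * // only rows whose key starts with "user123|" will be returned
   * </pre>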
   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
   */
  public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) {
    if (rowPrefix == null) {
      withStartRow(HConstants.EMPTY_START_ROW);
      withStopRow(HConstants.EMPTY_END_ROW);
    } else {
      this.withStartRow(rowPrefix);
      this.withStopRow(ClientUtil.calculateTheClosestNextRowKeyForPrefix(rowPrefix));
    }
    return this;
  }

  /**
   * Get all available versions.
   */
  public Scan readAllVersions() {
    this.maxVersions = Integer.MAX_VALUE;
    return this;
  }

  /**
   * Get up to the specified number of versions of each column.
   * @param versions specified number of versions for each column
   */
  public Scan readVersions(int versions) {
    this.maxVersions = versions;
    return this;
  }

  /**
   * Set the maximum number of cells to return for each call to next(). Callers should be aware that
   * this is not equivalent to calling {@link #setAllowPartialResults(boolean)}. If you don't allow
   * partial results, the number of cells in each Result must be equal to your batch setting unless
   * it is the last Result for the current row. So this method is helpful in paging queries. If you
   * just want to prevent OOM at the client, setAllowPartialResults(true) is the better choice.
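   * <p>
   * A sketch of a batched scan over wide rows (the column family name is a placeholder):
   *
   * <pre>
   * Scan scan = new Scan().addFamily(Bytes.toBytes("cf")).setBatch(100);
   * // each Result now carries at most 100 cells; a single row may span several consecutive
   * // Results, see Result#mayHaveMoreCellsInRow()
   * </pre>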
   * @param batch the maximum number of values
   * @see Result#mayHaveMoreCellsInRow()
   */
  public Scan setBatch(int batch) {
    if (this.hasFilter() && this.filter.hasFilterRow()) {
      throw new IncompatibleFilterException(
        "Cannot set batch on a scan using a filter" + " that returns true for filter.hasFilterRow");
    }
    this.batch = batch;
    return this;
  }

  /**
   * Set the maximum number of values to return per row per Column Family
   * @param limit the maximum number of values returned / row / CF
   */
  public Scan setMaxResultsPerColumnFamily(int limit) {
    this.storeLimit = limit;
    return this;
  }

  /**
   * Set offset for the row per Column Family.
   * @param offset is the number of kvs that will be skipped.
   */
  public Scan setRowOffsetPerColumnFamily(int offset) {
    this.storeOffset = offset;
    return this;
  }

  /**
   * Set the number of rows for caching that will be passed to scanners. If not set, the
   * Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will apply. Higher
   * caching values will enable faster scanners but will use more memory.
   * @param caching the number of rows for caching
   */
  public Scan setCaching(int caching) {
    this.caching = caching;
    return this;
  }

  /** Returns the maximum result size in bytes. See {@link #setMaxResultSize(long)} */
  public long getMaxResultSize() {
    return maxResultSize;
  }

  /**
   * Set the maximum result size. The default is -1; this means that no specific maximum result size
   * will be set for this scan, and the global configured value will be used instead. (Defaults to
   * unlimited).
   * @param maxResultSize The maximum result size in bytes.
   */
  public Scan setMaxResultSize(long maxResultSize) {
    this.maxResultSize = maxResultSize;
    return this;
  }

  @Override
  public Scan setFilter(Filter filter) {
    super.setFilter(filter);
    return this;
  }

  /**
   * Set the familyMap.
   * @param familyMap map of family to qualifier
   */
  public Scan setFamilyMap(Map<byte[], NavigableSet<byte[]>> familyMap) {
    this.familyMap = familyMap;
    return this;
  }

  /**
   * Get the familyMap.
   */
  public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
    return this.familyMap;
  }

  /** Returns the number of families in familyMap */
  public int numFamilies() {
    if (hasFamilies()) {
      return this.familyMap.size();
    }
    return 0;
  }

  /** Returns true if familyMap is non-empty, false otherwise */
  public boolean hasFamilies() {
    return !this.familyMap.isEmpty();
  }

  /** Returns the keys of the familyMap */
  public byte[][] getFamilies() {
    if (hasFamilies()) {
      return this.familyMap.keySet().toArray(new byte[0][0]);
    }
    return null;
  }

  /** Returns the startrow */
  public byte[] getStartRow() {
    return this.startRow;
  }

  /** Returns whether we should include the start row when scanning */
  public boolean includeStartRow() {
    return includeStartRow;
  }

  /** Returns the stoprow */
  public byte[] getStopRow() {
    return this.stopRow;
  }

  /** Returns whether we should include the stop row when scanning */
  public boolean includeStopRow() {
    return includeStopRow;
  }

  /** Returns the max number of versions to fetch */
  public int getMaxVersions() {
    return this.maxVersions;
  }

  /** Returns maximum number of values to return for a single call to next() */
  public int getBatch() {
    return this.batch;
  }

  /** Returns maximum number of values to return per row per CF */
  public int getMaxResultsPerColumnFamily() {
    return this.storeLimit;
  }

  /**
   * Method for retrieving the scan's offset per row per column family (#kvs to be skipped)
   * @return row offset
   */
  public int getRowOffsetPerColumnFamily() {
    return this.storeOffset;
  }

  /** Returns the number of rows for caching that will be passed to scanners */
  public int getCaching() {
    return this.caching;
  }

  /** Returns the time range for this scan */
  public TimeRange getTimeRange() {
    return this.tr;
  }

  /** Returns the filter applied to this scan, or {@code null} if none has been set */
  @Override
  public Filter getFilter() {
    return filter;
  }

  /** Returns true if a filter has been specified, false if not */
  public boolean hasFilter() {
    return filter != null;
  }

  /**
   * Set whether blocks should be cached for this Scan.
   * <p>
   * This is true by default. When true, default settings of the table and family are used (this
   * will never override caching blocks if the block cache is disabled for that family or entirely).
   * @param cacheBlocks if false, default settings are overridden and blocks will not be cached
   */
  public Scan setCacheBlocks(boolean cacheBlocks) {
    this.cacheBlocks = cacheBlocks;
    return this;
  }

  /**
   * Get whether blocks should be cached for this Scan.
   * @return true if default caching should be used, false if blocks should not be cached
   */
  public boolean getCacheBlocks() {
    return cacheBlocks;
  }

  /**
   * Set whether this scan is a reversed one
   * <p>
   * This is false by default, which means a forward (normal) scan.
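   * <p>
   * A sketch of a reversed scan; note that the start row is the lexicographically larger boundary
   * (the row keys are placeholders):
   *
   * <pre>
   * Scan scan = new Scan().setReversed(true).withStartRow(Bytes.toBytes("row-999"))
   *   .withStopRow(Bytes.toBytes("row-000"));
   * </pre>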
   * @param reversed if true, scan will be in backward order
   */
  public Scan setReversed(boolean reversed) {
    this.reversed = reversed;
    return this;
  }

  /**
   * Get whether this scan is a reversed one.
   * @return true if backward scan, false if forward(default) scan
   */
  public boolean isReversed() {
    return reversed;
  }

  /**
   * Setting whether the caller wants to see the partial results when the server returns
   * less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at the client.
   * By default this value is false and the complete results will be assembled client side before
   * being delivered to the caller.
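   * <p>
   * A sketch of consuming partial results, assuming {@code scanner} was obtained from a Scan with
   * this flag set to {@code true}:
   *
   * <pre>
   * for (Result partial : scanner) {
   *   // process the cells received so far for this row
   *   if (!partial.mayHaveMoreCellsInRow()) {
   *     // the row this partial Result belongs to is now complete
   *   }
   * }
   * </pre>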
   * @see Result#mayHaveMoreCellsInRow()
   * @see #setBatch(int)
   */
  public Scan setAllowPartialResults(final boolean allowPartialResults) {
    this.allowPartialResults = allowPartialResults;
    return this;
  }

  /**
   * Returns true when the caller of this scan has indicated that they understand the results they
   * will see may only represent a partial portion of a row. The entire row would be retrieved by
   * subsequent calls to {@link ResultScanner#next()}
   */
  public boolean getAllowPartialResults() {
    return allowPartialResults;
  }

  @Override
  public Scan setLoadColumnFamiliesOnDemand(boolean value) {
    return (Scan) super.setLoadColumnFamiliesOnDemand(value);
  }

  /**
   * Compile the table and column family (i.e. schema) information into a String. Useful for parsing
   * and aggregation by debugging, logging, and administration tools.
   */
  @Override
  public Map<String, Object> getFingerprint() {
    Map<String, Object> map = new HashMap<>();
    List<String> families = new ArrayList<>();
    if (this.familyMap.isEmpty()) {
      map.put("families", "ALL");
      return map;
    } else {
      map.put("families", families);
    }
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
      families.add(Bytes.toStringBinary(entry.getKey()));
    }
    return map;
  }

  /**
   * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
   * Map along with the fingerprinted information. Useful for debugging, logging, and administration
   * tools.
   * @param maxCols a limit on the number of columns output prior to truncation
   */
  @Override
  public Map<String, Object> toMap(int maxCols) {
    // start with the fingerprint map and build on top of it
    Map<String, Object> map = getFingerprint();
    // map from families to column list replaces fingerprint's list of families
    Map<String, List<String>> familyColumns = new HashMap<>();
    map.put("families", familyColumns);
    // add scalar information first
    map.put("startRow", Bytes.toStringBinary(this.startRow));
    map.put("stopRow", Bytes.toStringBinary(this.stopRow));
    map.put("maxVersions", this.maxVersions);
    map.put("batch", this.batch);
    map.put("caching", this.caching);
    map.put("maxResultSize", this.maxResultSize);
    map.put("cacheBlocks", this.cacheBlocks);
    map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
    List<Long> timeRange = new ArrayList<>(2);
    timeRange.add(this.tr.getMin());
    timeRange.add(this.tr.getMax());
    map.put("timeRange", timeRange);
    int colCount = 0;
    // iterate through affected families and list out up to maxCols columns
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
      List<String> columns = new ArrayList<>();
      familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
      if (entry.getValue() == null) {
        colCount++;
        --maxCols;
        columns.add("ALL");
      } else {
        colCount += entry.getValue().size();
        if (maxCols <= 0) {
          continue;
        }
        for (byte[] column : entry.getValue()) {
          if (--maxCols <= 0) {
            continue;
          }
          columns.add(Bytes.toStringBinary(column));
        }
      }
    }
    map.put("totalColumns", colCount);
    if (this.filter != null) {
      map.put("filter", this.filter.toString());
    }
    // add the id if set
    if (getId() != null) {
      map.put("id", getId());
    }
    return map;
  }

  /**
   * Enable/disable "raw" mode for this scan. If "raw" is enabled the scan will return all delete
   * markers and deleted cells that have not been garbage collected yet. This is mostly useful for
   * Scan on column families that have KEEP_DELETED_CELLS enabled. It is an error to specify any
   * column when "raw" is set.
   * @param raw True/False to enable/disable "raw" mode.
   */
  public Scan setRaw(boolean raw) {
    setAttribute(RAW_ATTR, Bytes.toBytes(raw));
    return this;
  }

  /** Returns True if this Scan is in "raw" mode. */
  public boolean isRaw() {
    byte[] attr = getAttribute(RAW_ATTR);
    return attr == null ? false : Bytes.toBoolean(attr);
  }

  @Override
  public Scan setAttribute(String name, byte[] value) {
    return (Scan) super.setAttribute(name, value);
  }

  @Override
  public Scan setId(String id) {
    return (Scan) super.setId(id);
  }

  @Override
  public Scan setAuthorizations(Authorizations authorizations) {
    return (Scan) super.setAuthorizations(authorizations);
  }

  @Override
  public Scan setACL(Map<String, Permission> perms) {
    return (Scan) super.setACL(perms);
  }

  @Override
  public Scan setACL(String user, Permission perms) {
    return (Scan) super.setACL(user, perms);
  }

  @Override
  public Scan setConsistency(Consistency consistency) {
    return (Scan) super.setConsistency(consistency);
  }

  @Override
  public Scan setReplicaId(int Id) {
    return (Scan) super.setReplicaId(Id);
  }

  @Override
  public Scan setIsolationLevel(IsolationLevel level) {
    return (Scan) super.setIsolationLevel(level);
  }

  @Override
  public Scan setPriority(int priority) {
    return (Scan) super.setPriority(priority);
  }

  /**
   * Enable collection of {@link ScanMetrics}. For advanced users.
   * @param enabled Set to true to enable accumulating scan metrics
   */
  public Scan setScanMetricsEnabled(final boolean enabled) {
    setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
    return this;
  }

  /** Returns True if collection of scan metrics is enabled. For advanced users. */
  public boolean isScanMetricsEnabled() {
    byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
    return attr == null ? false : Bytes.toBoolean(attr);
  }

  public Boolean isAsyncPrefetch() {
    return asyncPrefetch;
  }

  /**
   * @deprecated Since 3.0.0, will be removed in 4.0.0. After building sync client upon async
   *             client, the implementation is always 'async prefetch', so this flag is useless now.
   */
  @Deprecated
  public Scan setAsyncPrefetch(boolean asyncPrefetch) {
    this.asyncPrefetch = asyncPrefetch;
    return this;
  }

  /** Returns the limit of rows for this scan */
  public int getLimit() {
    return limit;
  }

  /**
   * Set the limit of rows for this scan. We will terminate the scan if the number of returned rows
   * reaches this value.
   * <p>
   * This condition will be tested last, after all other conditions such as stopRow, filter, etc.
   * @param limit the limit of rows for this scan
   */
  public Scan setLimit(int limit) {
    this.limit = limit;
    return this;
  }

  /**
   * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
   * set {@code readType} to {@link ReadType#PREAD}.
   */
  public Scan setOneRowLimit() {
    return setLimit(1).setReadType(ReadType.PREAD);
  }

  @InterfaceAudience.Public
  public enum ReadType {
    DEFAULT,
    STREAM,
    PREAD
  }

  /** Returns the read type for this scan */
  public ReadType getReadType() {
    return readType;
  }

  /**
   * Set the read type for this scan.
   * <p>
   * Notice that we may choose to use pread even if you specify {@link ReadType#STREAM} here. For
   * example, we will always use pread if this is a get scan.
   */
  public Scan setReadType(ReadType readType) {
    this.readType = readType;
    return this;
  }

  /**
   * Get the mvcc read point used to open a scanner.
   */
  long getMvccReadPoint() {
    return mvccReadPoint;
  }

  /**
   * Set the mvcc read point used to open a scanner.
   */
  Scan setMvccReadPoint(long mvccReadPoint) {
    this.mvccReadPoint = mvccReadPoint;
    return this;
  }

  /**
   * Set the mvcc read point to -1 which means do not use it.
   */
  Scan resetMvccReadPoint() {
    return setMvccReadPoint(-1L);
  }

  /**
   * When the server is slow, or we scan a table with a lot of deleted data, or we use a sparse
   * filter, the server will send back heartbeat messages to prevent the scan from timing out.
   * However, the scanner only returns a Result to the caller when it actually has one, so if there
   * are many heartbeats the blocking time of ResultScanner#next() may be very long, which is not
   * friendly to online services. Set this to true and you can get a special Result whose
   * #isCursor() returns true and which does not contain any real data. It only tells you where the
   * server has scanned to. You can call next to continue scanning, or open a new scanner with this
   * row key as the start row whenever you want. A cursor is returned when and only when there is a
   * response from the server but we cannot return a Result to the caller, for example because the
   * response is a heartbeat or because there are partial cells but the caller does not allow
   * partial results. Currently the cursor is at row level, which means the special Result only
   * contains a row key. {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor}
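   * <p>
   * A sketch of handling cursor results, assuming {@code scanner} came from a Scan with this flag
   * enabled:
   *
   * <pre>
   * for (Result result : scanner) {
   *   if (result.isCursor()) {
   *     // no real data; result.getCursor() tells us how far the server has scanned, and
   *     // Scan.createScanFromCursor(result.getCursor()) could be used to resume later
   *     continue;
   *   }
   *   // a normal Result carrying real cells
   * }
   * </pre>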
   */
  public Scan setNeedCursorResult(boolean needCursorResult) {
    this.needCursorResult = needCursorResult;
    return this;
  }

  public boolean isNeedCursorResult() {
    return needCursorResult;
  }

  /**
   * Create a new Scan with a cursor. It only sets the position information, such as the start row
   * key. The rest (such as column families, stop row, limit) should still be filled in by the user.
   * {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor}
   */
  public static Scan createScanFromCursor(Cursor cursor) {
    return new Scan().withStartRow(cursor.getRow());
  }
}