001/*
002 *
003 * Licensed to the Apache Software Foundation (ASF) under one
004 * or more contributor license agreements.  See the NOTICE file
005 * distributed with this work for additional information
006 * regarding copyright ownership.  The ASF licenses this file
007 * to you under the Apache License, Version 2.0 (the
008 * "License"); you may not use this file except in compliance
009 * with the License.  You may obtain a copy of the License at
010 *
011 *     http://www.apache.org/licenses/LICENSE-2.0
012 *
013 * Unless required by applicable law or agreed to in writing, software
014 * distributed under the License is distributed on an "AS IS" BASIS,
015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
016 * See the License for the specific language governing permissions and
017 * limitations under the License.
018 */
019
020package org.apache.hadoop.hbase.client;
021
022import java.io.IOException;
023import java.util.ArrayList;
024import java.util.Arrays;
025import java.util.HashMap;
026import java.util.List;
027import java.util.Map;
028import java.util.NavigableSet;
029import java.util.TreeMap;
030import java.util.TreeSet;
031
032import org.apache.hadoop.hbase.HConstants;
033import org.apache.yetus.audience.InterfaceAudience;
034import org.slf4j.Logger;
035import org.slf4j.LoggerFactory;
036import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
037import org.apache.hadoop.hbase.filter.Filter;
038import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
039import org.apache.hadoop.hbase.io.TimeRange;
040import org.apache.hadoop.hbase.security.access.Permission;
041import org.apache.hadoop.hbase.security.visibility.Authorizations;
042import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
043import org.apache.hadoop.hbase.util.Bytes;
044
045/**
046 * Used to perform Scan operations.
047 * <p>
048 * All operations are identical to {@link Get} with the exception of instantiation. Rather than
049 * specifying a single row, an optional startRow and stopRow may be defined. If rows are not
050 * specified, the Scanner will iterate over all rows.
051 * <p>
052 * To get all columns from all rows of a Table, create an instance with no constraints; use the
053 * {@link #Scan()} constructor. To constrain the scan to specific column families, call
054 * {@link #addFamily(byte[]) addFamily} for each family to retrieve on your Scan instance.
055 * <p>
056 * To get specific columns, call {@link #addColumn(byte[], byte[]) addColumn} for each column to
057 * retrieve.
058 * <p>
059 * To only retrieve columns within a specific range of version timestamps, call
060 * {@link #setTimeRange(long, long) setTimeRange}.
061 * <p>
 * To only retrieve columns with a specific timestamp, call {@link #setTimeStamp(long)
 * setTimeStamp}.
064 * <p>
065 * To limit the number of versions of each column to be returned, call {@link #setMaxVersions(int)
066 * setMaxVersions}.
067 * <p>
068 * To limit the maximum number of values returned for each call to next(), call
069 * {@link #setBatch(int) setBatch}.
070 * <p>
071 * To add a filter, call {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}.
072 * <p>
 * Small scans are deprecated as of 2.0.0. Instead, use the {@link #setLimit(int)} method on the
 * Scan object to tell the RegionServer how many rows we want. If the number of returned rows
 * reaches the limit, the RegionServer will close the RegionScanner automatically. The new
 * implementation also fetches data when opening the scanner, which means a scan operation can
 * finish in one RPC call. We have also introduced a {@link #setReadType(ReadType)} method; you can
 * use it to tell the RegionServer to use pread explicitly.
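 * A short sketch of the replacement (the limit value is illustrative):
 * <pre>
 * Scan scan = new Scan().setLimit(100).setReadType(Scan.ReadType.PREAD);
 * </pre>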
079 * <p>
080 * Expert: To explicitly disable server-side block caching for this scan, execute
081 * {@link #setCacheBlocks(boolean)}.
082 * <p>
 * <em>Note:</em> Usage alters Scan instances. Internally, attributes are updated as the Scan runs
 * and, if enabled, metrics accumulate in the Scan instance. Be aware of this when cloning or
 * reusing a Scan instance; it is safer to create a new Scan instance per usage.
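 * <p>
 * A minimal usage sketch (the table, family, qualifier and row keys here are only illustrative,
 * and an already-open {@code Connection} named {@code connection} is assumed):
 * <pre>
 * // Scan rows in ["row-aaa", "row-zzz"), returning only family "cf", qualifier "q".
 * Scan scan = new Scan()
 *     .withStartRow(Bytes.toBytes("row-aaa"))
 *     .withStopRow(Bytes.toBytes("row-zzz"))
 *     .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
 * try (Table table = connection.getTable(TableName.valueOf("exampleTable"));
 *     ResultScanner scanner = table.getScanner(scan)) {
 *   for (Result result : scanner) {
 *     byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
 *     // process value ...
 *   }
 * }
 * </pre>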
087 */
088@InterfaceAudience.Public
089public class Scan extends Query {
090  private static final Logger LOG = LoggerFactory.getLogger(Scan.class);
091
092  private static final String RAW_ATTR = "_raw_";
093
094  private byte[] startRow = HConstants.EMPTY_START_ROW;
095  private boolean includeStartRow = true;
096  private byte[] stopRow  = HConstants.EMPTY_END_ROW;
097  private boolean includeStopRow = false;
098  private int maxVersions = 1;
099  private int batch = -1;
100
101  /**
   * Partial {@link Result}s are {@link Result}s that must be combined to form a complete
   * {@link Result}.
103   * The {@link Result}s had to be returned in fragments (i.e. as partials) because the size of the
104   * cells in the row exceeded max result size on the server. Typically partial results will be
105   * combined client side into complete results before being delivered to the caller. However, if
106   * this flag is set, the caller is indicating that they do not mind seeing partial results (i.e.
107   * they understand that the results returned from the Scanner may only represent part of a
108   * particular row). In such a case, any attempt to combine the partials into a complete result on
109   * the client side will be skipped, and the caller will be able to see the exact results returned
110   * from the server.
111   */
112  private boolean allowPartialResults = false;
113
114  private int storeLimit = -1;
115  private int storeOffset = 0;
116
117  /**
118   * @deprecated since 1.0.0. Use {@link #setScanMetricsEnabled(boolean)}
119   */
120  // Make private or remove.
121  @Deprecated
122  static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
123
  /**
   * @deprecated Use {@link #getScanMetrics()} instead.
   */
127  // Make this private or remove.
128  @Deprecated
129  static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data";
130
131  // If an application wants to use multiple scans over different tables each scan must
132  // define this attribute with the appropriate table name by calling
133  // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
134  static public final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";
135
136  /**
137   * -1 means no caching specified and the value of {@link HConstants#HBASE_CLIENT_SCANNER_CACHING}
138   * (default to {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_CACHING}) will be used
139   */
140  private int caching = -1;
141  private long maxResultSize = -1;
142  private boolean cacheBlocks = true;
143  private boolean reversed = false;
144  private TimeRange tr = new TimeRange();
145  private Map<byte [], NavigableSet<byte []>> familyMap =
146    new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
147  private Boolean asyncPrefetch = null;
148
149  /**
150   * Parameter name for client scanner sync/async prefetch toggle.
   * When using the async scanner, prefetching data from the server is done in the background.
   * The parameter currently has no effect if the user has set Scan#setSmall or Scan#setReversed.
154   */
155  public static final String HBASE_CLIENT_SCANNER_ASYNC_PREFETCH =
156      "hbase.client.scanner.async.prefetch";
157
158  /**
159   * Default value of {@link #HBASE_CLIENT_SCANNER_ASYNC_PREFETCH}.
160   */
161  public static final boolean DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = false;
162
163  /**
   * Set it to true for a small scan to get better performance. A small scan should use pread,
   * while a big scan can use seek + read. Seek + read is fast but can cause two problems:
   * (1) resource contention and (2) too much network IO. [89-fb] Using pread for non-compaction
   * read requests: https://issues.apache.org/jira/browse/HBASE-7266. On the other hand, if set to
   * true, we do openScanner, next and closeScanner in one RPC call, which means better performance
   * for a small scan. [HBASE-9488]. Generally, if the scan range is within one data block (64KB),
   * it can be considered a small scan.
171   */
172  private boolean small = false;
173
174  /**
   * The mvcc read point to use when opening a scanner. Remember to clear it after switching
   * regions, as the mvcc is only valid within region scope.
177   */
178  private long mvccReadPoint = -1L;
179
180  /**
   * The number of rows we want for this scan. We will terminate the scan if the number of returned
   * rows reaches this value.
183   */
184  private int limit = -1;
185
186  /**
187   * Control whether to use pread at server side.
188   */
189  private ReadType readType = ReadType.DEFAULT;
190
191  private boolean needCursorResult = false;
192
193  /**
194   * Create a Scan operation across all rows.
195   */
196  public Scan() {}
197
198  /**
199   * @deprecated use {@code new Scan().withStartRow(startRow).setFilter(filter)} instead.
200   */
201  @Deprecated
202  public Scan(byte[] startRow, Filter filter) {
203    this(startRow);
204    this.filter = filter;
205  }
206
207  /**
208   * Create a Scan operation starting at the specified row.
209   * <p>
210   * If the specified row does not exist, the Scanner will start from the next closest row after the
211   * specified row.
212   * @param startRow row to start scanner at or after
213   * @deprecated use {@code new Scan().withStartRow(startRow)} instead.
214   */
215  @Deprecated
216  public Scan(byte[] startRow) {
217    setStartRow(startRow);
218  }
219
220  /**
221   * Create a Scan operation for the range of rows specified.
222   * @param startRow row to start scanner at or after (inclusive)
223   * @param stopRow row to stop scanner before (exclusive)
224   * @deprecated use {@code new Scan().withStartRow(startRow).withStopRow(stopRow)} instead.
225   */
226  @Deprecated
227  public Scan(byte[] startRow, byte[] stopRow) {
228    setStartRow(startRow);
229    setStopRow(stopRow);
230  }
231
232  /**
233   * Creates a new instance of this class while copying all values.
234   *
235   * @param scan  The scan instance to copy from.
236   * @throws IOException When copying the values fails.
237   */
238  public Scan(Scan scan) throws IOException {
239    startRow = scan.getStartRow();
240    includeStartRow = scan.includeStartRow();
241    stopRow  = scan.getStopRow();
242    includeStopRow = scan.includeStopRow();
243    maxVersions = scan.getMaxVersions();
244    batch = scan.getBatch();
245    storeLimit = scan.getMaxResultsPerColumnFamily();
246    storeOffset = scan.getRowOffsetPerColumnFamily();
247    caching = scan.getCaching();
248    maxResultSize = scan.getMaxResultSize();
249    cacheBlocks = scan.getCacheBlocks();
250    filter = scan.getFilter(); // clone?
251    loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
252    consistency = scan.getConsistency();
253    this.setIsolationLevel(scan.getIsolationLevel());
254    reversed = scan.isReversed();
255    asyncPrefetch = scan.isAsyncPrefetch();
256    small = scan.isSmall();
257    allowPartialResults = scan.getAllowPartialResults();
258    tr = scan.getTimeRange(); // TimeRange is immutable
259    Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
260    for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
261      byte [] fam = entry.getKey();
262      NavigableSet<byte[]> cols = entry.getValue();
263      if (cols != null && cols.size() > 0) {
264        for (byte[] col : cols) {
265          addColumn(fam, col);
266        }
267      } else {
268        addFamily(fam);
269      }
270    }
271    for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
272      setAttribute(attr.getKey(), attr.getValue());
273    }
274    for (Map.Entry<byte[], TimeRange> entry : scan.getColumnFamilyTimeRange().entrySet()) {
275      TimeRange tr = entry.getValue();
276      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
277    }
278    this.mvccReadPoint = scan.getMvccReadPoint();
279    this.limit = scan.getLimit();
280    this.needCursorResult = scan.isNeedCursorResult();
281    setPriority(scan.getPriority());
282  }
283
284  /**
285   * Builds a scan object with the same specs as get.
286   * @param get get to model scan after
287   */
288  public Scan(Get get) {
289    this.startRow = get.getRow();
290    this.includeStartRow = true;
291    this.stopRow = get.getRow();
292    this.includeStopRow = true;
293    this.filter = get.getFilter();
294    this.cacheBlocks = get.getCacheBlocks();
295    this.maxVersions = get.getMaxVersions();
296    this.storeLimit = get.getMaxResultsPerColumnFamily();
297    this.storeOffset = get.getRowOffsetPerColumnFamily();
298    this.tr = get.getTimeRange();
299    this.familyMap = get.getFamilyMap();
300    this.asyncPrefetch = false;
301    this.consistency = get.getConsistency();
302    this.setIsolationLevel(get.getIsolationLevel());
303    this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
304    for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
305      setAttribute(attr.getKey(), attr.getValue());
306    }
307    for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) {
308      TimeRange tr = entry.getValue();
309      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
310    }
311    this.mvccReadPoint = -1L;
312    setPriority(get.getPriority());
313  }
314
315  public boolean isGetScan() {
316    return includeStartRow && includeStopRow
317        && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow);
318  }
319
320  /**
321   * Get all columns from the specified family.
322   * <p>
323   * Overrides previous calls to addColumn for this family.
324   * @param family family name
325   * @return this
326   */
327  public Scan addFamily(byte [] family) {
328    familyMap.remove(family);
329    familyMap.put(family, null);
330    return this;
331  }
332
333  /**
334   * Get the column from the specified family with the specified qualifier.
335   * <p>
336   * Overrides previous calls to addFamily for this family.
337   * @param family family name
338   * @param qualifier column qualifier
339   * @return this
340   */
341  public Scan addColumn(byte [] family, byte [] qualifier) {
342    NavigableSet<byte []> set = familyMap.get(family);
343    if(set == null) {
344      set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
345      familyMap.put(family, set);
346    }
347    if (qualifier == null) {
348      qualifier = HConstants.EMPTY_BYTE_ARRAY;
349    }
350    set.add(qualifier);
351    return this;
352  }
353
354  /**
355   * Get versions of columns only within the specified timestamp range,
356   * [minStamp, maxStamp).  Note, default maximum versions to return is 1.  If
357   * your time range spans more than one version and you want all versions
358   * returned, up the number of versions beyond the default.
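   * <p>
   * A short sketch (the timestamps and family name are illustrative): combine a time range with
   * {@link #readAllVersions()} to get every version written inside the range.
   * <pre>
   * Scan scan = new Scan()
   *     .addFamily(Bytes.toBytes("cf"))
   *     .setTimeRange(1546300800000L, 1546387200000L) // [minStamp, maxStamp)
   *     .readAllVersions();
   * </pre>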
359   * @param minStamp minimum timestamp value, inclusive
360   * @param maxStamp maximum timestamp value, exclusive
361   * @see #setMaxVersions()
362   * @see #setMaxVersions(int)
363   * @return this
364   */
365  public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
366    tr = new TimeRange(minStamp, maxStamp);
367    return this;
368  }
369
370  /**
371   * Get versions of columns with the specified timestamp. Note, default maximum
372   * versions to return is 1.  If your time range spans more than one version
373   * and you want all versions returned, up the number of versions beyond the
   * default.
375   * @param timestamp version timestamp
376   * @see #setMaxVersions()
377   * @see #setMaxVersions(int)
378   * @return this
379   */
380  public Scan setTimeStamp(long timestamp)
381  throws IOException {
382    try {
383      tr = new TimeRange(timestamp, timestamp+1);
384    } catch(Exception e) {
385      // This should never happen, unless integer overflow or something extremely wrong...
386      LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
387      throw e;
388    }
389    return this;
390  }
391
  @Override
  public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
393    return (Scan) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
394  }
395
396  /**
397   * Set the start row of the scan.
398   * <p>
399   * If the specified row does not exist, the Scanner will start from the next closest row after the
400   * specified row.
401   * @param startRow row to start scanner at or after
402   * @return this
403   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
404   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   * @deprecated use {@link #withStartRow(byte[])} instead. This method may change the inclusiveness
   *             of the stop row to keep compatibility with the old behavior.
407   */
408  @Deprecated
409  public Scan setStartRow(byte[] startRow) {
410    withStartRow(startRow);
411    if (ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow)) {
412      // for keeping the old behavior that a scan with the same start and stop row is a get scan.
413      this.includeStopRow = true;
414    }
415    return this;
416  }
417
418  /**
419   * Set the start row of the scan.
420   * <p>
421   * If the specified row does not exist, the Scanner will start from the next closest row after the
422   * specified row.
423   * @param startRow row to start scanner at or after
424   * @return this
425   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
426   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
427   */
428  public Scan withStartRow(byte[] startRow) {
429    return withStartRow(startRow, true);
430  }
431
432  /**
433   * Set the start row of the scan.
434   * <p>
435   * If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner
436   * will start from the next closest row after the specified row.
437   * @param startRow row to start scanner at or after
   * @param inclusive whether we should include the start row when scanning
439   * @return this
440   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
441   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
442   */
443  public Scan withStartRow(byte[] startRow, boolean inclusive) {
444    if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
445      throw new IllegalArgumentException("startRow's length must be less than or equal to "
446          + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
447    }
448    this.startRow = startRow;
449    this.includeStartRow = inclusive;
450    return this;
451  }
452
453  /**
454   * Set the stop row of the scan.
455   * <p>
456   * The scan will include rows that are lexicographically less than the provided stopRow.
457   * <p>
458   * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
459   * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
460   * </p>
461   * @param stopRow row to end at (exclusive)
462   * @return this
463   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
464   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   * @deprecated use {@link #withStopRow(byte[])} instead. This method may change the inclusiveness
   *             of the stop row to keep compatibility with the old behavior.
467   */
468  @Deprecated
469  public Scan setStopRow(byte[] stopRow) {
470    withStopRow(stopRow);
471    if (ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow)) {
472      // for keeping the old behavior that a scan with the same start and stop row is a get scan.
473      this.includeStopRow = true;
474    }
475    return this;
476  }
477
478  /**
479   * Set the stop row of the scan.
480   * <p>
481   * The scan will include rows that are lexicographically less than the provided stopRow.
482   * <p>
483   * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
484   * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
485   * </p>
486   * @param stopRow row to end at (exclusive)
487   * @return this
488   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
489   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
490   */
491  public Scan withStopRow(byte[] stopRow) {
492    return withStopRow(stopRow, false);
493  }
494
495  /**
496   * Set the stop row of the scan.
497   * <p>
498   * The scan will include rows that are lexicographically less than (or equal to if
499   * {@code inclusive} is {@code true}) the provided stopRow.
500   * @param stopRow row to end at
   * @param inclusive whether we should include the stop row when scanning
502   * @return this
503   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
504   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
505   */
506  public Scan withStopRow(byte[] stopRow, boolean inclusive) {
507    if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
508      throw new IllegalArgumentException("stopRow's length must be less than or equal to "
509          + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
510    }
511    this.stopRow = stopRow;
512    this.includeStopRow = inclusive;
513    return this;
514  }
515
516  /**
517   * <p>Set a filter (using stopRow and startRow) so the result set only contains rows where the
518   * rowKey starts with the specified prefix.</p>
519   * <p>This is a utility method that converts the desired rowPrefix into the appropriate values
520   * for the startRow and stopRow to achieve the desired result.</p>
521   * <p>This can safely be used in combination with setFilter.</p>
522   * <p><b>NOTE: Doing a {@link #setStartRow(byte[])} and/or {@link #setStopRow(byte[])}
523   * after this method will yield undefined results.</b></p>
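   * <p>A short sketch (the prefix value is illustrative):</p>
   * <pre>
   * // Return only rows whose key starts with "user|42|".
   * Scan scan = new Scan().setRowPrefixFilter(Bytes.toBytes("user|42|"));
   * </pre>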
524   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
525   * @return this
526   */
527  public Scan setRowPrefixFilter(byte[] rowPrefix) {
528    if (rowPrefix == null) {
529      setStartRow(HConstants.EMPTY_START_ROW);
530      setStopRow(HConstants.EMPTY_END_ROW);
531    } else {
532      this.setStartRow(rowPrefix);
533      this.setStopRow(calculateTheClosestNextRowKeyForPrefix(rowPrefix));
534    }
535    return this;
536  }
537
538  /**
   * <p>When scanning for a prefix the scan should stop immediately after the last row that
540   * has the specified prefix. This method calculates the closest next rowKey immediately following
541   * the given rowKeyPrefix.</p>
542   * <p><b>IMPORTANT: This converts a rowKey<u>Prefix</u> into a rowKey</b>.</p>
543   * <p>If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can
544   * simply increment the last byte of the array.
545   * But if your application uses real binary rowids you may run into the scenario that your
546   * prefix is something like:</p>
547   * &nbsp;&nbsp;&nbsp;<b>{ 0x12, 0x23, 0xFF, 0xFF }</b><br/>
548   * Then this stopRow needs to be fed into the actual scan<br/>
549   * &nbsp;&nbsp;&nbsp;<b>{ 0x12, 0x24 }</b> (Notice that it is shorter now)<br/>
550   * This method calculates the correct stop row value for this usecase.
551   *
552   * @param rowKeyPrefix the rowKey<u>Prefix</u>.
553   * @return the closest next rowKey immediately following the given rowKeyPrefix.
554   */
555  private byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) {
556    // Essentially we are treating it like an 'unsigned very very long' and doing +1 manually.
557    // Search for the place where the trailing 0xFFs start
558    int offset = rowKeyPrefix.length;
559    while (offset > 0) {
560      if (rowKeyPrefix[offset - 1] != (byte) 0xFF) {
561        break;
562      }
563      offset--;
564    }
565
566    if (offset == 0) {
567      // We got an 0xFFFF... (only FFs) stopRow value which is
568      // the last possible prefix before the end of the table.
569      // So set it to stop at the 'end of the table'
570      return HConstants.EMPTY_END_ROW;
571    }
572
573    // Copy the right length of the original
574    byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
575    // And increment the last one
576    newStopRow[newStopRow.length - 1]++;
577    return newStopRow;
578  }
579
580  /**
581   * Get all available versions.
582   * @return this
   * @deprecated It is easy to confuse this with the column family's max versions, so use
   *             {@link #readAllVersions()} instead.
585   */
586  @Deprecated
587  public Scan setMaxVersions() {
588    return readAllVersions();
589  }
590
591  /**
592   * Get up to the specified number of versions of each column.
593   * @param maxVersions maximum versions for each column
594   * @return this
   * @deprecated It is easy to confuse this with the column family's max versions, so use
   *             {@link #readVersions(int)} instead.
597   */
598  @Deprecated
599  public Scan setMaxVersions(int maxVersions) {
600    return readVersions(maxVersions);
601  }
602
603  /**
604   * Get all available versions.
605   * @return this
606   */
607  public Scan readAllVersions() {
608    this.maxVersions = Integer.MAX_VALUE;
609    return this;
610  }
611
612  /**
613   * Get up to the specified number of versions of each column.
614   * @param versions specified number of versions for each column
615   * @return this
616   */
617  public Scan readVersions(int versions) {
618    this.maxVersions = versions;
619    return this;
620  }
621
622  /**
   * Set the maximum number of cells to return for each call to next(). Callers should be aware
   * that this is not equivalent to calling {@link #setAllowPartialResults(boolean)}.
   * If you don't allow partial results, the number of cells in each Result must equal your batch
   * setting unless it is the last Result for the current row, so this method is helpful for paging
   * queries. If you just want to prevent OOM at the client, it is better to use
   * setAllowPartialResults(true).
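   * <p>
   * A paging-style sketch (family name and batch size are illustrative; an already-open
   * {@code table} is assumed): with a batch of 10 and partial results not allowed, a row with 25
   * cells in the selected family comes back as three Results of 10, 10 and 5 cells.
   * <pre>
   * Scan scan = new Scan().addFamily(Bytes.toBytes("cf")).setBatch(10);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // at most 10 cells per Result; Result#mayHaveMoreCellsInRow() tells you whether the
   *     // current row continues in the next Result
   *   }
   * }
   * </pre>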
628   * @param batch the maximum number of values
629   * @see Result#mayHaveMoreCellsInRow()
630   */
631  public Scan setBatch(int batch) {
632    if (this.hasFilter() && this.filter.hasFilterRow()) {
633      throw new IncompatibleFilterException(
634        "Cannot set batch on a scan using a filter" +
635        " that returns true for filter.hasFilterRow");
636    }
637    this.batch = batch;
638    return this;
639  }
640
641  /**
642   * Set the maximum number of values to return per row per Column Family
643   * @param limit the maximum number of values returned / row / CF
644   */
645  public Scan setMaxResultsPerColumnFamily(int limit) {
646    this.storeLimit = limit;
647    return this;
648  }
649
650  /**
651   * Set offset for the row per Column Family.
652   * @param offset is the number of kvs that will be skipped.
653   */
654  public Scan setRowOffsetPerColumnFamily(int offset) {
655    this.storeOffset = offset;
656    return this;
657  }
658
659  /**
660   * Set the number of rows for caching that will be passed to scanners.
661   * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
662   * apply.
663   * Higher caching values will enable faster scanners but will use more memory.
664   * @param caching the number of rows for caching
665   */
666  public Scan setCaching(int caching) {
667    this.caching = caching;
668    return this;
669  }
670
671  /**
672   * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)}
673   */
674  public long getMaxResultSize() {
675    return maxResultSize;
676  }
677
678  /**
679   * Set the maximum result size. The default is -1; this means that no specific
680   * maximum result size will be set for this scan, and the global configured
681   * value will be used instead. (Defaults to unlimited).
682   *
683   * @param maxResultSize The maximum result size in bytes.
684   */
685  public Scan setMaxResultSize(long maxResultSize) {
686    this.maxResultSize = maxResultSize;
687    return this;
688  }
689
690  @Override
691  public Scan setFilter(Filter filter) {
692    super.setFilter(filter);
693    return this;
694  }
695
696  /**
697   * Setting the familyMap
698   * @param familyMap map of family to qualifier
699   * @return this
700   */
701  public Scan setFamilyMap(Map<byte [], NavigableSet<byte []>> familyMap) {
702    this.familyMap = familyMap;
703    return this;
704  }
705
706  /**
707   * Getting the familyMap
708   * @return familyMap
709   */
710  public Map<byte [], NavigableSet<byte []>> getFamilyMap() {
711    return this.familyMap;
712  }
713
714  /**
715   * @return the number of families in familyMap
716   */
717  public int numFamilies() {
718    if(hasFamilies()) {
719      return this.familyMap.size();
720    }
721    return 0;
722  }
723
724  /**
725   * @return true if familyMap is non empty, false otherwise
726   */
727  public boolean hasFamilies() {
728    return !this.familyMap.isEmpty();
729  }
730
731  /**
732   * @return the keys of the familyMap
733   */
734  public byte[][] getFamilies() {
735    if(hasFamilies()) {
736      return this.familyMap.keySet().toArray(new byte[0][0]);
737    }
738    return null;
739  }
740
741  /**
742   * @return the startrow
743   */
744  public byte [] getStartRow() {
745    return this.startRow;
746  }
747
748  /**
749   * @return if we should include start row when scan
750   */
751  public boolean includeStartRow() {
752    return includeStartRow;
753  }
754
755  /**
756   * @return the stoprow
757   */
758  public byte[] getStopRow() {
759    return this.stopRow;
760  }
761
762  /**
763   * @return if we should include stop row when scan
764   */
765  public boolean includeStopRow() {
766    return includeStopRow;
767  }
768
769  /**
770   * @return the max number of versions to fetch
771   */
772  public int getMaxVersions() {
773    return this.maxVersions;
774  }
775
776  /**
777   * @return maximum number of values to return for a single call to next()
778   */
779  public int getBatch() {
780    return this.batch;
781  }
782
783  /**
784   * @return maximum number of values to return per row per CF
785   */
786  public int getMaxResultsPerColumnFamily() {
787    return this.storeLimit;
788  }
789
790  /**
791   * Method for retrieving the scan's offset per row per column
792   * family (#kvs to be skipped)
793   * @return row offset
794   */
795  public int getRowOffsetPerColumnFamily() {
796    return this.storeOffset;
797  }
798
799  /**
800   * @return caching the number of rows fetched when calling next on a scanner
801   */
802  public int getCaching() {
803    return this.caching;
804  }
805
806  /**
807   * @return TimeRange
808   */
809  public TimeRange getTimeRange() {
810    return this.tr;
811  }
812
813  /**
814   * @return RowFilter
815   */
816  @Override
817  public Filter getFilter() {
818    return filter;
819  }
820
821  /**
   * @return true if a filter has been specified, false if not
823   */
824  public boolean hasFilter() {
825    return filter != null;
826  }
827
828  /**
829   * Set whether blocks should be cached for this Scan.
830   * <p>
831   * This is true by default.  When true, default settings of the table and
832   * family are used (this will never override caching blocks if the block
833   * cache is disabled for that family or entirely).
834   *
835   * @param cacheBlocks if false, default settings are overridden and blocks
836   * will not be cached
837   */
838  public Scan setCacheBlocks(boolean cacheBlocks) {
839    this.cacheBlocks = cacheBlocks;
840    return this;
841  }
842
843  /**
844   * Get whether blocks should be cached for this Scan.
845   * @return true if default caching should be used, false if blocks should not
846   * be cached
847   */
848  public boolean getCacheBlocks() {
849    return cacheBlocks;
850  }
851
852  /**
853   * Set whether this scan is a reversed one
854   * <p>
855   * This is false by default which means forward(normal) scan.
856   *
857   * @param reversed if true, scan will be backward order
858   * @return this
859   */
860  public Scan setReversed(boolean reversed) {
861    this.reversed = reversed;
862    return this;
863  }
864
865  /**
866   * Get whether this scan is a reversed one.
867   * @return true if backward scan, false if forward(default) scan
868   */
869  public boolean isReversed() {
870    return reversed;
871  }
872
873  /**
   * Set whether the caller wants to see the partial results when the server returns
   * less-than-expected cells. It is helpful while scanning a huge row, to prevent OOM at the
   * client. By default this value is false and the complete results will be assembled client side
877   * before being delivered to the caller.
878   * @param allowPartialResults
879   * @return this
880   * @see Result#mayHaveMoreCellsInRow()
881   * @see #setBatch(int)
882   */
883  public Scan setAllowPartialResults(final boolean allowPartialResults) {
884    this.allowPartialResults = allowPartialResults;
885    return this;
886  }
887
888  /**
   * @return true when the creator of this scan understands that the results they will see may
890   *         only represent a partial portion of a row. The entire row would be retrieved by
891   *         subsequent calls to {@link ResultScanner#next()}
892   */
893  public boolean getAllowPartialResults() {
894    return allowPartialResults;
895  }
896
897  @Override
898  public Scan setLoadColumnFamiliesOnDemand(boolean value) {
899    return (Scan) super.setLoadColumnFamiliesOnDemand(value);
900  }
901
902  /**
903   * Compile the table and column family (i.e. schema) information
904   * into a String. Useful for parsing and aggregation by debugging,
905   * logging, and administration tools.
906   * @return Map
907   */
908  @Override
909  public Map<String, Object> getFingerprint() {
910    Map<String, Object> map = new HashMap<>();
911    List<String> families = new ArrayList<>();
912    if(this.familyMap.isEmpty()) {
913      map.put("families", "ALL");
914      return map;
915    } else {
916      map.put("families", families);
917    }
918    for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
919        this.familyMap.entrySet()) {
920      families.add(Bytes.toStringBinary(entry.getKey()));
921    }
922    return map;
923  }
924
925  /**
926   * Compile the details beyond the scope of getFingerprint (row, columns,
927   * timestamps, etc.) into a Map along with the fingerprinted information.
928   * Useful for debugging, logging, and administration tools.
929   * @param maxCols a limit on the number of columns output prior to truncation
930   * @return Map
931   */
932  @Override
933  public Map<String, Object> toMap(int maxCols) {
    // start with the fingerprint map and build on top of it
935    Map<String, Object> map = getFingerprint();
936    // map from families to column list replaces fingerprint's list of families
937    Map<String, List<String>> familyColumns = new HashMap<>();
938    map.put("families", familyColumns);
939    // add scalar information first
940    map.put("startRow", Bytes.toStringBinary(this.startRow));
941    map.put("stopRow", Bytes.toStringBinary(this.stopRow));
942    map.put("maxVersions", this.maxVersions);
943    map.put("batch", this.batch);
944    map.put("caching", this.caching);
945    map.put("maxResultSize", this.maxResultSize);
946    map.put("cacheBlocks", this.cacheBlocks);
947    map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
948    List<Long> timeRange = new ArrayList<>(2);
949    timeRange.add(this.tr.getMin());
950    timeRange.add(this.tr.getMax());
951    map.put("timeRange", timeRange);
952    int colCount = 0;
953    // iterate through affected families and list out up to maxCols columns
954    for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
955      this.familyMap.entrySet()) {
956      List<String> columns = new ArrayList<>();
957      familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
958      if(entry.getValue() == null) {
959        colCount++;
960        --maxCols;
961        columns.add("ALL");
962      } else {
963        colCount += entry.getValue().size();
964        if (maxCols <= 0) {
965          continue;
966        }
967        for (byte [] column : entry.getValue()) {
968          if (--maxCols <= 0) {
969            continue;
970          }
971          columns.add(Bytes.toStringBinary(column));
972        }
973      }
974    }
975    map.put("totalColumns", colCount);
976    if (this.filter != null) {
977      map.put("filter", this.filter.toString());
978    }
979    // add the id if set
980    if (getId() != null) {
981      map.put("id", getId());
982    }
983    return map;
984  }
985
986  /**
987   * Enable/disable "raw" mode for this scan.
988   * If "raw" is enabled the scan will return all
989   * delete marker and deleted rows that have not
990   * been collected, yet.
991   * This is mostly useful for Scan on column families
992   * that have KEEP_DELETED_ROWS enabled.
993   * It is an error to specify any column when "raw" is set.
994   * @param raw True/False to enable/disable "raw" mode.
995   */
996  public Scan setRaw(boolean raw) {
997    setAttribute(RAW_ATTR, Bytes.toBytes(raw));
998    return this;
999  }
1000
1001  /**
1002   * @return True if this Scan is in "raw" mode.
1003   */
1004  public boolean isRaw() {
1005    byte[] attr = getAttribute(RAW_ATTR);
1006    return attr == null ? false : Bytes.toBoolean(attr);
1007  }
1008
1009  /**
1010   * Set whether this scan is a small scan
1011   * <p>
   * A small scan should use pread, while a big scan can use seek + read. Seek + read is fast but
   * can cause two problems: (1) resource contention and (2) too much network IO. [89-fb] Using
   * pread for non-compaction read requests: https://issues.apache.org/jira/browse/HBASE-7266. On
   * the other hand, if set to true, we do openScanner, next and closeScanner in one RPC call,
   * which means better performance for a small scan. [HBASE-9488]. Generally, if the scan range is
   * within one data block (64KB), it can be considered a small scan.
   * @param small set to true to enable the small scan optimizations
1019   * @deprecated since 2.0.0. Use {@link #setLimit(int)} and {@link #setReadType(ReadType)} instead.
1020   *             And for the one rpc optimization, now we will also fetch data when openScanner, and
1021   *             if the number of rows reaches the limit then we will close the scanner
1022   *             automatically which means we will fall back to one rpc.
1023   * @see #setLimit(int)
1024   * @see #setReadType(ReadType)
1025   */
1026  @Deprecated
1027  public Scan setSmall(boolean small) {
1028    this.small = small;
1029    this.readType = ReadType.PREAD;
1030    return this;
1031  }
1032
1033  /**
1034   * Get whether this scan is a small scan
1035   * @return true if small scan
1036   * @deprecated since 2.0.0. See the comment of {@link #setSmall(boolean)}
1037   */
1038  @Deprecated
1039  public boolean isSmall() {
1040    return small;
1041  }
1042
1043  @Override
1044  public Scan setAttribute(String name, byte[] value) {
1045    return (Scan) super.setAttribute(name, value);
1046  }
1047
1048  @Override
1049  public Scan setId(String id) {
1050    return (Scan) super.setId(id);
1051  }
1052
1053  @Override
1054  public Scan setAuthorizations(Authorizations authorizations) {
1055    return (Scan) super.setAuthorizations(authorizations);
1056  }
1057
1058  @Override
1059  public Scan setACL(Map<String, Permission> perms) {
1060    return (Scan) super.setACL(perms);
1061  }
1062
1063  @Override
1064  public Scan setACL(String user, Permission perms) {
1065    return (Scan) super.setACL(user, perms);
1066  }
1067
1068  @Override
1069  public Scan setConsistency(Consistency consistency) {
1070    return (Scan) super.setConsistency(consistency);
1071  }
1072
1073  @Override
1074  public Scan setReplicaId(int Id) {
1075    return (Scan) super.setReplicaId(Id);
1076  }
1077
1078  @Override
1079  public Scan setIsolationLevel(IsolationLevel level) {
1080    return (Scan) super.setIsolationLevel(level);
1081  }
1082
1083  @Override
1084  public Scan setPriority(int priority) {
1085    return (Scan) super.setPriority(priority);
1086  }
1087
1088  /**
1089   * Enable collection of {@link ScanMetrics}. For advanced users.
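   * <p>
   * A sketch of the intended flow (an already-open {@code Table} named {@code table} is assumed);
   * per the deprecation note on {@link #getScanMetrics()}, read the metrics from the
   * {@link ResultScanner} once scanning is done:
   * <pre>
   * Scan scan = new Scan().setScanMetricsEnabled(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // consume results ...
   *   }
   *   ScanMetrics metrics = scanner.getScanMetrics();
   *   // inspect metrics (e.g. RPC call counts) as needed
   * }
   * </pre>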
1090   * @param enabled Set to true to enable accumulating scan metrics
1091   */
1092  public Scan setScanMetricsEnabled(final boolean enabled) {
1093    setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
1094    return this;
1095  }
1096
1097  /**
1098   * @return True if collection of scan metrics is enabled. For advanced users.
1099   */
1100  public boolean isScanMetricsEnabled() {
1101    byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
1102    return attr == null ? false : Bytes.toBoolean(attr);
1103  }
1104
1105  /**
1106   * @return Metrics on this Scan, if metrics were enabled.
1107   * @see #setScanMetricsEnabled(boolean)
   * @deprecated Use {@link ResultScanner#getScanMetrics()} instead. Note: do not use this method
   *             together with {@link ResultScanner#getScanMetrics()}, or the metrics will be
   *             messed up.
1111   */
1112  @Deprecated
1113  public ScanMetrics getScanMetrics() {
1114    byte[] bytes = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
1115    if (bytes == null) return null;
1116    return ProtobufUtil.toScanMetrics(bytes);
1117  }
1118
1119  public Boolean isAsyncPrefetch() {
1120    return asyncPrefetch;
1121  }
1122
1123  public Scan setAsyncPrefetch(boolean asyncPrefetch) {
1124    this.asyncPrefetch = asyncPrefetch;
1125    return this;
1126  }
1127
1128  /**
1129   * @return the limit of rows for this scan
1130   */
1131  public int getLimit() {
1132    return limit;
1133  }
1134
1135  /**
1136   * Set the limit of rows for this scan. We will terminate the scan if the number of returned rows
1137   * reaches this value.
1138   * <p>
   * This condition will be tested last, after all other conditions such as stopRow, filter, etc.
1140   * @param limit the limit of rows for this scan
1141   * @return this
1142   */
1143  public Scan setLimit(int limit) {
1144    this.limit = limit;
1145    return this;
1146  }
1147
1148  /**
1149   * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
1150   * set {@code readType} to {@link ReadType#PREAD}.
1151   * @return this
1152   */
1153  public Scan setOneRowLimit() {
1154    return setLimit(1).setReadType(ReadType.PREAD);
1155  }
1156
1157  @InterfaceAudience.Public
1158  public enum ReadType {
1159    DEFAULT, STREAM, PREAD
1160  }
1161
1162  /**
1163   * @return the read type for this scan
1164   */
1165  public ReadType getReadType() {
1166    return readType;
1167  }
1168
1169  /**
1170   * Set the read type for this scan.
1171   * <p>
   * Notice that we may choose to use pread even if you specify {@link ReadType#STREAM} here. For
1173   * example, we will always use pread if this is a get scan.
1174   * @return this
1175   */
1176  public Scan setReadType(ReadType readType) {
1177    this.readType = readType;
1178    return this;
1179  }
1180
1181  /**
1182   * Get the mvcc read point used to open a scanner.
1183   */
1184  long getMvccReadPoint() {
1185    return mvccReadPoint;
1186  }
1187
1188  /**
1189   * Set the mvcc read point used to open a scanner.
1190   */
1191  Scan setMvccReadPoint(long mvccReadPoint) {
1192    this.mvccReadPoint = mvccReadPoint;
1193    return this;
1194  }
1195
1196  /**
1197   * Set the mvcc read point to -1 which means do not use it.
1198   */
1199  Scan resetMvccReadPoint() {
1200    return setMvccReadPoint(-1L);
1201  }
1202
1203  /**
   * When the server is slow, or we scan a table with much deleted data, or we use a sparse filter,
   * the server will respond with heartbeats to prevent timeouts. However, the scanner will return
   * a Result only when the client can build one. So if there are many heartbeats, the blocking
   * time on ResultScanner#next() may be very long, which is not friendly to online services.
   *
   * Set this to true and you can get a special Result whose #isCursor() returns true and which
   * does not contain any real data. It only tells you where the server has scanned. You can call
   * next to continue scanning, or open a new scanner with this row key as the start row whenever
   * you want.
   *
   * Users get a cursor when and only when there is a response from the server but we cannot return
   * a Result to users, for example, when the response is a heartbeat or there are partial cells
   * but the user does not allow partial results.
   *
   * Currently the cursor is at row level, which means the special Result will only contain a row
   * key.
   * {@link Result#isCursor()}
   * {@link Result#getCursor()}
   * {@link Cursor}
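   * <p>
   * A sketch of consuming cursor results (the handling shown is illustrative; an open
   * {@code table} is assumed):
   * <pre>
   * Scan scan = new Scan().setNeedCursorResult(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     if (result.isCursor()) {
   *       // no real data, only the row key the server has scanned up to
   *       Cursor cursor = result.getCursor();
   *       // e.g. remember cursor.getRow() so a later scan can resume via createScanFromCursor
   *     } else {
   *       // a normal Result with cells
   *     }
   *   }
   * }
   * </pre>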
1221   */
1222  public Scan setNeedCursorResult(boolean needCursorResult) {
1223    this.needCursorResult = needCursorResult;
1224    return this;
1225  }
1226
1227  public boolean isNeedCursorResult() {
1228    return needCursorResult;
1229  }
1230
1231  /**
   * Create a new Scan with a cursor. It only sets the position information, such as the start row
   * key. The other fields (like column families, stop row, limit) should still be filled in by the
   * user.
1234   * {@link Result#isCursor()}
1235   * {@link Result#getCursor()}
1236   * {@link Cursor}
1237   */
1238  public static Scan createScanFromCursor(Cursor cursor) {
1239    return new Scan().withStartRow(cursor.getRow());
1240  }
1241}