001/*
002 *
003 * Licensed to the Apache Software Foundation (ASF) under one
004 * or more contributor license agreements.  See the NOTICE file
005 * distributed with this work for additional information
006 * regarding copyright ownership.  The ASF licenses this file
007 * to you under the Apache License, Version 2.0 (the
008 * "License"); you may not use this file except in compliance
009 * with the License.  You may obtain a copy of the License at
010 *
011 *     http://www.apache.org/licenses/LICENSE-2.0
012 *
013 * Unless required by applicable law or agreed to in writing, software
014 * distributed under the License is distributed on an "AS IS" BASIS,
015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
016 * See the License for the specific language governing permissions and
017 * limitations under the License.
018 */
019
020package org.apache.hadoop.hbase.client;
021
022import java.io.IOException;
023import java.util.ArrayList;
024import java.util.Arrays;
025import java.util.HashMap;
026import java.util.List;
027import java.util.Map;
028import java.util.NavigableSet;
029import java.util.TreeMap;
030import java.util.TreeSet;
031
032import org.apache.hadoop.hbase.HConstants;
033import org.apache.yetus.audience.InterfaceAudience;
034import org.slf4j.Logger;
035import org.slf4j.LoggerFactory;
036import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
037import org.apache.hadoop.hbase.filter.Filter;
038import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
039import org.apache.hadoop.hbase.io.TimeRange;
040import org.apache.hadoop.hbase.security.access.Permission;
041import org.apache.hadoop.hbase.security.visibility.Authorizations;
042import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
043import org.apache.hadoop.hbase.util.Bytes;
044
045/**
046 * Used to perform Scan operations.
047 * <p>
048 * All operations are identical to {@link Get} with the exception of instantiation. Rather than
049 * specifying a single row, an optional startRow and stopRow may be defined. If rows are not
050 * specified, the Scanner will iterate over all rows.
051 * <p>
052 * To get all columns from all rows of a Table, create an instance with no constraints; use the
053 * {@link #Scan()} constructor. To constrain the scan to specific column families, call
054 * {@link #addFamily(byte[]) addFamily} for each family to retrieve on your Scan instance.
055 * <p>
056 * To get specific columns, call {@link #addColumn(byte[], byte[]) addColumn} for each column to
057 * retrieve.
058 * <p>
059 * To only retrieve columns within a specific range of version timestamps, call
060 * {@link #setTimeRange(long, long) setTimeRange}.
061 * <p>
 * To only retrieve columns with a specific timestamp, call
 * {@link #setTimestamp(long) setTimestamp}.
064 * <p>
065 * To limit the number of versions of each column to be returned, call {@link #setMaxVersions(int)
066 * setMaxVersions}.
067 * <p>
068 * To limit the maximum number of values returned for each call to next(), call
069 * {@link #setBatch(int) setBatch}.
070 * <p>
071 * To add a filter, call {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}.
072 * <p>
 * Small scans are deprecated as of 2.0.0. Instead, use the {@link #setLimit(int)} method on the
 * Scan object to tell the RegionServer how many rows are wanted. If the number of returned rows
 * reaches the limit, the RegionServer closes the RegionScanner automatically. The new
 * implementation also fetches data when opening the scanner, so a scan can complete in a single
 * RPC call. A {@link #setReadType(ReadType)} method has also been introduced; use it to tell the
 * RegionServer to use pread explicitly.
079 * <p>
080 * Expert: To explicitly disable server-side block caching for this scan, execute
081 * {@link #setCacheBlocks(boolean)}.
082 * <p>
 * <em>Note:</em> Usage alters Scan instances. Internally, attributes are updated as the Scan runs
 * and, if enabled, metrics accumulate in the Scan instance. Keep this in mind when cloning or
 * reusing a Scan instance; it is safer to create a new Scan instance per usage.
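 * <p>
 * A minimal usage sketch (assumes an existing {@link Table} instance named {@code table}; the
 * row, family and qualifier names are illustrative only):
 * <pre>{@code
 * Scan scan = new Scan()
 *     .withStartRow(Bytes.toBytes("row-0000"))
 *     .withStopRow(Bytes.toBytes("row-9999"))
 *     .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"))
 *     .setCaching(100);
 * try (ResultScanner scanner = table.getScanner(scan)) {
 *   for (Result result : scanner) {
 *     // Each Result holds the cells returned for one row.
 *     System.out.println(Bytes.toStringBinary(result.getRow()));
 *   }
 * }
 * }</pre>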
087 */
088@InterfaceAudience.Public
089public class Scan extends Query {
090  private static final Logger LOG = LoggerFactory.getLogger(Scan.class);
091
092  private static final String RAW_ATTR = "_raw_";
093
094  private byte[] startRow = HConstants.EMPTY_START_ROW;
095  private boolean includeStartRow = true;
096  private byte[] stopRow  = HConstants.EMPTY_END_ROW;
097  private boolean includeStopRow = false;
098  private int maxVersions = 1;
099  private int batch = -1;
100
101  /**
   * Partial {@link Result}s are {@link Result}s that must be combined to form a complete
   * {@link Result}.
103   * The {@link Result}s had to be returned in fragments (i.e. as partials) because the size of the
104   * cells in the row exceeded max result size on the server. Typically partial results will be
105   * combined client side into complete results before being delivered to the caller. However, if
106   * this flag is set, the caller is indicating that they do not mind seeing partial results (i.e.
107   * they understand that the results returned from the Scanner may only represent part of a
108   * particular row). In such a case, any attempt to combine the partials into a complete result on
109   * the client side will be skipped, and the caller will be able to see the exact results returned
110   * from the server.
111   */
112  private boolean allowPartialResults = false;
113
114  private int storeLimit = -1;
115  private int storeOffset = 0;
116
117  /**
118   * @deprecated since 1.0.0. Use {@link #setScanMetricsEnabled(boolean)}
119   */
120  // Make private or remove.
121  @Deprecated
122  static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
123
124  /**
   * @deprecated Use {@link #getScanMetrics()}.
126   */
127  // Make this private or remove.
128  @Deprecated
129  static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data";
130
131  // If an application wants to use multiple scans over different tables each scan must
132  // define this attribute with the appropriate table name by calling
133  // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
134  static public final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";
135
136  /**
137   * -1 means no caching specified and the value of {@link HConstants#HBASE_CLIENT_SCANNER_CACHING}
138   * (default to {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_CACHING}) will be used
139   */
140  private int caching = -1;
141  private long maxResultSize = -1;
142  private boolean cacheBlocks = true;
143  private boolean reversed = false;
144  private TimeRange tr = TimeRange.allTime();
145  private Map<byte [], NavigableSet<byte []>> familyMap =
146    new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
147  private Boolean asyncPrefetch = null;
148
149  /**
150   * Parameter name for client scanner sync/async prefetch toggle.
   * When using an async scanner, prefetching data from the server is done in the background.
   * The parameter currently has no effect if the user has set Scan#setSmall or Scan#setReversed.
154   */
155  public static final String HBASE_CLIENT_SCANNER_ASYNC_PREFETCH =
156      "hbase.client.scanner.async.prefetch";
157
158  /**
159   * Default value of {@link #HBASE_CLIENT_SCANNER_ASYNC_PREFETCH}.
160   */
161  public static final boolean DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = false;
162
163  /**
   * Set this to true for a small scan to get better performance. A small scan should use pread,
   * while a big scan can use seek + read. Seek + read is fast but can cause two problems: (1)
   * resource contention and (2) too much network I/O. [89-fb] Using pread for non-compaction read
   * requests: https://issues.apache.org/jira/browse/HBASE-7266. On the other hand, if this is set
   * to true, we do openScanner, next and closeScanner in one RPC call, which means better
   * performance for a small scan. [HBASE-9488] Generally, if the scan range is within one data
   * block (64KB), it can be considered a small scan.
171   */
172  private boolean small = false;
173
174  /**
   * The mvcc read point to use when opening a scanner. Remember to clear it after switching
   * regions as the mvcc is only valid within region scope.
177   */
178  private long mvccReadPoint = -1L;
179
180  /**
   * The number of rows we want for this scan. We will terminate the scan if the number of
   * returned rows reaches this value.
183   */
184  private int limit = -1;
185
186  /**
187   * Control whether to use pread at server side.
188   */
189  private ReadType readType = ReadType.DEFAULT;
190
191  private boolean needCursorResult = false;
192
193  /**
194   * Create a Scan operation across all rows.
195   */
196  public Scan() {}
197
198  /**
199   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
200   *   {@code new Scan().withStartRow(startRow).setFilter(filter)} instead.
201   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17320">HBASE-17320</a>
202   */
203  @Deprecated
204  public Scan(byte[] startRow, Filter filter) {
205    this(startRow);
206    this.filter = filter;
207  }
208
209  /**
210   * Create a Scan operation starting at the specified row.
211   * <p>
212   * If the specified row does not exist, the Scanner will start from the next closest row after the
213   * specified row.
214   * @param startRow row to start scanner at or after
215   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
216   *   {@code new Scan().withStartRow(startRow)} instead.
217   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17320">HBASE-17320</a>
218   */
219  @Deprecated
220  public Scan(byte[] startRow) {
221    setStartRow(startRow);
222  }
223
224  /**
225   * Create a Scan operation for the range of rows specified.
226   * @param startRow row to start scanner at or after (inclusive)
227   * @param stopRow row to stop scanner before (exclusive)
228   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
229   *   {@code new Scan().withStartRow(startRow).withStopRow(stopRow)} instead.
230   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17320">HBASE-17320</a>
231   */
232  @Deprecated
233  public Scan(byte[] startRow, byte[] stopRow) {
234    setStartRow(startRow);
235    setStopRow(stopRow);
236  }
237
238  /**
239   * Creates a new instance of this class while copying all values.
240   *
241   * @param scan  The scan instance to copy from.
242   * @throws IOException When copying the values fails.
243   */
244  public Scan(Scan scan) throws IOException {
245    startRow = scan.getStartRow();
246    includeStartRow = scan.includeStartRow();
247    stopRow  = scan.getStopRow();
248    includeStopRow = scan.includeStopRow();
249    maxVersions = scan.getMaxVersions();
250    batch = scan.getBatch();
251    storeLimit = scan.getMaxResultsPerColumnFamily();
252    storeOffset = scan.getRowOffsetPerColumnFamily();
253    caching = scan.getCaching();
254    maxResultSize = scan.getMaxResultSize();
255    cacheBlocks = scan.getCacheBlocks();
256    filter = scan.getFilter(); // clone?
257    loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
258    consistency = scan.getConsistency();
259    this.setIsolationLevel(scan.getIsolationLevel());
260    reversed = scan.isReversed();
261    asyncPrefetch = scan.isAsyncPrefetch();
262    small = scan.isSmall();
263    allowPartialResults = scan.getAllowPartialResults();
264    tr = scan.getTimeRange(); // TimeRange is immutable
265    Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
266    for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
267      byte [] fam = entry.getKey();
268      NavigableSet<byte[]> cols = entry.getValue();
269      if (cols != null && cols.size() > 0) {
270        for (byte[] col : cols) {
271          addColumn(fam, col);
272        }
273      } else {
274        addFamily(fam);
275      }
276    }
277    for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
278      setAttribute(attr.getKey(), attr.getValue());
279    }
280    for (Map.Entry<byte[], TimeRange> entry : scan.getColumnFamilyTimeRange().entrySet()) {
281      TimeRange tr = entry.getValue();
282      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
283    }
284    this.mvccReadPoint = scan.getMvccReadPoint();
285    this.limit = scan.getLimit();
286    this.needCursorResult = scan.isNeedCursorResult();
287    setPriority(scan.getPriority());
288    readType = scan.getReadType();
289    super.setReplicaId(scan.getReplicaId());
290  }
291
292  /**
293   * Builds a scan object with the same specs as get.
294   * @param get get to model scan after
295   */
296  public Scan(Get get) {
297    this.startRow = get.getRow();
298    this.includeStartRow = true;
299    this.stopRow = get.getRow();
300    this.includeStopRow = true;
301    this.filter = get.getFilter();
302    this.cacheBlocks = get.getCacheBlocks();
303    this.maxVersions = get.getMaxVersions();
304    this.storeLimit = get.getMaxResultsPerColumnFamily();
305    this.storeOffset = get.getRowOffsetPerColumnFamily();
306    this.tr = get.getTimeRange();
307    this.familyMap = get.getFamilyMap();
308    this.asyncPrefetch = false;
309    this.consistency = get.getConsistency();
310    this.setIsolationLevel(get.getIsolationLevel());
311    this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
312    for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
313      setAttribute(attr.getKey(), attr.getValue());
314    }
315    for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) {
316      TimeRange tr = entry.getValue();
317      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
318    }
319    this.mvccReadPoint = -1L;
320    setPriority(get.getPriority());
321    super.setReplicaId(get.getReplicaId());
322  }
323
324  public boolean isGetScan() {
325    return includeStartRow && includeStopRow
326        && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow);
327  }
328
329  /**
330   * Get all columns from the specified family.
331   * <p>
332   * Overrides previous calls to addColumn for this family.
333   * @param family family name
334   * @return this
335   */
336  public Scan addFamily(byte [] family) {
337    familyMap.remove(family);
338    familyMap.put(family, null);
339    return this;
340  }
341
342  /**
343   * Get the column from the specified family with the specified qualifier.
344   * <p>
345   * Overrides previous calls to addFamily for this family.
346   * @param family family name
347   * @param qualifier column qualifier
348   * @return this
349   */
350  public Scan addColumn(byte [] family, byte [] qualifier) {
351    NavigableSet<byte []> set = familyMap.get(family);
352    if(set == null) {
353      set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
354      familyMap.put(family, set);
355    }
356    if (qualifier == null) {
357      qualifier = HConstants.EMPTY_BYTE_ARRAY;
358    }
359    set.add(qualifier);
360    return this;
361  }
362
363  /**
364   * Get versions of columns only within the specified timestamp range,
365   * [minStamp, maxStamp).  Note, default maximum versions to return is 1.  If
366   * your time range spans more than one version and you want all versions
367   * returned, up the number of versions beyond the default.
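   * <p>
   * For example, to read every version written inside a time window rather than only the newest
   * one, a sketch along these lines can be used (the window below is illustrative):
   * <pre>{@code
   * long end = System.currentTimeMillis();
   * long start = end - 3600_000L;   // one hour ago
   * Scan scan = new Scan().readAllVersions();
   * scan.setTimeRange(start, end);  // [start, end), upper bound exclusive; declares IOException
   * }</pre>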
368   * @param minStamp minimum timestamp value, inclusive
369   * @param maxStamp maximum timestamp value, exclusive
370   * @see #setMaxVersions()
371   * @see #setMaxVersions(int)
372   * @return this
373   */
374  public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
375    tr = new TimeRange(minStamp, maxStamp);
376    return this;
377  }
378
379  /**
380   * Get versions of columns with the specified timestamp. Note, default maximum
381   * versions to return is 1.  If your time range spans more than one version
382   * and you want all versions returned, up the number of versions beyond the
   * default.
384   * @param timestamp version timestamp
385   * @see #setMaxVersions()
386   * @see #setMaxVersions(int)
387   * @return this
388   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
389   *             Use {@link #setTimestamp(long)} instead
390   */
391  @Deprecated
392  public Scan setTimeStamp(long timestamp)
393  throws IOException {
394    return this.setTimestamp(timestamp);
395  }
396
397  /**
398   * Get versions of columns with the specified timestamp. Note, default maximum
399   * versions to return is 1.  If your time range spans more than one version
400   * and you want all versions returned, up the number of versions beyond the
   * default.
402   * @param timestamp version timestamp
403   * @see #setMaxVersions()
404   * @see #setMaxVersions(int)
405   * @return this
406   */
407  public Scan setTimestamp(long timestamp) {
408    try {
409      tr = new TimeRange(timestamp, timestamp + 1);
410    } catch(Exception e) {
411      // This should never happen, unless integer overflow or something extremely wrong...
412      LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
413      throw e;
414    }
415
416    return this;
417  }
418
419  @Override public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
420    return (Scan) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
421  }
422
423  /**
424   * Set the start row of the scan.
425   * <p>
426   * If the specified row does not exist, the Scanner will start from the next closest row after the
427   * specified row.
428   * @param startRow row to start scanner at or after
429   * @return this
430   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
431   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
432   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #withStartRow(byte[])}
   *   instead. This method may change the inclusiveness of the stop row to stay compatible with
   *   the old behavior.
435   * @see #withStartRow(byte[])
436   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17320">HBASE-17320</a>
437   */
438  @Deprecated
439  public Scan setStartRow(byte[] startRow) {
440    withStartRow(startRow);
441    if (ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow)) {
442      // for keeping the old behavior that a scan with the same start and stop row is a get scan.
443      this.includeStopRow = true;
444    }
445    return this;
446  }
447
448  /**
449   * Set the start row of the scan.
450   * <p>
451   * If the specified row does not exist, the Scanner will start from the next closest row after the
452   * specified row.
453   * @param startRow row to start scanner at or after
454   * @return this
455   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
456   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
457   */
458  public Scan withStartRow(byte[] startRow) {
459    return withStartRow(startRow, true);
460  }
461
462  /**
463   * Set the start row of the scan.
464   * <p>
465   * If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner
466   * will start from the next closest row after the specified row.
467   * @param startRow row to start scanner at or after
   * @param inclusive whether we should include the start row when scanning
469   * @return this
470   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
471   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
472   */
473  public Scan withStartRow(byte[] startRow, boolean inclusive) {
474    if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
475      throw new IllegalArgumentException("startRow's length must be less than or equal to "
476          + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
477    }
478    this.startRow = startRow;
479    this.includeStartRow = inclusive;
480    return this;
481  }
482
483  /**
484   * Set the stop row of the scan.
485   * <p>
486   * The scan will include rows that are lexicographically less than the provided stopRow.
487   * <p>
488   * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
489   * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
490   * </p>
491   * @param stopRow row to end at (exclusive)
492   * @return this
493   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
494   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
495   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #withStopRow(byte[])} instead.
   *   This method may change the inclusiveness of the stop row to stay compatible with the old
   *   behavior.
498   * @see #withStopRow(byte[])
499   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17320">HBASE-17320</a>
500   */
501  @Deprecated
502  public Scan setStopRow(byte[] stopRow) {
503    withStopRow(stopRow);
504    if (ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow)) {
505      // for keeping the old behavior that a scan with the same start and stop row is a get scan.
506      this.includeStopRow = true;
507    }
508    return this;
509  }
510
511  /**
512   * Set the stop row of the scan.
513   * <p>
514   * The scan will include rows that are lexicographically less than the provided stopRow.
515   * <p>
516   * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
517   * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
518   * </p>
519   * @param stopRow row to end at (exclusive)
520   * @return this
521   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
522   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
523   */
524  public Scan withStopRow(byte[] stopRow) {
525    return withStopRow(stopRow, false);
526  }
527
528  /**
529   * Set the stop row of the scan.
530   * <p>
531   * The scan will include rows that are lexicographically less than (or equal to if
532   * {@code inclusive} is {@code true}) the provided stopRow.
533   * @param stopRow row to end at
   * @param inclusive whether we should include the stop row when scanning
535   * @return this
536   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
537   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
538   */
539  public Scan withStopRow(byte[] stopRow, boolean inclusive) {
540    if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
541      throw new IllegalArgumentException("stopRow's length must be less than or equal to "
542          + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
543    }
544    this.stopRow = stopRow;
545    this.includeStopRow = inclusive;
546    return this;
547  }
548
549  /**
550   * <p>Set a filter (using stopRow and startRow) so the result set only contains rows where the
551   * rowKey starts with the specified prefix.</p>
552   * <p>This is a utility method that converts the desired rowPrefix into the appropriate values
553   * for the startRow and stopRow to achieve the desired result.</p>
554   * <p>This can safely be used in combination with setFilter.</p>
555   * <p><b>NOTE: Doing a {@link #setStartRow(byte[])} and/or {@link #setStopRow(byte[])}
556   * after this method will yield undefined results.</b></p>
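   * <p>A short sketch (the prefix value is illustrative only):</p>
   * <pre>{@code
   * // Matches every row whose key starts with "user|42|"; equivalent to setting the start row to
   * // the prefix and the stop row to the key immediately following the prefix.
   * Scan scan = new Scan().setRowPrefixFilter(Bytes.toBytes("user|42|"));
   * }</pre>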
557   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
558   * @return this
559   */
560  public Scan setRowPrefixFilter(byte[] rowPrefix) {
561    if (rowPrefix == null) {
562      setStartRow(HConstants.EMPTY_START_ROW);
563      setStopRow(HConstants.EMPTY_END_ROW);
564    } else {
565      this.setStartRow(rowPrefix);
566      this.setStopRow(calculateTheClosestNextRowKeyForPrefix(rowPrefix));
567    }
568    return this;
569  }
570
571  /**
   * <p>When scanning for a prefix the scan should stop immediately after the last row that
573   * has the specified prefix. This method calculates the closest next rowKey immediately following
574   * the given rowKeyPrefix.</p>
575   * <p><b>IMPORTANT: This converts a rowKey<u>Prefix</u> into a rowKey</b>.</p>
576   * <p>If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can
577   * simply increment the last byte of the array.
578   * But if your application uses real binary rowids you may run into the scenario that your
579   * prefix is something like:</p>
580   * &nbsp;&nbsp;&nbsp;<b>{ 0x12, 0x23, 0xFF, 0xFF }</b><br/>
581   * Then this stopRow needs to be fed into the actual scan<br/>
582   * &nbsp;&nbsp;&nbsp;<b>{ 0x12, 0x24 }</b> (Notice that it is shorter now)<br/>
   * This method calculates the correct stop row value for this use case.
584   *
585   * @param rowKeyPrefix the rowKey<u>Prefix</u>.
586   * @return the closest next rowKey immediately following the given rowKeyPrefix.
587   */
588  private byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) {
589    // Essentially we are treating it like an 'unsigned very very long' and doing +1 manually.
590    // Search for the place where the trailing 0xFFs start
591    int offset = rowKeyPrefix.length;
592    while (offset > 0) {
593      if (rowKeyPrefix[offset - 1] != (byte) 0xFF) {
594        break;
595      }
596      offset--;
597    }
598
599    if (offset == 0) {
600      // We got an 0xFFFF... (only FFs) stopRow value which is
601      // the last possible prefix before the end of the table.
602      // So set it to stop at the 'end of the table'
603      return HConstants.EMPTY_END_ROW;
604    }
605
606    // Copy the right length of the original
607    byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
608    // And increment the last one
609    newStopRow[newStopRow.length - 1]++;
610    return newStopRow;
611  }
612
613  /**
614   * Get all available versions.
615   * @return this
616   * @deprecated since 2.0.0 and will be removed in 3.0.0. It is easy to misunderstand with column
617   *   family's max versions, so use {@link #readAllVersions()} instead.
618   * @see #readAllVersions()
619   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17125">HBASE-17125</a>
620   */
621  @Deprecated
622  public Scan setMaxVersions() {
623    return readAllVersions();
624  }
625
626  /**
627   * Get up to the specified number of versions of each column.
628   * @param maxVersions maximum versions for each column
629   * @return this
630   * @deprecated since 2.0.0 and will be removed in 3.0.0. It is easy to misunderstand with column
631   *   family's max versions, so use {@link #readVersions(int)} instead.
632   * @see #readVersions(int)
633   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17125">HBASE-17125</a>
634   */
635  @Deprecated
636  public Scan setMaxVersions(int maxVersions) {
637    return readVersions(maxVersions);
638  }
639
640  /**
641   * Get all available versions.
642   * @return this
643   */
644  public Scan readAllVersions() {
645    this.maxVersions = Integer.MAX_VALUE;
646    return this;
647  }
648
649  /**
650   * Get up to the specified number of versions of each column.
651   * @param versions specified number of versions for each column
652   * @return this
653   */
654  public Scan readVersions(int versions) {
655    this.maxVersions = versions;
656    return this;
657  }
658
659  /**
660   * Set the maximum number of cells to return for each call to next(). Callers should be aware
661   * that this is not equivalent to calling {@link #setAllowPartialResults(boolean)}.
   * If you don't allow partial results, the number of cells in each Result must be equal to your
   * batch setting, unless it is the last Result for the current row. So this method is helpful in
   * paging queries. If you just want to prevent OOM at the client, setting
   * setAllowPartialResults(true) is a better choice.
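   * <p>
   * For example, when scanning very wide rows the following sketch asks for at most 100 cells per
   * {@link Result} (the value is illustrative):
   * <pre>{@code
   * Scan scan = new Scan().setBatch(100);
   * // A single wide row may now be split across several consecutive Results;
   * // Result#mayHaveMoreCellsInRow() tells you whether more cells of the same row follow.
   * }</pre>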
665   * @param batch the maximum number of values
666   * @see Result#mayHaveMoreCellsInRow()
667   */
668  public Scan setBatch(int batch) {
669    if (this.hasFilter() && this.filter.hasFilterRow()) {
670      throw new IncompatibleFilterException(
671        "Cannot set batch on a scan using a filter" +
672        " that returns true for filter.hasFilterRow");
673    }
674    this.batch = batch;
675    return this;
676  }
677
678  /**
679   * Set the maximum number of values to return per row per Column Family
680   * @param limit the maximum number of values returned / row / CF
681   */
682  public Scan setMaxResultsPerColumnFamily(int limit) {
683    this.storeLimit = limit;
684    return this;
685  }
686
687  /**
688   * Set offset for the row per Column Family.
689   * @param offset is the number of kvs that will be skipped.
690   */
691  public Scan setRowOffsetPerColumnFamily(int offset) {
692    this.storeOffset = offset;
693    return this;
694  }
695
696  /**
697   * Set the number of rows for caching that will be passed to scanners.
698   * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
699   * apply.
700   * Higher caching values will enable faster scanners but will use more memory.
701   * @param caching the number of rows for caching
702   */
703  public Scan setCaching(int caching) {
704    this.caching = caching;
705    return this;
706  }
707
708  /**
709   * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)}
710   */
711  public long getMaxResultSize() {
712    return maxResultSize;
713  }
714
715  /**
716   * Set the maximum result size. The default is -1; this means that no specific
717   * maximum result size will be set for this scan, and the global configured
718   * value will be used instead. (Defaults to unlimited).
719   *
720   * @param maxResultSize The maximum result size in bytes.
721   */
722  public Scan setMaxResultSize(long maxResultSize) {
723    this.maxResultSize = maxResultSize;
724    return this;
725  }
726
727  @Override
728  public Scan setFilter(Filter filter) {
729    super.setFilter(filter);
730    return this;
731  }
732
733  /**
734   * Setting the familyMap
735   * @param familyMap map of family to qualifier
736   * @return this
737   */
738  public Scan setFamilyMap(Map<byte [], NavigableSet<byte []>> familyMap) {
739    this.familyMap = familyMap;
740    return this;
741  }
742
743  /**
744   * Getting the familyMap
745   * @return familyMap
746   */
747  public Map<byte [], NavigableSet<byte []>> getFamilyMap() {
748    return this.familyMap;
749  }
750
751  /**
752   * @return the number of families in familyMap
753   */
754  public int numFamilies() {
755    if(hasFamilies()) {
756      return this.familyMap.size();
757    }
758    return 0;
759  }
760
761  /**
762   * @return true if familyMap is non empty, false otherwise
763   */
764  public boolean hasFamilies() {
765    return !this.familyMap.isEmpty();
766  }
767
768  /**
769   * @return the keys of the familyMap
770   */
771  public byte[][] getFamilies() {
772    if(hasFamilies()) {
773      return this.familyMap.keySet().toArray(new byte[0][0]);
774    }
775    return null;
776  }
777
778  /**
779   * @return the startrow
780   */
781  public byte [] getStartRow() {
782    return this.startRow;
783  }
784
785  /**
   * @return true if we should include the start row when scanning
787   */
788  public boolean includeStartRow() {
789    return includeStartRow;
790  }
791
792  /**
793   * @return the stoprow
794   */
795  public byte[] getStopRow() {
796    return this.stopRow;
797  }
798
799  /**
   * @return true if we should include the stop row when scanning
801   */
802  public boolean includeStopRow() {
803    return includeStopRow;
804  }
805
806  /**
807   * @return the max number of versions to fetch
808   */
809  public int getMaxVersions() {
810    return this.maxVersions;
811  }
812
813  /**
814   * @return maximum number of values to return for a single call to next()
815   */
816  public int getBatch() {
817    return this.batch;
818  }
819
820  /**
821   * @return maximum number of values to return per row per CF
822   */
823  public int getMaxResultsPerColumnFamily() {
824    return this.storeLimit;
825  }
826
827  /**
828   * Method for retrieving the scan's offset per row per column
829   * family (#kvs to be skipped)
830   * @return row offset
831   */
832  public int getRowOffsetPerColumnFamily() {
833    return this.storeOffset;
834  }
835
836  /**
   * @return caching, the number of rows fetched when calling next on a scanner
838   */
839  public int getCaching() {
840    return this.caching;
841  }
842
843  /**
844   * @return TimeRange
845   */
846  public TimeRange getTimeRange() {
847    return this.tr;
848  }
849
850  /**
851   * @return RowFilter
852   */
853  @Override
854  public Filter getFilter() {
855    return filter;
856  }
857
858  /**
   * @return true if a filter has been specified, false if not
860   */
861  public boolean hasFilter() {
862    return filter != null;
863  }
864
865  /**
866   * Set whether blocks should be cached for this Scan.
867   * <p>
868   * This is true by default.  When true, default settings of the table and
869   * family are used (this will never override caching blocks if the block
870   * cache is disabled for that family or entirely).
871   *
872   * @param cacheBlocks if false, default settings are overridden and blocks
873   * will not be cached
874   */
875  public Scan setCacheBlocks(boolean cacheBlocks) {
876    this.cacheBlocks = cacheBlocks;
877    return this;
878  }
879
880  /**
881   * Get whether blocks should be cached for this Scan.
882   * @return true if default caching should be used, false if blocks should not
883   * be cached
884   */
885  public boolean getCacheBlocks() {
886    return cacheBlocks;
887  }
888
889  /**
   * Set whether this scan is a reversed one.
   * <p>
   * This is false by default, which means a forward (normal) scan.
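   * <p>
   * A reversed scan iterates from the start row towards lexicographically smaller row keys, so
   * the start row should be the "largest" key of the range. A small sketch with illustrative keys:
   * <pre>{@code
   * Scan scan = new Scan()
   *     .withStartRow(Bytes.toBytes("row-9999")) // scanning begins here...
   *     .withStopRow(Bytes.toBytes("row-0000"))  // ...and walks backwards towards this row
   *     .setReversed(true);
   * }</pre>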
893   *
894   * @param reversed if true, scan will be backward order
895   * @return this
896   */
897  public Scan setReversed(boolean reversed) {
898    this.reversed = reversed;
899    return this;
900  }
901
902  /**
903   * Get whether this scan is a reversed one.
904   * @return true if backward scan, false if forward(default) scan
905   */
906  public boolean isReversed() {
907    return reversed;
908  }
909
910  /**
   * Set whether the caller wants to see partial results when the server returns
   * less-than-expected cells. It is helpful while scanning a huge row, to prevent OOM at the
   * client. By default this value is false and the complete results will be assembled client side
   * before being delivered to the caller.
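   * <p>
   * A sketch of consuming partial results (assumes an existing {@link Table} named {@code table}):
   * <pre>{@code
   * Scan scan = new Scan().setAllowPartialResults(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result partial : scanner) {
   *     // Several consecutive Results may belong to the same row; use
   *     // mayHaveMoreCellsInRow() to detect whether the current row is complete.
   *     boolean rowComplete = !partial.mayHaveMoreCellsInRow();
   *   }
   * }
   * }</pre>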
915   * @param allowPartialResults
916   * @return this
917   * @see Result#mayHaveMoreCellsInRow()
918   * @see #setBatch(int)
919   */
920  public Scan setAllowPartialResults(final boolean allowPartialResults) {
921    this.allowPartialResults = allowPartialResults;
922    return this;
923  }
924
925  /**
   * @return true when the caller of this scan understands that the results they will see may
   *         only represent a partial portion of a row. The entire row would be retrieved by
   *         subsequent calls to {@link ResultScanner#next()}
929   */
930  public boolean getAllowPartialResults() {
931    return allowPartialResults;
932  }
933
934  @Override
935  public Scan setLoadColumnFamiliesOnDemand(boolean value) {
936    return (Scan) super.setLoadColumnFamiliesOnDemand(value);
937  }
938
939  /**
940   * Compile the table and column family (i.e. schema) information
941   * into a String. Useful for parsing and aggregation by debugging,
942   * logging, and administration tools.
943   * @return Map
944   */
945  @Override
946  public Map<String, Object> getFingerprint() {
947    Map<String, Object> map = new HashMap<>();
948    List<String> families = new ArrayList<>();
949    if(this.familyMap.isEmpty()) {
950      map.put("families", "ALL");
951      return map;
952    } else {
953      map.put("families", families);
954    }
955    for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
956        this.familyMap.entrySet()) {
957      families.add(Bytes.toStringBinary(entry.getKey()));
958    }
959    return map;
960  }
961
962  /**
963   * Compile the details beyond the scope of getFingerprint (row, columns,
964   * timestamps, etc.) into a Map along with the fingerprinted information.
965   * Useful for debugging, logging, and administration tools.
966   * @param maxCols a limit on the number of columns output prior to truncation
967   * @return Map
968   */
969  @Override
970  public Map<String, Object> toMap(int maxCols) {
    // start with the fingerprint map and build on top of it
972    Map<String, Object> map = getFingerprint();
973    // map from families to column list replaces fingerprint's list of families
974    Map<String, List<String>> familyColumns = new HashMap<>();
975    map.put("families", familyColumns);
976    // add scalar information first
977    map.put("startRow", Bytes.toStringBinary(this.startRow));
978    map.put("stopRow", Bytes.toStringBinary(this.stopRow));
979    map.put("maxVersions", this.maxVersions);
980    map.put("batch", this.batch);
981    map.put("caching", this.caching);
982    map.put("maxResultSize", this.maxResultSize);
983    map.put("cacheBlocks", this.cacheBlocks);
984    map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
985    List<Long> timeRange = new ArrayList<>(2);
986    timeRange.add(this.tr.getMin());
987    timeRange.add(this.tr.getMax());
988    map.put("timeRange", timeRange);
989    int colCount = 0;
990    // iterate through affected families and list out up to maxCols columns
991    for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
992      this.familyMap.entrySet()) {
993      List<String> columns = new ArrayList<>();
994      familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
995      if(entry.getValue() == null) {
996        colCount++;
997        --maxCols;
998        columns.add("ALL");
999      } else {
1000        colCount += entry.getValue().size();
1001        if (maxCols <= 0) {
1002          continue;
1003        }
1004        for (byte [] column : entry.getValue()) {
1005          if (--maxCols <= 0) {
1006            continue;
1007          }
1008          columns.add(Bytes.toStringBinary(column));
1009        }
1010      }
1011    }
1012    map.put("totalColumns", colCount);
1013    if (this.filter != null) {
1014      map.put("filter", this.filter.toString());
1015    }
1016    // add the id if set
1017    if (getId() != null) {
1018      map.put("id", getId());
1019    }
1020    return map;
1021  }
1022
1023  /**
1024   * Enable/disable "raw" mode for this scan.
   * If "raw" is enabled the scan will return all delete markers and deleted cells that have not
   * been collected yet. This is mostly useful for a Scan on column families that have
   * KEEP_DELETED_CELLS enabled.
1030   * It is an error to specify any column when "raw" is set.
1031   * @param raw True/False to enable/disable "raw" mode.
1032   */
1033  public Scan setRaw(boolean raw) {
1034    setAttribute(RAW_ATTR, Bytes.toBytes(raw));
1035    return this;
1036  }
1037
1038  /**
1039   * @return True if this Scan is in "raw" mode.
1040   */
1041  public boolean isRaw() {
1042    byte[] attr = getAttribute(RAW_ATTR);
1043    return attr == null ? false : Bytes.toBoolean(attr);
1044  }
1045
1046  /**
   * Set whether this scan is a small scan.
   * <p>
   * A small scan should use pread, while a big scan can use seek + read. Seek + read is fast but
   * can cause two problems: (1) resource contention and (2) too much network I/O. [89-fb] Using
   * pread for non-compaction read requests: https://issues.apache.org/jira/browse/HBASE-7266. On
   * the other hand, if this is set to true, we do openScanner, next and closeScanner in one RPC
   * call, which means better performance for a small scan. [HBASE-9488] Generally, if the scan
   * range is within one data block (64KB), it can be considered a small scan.
   * @param small true to mark this scan as a small scan
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #setLimit(int)} and
   *   {@link #setReadType(ReadType)} instead. For the one-rpc optimization, data is now also
   *   fetched when opening the scanner, and if the number of rows reaches the limit the scanner
   *   is closed automatically, which means we fall back to one rpc.
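   *   A hedged sketch of the suggested replacement (the limit value is illustrative):
   *   <pre>{@code
   *   Scan scan = new Scan()
   *       .setLimit(10)                      // stop after 10 rows have been returned
   *       .setReadType(Scan.ReadType.PREAD); // read with pread, as a small scan would
   *   }</pre>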
1060   * @see #setLimit(int)
1061   * @see #setReadType(ReadType)
1062   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17045">HBASE-17045</a>
1063   */
1064  @Deprecated
1065  public Scan setSmall(boolean small) {
1066    this.small = small;
1067    this.readType = ReadType.PREAD;
1068    return this;
1069  }
1070
1071  /**
1072   * Get whether this scan is a small scan
1073   * @return true if small scan
1074   * @deprecated since 2.0.0 and will be removed in 3.0.0. See the comment of
1075   *   {@link #setSmall(boolean)}
1076   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17045">HBASE-17045</a>
1077   */
1078  @Deprecated
1079  public boolean isSmall() {
1080    return small;
1081  }
1082
1083  @Override
1084  public Scan setAttribute(String name, byte[] value) {
1085    return (Scan) super.setAttribute(name, value);
1086  }
1087
1088  @Override
1089  public Scan setId(String id) {
1090    return (Scan) super.setId(id);
1091  }
1092
1093  @Override
1094  public Scan setAuthorizations(Authorizations authorizations) {
1095    return (Scan) super.setAuthorizations(authorizations);
1096  }
1097
1098  @Override
1099  public Scan setACL(Map<String, Permission> perms) {
1100    return (Scan) super.setACL(perms);
1101  }
1102
1103  @Override
1104  public Scan setACL(String user, Permission perms) {
1105    return (Scan) super.setACL(user, perms);
1106  }
1107
1108  @Override
1109  public Scan setConsistency(Consistency consistency) {
1110    return (Scan) super.setConsistency(consistency);
1111  }
1112
1113  @Override
  public Scan setReplicaId(int id) {
    return (Scan) super.setReplicaId(id);
1116  }
1117
1118  @Override
1119  public Scan setIsolationLevel(IsolationLevel level) {
1120    return (Scan) super.setIsolationLevel(level);
1121  }
1122
1123  @Override
1124  public Scan setPriority(int priority) {
1125    return (Scan) super.setPriority(priority);
1126  }
1127
1128  /**
1129   * Enable collection of {@link ScanMetrics}. For advanced users.
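   * <p>
   * A sketch of collecting and reading metrics (assumes an existing {@link Table} named
   * {@code table}; reading them back via the {@link ResultScanner} is the non-deprecated path):
   * <pre>{@code
   * Scan scan = new Scan().setScanMetricsEnabled(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // consume the results; metrics accumulate while scanning
   *   }
   *   ScanMetrics metrics = scanner.getScanMetrics();
   *   // inspect the counters exposed by ScanMetrics, e.g. RPC call counts
   * }
   * }</pre>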
1130   * @param enabled Set to true to enable accumulating scan metrics
1131   */
1132  public Scan setScanMetricsEnabled(final boolean enabled) {
1133    setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
1134    return this;
1135  }
1136
1137  /**
1138   * @return True if collection of scan metrics is enabled. For advanced users.
1139   */
1140  public boolean isScanMetricsEnabled() {
1141    byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
1142    return attr == null ? false : Bytes.toBoolean(attr);
1143  }
1144
1145  /**
1146   * @return Metrics on this Scan, if metrics were enabled.
1147   * @see #setScanMetricsEnabled(boolean)
   * @deprecated Use {@link ResultScanner#getScanMetrics()} instead. Note that this method and
   *             {@link ResultScanner#getScanMetrics()} should not be used together; otherwise
   *             the metrics will be mixed up.
1151   */
1152  @Deprecated
1153  public ScanMetrics getScanMetrics() {
1154    byte[] bytes = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
1155    if (bytes == null) return null;
1156    return ProtobufUtil.toScanMetrics(bytes);
1157  }
1158
1159  public Boolean isAsyncPrefetch() {
1160    return asyncPrefetch;
1161  }
1162
1163  public Scan setAsyncPrefetch(boolean asyncPrefetch) {
1164    this.asyncPrefetch = asyncPrefetch;
1165    return this;
1166  }
1167
1168  /**
1169   * @return the limit of rows for this scan
1170   */
1171  public int getLimit() {
1172    return limit;
1173  }
1174
1175  /**
1176   * Set the limit of rows for this scan. We will terminate the scan if the number of returned rows
1177   * reaches this value.
1178   * <p>
   * This condition will be tested last, after all other conditions such as stopRow, filter, etc.
1180   * @param limit the limit of rows for this scan
1181   * @return this
1182   */
1183  public Scan setLimit(int limit) {
1184    this.limit = limit;
1185    return this;
1186  }
1187
1188  /**
1189   * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
1190   * set {@code readType} to {@link ReadType#PREAD}.
1191   * @return this
1192   */
1193  public Scan setOneRowLimit() {
1194    return setLimit(1).setReadType(ReadType.PREAD);
1195  }
1196
1197  @InterfaceAudience.Public
1198  public enum ReadType {
1199    DEFAULT, STREAM, PREAD
1200  }
1201
1202  /**
1203   * @return the read type for this scan
1204   */
1205  public ReadType getReadType() {
1206    return readType;
1207  }
1208
1209  /**
1210   * Set the read type for this scan.
1211   * <p>
   * Notice that we may choose to use pread even if you specify {@link ReadType#STREAM} here. For
   * example, we will always use pread if this is a get scan.
1214   * @return this
1215   */
1216  public Scan setReadType(ReadType readType) {
1217    this.readType = readType;
1218    return this;
1219  }
1220
1221  /**
1222   * Get the mvcc read point used to open a scanner.
1223   */
1224  long getMvccReadPoint() {
1225    return mvccReadPoint;
1226  }
1227
1228  /**
1229   * Set the mvcc read point used to open a scanner.
1230   */
1231  Scan setMvccReadPoint(long mvccReadPoint) {
1232    this.mvccReadPoint = mvccReadPoint;
1233    return this;
1234  }
1235
1236  /**
1237   * Set the mvcc read point to -1 which means do not use it.
1238   */
1239  Scan resetMvccReadPoint() {
1240    return setMvccReadPoint(-1L);
1241  }
1242
1243  /**
   * When the server is slow, or we scan a table with much deleted data, or we use a sparse
   * filter, the server will respond with heartbeats to prevent a timeout. However, the scanner
   * will return a Result only when the client can build one. So if there are many heartbeats, the
   * blocking time on ResultScanner#next() may be very long, which is not friendly to online
   * services.
   *
   * Set this to true and you can get a special Result whose #isCursor() returns true and which
   * does not contain any real data. It only tells you where the server has scanned. You can call
   * next to continue scanning, or open a new scanner with this row key as the start row, whenever
   * you want.
   *
   * Users get a cursor if and only if there is a response from the server but we cannot return a
   * Result to users, for example when the response is a heartbeat or when there are partial cells
   * but the user does not allow partial results.
   *
   * Currently the cursor is at row level, which means the special Result will only contain a row
   * key.
1258   * {@link Result#isCursor()}
1259   * {@link Result#getCursor()}
1260   * {@link Cursor}
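   * <p>
   * A sketch of consuming cursor results (assumes an existing {@link Table} named {@code table}):
   * <pre>{@code
   * Scan scan = new Scan().setNeedCursorResult(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     if (result.isCursor()) {
   *       // No real cells, only the server's current position; useful for progress reporting
   *       // or for resuming later via createScanFromCursor(result.getCursor()).
   *       byte[] progressRow = result.getCursor().getRow();
   *     } else {
   *       // A normal Result carrying real data.
   *     }
   *   }
   * }
   * }</pre>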
1261   */
1262  public Scan setNeedCursorResult(boolean needCursorResult) {
1263    this.needCursorResult = needCursorResult;
1264    return this;
1265  }
1266
1267  public boolean isNeedCursorResult() {
1268    return needCursorResult;
1269  }
1270
1271  /**
   * Create a new Scan with a cursor. It only sets the position information, such as the start row
   * key. The rest (such as column families, stop row, limit) should still be filled in by the
   * user.
1274   * {@link Result#isCursor()}
1275   * {@link Result#getCursor()}
1276   * {@link Cursor}
1277   */
1278  public static Scan createScanFromCursor(Cursor cursor) {
1279    return new Scan().withStartRow(cursor.getRow());
1280  }
1281}