001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.client; 019 020import java.io.IOException; 021import java.util.ArrayList; 022import java.util.HashMap; 023import java.util.List; 024import java.util.Map; 025import java.util.NavigableSet; 026import java.util.TreeMap; 027import java.util.TreeSet; 028import org.apache.hadoop.hbase.HConstants; 029import org.apache.hadoop.hbase.client.metrics.ScanMetrics; 030import org.apache.hadoop.hbase.filter.Filter; 031import org.apache.hadoop.hbase.filter.IncompatibleFilterException; 032import org.apache.hadoop.hbase.io.TimeRange; 033import org.apache.hadoop.hbase.security.access.Permission; 034import org.apache.hadoop.hbase.security.visibility.Authorizations; 035import org.apache.hadoop.hbase.util.Bytes; 036import org.apache.yetus.audience.InterfaceAudience; 037import org.slf4j.Logger; 038import org.slf4j.LoggerFactory; 039 040/** 041 * Used to perform Scan operations. 042 * <p> 043 * All operations are identical to {@link Get} with the exception of instantiation. Rather than 044 * specifying a single row, an optional startRow and stopRow may be defined. If rows are not 045 * specified, the Scanner will iterate over all rows. 046 * <p> 047 * To get all columns from all rows of a Table, create an instance with no constraints; use the 048 * {@link #Scan()} constructor. To constrain the scan to specific column families, call 049 * {@link #addFamily(byte[]) addFamily} for each family to retrieve on your Scan instance. 050 * <p> 051 * To get specific columns, call {@link #addColumn(byte[], byte[]) addColumn} for each column to 052 * retrieve. 053 * <p> 054 * To only retrieve columns within a specific range of version timestamps, call 055 * {@link #setTimeRange(long, long) setTimeRange}. 056 * <p> 057 * To only retrieve columns with a specific timestamp, call {@link #setTimestamp(long) setTimestamp} 058 * . 059 * <p> 060 * To limit the number of versions of each column to be returned, call {@link #readVersions(int)}. 061 * <p> 062 * To limit the maximum number of values returned for each call to next(), call 063 * {@link #setBatch(int) setBatch}. 064 * <p> 065 * To add a filter, call {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}. 066 * <p> 067 * For small scan, it is deprecated in 2.0.0. Now we have a {@link #setLimit(int)} method in Scan 068 * object which is used to tell RS how many rows we want. If the rows return reaches the limit, the 069 * RS will close the RegionScanner automatically. And we will also fetch data when openScanner in 070 * the new implementation, this means we can also finish a scan operation in one rpc call. And we 071 * have also introduced a {@link #setReadType(ReadType)} method. You can use this method to tell RS 072 * to use pread explicitly. 073 * <p> 074 * Expert: To explicitly disable server-side block caching for this scan, execute 075 * {@link #setCacheBlocks(boolean)}. 076 * <p> 077 * <em>Note:</em> Usage alters Scan instances. Internally, attributes are updated as the Scan runs 078 * and if enabled, metrics accumulate in the Scan instance. Be aware this is the case when you go to 079 * clone a Scan instance or if you go to reuse a created Scan instance; safer is create a Scan 080 * instance per usage. 081 */ 082@InterfaceAudience.Public 083public class Scan extends Query { 084 private static final Logger LOG = LoggerFactory.getLogger(Scan.class); 085 086 private static final String RAW_ATTR = "_raw_"; 087 088 private byte[] startRow = HConstants.EMPTY_START_ROW; 089 private boolean includeStartRow = true; 090 private byte[] stopRow = HConstants.EMPTY_END_ROW; 091 private boolean includeStopRow = false; 092 private int maxVersions = 1; 093 private int batch = -1; 094 095 /** 096 * Partial {@link Result}s are {@link Result}s must be combined to form a complete {@link Result}. 097 * The {@link Result}s had to be returned in fragments (i.e. as partials) because the size of the 098 * cells in the row exceeded max result size on the server. Typically partial results will be 099 * combined client side into complete results before being delivered to the caller. However, if 100 * this flag is set, the caller is indicating that they do not mind seeing partial results (i.e. 101 * they understand that the results returned from the Scanner may only represent part of a 102 * particular row). In such a case, any attempt to combine the partials into a complete result on 103 * the client side will be skipped, and the caller will be able to see the exact results returned 104 * from the server. 105 */ 106 private boolean allowPartialResults = false; 107 108 private int storeLimit = -1; 109 private int storeOffset = 0; 110 111 private static final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable"; 112 113 // If an application wants to use multiple scans over different tables each scan must 114 // define this attribute with the appropriate table name by calling 115 // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName)) 116 static public final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name"; 117 118 /** 119 * -1 means no caching specified and the value of {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} 120 * (default to {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_CACHING}) will be used 121 */ 122 private int caching = -1; 123 private long maxResultSize = -1; 124 private boolean cacheBlocks = true; 125 private boolean reversed = false; 126 private TimeRange tr = TimeRange.allTime(); 127 private Map<byte[], NavigableSet<byte[]>> familyMap = 128 new TreeMap<byte[], NavigableSet<byte[]>>(Bytes.BYTES_COMPARATOR); 129 private Boolean asyncPrefetch = null; 130 131 /** 132 * Parameter name for client scanner sync/async prefetch toggle. When using async scanner, 133 * prefetching data from the server is done at the background. The parameter currently won't have 134 * any effect in the case that the user has set Scan#setSmall or Scan#setReversed 135 */ 136 public static final String HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = 137 "hbase.client.scanner.async.prefetch"; 138 139 /** 140 * Default value of {@link #HBASE_CLIENT_SCANNER_ASYNC_PREFETCH}. 141 */ 142 public static final boolean DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = false; 143 144 /** 145 * The mvcc read point to use when open a scanner. Remember to clear it after switching regions as 146 * the mvcc is only valid within region scope. 147 */ 148 private long mvccReadPoint = -1L; 149 150 /** 151 * The number of rows we want for this scan. We will terminate the scan if the number of return 152 * rows reaches this value. 153 */ 154 private int limit = -1; 155 156 /** 157 * Control whether to use pread at server side. 158 */ 159 private ReadType readType = ReadType.DEFAULT; 160 161 private boolean needCursorResult = false; 162 163 /** 164 * Create a Scan operation across all rows. 165 */ 166 public Scan() { 167 } 168 169 /** 170 * Creates a new instance of this class while copying all values. 171 * @param scan The scan instance to copy from. 172 * @throws IOException When copying the values fails. 173 */ 174 public Scan(Scan scan) throws IOException { 175 startRow = scan.getStartRow(); 176 includeStartRow = scan.includeStartRow(); 177 stopRow = scan.getStopRow(); 178 includeStopRow = scan.includeStopRow(); 179 maxVersions = scan.getMaxVersions(); 180 batch = scan.getBatch(); 181 storeLimit = scan.getMaxResultsPerColumnFamily(); 182 storeOffset = scan.getRowOffsetPerColumnFamily(); 183 caching = scan.getCaching(); 184 maxResultSize = scan.getMaxResultSize(); 185 cacheBlocks = scan.getCacheBlocks(); 186 filter = scan.getFilter(); // clone? 187 loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue(); 188 consistency = scan.getConsistency(); 189 this.setIsolationLevel(scan.getIsolationLevel()); 190 reversed = scan.isReversed(); 191 asyncPrefetch = scan.isAsyncPrefetch(); 192 allowPartialResults = scan.getAllowPartialResults(); 193 tr = scan.getTimeRange(); // TimeRange is immutable 194 Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap(); 195 for (Map.Entry<byte[], NavigableSet<byte[]>> entry : fams.entrySet()) { 196 byte[] fam = entry.getKey(); 197 NavigableSet<byte[]> cols = entry.getValue(); 198 if (cols != null && cols.size() > 0) { 199 for (byte[] col : cols) { 200 addColumn(fam, col); 201 } 202 } else { 203 addFamily(fam); 204 } 205 } 206 for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) { 207 setAttribute(attr.getKey(), attr.getValue()); 208 } 209 for (Map.Entry<byte[], TimeRange> entry : scan.getColumnFamilyTimeRange().entrySet()) { 210 TimeRange tr = entry.getValue(); 211 setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax()); 212 } 213 this.mvccReadPoint = scan.getMvccReadPoint(); 214 this.limit = scan.getLimit(); 215 this.needCursorResult = scan.isNeedCursorResult(); 216 setPriority(scan.getPriority()); 217 readType = scan.getReadType(); 218 super.setReplicaId(scan.getReplicaId()); 219 } 220 221 /** 222 * Builds a scan object with the same specs as get. 223 * @param get get to model scan after 224 */ 225 public Scan(Get get) { 226 this.startRow = get.getRow(); 227 this.includeStartRow = true; 228 this.stopRow = get.getRow(); 229 this.includeStopRow = true; 230 this.filter = get.getFilter(); 231 this.cacheBlocks = get.getCacheBlocks(); 232 this.maxVersions = get.getMaxVersions(); 233 this.storeLimit = get.getMaxResultsPerColumnFamily(); 234 this.storeOffset = get.getRowOffsetPerColumnFamily(); 235 this.tr = get.getTimeRange(); 236 this.familyMap = get.getFamilyMap(); 237 this.asyncPrefetch = false; 238 this.consistency = get.getConsistency(); 239 this.setIsolationLevel(get.getIsolationLevel()); 240 this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue(); 241 for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) { 242 setAttribute(attr.getKey(), attr.getValue()); 243 } 244 for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) { 245 TimeRange tr = entry.getValue(); 246 setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax()); 247 } 248 this.mvccReadPoint = -1L; 249 setPriority(get.getPriority()); 250 super.setReplicaId(get.getReplicaId()); 251 } 252 253 public boolean isGetScan() { 254 return includeStartRow && includeStopRow 255 && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow); 256 } 257 258 /** 259 * Get all columns from the specified family. 260 * <p> 261 * Overrides previous calls to addColumn for this family. 262 * @param family family name n 263 */ 264 public Scan addFamily(byte[] family) { 265 familyMap.remove(family); 266 familyMap.put(family, null); 267 return this; 268 } 269 270 /** 271 * Get the column from the specified family with the specified qualifier. 272 * <p> 273 * Overrides previous calls to addFamily for this family. 274 * @param family family name 275 * @param qualifier column qualifier n 276 */ 277 public Scan addColumn(byte[] family, byte[] qualifier) { 278 NavigableSet<byte[]> set = familyMap.get(family); 279 if (set == null) { 280 set = new TreeSet<>(Bytes.BYTES_COMPARATOR); 281 familyMap.put(family, set); 282 } 283 if (qualifier == null) { 284 qualifier = HConstants.EMPTY_BYTE_ARRAY; 285 } 286 set.add(qualifier); 287 return this; 288 } 289 290 /** 291 * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp). Note, 292 * default maximum versions to return is 1. If your time range spans more than one version and you 293 * want all versions returned, up the number of versions beyond the default. 294 * @param minStamp minimum timestamp value, inclusive 295 * @param maxStamp maximum timestamp value, exclusive 296 * @see #readAllVersions() 297 * @see #readVersions(int) n 298 */ 299 public Scan setTimeRange(long minStamp, long maxStamp) throws IOException { 300 tr = TimeRange.between(minStamp, maxStamp); 301 return this; 302 } 303 304 /** 305 * Get versions of columns with the specified timestamp. Note, default maximum versions to return 306 * is 1. If your time range spans more than one version and you want all versions returned, up the 307 * number of versions beyond the defaut. 308 * @param timestamp version timestamp 309 * @see #readAllVersions() 310 * @see #readVersions(int) n 311 */ 312 public Scan setTimestamp(long timestamp) { 313 try { 314 tr = TimeRange.at(timestamp); 315 } catch (Exception e) { 316 // This should never happen, unless integer overflow or something extremely wrong... 317 LOG.error("TimeRange failed, likely caused by integer overflow. ", e); 318 throw e; 319 } 320 321 return this; 322 } 323 324 @Override 325 public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { 326 return (Scan) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp); 327 } 328 329 /** 330 * Set the start row of the scan. 331 * <p> 332 * If the specified row does not exist, the Scanner will start from the next closest row after the 333 * specified row. 334 * <p> 335 * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or 336 * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result 337 * unexpected or even undefined. 338 * </p> 339 * @param startRow row to start scanner at or after n * @throws IllegalArgumentException if 340 * startRow does not meet criteria for a row key (when length exceeds 341 * {@link HConstants#MAX_ROW_LENGTH}) 342 */ 343 public Scan withStartRow(byte[] startRow) { 344 return withStartRow(startRow, true); 345 } 346 347 /** 348 * Set the start row of the scan. 349 * <p> 350 * If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner 351 * will start from the next closest row after the specified row. 352 * <p> 353 * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or 354 * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result 355 * unexpected or even undefined. 356 * </p> 357 * @param startRow row to start scanner at or after 358 * @param inclusive whether we should include the start row when scan n * @throws 359 * IllegalArgumentException if startRow does not meet criteria for a row key 360 * (when length exceeds {@link HConstants#MAX_ROW_LENGTH}) 361 */ 362 public Scan withStartRow(byte[] startRow, boolean inclusive) { 363 if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) { 364 throw new IllegalArgumentException("startRow's length must be less than or equal to " 365 + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key."); 366 } 367 this.startRow = startRow; 368 this.includeStartRow = inclusive; 369 return this; 370 } 371 372 /** 373 * Set the stop row of the scan. 374 * <p> 375 * The scan will include rows that are lexicographically less than the provided stopRow. 376 * <p> 377 * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or 378 * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result 379 * unexpected or even undefined. 380 * </p> 381 * @param stopRow row to end at (exclusive) n * @throws IllegalArgumentException if stopRow does 382 * not meet criteria for a row key (when length exceeds 383 * {@link HConstants#MAX_ROW_LENGTH}) 384 */ 385 public Scan withStopRow(byte[] stopRow) { 386 return withStopRow(stopRow, false); 387 } 388 389 /** 390 * Set the stop row of the scan. 391 * <p> 392 * The scan will include rows that are lexicographically less than (or equal to if 393 * {@code inclusive} is {@code true}) the provided stopRow. 394 * <p> 395 * <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or 396 * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result 397 * unexpected or even undefined. 398 * </p> 399 * @param stopRow row to end at 400 * @param inclusive whether we should include the stop row when scan n * @throws 401 * IllegalArgumentException if stopRow does not meet criteria for a row key (when 402 * length exceeds {@link HConstants#MAX_ROW_LENGTH}) 403 */ 404 public Scan withStopRow(byte[] stopRow, boolean inclusive) { 405 if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) { 406 throw new IllegalArgumentException("stopRow's length must be less than or equal to " 407 + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key."); 408 } 409 this.stopRow = stopRow; 410 this.includeStopRow = inclusive; 411 return this; 412 } 413 414 /** 415 * <p> 416 * Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey 417 * starts with the specified prefix. 418 * </p> 419 * <p> 420 * This is a utility method that converts the desired rowPrefix into the appropriate values for 421 * the startRow and stopRow to achieve the desired result. 422 * </p> 423 * <p> 424 * This can safely be used in combination with setFilter. 425 * </p> 426 * <p> 427 * <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such 428 * a combination will yield unexpected and even undefined results. 429 * </p> 430 * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.) n 431 * * @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method 432 * is considered to be confusing as it does not use a {@link Filter} but uses 433 * setting the startRow and stopRow instead. Use 434 * {@link #setStartStopRowForPrefixScan(byte[])} instead. 435 */ 436 @Deprecated 437 public Scan setRowPrefixFilter(byte[] rowPrefix) { 438 return setStartStopRowForPrefixScan(rowPrefix); 439 } 440 441 /** 442 * <p> 443 * Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey 444 * starts with the specified prefix. 445 * </p> 446 * <p> 447 * This is a utility method that converts the desired rowPrefix into the appropriate values for 448 * the startRow and stopRow to achieve the desired result. 449 * </p> 450 * <p> 451 * This can safely be used in combination with setFilter. 452 * </p> 453 * <p> 454 * <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such 455 * a combination will yield unexpected and even undefined results. 456 * </p> 457 * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.) n 458 */ 459 public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) { 460 if (rowPrefix == null) { 461 withStartRow(HConstants.EMPTY_START_ROW); 462 withStopRow(HConstants.EMPTY_END_ROW); 463 } else { 464 this.withStartRow(rowPrefix); 465 this.withStopRow(ClientUtil.calculateTheClosestNextRowKeyForPrefix(rowPrefix)); 466 } 467 return this; 468 } 469 470 /** 471 * Get all available versions. n 472 */ 473 public Scan readAllVersions() { 474 this.maxVersions = Integer.MAX_VALUE; 475 return this; 476 } 477 478 /** 479 * Get up to the specified number of versions of each column. 480 * @param versions specified number of versions for each column n 481 */ 482 public Scan readVersions(int versions) { 483 this.maxVersions = versions; 484 return this; 485 } 486 487 /** 488 * Set the maximum number of cells to return for each call to next(). Callers should be aware that 489 * this is not equivalent to calling {@link #setAllowPartialResults(boolean)}. If you don't allow 490 * partial results, the number of cells in each Result must equal to your batch setting unless it 491 * is the last Result for current row. So this method is helpful in paging queries. If you just 492 * want to prevent OOM at client, use setAllowPartialResults(true) is better. 493 * @param batch the maximum number of values 494 * @see Result#mayHaveMoreCellsInRow() 495 */ 496 public Scan setBatch(int batch) { 497 if (this.hasFilter() && this.filter.hasFilterRow()) { 498 throw new IncompatibleFilterException( 499 "Cannot set batch on a scan using a filter" + " that returns true for filter.hasFilterRow"); 500 } 501 this.batch = batch; 502 return this; 503 } 504 505 /** 506 * Set the maximum number of values to return per row per Column Family 507 * @param limit the maximum number of values returned / row / CF 508 */ 509 public Scan setMaxResultsPerColumnFamily(int limit) { 510 this.storeLimit = limit; 511 return this; 512 } 513 514 /** 515 * Set offset for the row per Column Family. 516 * @param offset is the number of kvs that will be skipped. 517 */ 518 public Scan setRowOffsetPerColumnFamily(int offset) { 519 this.storeOffset = offset; 520 return this; 521 } 522 523 /** 524 * Set the number of rows for caching that will be passed to scanners. If not set, the 525 * Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will apply. Higher 526 * caching values will enable faster scanners but will use more memory. 527 * @param caching the number of rows for caching 528 */ 529 public Scan setCaching(int caching) { 530 this.caching = caching; 531 return this; 532 } 533 534 /** 535 * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)} 536 */ 537 public long getMaxResultSize() { 538 return maxResultSize; 539 } 540 541 /** 542 * Set the maximum result size. The default is -1; this means that no specific maximum result size 543 * will be set for this scan, and the global configured value will be used instead. (Defaults to 544 * unlimited). 545 * @param maxResultSize The maximum result size in bytes. 546 */ 547 public Scan setMaxResultSize(long maxResultSize) { 548 this.maxResultSize = maxResultSize; 549 return this; 550 } 551 552 @Override 553 public Scan setFilter(Filter filter) { 554 super.setFilter(filter); 555 return this; 556 } 557 558 /** 559 * Setting the familyMap 560 * @param familyMap map of family to qualifier n 561 */ 562 public Scan setFamilyMap(Map<byte[], NavigableSet<byte[]>> familyMap) { 563 this.familyMap = familyMap; 564 return this; 565 } 566 567 /** 568 * Getting the familyMap n 569 */ 570 public Map<byte[], NavigableSet<byte[]>> getFamilyMap() { 571 return this.familyMap; 572 } 573 574 /** 575 * @return the number of families in familyMap 576 */ 577 public int numFamilies() { 578 if (hasFamilies()) { 579 return this.familyMap.size(); 580 } 581 return 0; 582 } 583 584 /** 585 * @return true if familyMap is non empty, false otherwise 586 */ 587 public boolean hasFamilies() { 588 return !this.familyMap.isEmpty(); 589 } 590 591 /** 592 * @return the keys of the familyMap 593 */ 594 public byte[][] getFamilies() { 595 if (hasFamilies()) { 596 return this.familyMap.keySet().toArray(new byte[0][0]); 597 } 598 return null; 599 } 600 601 /** 602 * @return the startrow 603 */ 604 public byte[] getStartRow() { 605 return this.startRow; 606 } 607 608 /** 609 * @return if we should include start row when scan 610 */ 611 public boolean includeStartRow() { 612 return includeStartRow; 613 } 614 615 /** 616 * @return the stoprow 617 */ 618 public byte[] getStopRow() { 619 return this.stopRow; 620 } 621 622 /** 623 * @return if we should include stop row when scan 624 */ 625 public boolean includeStopRow() { 626 return includeStopRow; 627 } 628 629 /** 630 * @return the max number of versions to fetch 631 */ 632 public int getMaxVersions() { 633 return this.maxVersions; 634 } 635 636 /** 637 * @return maximum number of values to return for a single call to next() 638 */ 639 public int getBatch() { 640 return this.batch; 641 } 642 643 /** 644 * @return maximum number of values to return per row per CF 645 */ 646 public int getMaxResultsPerColumnFamily() { 647 return this.storeLimit; 648 } 649 650 /** 651 * Method for retrieving the scan's offset per row per column family (#kvs to be skipped) 652 * @return row offset 653 */ 654 public int getRowOffsetPerColumnFamily() { 655 return this.storeOffset; 656 } 657 658 /** 659 * @return caching the number of rows fetched when calling next on a scanner 660 */ 661 public int getCaching() { 662 return this.caching; 663 } 664 665 /** 666 * n 667 */ 668 public TimeRange getTimeRange() { 669 return this.tr; 670 } 671 672 /** 673 * n 674 */ 675 @Override 676 public Filter getFilter() { 677 return filter; 678 } 679 680 /** 681 * @return true is a filter has been specified, false if not 682 */ 683 public boolean hasFilter() { 684 return filter != null; 685 } 686 687 /** 688 * Set whether blocks should be cached for this Scan. 689 * <p> 690 * This is true by default. When true, default settings of the table and family are used (this 691 * will never override caching blocks if the block cache is disabled for that family or entirely). 692 * @param cacheBlocks if false, default settings are overridden and blocks will not be cached 693 */ 694 public Scan setCacheBlocks(boolean cacheBlocks) { 695 this.cacheBlocks = cacheBlocks; 696 return this; 697 } 698 699 /** 700 * Get whether blocks should be cached for this Scan. 701 * @return true if default caching should be used, false if blocks should not be cached 702 */ 703 public boolean getCacheBlocks() { 704 return cacheBlocks; 705 } 706 707 /** 708 * Set whether this scan is a reversed one 709 * <p> 710 * This is false by default which means forward(normal) scan. 711 * @param reversed if true, scan will be backward order n 712 */ 713 public Scan setReversed(boolean reversed) { 714 this.reversed = reversed; 715 return this; 716 } 717 718 /** 719 * Get whether this scan is a reversed one. 720 * @return true if backward scan, false if forward(default) scan 721 */ 722 public boolean isReversed() { 723 return reversed; 724 } 725 726 /** 727 * Setting whether the caller wants to see the partial results when server returns 728 * less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client. By 729 * default this value is false and the complete results will be assembled client side before being 730 * delivered to the caller. nn * @see Result#mayHaveMoreCellsInRow() 731 * @see #setBatch(int) 732 */ 733 public Scan setAllowPartialResults(final boolean allowPartialResults) { 734 this.allowPartialResults = allowPartialResults; 735 return this; 736 } 737 738 /** 739 * @return true when the constructor of this scan understands that the results they will see may 740 * only represent a partial portion of a row. The entire row would be retrieved by 741 * subsequent calls to {@link ResultScanner#next()} 742 */ 743 public boolean getAllowPartialResults() { 744 return allowPartialResults; 745 } 746 747 @Override 748 public Scan setLoadColumnFamiliesOnDemand(boolean value) { 749 return (Scan) super.setLoadColumnFamiliesOnDemand(value); 750 } 751 752 /** 753 * Compile the table and column family (i.e. schema) information into a String. Useful for parsing 754 * and aggregation by debugging, logging, and administration tools. n 755 */ 756 @Override 757 public Map<String, Object> getFingerprint() { 758 Map<String, Object> map = new HashMap<>(); 759 List<String> families = new ArrayList<>(); 760 if (this.familyMap.isEmpty()) { 761 map.put("families", "ALL"); 762 return map; 763 } else { 764 map.put("families", families); 765 } 766 for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) { 767 families.add(Bytes.toStringBinary(entry.getKey())); 768 } 769 return map; 770 } 771 772 /** 773 * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a 774 * Map along with the fingerprinted information. Useful for debugging, logging, and administration 775 * tools. 776 * @param maxCols a limit on the number of columns output prior to truncation n 777 */ 778 @Override 779 public Map<String, Object> toMap(int maxCols) { 780 // start with the fingerpring map and build on top of it 781 Map<String, Object> map = getFingerprint(); 782 // map from families to column list replaces fingerprint's list of families 783 Map<String, List<String>> familyColumns = new HashMap<>(); 784 map.put("families", familyColumns); 785 // add scalar information first 786 map.put("startRow", Bytes.toStringBinary(this.startRow)); 787 map.put("stopRow", Bytes.toStringBinary(this.stopRow)); 788 map.put("maxVersions", this.maxVersions); 789 map.put("batch", this.batch); 790 map.put("caching", this.caching); 791 map.put("maxResultSize", this.maxResultSize); 792 map.put("cacheBlocks", this.cacheBlocks); 793 map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand); 794 List<Long> timeRange = new ArrayList<>(2); 795 timeRange.add(this.tr.getMin()); 796 timeRange.add(this.tr.getMax()); 797 map.put("timeRange", timeRange); 798 int colCount = 0; 799 // iterate through affected families and list out up to maxCols columns 800 for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) { 801 List<String> columns = new ArrayList<>(); 802 familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns); 803 if (entry.getValue() == null) { 804 colCount++; 805 --maxCols; 806 columns.add("ALL"); 807 } else { 808 colCount += entry.getValue().size(); 809 if (maxCols <= 0) { 810 continue; 811 } 812 for (byte[] column : entry.getValue()) { 813 if (--maxCols <= 0) { 814 continue; 815 } 816 columns.add(Bytes.toStringBinary(column)); 817 } 818 } 819 } 820 map.put("totalColumns", colCount); 821 if (this.filter != null) { 822 map.put("filter", this.filter.toString()); 823 } 824 // add the id if set 825 if (getId() != null) { 826 map.put("id", getId()); 827 } 828 return map; 829 } 830 831 /** 832 * Enable/disable "raw" mode for this scan. If "raw" is enabled the scan will return all delete 833 * marker and deleted rows that have not been collected, yet. This is mostly useful for Scan on 834 * column families that have KEEP_DELETED_ROWS enabled. It is an error to specify any column when 835 * "raw" is set. 836 * @param raw True/False to enable/disable "raw" mode. 837 */ 838 public Scan setRaw(boolean raw) { 839 setAttribute(RAW_ATTR, Bytes.toBytes(raw)); 840 return this; 841 } 842 843 /** 844 * @return True if this Scan is in "raw" mode. 845 */ 846 public boolean isRaw() { 847 byte[] attr = getAttribute(RAW_ATTR); 848 return attr == null ? false : Bytes.toBoolean(attr); 849 } 850 851 @Override 852 public Scan setAttribute(String name, byte[] value) { 853 return (Scan) super.setAttribute(name, value); 854 } 855 856 @Override 857 public Scan setId(String id) { 858 return (Scan) super.setId(id); 859 } 860 861 @Override 862 public Scan setAuthorizations(Authorizations authorizations) { 863 return (Scan) super.setAuthorizations(authorizations); 864 } 865 866 @Override 867 public Scan setACL(Map<String, Permission> perms) { 868 return (Scan) super.setACL(perms); 869 } 870 871 @Override 872 public Scan setACL(String user, Permission perms) { 873 return (Scan) super.setACL(user, perms); 874 } 875 876 @Override 877 public Scan setConsistency(Consistency consistency) { 878 return (Scan) super.setConsistency(consistency); 879 } 880 881 @Override 882 public Scan setReplicaId(int Id) { 883 return (Scan) super.setReplicaId(Id); 884 } 885 886 @Override 887 public Scan setIsolationLevel(IsolationLevel level) { 888 return (Scan) super.setIsolationLevel(level); 889 } 890 891 @Override 892 public Scan setPriority(int priority) { 893 return (Scan) super.setPriority(priority); 894 } 895 896 /** 897 * Enable collection of {@link ScanMetrics}. For advanced users. 898 * @param enabled Set to true to enable accumulating scan metrics 899 */ 900 public Scan setScanMetricsEnabled(final boolean enabled) { 901 setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled))); 902 return this; 903 } 904 905 /** 906 * @return True if collection of scan metrics is enabled. For advanced users. 907 */ 908 public boolean isScanMetricsEnabled() { 909 byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE); 910 return attr == null ? false : Bytes.toBoolean(attr); 911 } 912 913 public Boolean isAsyncPrefetch() { 914 return asyncPrefetch; 915 } 916 917 /** 918 * @deprecated Since 3.0.0, will be removed in 4.0.0. After building sync client upon async 919 * client, the implementation is always 'async prefetch', so this flag is useless now. 920 */ 921 @Deprecated 922 public Scan setAsyncPrefetch(boolean asyncPrefetch) { 923 this.asyncPrefetch = asyncPrefetch; 924 return this; 925 } 926 927 /** 928 * @return the limit of rows for this scan 929 */ 930 public int getLimit() { 931 return limit; 932 } 933 934 /** 935 * Set the limit of rows for this scan. We will terminate the scan if the number of returned rows 936 * reaches this value. 937 * <p> 938 * This condition will be tested at last, after all other conditions such as stopRow, filter, etc. 939 * @param limit the limit of rows for this scan n 940 */ 941 public Scan setLimit(int limit) { 942 this.limit = limit; 943 return this; 944 } 945 946 /** 947 * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also 948 * set {@code readType} to {@link ReadType#PREAD}. n 949 */ 950 public Scan setOneRowLimit() { 951 return setLimit(1).setReadType(ReadType.PREAD); 952 } 953 954 @InterfaceAudience.Public 955 public enum ReadType { 956 DEFAULT, 957 STREAM, 958 PREAD 959 } 960 961 /** 962 * @return the read type for this scan 963 */ 964 public ReadType getReadType() { 965 return readType; 966 } 967 968 /** 969 * Set the read type for this scan. 970 * <p> 971 * Notice that we may choose to use pread even if you specific {@link ReadType#STREAM} here. For 972 * example, we will always use pread if this is a get scan. n 973 */ 974 public Scan setReadType(ReadType readType) { 975 this.readType = readType; 976 return this; 977 } 978 979 /** 980 * Get the mvcc read point used to open a scanner. 981 */ 982 long getMvccReadPoint() { 983 return mvccReadPoint; 984 } 985 986 /** 987 * Set the mvcc read point used to open a scanner. 988 */ 989 Scan setMvccReadPoint(long mvccReadPoint) { 990 this.mvccReadPoint = mvccReadPoint; 991 return this; 992 } 993 994 /** 995 * Set the mvcc read point to -1 which means do not use it. 996 */ 997 Scan resetMvccReadPoint() { 998 return setMvccReadPoint(-1L); 999 } 1000 1001 /** 1002 * When the server is slow or we scan a table with many deleted data or we use a sparse filter, 1003 * the server will response heartbeat to prevent timeout. However the scanner will return a Result 1004 * only when client can do it. So if there are many heartbeats, the blocking time on 1005 * ResultScanner#next() may be very long, which is not friendly to online services. Set this to 1006 * true then you can get a special Result whose #isCursor() returns true and is not contains any 1007 * real data. It only tells you where the server has scanned. You can call next to continue 1008 * scanning or open a new scanner with this row key as start row whenever you want. Users can get 1009 * a cursor when and only when there is a response from the server but we can not return a Result 1010 * to users, for example, this response is a heartbeat or there are partial cells but users do not 1011 * allow partial result. Now the cursor is in row level which means the special Result will only 1012 * contains a row key. {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor} 1013 */ 1014 public Scan setNeedCursorResult(boolean needCursorResult) { 1015 this.needCursorResult = needCursorResult; 1016 return this; 1017 } 1018 1019 public boolean isNeedCursorResult() { 1020 return needCursorResult; 1021 } 1022 1023 /** 1024 * Create a new Scan with a cursor. It only set the position information like start row key. The 1025 * others (like cfs, stop row, limit) should still be filled in by the user. 1026 * {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor} 1027 */ 1028 public static Scan createScanFromCursor(Cursor cursor) { 1029 return new Scan().withStartRow(cursor.getRow()); 1030 } 1031}