/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Used to perform Scan operations.
 * <p>
 * All operations are identical to {@link Get} with the exception of instantiation. Rather than
 * specifying a single row, an optional startRow and stopRow may be defined. If rows are not
 * specified, the Scanner will iterate over all rows.
 * <p>
 * To get all columns from all rows of a Table, create an instance with no constraints; use the
 * {@link #Scan()} constructor. To constrain the scan to specific column families, call
 * {@link #addFamily(byte[]) addFamily} for each family to retrieve on your Scan instance.
 * <p>
 * To get specific columns, call {@link #addColumn(byte[], byte[]) addColumn} for each column to
 * retrieve.
 * <p>
 * To only retrieve columns within a specific range of version timestamps, call
 * {@link #setTimeRange(long, long) setTimeRange}.
 * <p>
 * To only retrieve columns with a specific timestamp, call
 * {@link #setTimestamp(long) setTimestamp}.
 * <p>
 * To limit the number of versions of each column to be returned, call {@link #readVersions(int)}.
 * <p>
 * To limit the maximum number of values returned for each call to next(), call
 * {@link #setBatch(int) setBatch}.
 * <p>
 * To add a filter, call {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}.
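 * <p>
 * For example, a scan over a single column family and a bounded row-key range could be set up and
 * run roughly as sketched below; the {@code table} reference, family and row keys are only
 * illustrative:
 *
 * <pre>{@code
 * Scan scan = new Scan()
 *     .addFamily(Bytes.toBytes("cf"))
 *     .withStartRow(Bytes.toBytes("row-0000"))
 *     .withStopRow(Bytes.toBytes("row-0100"));
 * try (ResultScanner scanner = table.getScanner(scan)) {
 *   for (Result result : scanner) {
 *     // process each Result
 *   }
 * }
 * }</pre>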
 * <p>
 * Small scans are deprecated since 2.0.0. Instead, use the {@link #setLimit(int)} method on the
 * Scan object to tell the RegionServer how many rows are wanted; once the number of returned rows
 * reaches the limit, the RegionServer will close the RegionScanner automatically. The new
 * implementation also fetches data when opening the scanner, which means a scan operation can
 * complete in a single rpc call. In addition, a {@link #setReadType(ReadType)} method has been
 * introduced; you can use it to tell the RegionServer to use pread explicitly.
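 * <p>
 * For example, what used to be configured as a "small" scan can be expressed roughly as sketched
 * below; the row keys and the limit are only illustrative:
 *
 * <pre>{@code
 * Scan scan = new Scan()
 *     .withStartRow(Bytes.toBytes("row-0000"))
 *     .withStopRow(Bytes.toBytes("row-0010"))
 *     .setReadType(ReadType.PREAD)
 *     .setLimit(10);
 * }</pre>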
 * <p>
 * Expert: To explicitly disable server-side block caching for this scan, execute
 * {@link #setCacheBlocks(boolean)}.
 * <p>
 * <em>Note:</em> Usage alters Scan instances. Internally, attributes are updated as the Scan runs
 * and, if enabled, metrics accumulate in the Scan instance. Be aware of this when you clone a Scan
 * instance or reuse a created Scan instance; it is safer to create a new Scan instance per usage.
 */
@InterfaceAudience.Public
public class Scan extends Query {
  private static final Logger LOG = LoggerFactory.getLogger(Scan.class);

  private static final String RAW_ATTR = "_raw_";

  private byte[] startRow = HConstants.EMPTY_START_ROW;
  private boolean includeStartRow = true;
  private byte[] stopRow = HConstants.EMPTY_END_ROW;
  private boolean includeStopRow = false;
  private int maxVersions = 1;
  private int batch = -1;

  /**
   * Partial {@link Result}s are {@link Result}s that must be combined to form a complete
   * {@link Result}. The {@link Result}s had to be returned in fragments (i.e. as partials) because
   * the size of the cells in the row exceeded max result size on the server. Typically partial
   * results will be combined client side into complete results before being delivered to the
   * caller. However, if this flag is set, the caller is indicating that they do not mind seeing
   * partial results (i.e. they understand that the results returned from the Scanner may only
   * represent part of a particular row). In such a case, any attempt to combine the partials into
   * a complete result on the client side will be skipped, and the caller will be able to see the
   * exact results returned from the server.
   */
  private boolean allowPartialResults = false;

  private int storeLimit = -1;
  private int storeOffset = 0;

  private static final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";

  // If an application wants to use multiple scans over different tables each scan must
  // define this attribute with the appropriate table name by calling
  // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
  public static final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";

  /**
   * -1 means no caching specified and the value of {@link HConstants#HBASE_CLIENT_SCANNER_CACHING}
   * (default to {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_CACHING}) will be used
   */
  private int caching = -1;
  private long maxResultSize = -1;
  private boolean cacheBlocks = true;
  private boolean reversed = false;
  private TimeRange tr = TimeRange.allTime();
  private Map<byte[], NavigableSet<byte[]>> familyMap =
    new TreeMap<byte[], NavigableSet<byte[]>>(Bytes.BYTES_COMPARATOR);
  private Boolean asyncPrefetch = null;

  /**
   * Parameter name for client scanner sync/async prefetch toggle. When using the async scanner,
   * prefetching data from the server is done in the background. The parameter currently has no
   * effect if the user has set Scan#setSmall or Scan#setReversed.
   */
  public static final String HBASE_CLIENT_SCANNER_ASYNC_PREFETCH =
    "hbase.client.scanner.async.prefetch";

  /**
   * Default value of {@link #HBASE_CLIENT_SCANNER_ASYNC_PREFETCH}.
   */
  public static final boolean DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = false;

  /**
   * Set it true for a small scan to get better performance. A small scan should use pread, while a
   * big scan can use seek + read. Seek + read is fast but can cause two problems: (1) resource
   * contention and (2) too much network io. [89-fb] Using pread for non-compaction read request
   * https://issues.apache.org/jira/browse/HBASE-7266. On the other hand, if this is set to true, we
   * would do openScanner, next, closeScanner in one RPC call, which means better performance for a
   * small scan. [HBASE-9488]. Generally, if the scan range is within one data block (64KB), it
   * could be considered as a small scan.
   */
  private boolean small = false;

  /**
   * The mvcc read point to use when opening a scanner. Remember to clear it after switching
   * regions as the mvcc is only valid within region scope.
   */
  private long mvccReadPoint = -1L;

  /**
   * The number of rows we want for this scan. We will terminate the scan if the number of returned
   * rows reaches this value.
   */
  private int limit = -1;

  /**
   * Control whether to use pread at server side.
   */
  private ReadType readType = ReadType.DEFAULT;

  private boolean needCursorResult = false;

  /**
   * Create a Scan operation across all rows.
   */
  public Scan() {}

  /**
   * Creates a new instance of this class while copying all values.
   *
   * @param scan The scan instance to copy from.
   * @throws IOException When copying the values fails.
   */
  public Scan(Scan scan) throws IOException {
    startRow = scan.getStartRow();
    includeStartRow = scan.includeStartRow();
    stopRow = scan.getStopRow();
    includeStopRow = scan.includeStopRow();
    maxVersions = scan.getMaxVersions();
    batch = scan.getBatch();
    storeLimit = scan.getMaxResultsPerColumnFamily();
    storeOffset = scan.getRowOffsetPerColumnFamily();
    caching = scan.getCaching();
    maxResultSize = scan.getMaxResultSize();
    cacheBlocks = scan.getCacheBlocks();
    filter = scan.getFilter(); // clone?
    loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
    consistency = scan.getConsistency();
    this.setIsolationLevel(scan.getIsolationLevel());
    reversed = scan.isReversed();
    asyncPrefetch = scan.isAsyncPrefetch();
    small = scan.isSmall();
    allowPartialResults = scan.getAllowPartialResults();
    tr = scan.getTimeRange(); // TimeRange is immutable
    Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : fams.entrySet()) {
      byte[] fam = entry.getKey();
      NavigableSet<byte[]> cols = entry.getValue();
      if (cols != null && cols.size() > 0) {
        for (byte[] col : cols) {
          addColumn(fam, col);
        }
      } else {
        addFamily(fam);
      }
    }
    for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
      setAttribute(attr.getKey(), attr.getValue());
    }
    for (Map.Entry<byte[], TimeRange> entry : scan.getColumnFamilyTimeRange().entrySet()) {
      TimeRange tr = entry.getValue();
      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
    }
    this.mvccReadPoint = scan.getMvccReadPoint();
    this.limit = scan.getLimit();
    this.needCursorResult = scan.isNeedCursorResult();
    setPriority(scan.getPriority());
    readType = scan.getReadType();
    super.setReplicaId(scan.getReplicaId());
  }

  /**
   * Builds a scan object with the same specs as get.
   * @param get get to model scan after
   */
  public Scan(Get get) {
    this.startRow = get.getRow();
    this.includeStartRow = true;
    this.stopRow = get.getRow();
    this.includeStopRow = true;
    this.filter = get.getFilter();
    this.cacheBlocks = get.getCacheBlocks();
    this.maxVersions = get.getMaxVersions();
    this.storeLimit = get.getMaxResultsPerColumnFamily();
    this.storeOffset = get.getRowOffsetPerColumnFamily();
    this.tr = get.getTimeRange();
    this.familyMap = get.getFamilyMap();
    this.asyncPrefetch = false;
    this.consistency = get.getConsistency();
    this.setIsolationLevel(get.getIsolationLevel());
    this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
    for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
      setAttribute(attr.getKey(), attr.getValue());
    }
    for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) {
      TimeRange tr = entry.getValue();
      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
    }
    this.mvccReadPoint = -1L;
    setPriority(get.getPriority());
    super.setReplicaId(get.getReplicaId());
  }

  public boolean isGetScan() {
    return includeStartRow && includeStopRow
      && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow);
  }

  /**
   * Get all columns from the specified family.
   * <p>
   * Overrides previous calls to addColumn for this family.
   * @param family family name
   * @return this
   */
  public Scan addFamily(byte[] family) {
    familyMap.remove(family);
    familyMap.put(family, null);
    return this;
  }

  /**
   * Get the column from the specified family with the specified qualifier.
   * <p>
   * Overrides previous calls to addFamily for this family.
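   * <p>
   * For example, the following sketch (family and qualifier names are only illustrative) selects
   * two qualifiers from family {@code cf1} and every column of family {@code cf2}:
   *
   * <pre>{@code
   * Scan scan = new Scan()
   *     .addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q1"))
   *     .addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q2"))
   *     .addFamily(Bytes.toBytes("cf2"));
   * }</pre>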
   * @param family family name
   * @param qualifier column qualifier
   * @return this
   */
  public Scan addColumn(byte[] family, byte[] qualifier) {
    NavigableSet<byte[]> set = familyMap.get(family);
    if (set == null) {
      set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
      familyMap.put(family, set);
    }
    if (qualifier == null) {
      qualifier = HConstants.EMPTY_BYTE_ARRAY;
    }
    set.add(qualifier);
    return this;
  }

  /**
   * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp). Note,
   * default maximum versions to return is 1. If your time range spans more than one version and
   * you want all versions returned, up the number of versions beyond the default.
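   * <p>
   * For example, a sketch that reads every version written during an illustrative one-hour window:
   *
   * <pre>{@code
   * long end = System.currentTimeMillis();
   * long start = end - 60 * 60 * 1000L;
   * Scan scan = new Scan()
   *     .setTimeRange(start, end)
   *     .readAllVersions();
   * }</pre>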
   * @param minStamp minimum timestamp value, inclusive
   * @param maxStamp maximum timestamp value, exclusive
   * @see #readAllVersions()
   * @see #readVersions(int)
   * @return this
   */
  public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
    tr = TimeRange.between(minStamp, maxStamp);
    return this;
  }

  /**
   * Get versions of columns with the specified timestamp. Note, default maximum versions to return
   * is 1. If your time range spans more than one version and you want all versions returned, up
   * the number of versions beyond the default.
   * @param timestamp version timestamp
   * @see #readAllVersions()
   * @see #readVersions(int)
   * @return this
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
   *             {@link #setTimestamp(long)} instead
   */
  @Deprecated
  public Scan setTimeStamp(long timestamp) throws IOException {
    return this.setTimestamp(timestamp);
  }

  /**
   * Get versions of columns with the specified timestamp. Note, default maximum versions to return
   * is 1. If your time range spans more than one version and you want all versions returned, up
   * the number of versions beyond the default.
   * @param timestamp version timestamp
   * @see #readAllVersions()
   * @see #readVersions(int)
   * @return this
   */
  public Scan setTimestamp(long timestamp) {
    try {
      tr = TimeRange.at(timestamp);
    } catch (Exception e) {
      // This should never happen, unless integer overflow or something extremely wrong...
      LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
      throw e;
    }

    return this;
  }

  @Override
  public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
    return (Scan) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
  }

  /**
   * Set the start row of the scan.
   * <p>
   * If the specified row does not exist, the Scanner will start from the next closest row after
   * the specified row.
   * @param startRow row to start scanner at or after
   * @return this
   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStartRow(byte[] startRow) {
    return withStartRow(startRow, true);
  }

  /**
   * Set the start row of the scan.
   * <p>
   * If the specified row does not exist, or {@code inclusive} is {@code false}, the Scanner will
   * start from the next closest row after the specified row.
   * <p>
   * <b>Note:</b> When used with {@link #setRowPrefixFilter(byte[])}, the result might be
   * unexpected.
   * </p>
   * @param startRow row to start scanner at or after
   * @param inclusive whether we should include the start row in the scan
   * @return this
   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStartRow(byte[] startRow, boolean inclusive) {
    if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
      throw new IllegalArgumentException("startRow's length must be less than or equal to "
        + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
    }
    this.startRow = startRow;
    this.includeStartRow = inclusive;
    return this;
  }

  /**
   * Set the stop row of the scan.
   * <p>
   * The scan will include rows that are lexicographically less than the provided stopRow.
   * <p>
   * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
   * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
   * </p>
   * @param stopRow row to end at (exclusive)
   * @return this
   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStopRow(byte[] stopRow) {
    return withStopRow(stopRow, false);
  }

  /**
   * Set the stop row of the scan.
   * <p>
   * The scan will include rows that are lexicographically less than (or equal to if
   * {@code inclusive} is {@code true}) the provided stopRow.
   * @param stopRow row to end at
   * @param inclusive whether we should include the stop row in the scan
   * @return this
   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStopRow(byte[] stopRow, boolean inclusive) {
    if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
      throw new IllegalArgumentException("stopRow's length must be less than or equal to "
        + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
    }
    this.stopRow = stopRow;
    this.includeStopRow = inclusive;
    return this;
  }

  /**
   * <p>
   * Set a filter (using stopRow and startRow) so the result set only contains rows where the
   * rowKey starts with the specified prefix.
   * </p>
   * <p>
   * This is a utility method that converts the desired rowPrefix into the appropriate values for
   * the startRow and stopRow to achieve the desired result.
   * </p>
   * <p>
   * This can safely be used in combination with setFilter.
   * </p>
   * <p>
   * <b>NOTE: Doing a {@link #withStartRow(byte[])} and/or {@link #withStopRow(byte[])} after this
   * method will yield undefined results.</b>
   * </p>
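   * <p>
   * For example, to scan every row whose key starts with the (illustrative) prefix
   * {@code "user-123#"}, on a Scan whose start and stop rows have not been set yet:
   *
   * <pre>{@code
   * Scan scan = new Scan().setRowPrefixFilter(Bytes.toBytes("user-123#"));
   * }</pre>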
   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
   * @return this
   * @deprecated since 3.0.0. The scan result might be unexpected in some cases, e.g. with startRow
   *             "112" and rowPrefixFilter "11", the result of this scan might contain "111". This
   *             method implements the filter by setting startRow and stopRow, but does not take
   *             care of the scenario where startRow has already been set.
   */
  @Deprecated
  public Scan setRowPrefixFilter(byte[] rowPrefix) {
    if (rowPrefix == null) {
      withStartRow(HConstants.EMPTY_START_ROW);
      withStopRow(HConstants.EMPTY_END_ROW);
    } else {
      this.withStartRow(rowPrefix);
      this.withStopRow(ClientUtil.calculateTheClosestNextRowKeyForPrefix(rowPrefix));
    }
    return this;
  }

  /**
   * Get all available versions.
   * @return this
   */
  public Scan readAllVersions() {
    this.maxVersions = Integer.MAX_VALUE;
    return this;
  }

  /**
   * Get up to the specified number of versions of each column.
   * @param versions specified number of versions for each column
   * @return this
   */
  public Scan readVersions(int versions) {
    this.maxVersions = versions;
    return this;
  }

  /**
   * Set the maximum number of cells to return for each call to next(). Callers should be aware
   * that this is not equivalent to calling {@link #setAllowPartialResults(boolean)}. If you don't
   * allow partial results, the number of cells in each Result must be equal to your batch setting
   * unless it is the last Result for the current row. So this method is helpful for paging
   * queries. If you just want to prevent OOM at the client, it is better to use
   * setAllowPartialResults(true).
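   * <p>
   * For example, a sketch that pages through a very wide row five cells at a time; the row key is
   * only illustrative and {@code table} is assumed to be an existing {@code Table} reference:
   *
   * <pre>{@code
   * Scan scan = new Scan()
   *     .withStartRow(Bytes.toBytes("wide-row"), true)
   *     .setBatch(5);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // each Result carries at most 5 cells of the current row
   *   }
   * }
   * }</pre>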
   * @param batch the maximum number of values
   * @see Result#mayHaveMoreCellsInRow()
   */
  public Scan setBatch(int batch) {
    if (this.hasFilter() && this.filter.hasFilterRow()) {
      throw new IncompatibleFilterException(
        "Cannot set batch on a scan using a filter" + " that returns true for filter.hasFilterRow");
    }
    this.batch = batch;
    return this;
  }

  /**
   * Set the maximum number of values to return per row per Column Family
   * @param limit the maximum number of values returned / row / CF
   */
  public Scan setMaxResultsPerColumnFamily(int limit) {
    this.storeLimit = limit;
    return this;
  }

  /**
   * Set offset for the row per Column Family.
   * @param offset is the number of kvs that will be skipped.
   */
  public Scan setRowOffsetPerColumnFamily(int offset) {
    this.storeOffset = offset;
    return this;
  }

  /**
   * Set the number of rows for caching that will be passed to scanners. If not set, the
   * Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will apply. Higher
   * caching values will enable faster scanners but will use more memory.
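   * <p>
   * For example, a sketch that fetches 500 rows per RPC while also capping the response size (both
   * values are only illustrative):
   *
   * <pre>{@code
   * Scan scan = new Scan()
   *     .setCaching(500)
   *     .setMaxResultSize(2 * 1024 * 1024); // 2 MB
   * }</pre>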
   * @param caching the number of rows for caching
   */
  public Scan setCaching(int caching) {
    this.caching = caching;
    return this;
  }

  /**
   * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)}
   */
  public long getMaxResultSize() {
    return maxResultSize;
  }

  /**
   * Set the maximum result size. The default is -1; this means that no specific maximum result
   * size will be set for this scan, and the global configured value will be used instead.
   * (Defaults to unlimited).
   * @param maxResultSize The maximum result size in bytes.
   */
  public Scan setMaxResultSize(long maxResultSize) {
    this.maxResultSize = maxResultSize;
    return this;
  }

  @Override
  public Scan setFilter(Filter filter) {
    super.setFilter(filter);
    return this;
  }

  /**
   * Setting the familyMap
   * @param familyMap map of family to qualifier
   * @return this
   */
  public Scan setFamilyMap(Map<byte[], NavigableSet<byte[]>> familyMap) {
    this.familyMap = familyMap;
    return this;
  }

  /**
   * Getting the familyMap
   * @return familyMap
   */
  public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
    return this.familyMap;
  }

  /**
   * @return the number of families in familyMap
   */
  public int numFamilies() {
    if (hasFamilies()) {
      return this.familyMap.size();
    }
    return 0;
  }

  /**
   * @return true if familyMap is non empty, false otherwise
   */
  public boolean hasFamilies() {
    return !this.familyMap.isEmpty();
  }

  /**
   * @return the keys of the familyMap
   */
  public byte[][] getFamilies() {
    if (hasFamilies()) {
      return this.familyMap.keySet().toArray(new byte[0][0]);
    }
    return null;
  }

  /**
   * @return the startrow
   */
  public byte[] getStartRow() {
    return this.startRow;
  }

  /**
   * @return whether we should include the start row in the scan
   */
  public boolean includeStartRow() {
    return includeStartRow;
  }

  /**
   * @return the stoprow
   */
  public byte[] getStopRow() {
    return this.stopRow;
  }

  /**
   * @return whether we should include the stop row in the scan
   */
  public boolean includeStopRow() {
    return includeStopRow;
  }

  /**
   * @return the max number of versions to fetch
   */
  public int getMaxVersions() {
    return this.maxVersions;
  }

  /**
   * @return maximum number of values to return for a single call to next()
   */
  public int getBatch() {
    return this.batch;
  }

  /**
   * @return maximum number of values to return per row per CF
   */
  public int getMaxResultsPerColumnFamily() {
    return this.storeLimit;
  }

  /**
   * Method for retrieving the scan's offset per row per column family (#kvs to be skipped)
   * @return row offset
   */
  public int getRowOffsetPerColumnFamily() {
    return this.storeOffset;
  }

  /**
   * @return caching the number of rows fetched when calling next on a scanner
   */
  public int getCaching() {
    return this.caching;
  }

  /**
   * @return TimeRange
   */
  public TimeRange getTimeRange() {
    return this.tr;
  }

  /**
   * @return the filter, or null if none has been specified
   */
  @Override
  public Filter getFilter() {
    return filter;
  }

  /**
   * @return true if a filter has been specified, false if not
   */
  public boolean hasFilter() {
    return filter != null;
  }

  /**
   * Set whether blocks should be cached for this Scan.
   * <p>
   * This is true by default. When true, default settings of the table and family are used (this
   * will never override caching blocks if the block cache is disabled for that family or
   * entirely).
   * @param cacheBlocks if false, default settings are overridden and blocks will not be cached
   */
  public Scan setCacheBlocks(boolean cacheBlocks) {
    this.cacheBlocks = cacheBlocks;
    return this;
  }

  /**
   * Get whether blocks should be cached for this Scan.
   * @return true if default caching should be used, false if blocks should not be cached
   */
  public boolean getCacheBlocks() {
    return cacheBlocks;
  }

  /**
   * Set whether this scan is a reversed one
   * <p>
   * This is false by default which means forward(normal) scan.
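   * <p>
   * For example, a sketch of a reversed scan; note that for a reversed scan the start row should
   * be lexicographically larger than the stop row (the row keys are only illustrative):
   *
   * <pre>{@code
   * Scan scan = new Scan()
   *     .setReversed(true)
   *     .withStartRow(Bytes.toBytes("row-0100"))
   *     .withStopRow(Bytes.toBytes("row-0000"));
   * }</pre>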
   * @param reversed if true, scan will be in backward order
   * @return this
   */
  public Scan setReversed(boolean reversed) {
    this.reversed = reversed;
    return this;
  }

  /**
   * Get whether this scan is a reversed one.
   * @return true if backward scan, false if forward(default) scan
   */
  public boolean isReversed() {
    return reversed;
  }

  /**
   * Setting whether the caller wants to see the partial results when server returns
   * less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client. By
   * default this value is false and the complete results will be assembled client side before
   * being delivered to the caller.
   * @param allowPartialResults whether to return partial results to the caller
   * @return this
   * @see Result#mayHaveMoreCellsInRow()
   * @see #setBatch(int)
   */
  public Scan setAllowPartialResults(final boolean allowPartialResults) {
    this.allowPartialResults = allowPartialResults;
    return this;
  }

  /**
   * @return true when the caller of this scan understands that the results they will see may only
   *         represent a partial portion of a row. The entire row would be retrieved by subsequent
   *         calls to {@link ResultScanner#next()}
   */
  public boolean getAllowPartialResults() {
    return allowPartialResults;
  }

  @Override
  public Scan setLoadColumnFamiliesOnDemand(boolean value) {
    return (Scan) super.setLoadColumnFamiliesOnDemand(value);
  }

  /**
   * Compile the table and column family (i.e. schema) information into a String. Useful for
   * parsing and aggregation by debugging, logging, and administration tools.
   * @return Map
   */
  @Override
  public Map<String, Object> getFingerprint() {
    Map<String, Object> map = new HashMap<>();
    List<String> families = new ArrayList<>();
    if (this.familyMap.isEmpty()) {
      map.put("families", "ALL");
      return map;
    } else {
      map.put("families", families);
    }
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
      families.add(Bytes.toStringBinary(entry.getKey()));
    }
    return map;
  }

  /**
   * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
   * Map along with the fingerprinted information. Useful for debugging, logging, and
   * administration tools.
   * @param maxCols a limit on the number of columns output prior to truncation
   * @return Map
   */
  @Override
  public Map<String, Object> toMap(int maxCols) {
    // start with the fingerprint map and build on top of it
    Map<String, Object> map = getFingerprint();
    // map from families to column list replaces fingerprint's list of families
    Map<String, List<String>> familyColumns = new HashMap<>();
    map.put("families", familyColumns);
    // add scalar information first
    map.put("startRow", Bytes.toStringBinary(this.startRow));
    map.put("stopRow", Bytes.toStringBinary(this.stopRow));
    map.put("maxVersions", this.maxVersions);
    map.put("batch", this.batch);
    map.put("caching", this.caching);
    map.put("maxResultSize", this.maxResultSize);
    map.put("cacheBlocks", this.cacheBlocks);
    map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
    List<Long> timeRange = new ArrayList<>(2);
    timeRange.add(this.tr.getMin());
    timeRange.add(this.tr.getMax());
    map.put("timeRange", timeRange);
    int colCount = 0;
    // iterate through affected families and list out up to maxCols columns
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
      List<String> columns = new ArrayList<>();
      familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
      if (entry.getValue() == null) {
        colCount++;
        --maxCols;
        columns.add("ALL");
      } else {
        colCount += entry.getValue().size();
        if (maxCols <= 0) {
          continue;
        }
        for (byte[] column : entry.getValue()) {
          if (--maxCols <= 0) {
            continue;
          }
          columns.add(Bytes.toStringBinary(column));
        }
      }
    }
    map.put("totalColumns", colCount);
    if (this.filter != null) {
      map.put("filter", this.filter.toString());
    }
    // add the id if set
    if (getId() != null) {
      map.put("id", getId());
    }
    return map;
  }

  /**
   * Enable/disable "raw" mode for this scan. If "raw" is enabled the scan will return all delete
   * markers and deleted rows that have not been collected yet. This is mostly useful for Scan on
   * column families that have KEEP_DELETED_CELLS enabled. It is an error to specify any column
   * when "raw" is set.
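   * <p>
   * For example, a sketch that surfaces delete markers for a whole family (the family name is only
   * illustrative); note that only families, not individual columns, may be specified together with
   * "raw":
   *
   * <pre>{@code
   * Scan scan = new Scan()
   *     .setRaw(true)
   *     .readAllVersions()
   *     .addFamily(Bytes.toBytes("cf"));
   * }</pre>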
   * @param raw True/False to enable/disable "raw" mode.
   */
  public Scan setRaw(boolean raw) {
    setAttribute(RAW_ATTR, Bytes.toBytes(raw));
    return this;
  }

  /**
   * @return True if this Scan is in "raw" mode.
   */
  public boolean isRaw() {
    byte[] attr = getAttribute(RAW_ATTR);
    return attr == null ? false : Bytes.toBoolean(attr);
  }

  /**
   * Set whether this scan is a small scan
   * <p>
   * A small scan should use pread, while a big scan can use seek + read. Seek + read is fast but
   * can cause two problems: (1) resource contention and (2) too much network io. [89-fb] Using
   * pread for non-compaction read request https://issues.apache.org/jira/browse/HBASE-7266. On the
   * other hand, if this is set to true, we would do openScanner, next, closeScanner in one RPC
   * call, which means better performance for a small scan. [HBASE-9488]. Generally, if the scan
   * range is within one data block (64KB), it could be considered as a small scan.
   * @param small whether this is a small scan
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #setLimit(int)} and
   *             {@link #setReadType(ReadType)} instead. And for the one rpc optimization, now we
   *             will also fetch data when opening the scanner, and if the number of rows reaches
   *             the limit then we will close the scanner automatically, which means we will fall
   *             back to one rpc.
   * @see #setLimit(int)
   * @see #setReadType(ReadType)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17045">HBASE-17045</a>
   */
  @Deprecated
  public Scan setSmall(boolean small) {
    this.small = small;
    this.readType = ReadType.PREAD;
    return this;
  }

  /**
   * Get whether this scan is a small scan
   * @return true if small scan
   * @deprecated since 2.0.0 and will be removed in 3.0.0. See the comment of
   *             {@link #setSmall(boolean)}
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17045">HBASE-17045</a>
   */
  @Deprecated
  public boolean isSmall() {
    return small;
  }

  @Override
  public Scan setAttribute(String name, byte[] value) {
    return (Scan) super.setAttribute(name, value);
  }

  @Override
  public Scan setId(String id) {
    return (Scan) super.setId(id);
  }

  @Override
  public Scan setAuthorizations(Authorizations authorizations) {
    return (Scan) super.setAuthorizations(authorizations);
  }

  @Override
  public Scan setACL(Map<String, Permission> perms) {
    return (Scan) super.setACL(perms);
  }

  @Override
  public Scan setACL(String user, Permission perms) {
    return (Scan) super.setACL(user, perms);
  }

  @Override
  public Scan setConsistency(Consistency consistency) {
    return (Scan) super.setConsistency(consistency);
  }

  @Override
  public Scan setReplicaId(int Id) {
    return (Scan) super.setReplicaId(Id);
  }

  @Override
  public Scan setIsolationLevel(IsolationLevel level) {
    return (Scan) super.setIsolationLevel(level);
  }

  @Override
  public Scan setPriority(int priority) {
    return (Scan) super.setPriority(priority);
  }

  /**
   * Enable collection of {@link ScanMetrics}. For advanced users.
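   * <p>
   * For example, a sketch of reading the accumulated metrics back once the scan has been consumed;
   * {@code table} is assumed to be an existing {@code Table} reference:
   *
   * <pre>{@code
   * Scan scan = new Scan().setScanMetricsEnabled(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // consume the results
   *   }
   *   ScanMetrics metrics = scanner.getScanMetrics();
   * }
   * }</pre>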
   * @param enabled Set to true to enable accumulating scan metrics
   */
  public Scan setScanMetricsEnabled(final boolean enabled) {
    setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
    return this;
  }

  /**
   * @return True if collection of scan metrics is enabled. For advanced users.
   */
  public boolean isScanMetricsEnabled() {
    byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
    return attr == null ? false : Bytes.toBoolean(attr);
  }

  public Boolean isAsyncPrefetch() {
    return asyncPrefetch;
  }

  /**
   * @deprecated Since 3.0.0, will be removed in 4.0.0. After building the sync client upon the
   *             async client, the implementation is always 'async prefetch', so this flag is
   *             useless now.
   */
  @Deprecated
  public Scan setAsyncPrefetch(boolean asyncPrefetch) {
    this.asyncPrefetch = asyncPrefetch;
    return this;
  }

  /**
   * @return the limit of rows for this scan
   */
  public int getLimit() {
    return limit;
  }

  /**
   * Set the limit of rows for this scan. We will terminate the scan if the number of returned rows
   * reaches this value.
   * <p>
   * This condition will be tested last, after all other conditions such as stopRow, filter, etc.
   * @param limit the limit of rows for this scan
   * @return this
   */
  public Scan setLimit(int limit) {
    this.limit = limit;
    return this;
  }

  /**
   * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
   * set {@code readType} to {@link ReadType#PREAD}.
   * @return this
   */
  public Scan setOneRowLimit() {
    return setLimit(1).setReadType(ReadType.PREAD);
  }

  @InterfaceAudience.Public
  public enum ReadType {
    DEFAULT, STREAM, PREAD
  }

  /**
   * @return the read type for this scan
   */
  public ReadType getReadType() {
    return readType;
  }

  /**
   * Set the read type for this scan.
   * <p>
   * Notice that we may choose to use pread even if you specify {@link ReadType#STREAM} here. For
   * example, we will always use pread if this is a get scan.
   * @return this
   */
  public Scan setReadType(ReadType readType) {
    this.readType = readType;
    return this;
  }

  /**
   * Get the mvcc read point used to open a scanner.
   */
  long getMvccReadPoint() {
    return mvccReadPoint;
  }

  /**
   * Set the mvcc read point used to open a scanner.
   */
  Scan setMvccReadPoint(long mvccReadPoint) {
    this.mvccReadPoint = mvccReadPoint;
    return this;
  }

  /**
   * Set the mvcc read point to -1 which means do not use it.
   */
  Scan resetMvccReadPoint() {
    return setMvccReadPoint(-1L);
  }

  /**
   * When the server is slow, or we scan a table with a lot of deleted data, or we use a sparse
   * filter, the server will respond with heartbeats to prevent timeout. However, the scanner will
   * only return a Result to the caller when there is actual data to return. So if there are many
   * heartbeats, the blocking time on ResultScanner#next() may be very long, which is not friendly
   * to online services.
   * <p>
   * Set this to true and you can get a special Result whose #isCursor() returns true and which
   * does not contain any real data. It only tells you where the server has scanned to. You can
   * call next to continue scanning, or open a new scanner with this row key as the start row
   * whenever you want.
   * <p>
   * Users get a cursor when and only when there is a response from the server but we cannot return
   * a Result to users, for example, when the response is a heartbeat or when there are partial
   * cells but the user does not allow partial results.
   * <p>
   * Currently the cursor is at row level, which means the special Result will only contain a row
   * key. {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor}
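   * <p>
   * For example, a sketch that tells cursor results apart from ordinary results while iterating;
   * {@code table} is assumed to be an existing {@code Table} reference:
   *
   * <pre>{@code
   * Scan scan = new Scan().setNeedCursorResult(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     if (result.isCursor()) {
   *       // no data here, only the row key the server has scanned up to
   *       byte[] lastScannedRow = result.getCursor().getRow();
   *     } else {
   *       // an ordinary Result carrying cells
   *     }
   *   }
   * }
   * }</pre>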
   */
  public Scan setNeedCursorResult(boolean needCursorResult) {
    this.needCursorResult = needCursorResult;
    return this;
  }

  public boolean isNeedCursorResult() {
    return needCursorResult;
  }

  /**
   * Create a new Scan with a cursor. It only sets the position information, such as the start row
   * key. The others (like cfs, stop row, limit) should still be filled in by the user.
   * {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor}
   */
  public static Scan createScanFromCursor(Cursor cursor) {
    return new Scan().withStartRow(cursor.getRow());
  }
}