/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;

import org.apache.hadoop.hbase.HConstants;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Used to perform Scan operations.
 * <p>
 * All operations are identical to {@link Get} with the exception of instantiation. Rather than
 * specifying a single row, an optional startRow and stopRow may be defined. If rows are not
 * specified, the Scanner will iterate over all rows.
 * <p>
 * To get all columns from all rows of a Table, create an instance with no constraints; use the
 * {@link #Scan()} constructor. To constrain the scan to specific column families, call
 * {@link #addFamily(byte[]) addFamily} for each family to retrieve on your Scan instance.
 * <p>
 * To get specific columns, call {@link #addColumn(byte[], byte[]) addColumn} for each column to
 * retrieve.
 * <p>
 * To only retrieve columns within a specific range of version timestamps, call
 * {@link #setTimeRange(long, long) setTimeRange}.
 * <p>
 * To only retrieve columns with a specific timestamp, call
 * {@link #setTimestamp(long) setTimestamp}.
 * <p>
 * To limit the number of versions of each column to be returned, call {@link #setMaxVersions(int)
 * setMaxVersions}.
 * <p>
 * To limit the maximum number of values returned for each call to next(), call
 * {@link #setBatch(int) setBatch}.
 * <p>
 * To add a filter, call {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}.
 * <p>
 * Small scans are deprecated in 2.0.0. Instead, use the {@link #setLimit(int)} method to tell the
 * RegionServer how many rows are wanted; once the number of returned rows reaches the limit, the
 * RegionServer closes the RegionScanner automatically. The new implementation also fetches data
 * when opening the scanner, which means a scan operation can finish in a single RPC call. In
 * addition, a {@link #setReadType(ReadType)} method has been introduced; it can be used to tell
 * the RegionServer to use pread explicitly.
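 * <p>
 * A minimal usage sketch (illustrative only; assumes a {@code Table} named {@code table} has
 * already been obtained from a {@code Connection}):
 * <pre>{@code
 * Scan scan = new Scan()
 *     .withStartRow(Bytes.toBytes("row-0000"))          // inclusive by default
 *     .withStopRow(Bytes.toBytes("row-9999"))           // exclusive by default
 *     .addFamily(Bytes.toBytes("cf"))
 *     .setCaching(100);
 * try (ResultScanner scanner = table.getScanner(scan)) {
 *   for (Result result : scanner) {
 *     // process each row's Result here
 *   }
 * }
 * }</pre>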
 * <p>
 * Expert: To explicitly disable server-side block caching for this scan, execute
 * {@link #setCacheBlocks(boolean)}.
 * <p>
 * <em>Note:</em> Usage alters Scan instances. Internally, attributes are updated as the Scan runs
 * and, if enabled, metrics accumulate in the Scan instance. Be aware of this when you clone a Scan
 * instance or reuse a created Scan instance; it is safer to create a Scan instance per usage.
 */
@InterfaceAudience.Public
public class Scan extends Query {
  private static final Logger LOG = LoggerFactory.getLogger(Scan.class);

  private static final String RAW_ATTR = "_raw_";

  private byte[] startRow = HConstants.EMPTY_START_ROW;
  private boolean includeStartRow = true;
  private byte[] stopRow = HConstants.EMPTY_END_ROW;
  private boolean includeStopRow = false;
  private int maxVersions = 1;
  private int batch = -1;

  /**
   * Partial {@link Result}s are {@link Result}s that must be combined to form a complete
   * {@link Result}. The {@link Result}s had to be returned in fragments (i.e. as partials) because
   * the size of the cells in the row exceeded max result size on the server. Typically partial
   * results will be combined client side into complete results before being delivered to the
   * caller. However, if this flag is set, the caller is indicating that they do not mind seeing
   * partial results (i.e. they understand that the results returned from the Scanner may only
   * represent part of a particular row). In such a case, any attempt to combine the partials into
   * a complete result on the client side will be skipped, and the caller will be able to see the
   * exact results returned from the server.
   */
  private boolean allowPartialResults = false;

  private int storeLimit = -1;
  private int storeOffset = 0;

  /**
   * @deprecated since 1.0.0. Use {@link #setScanMetricsEnabled(boolean)}
   */
  // Make private or remove.
  @Deprecated
  static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";

  /**
   * Use {@link #getScanMetrics()}
   */
  // Make this private or remove.
  @Deprecated
  static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data";

  // If an application wants to use multiple scans over different tables each scan must
  // define this attribute with the appropriate table name by calling
  // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
  static public final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";

  /**
   * -1 means no caching specified and the value of {@link HConstants#HBASE_CLIENT_SCANNER_CACHING}
   * (default to {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_CACHING}) will be used
   */
  private int caching = -1;
  private long maxResultSize = -1;
  private boolean cacheBlocks = true;
  private boolean reversed = false;
  private TimeRange tr = TimeRange.allTime();
  private Map<byte [], NavigableSet<byte []>> familyMap =
      new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
  private Boolean asyncPrefetch = null;

  /**
   * Parameter name for the client scanner sync/async prefetch toggle.
   * When using the async scanner, prefetching data from the server is done in the background.
   * The parameter currently has no effect if the user has set Scan#setSmall or Scan#setReversed.
   */
  public static final String HBASE_CLIENT_SCANNER_ASYNC_PREFETCH =
      "hbase.client.scanner.async.prefetch";

  /**
   * Default value of {@link #HBASE_CLIENT_SCANNER_ASYNC_PREFETCH}.
   */
  public static final boolean DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = false;

  /**
   * Set this to true for a small scan to get better performance. A small scan should use pread,
   * while a big scan can use seek + read. Seek + read is fast but can cause two problems: (1)
   * resource contention and (2) too much network io. [89-fb] Using pread for non-compaction read
   * requests: https://issues.apache.org/jira/browse/HBASE-7266. On the other hand, if set to true,
   * openScanner, next and closeScanner are done in one RPC call, which means better performance
   * for small scans [HBASE-9488]. Generally, if the scan range is within one data block (64KB), it
   * can be considered a small scan.
   */
  private boolean small = false;

  /**
   * The mvcc read point to use when opening a scanner. Remember to clear it after switching
   * regions as the mvcc is only valid within region scope.
   */
  private long mvccReadPoint = -1L;

  /**
   * The number of rows we want for this scan. We will terminate the scan if the number of returned
   * rows reaches this value.
   */
  private int limit = -1;

  /**
   * Control whether to use pread at server side.
   */
  private ReadType readType = ReadType.DEFAULT;

  private boolean needCursorResult = false;

  /**
   * Create a Scan operation across all rows.
   */
  public Scan() {}

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
   *   {@code new Scan().withStartRow(startRow).setFilter(filter)} instead.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17320">HBASE-17320</a>
   */
  @Deprecated
  public Scan(byte[] startRow, Filter filter) {
    this(startRow);
    this.filter = filter;
  }

  /**
   * Create a Scan operation starting at the specified row.
   * <p>
   * If the specified row does not exist, the Scanner will start from the next closest row after
   * the specified row.
   * @param startRow row to start scanner at or after
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
   *   {@code new Scan().withStartRow(startRow)} instead.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17320">HBASE-17320</a>
   */
  @Deprecated
  public Scan(byte[] startRow) {
    setStartRow(startRow);
  }

  /**
   * Create a Scan operation for the range of rows specified.
   * @param startRow row to start scanner at or after (inclusive)
   * @param stopRow row to stop scanner before (exclusive)
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
   *   {@code new Scan().withStartRow(startRow).withStopRow(stopRow)} instead.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17320">HBASE-17320</a>
   */
  @Deprecated
  public Scan(byte[] startRow, byte[] stopRow) {
    setStartRow(startRow);
    setStopRow(stopRow);
  }

  /**
   * Creates a new instance of this class while copying all values.
   *
   * @param scan The scan instance to copy from.
   * @throws IOException When copying the values fails.
   */
  public Scan(Scan scan) throws IOException {
    startRow = scan.getStartRow();
    includeStartRow = scan.includeStartRow();
    stopRow = scan.getStopRow();
    includeStopRow = scan.includeStopRow();
    maxVersions = scan.getMaxVersions();
    batch = scan.getBatch();
    storeLimit = scan.getMaxResultsPerColumnFamily();
    storeOffset = scan.getRowOffsetPerColumnFamily();
    caching = scan.getCaching();
    maxResultSize = scan.getMaxResultSize();
    cacheBlocks = scan.getCacheBlocks();
    filter = scan.getFilter(); // clone?
    loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
    consistency = scan.getConsistency();
    this.setIsolationLevel(scan.getIsolationLevel());
    reversed = scan.isReversed();
    asyncPrefetch = scan.isAsyncPrefetch();
    small = scan.isSmall();
    allowPartialResults = scan.getAllowPartialResults();
    tr = scan.getTimeRange(); // TimeRange is immutable
    Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
    for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
      byte [] fam = entry.getKey();
      NavigableSet<byte[]> cols = entry.getValue();
      if (cols != null && cols.size() > 0) {
        for (byte[] col : cols) {
          addColumn(fam, col);
        }
      } else {
        addFamily(fam);
      }
    }
    for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
      setAttribute(attr.getKey(), attr.getValue());
    }
    for (Map.Entry<byte[], TimeRange> entry : scan.getColumnFamilyTimeRange().entrySet()) {
      TimeRange tr = entry.getValue();
      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
    }
    this.mvccReadPoint = scan.getMvccReadPoint();
    this.limit = scan.getLimit();
    this.needCursorResult = scan.isNeedCursorResult();
    setPriority(scan.getPriority());
    readType = scan.getReadType();
    super.setReplicaId(scan.getReplicaId());
  }

  /**
   * Builds a scan object with the same specs as get.
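   * <p>
   * Illustrative sketch (hypothetical row key {@code "row-1"} and family {@code "cf"}):
   * <pre>{@code
   * Get get = new Get(Bytes.toBytes("row-1")).addFamily(Bytes.toBytes("cf"));
   * Scan scan = new Scan(get); // scans exactly the single row described by the Get
   * }</pre>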
   * @param get get to model scan after
   */
  public Scan(Get get) {
    this.startRow = get.getRow();
    this.includeStartRow = true;
    this.stopRow = get.getRow();
    this.includeStopRow = true;
    this.filter = get.getFilter();
    this.cacheBlocks = get.getCacheBlocks();
    this.maxVersions = get.getMaxVersions();
    this.storeLimit = get.getMaxResultsPerColumnFamily();
    this.storeOffset = get.getRowOffsetPerColumnFamily();
    this.tr = get.getTimeRange();
    this.familyMap = get.getFamilyMap();
    this.asyncPrefetch = false;
    this.consistency = get.getConsistency();
    this.setIsolationLevel(get.getIsolationLevel());
    this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
    for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
      setAttribute(attr.getKey(), attr.getValue());
    }
    for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) {
      TimeRange tr = entry.getValue();
      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
    }
    this.mvccReadPoint = -1L;
    setPriority(get.getPriority());
    super.setReplicaId(get.getReplicaId());
  }

  public boolean isGetScan() {
    return includeStartRow && includeStopRow
        && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow);
  }

  /**
   * Get all columns from the specified family.
   * <p>
   * Overrides previous calls to addColumn for this family.
   * @param family family name
   * @return this
   */
  public Scan addFamily(byte [] family) {
    familyMap.remove(family);
    familyMap.put(family, null);
    return this;
  }

  /**
   * Get the column from the specified family with the specified qualifier.
   * <p>
   * Overrides previous calls to addFamily for this family.
   * @param family family name
   * @param qualifier column qualifier
   * @return this
   */
  public Scan addColumn(byte [] family, byte [] qualifier) {
    NavigableSet<byte []> set = familyMap.get(family);
    if (set == null) {
      set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
      familyMap.put(family, set);
    }
    if (qualifier == null) {
      qualifier = HConstants.EMPTY_BYTE_ARRAY;
    }
    set.add(qualifier);
    return this;
  }

  /**
   * Get versions of columns only within the specified timestamp range,
   * [minStamp, maxStamp). Note, the default maximum number of versions to return is 1. If
   * your time range spans more than one version and you want all versions
   * returned, up the number of versions beyond the default.
   * @param minStamp minimum timestamp value, inclusive
   * @param maxStamp maximum timestamp value, exclusive
   * @see #setMaxVersions()
   * @see #setMaxVersions(int)
   * @return this
   */
  public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
    tr = new TimeRange(minStamp, maxStamp);
    return this;
  }

  /**
   * Get versions of columns with the specified timestamp. Note, the default maximum
   * number of versions to return is 1. If your time range spans more than one version
   * and you want all versions returned, up the number of versions beyond the
   * default.
   * @param timestamp version timestamp
   * @see #setMaxVersions()
   * @see #setMaxVersions(int)
   * @return this
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *   Use {@link #setTimestamp(long)} instead.
   */
  @Deprecated
  public Scan setTimeStamp(long timestamp)
      throws IOException {
    return this.setTimestamp(timestamp);
  }

  /**
   * Get versions of columns with the specified timestamp. Note, the default maximum
   * number of versions to return is 1. If your time range spans more than one version
   * and you want all versions returned, up the number of versions beyond the
   * default.
   * @param timestamp version timestamp
   * @see #setMaxVersions()
   * @see #setMaxVersions(int)
   * @return this
   */
  public Scan setTimestamp(long timestamp) {
    try {
      tr = new TimeRange(timestamp, timestamp + 1);
    } catch(Exception e) {
      // This should never happen, unless integer overflow or something extremely wrong...
      LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
      throw e;
    }

    return this;
  }

  @Override public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
    return (Scan) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
  }

  /**
   * Set the start row of the scan.
   * <p>
   * If the specified row does not exist, the Scanner will start from the next closest row after
   * the specified row.
   * @param startRow row to start scanner at or after
   * @return this
   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #withStartRow(byte[])}
   *   instead. This method may change the inclusiveness of the stop row to stay compatible with
   *   the old behavior.
   * @see #withStartRow(byte[])
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17320">HBASE-17320</a>
   */
  @Deprecated
  public Scan setStartRow(byte[] startRow) {
    withStartRow(startRow);
    if (ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow)) {
      // for keeping the old behavior that a scan with the same start and stop row is a get scan.
      this.includeStopRow = true;
    }
    return this;
  }

  /**
   * Set the start row of the scan.
   * <p>
   * If the specified row does not exist, the Scanner will start from the next closest row after
   * the specified row.
   * @param startRow row to start scanner at or after
   * @return this
   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStartRow(byte[] startRow) {
    return withStartRow(startRow, true);
  }

  /**
   * Set the start row of the scan.
   * <p>
   * If the specified row does not exist, or {@code inclusive} is {@code false}, the Scanner
   * will start from the next closest row after the specified row.
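   * <p>
   * Illustrative sketch (hypothetical row key {@code lastRowSeen}): an exclusive start row is a
   * convenient way to resume a scan just after the last row already processed.
   * <pre>{@code
   * Scan resume = new Scan().withStartRow(lastRowSeen, false); // skip lastRowSeen itself
   * }</pre>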
   * @param startRow row to start scanner at or after
   * @param inclusive whether we should include the start row when scanning
   * @return this
   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStartRow(byte[] startRow, boolean inclusive) {
    if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
      throw new IllegalArgumentException("startRow's length must be less than or equal to "
          + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
    }
    this.startRow = startRow;
    this.includeStartRow = inclusive;
    return this;
  }

  /**
   * Set the stop row of the scan.
   * <p>
   * The scan will include rows that are lexicographically less than the provided stopRow.
   * <p>
   * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
   * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
   * </p>
   * @param stopRow row to end at (exclusive)
   * @return this
   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #withStopRow(byte[])} instead.
   *   This method may change the inclusiveness of the stop row to stay compatible with the old
   *   behavior.
   * @see #withStopRow(byte[])
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17320">HBASE-17320</a>
   */
  @Deprecated
  public Scan setStopRow(byte[] stopRow) {
    withStopRow(stopRow);
    if (ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow)) {
      // for keeping the old behavior that a scan with the same start and stop row is a get scan.
      this.includeStopRow = true;
    }
    return this;
  }

  /**
   * Set the stop row of the scan.
   * <p>
   * The scan will include rows that are lexicographically less than the provided stopRow.
   * <p>
   * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
   * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
   * </p>
   * @param stopRow row to end at (exclusive)
   * @return this
   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStopRow(byte[] stopRow) {
    return withStopRow(stopRow, false);
  }

  /**
   * Set the stop row of the scan.
   * <p>
   * The scan will include rows that are lexicographically less than (or equal to if
   * {@code inclusive} is {@code true}) the provided stopRow.
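   * <p>
   * Illustrative sketch: a closed interval over hypothetical row keys {@code "a"} through
   * {@code "m"}, both ends included.
   * <pre>{@code
   * Scan scan = new Scan()
   *     .withStartRow(Bytes.toBytes("a"), true)
   *     .withStopRow(Bytes.toBytes("m"), true);
   * }</pre>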
   * @param stopRow row to end at
   * @param inclusive whether we should include the stop row when scanning
   * @return this
   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStopRow(byte[] stopRow, boolean inclusive) {
    if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
      throw new IllegalArgumentException("stopRow's length must be less than or equal to "
          + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
    }
    this.stopRow = stopRow;
    this.includeStopRow = inclusive;
    return this;
  }

  /**
   * <p>Set a filter (using stopRow and startRow) so the result set only contains rows where the
   * rowKey starts with the specified prefix.</p>
   * <p>This is a utility method that converts the desired rowPrefix into the appropriate values
   * for the startRow and stopRow to achieve the desired result.</p>
   * <p>This can safely be used in combination with setFilter.</p>
   * <p><b>NOTE: Doing a {@link #setStartRow(byte[])} and/or {@link #setStopRow(byte[])}
   * after this method will yield undefined results.</b></p>
   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
   * @return this
   */
  public Scan setRowPrefixFilter(byte[] rowPrefix) {
    if (rowPrefix == null) {
      setStartRow(HConstants.EMPTY_START_ROW);
      setStopRow(HConstants.EMPTY_END_ROW);
    } else {
      this.setStartRow(rowPrefix);
      this.setStopRow(ClientUtil.calculateTheClosestNextRowKeyForPrefix(rowPrefix));
    }
    return this;
  }

  /**
   * Get all available versions.
   * @return this
   * @deprecated since 2.0.0 and will be removed in 3.0.0. It is easy to confuse with a column
   *   family's max versions, so use {@link #readAllVersions()} instead.
   * @see #readAllVersions()
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17125">HBASE-17125</a>
   */
  @Deprecated
  public Scan setMaxVersions() {
    return readAllVersions();
  }

  /**
   * Get up to the specified number of versions of each column.
   * @param maxVersions maximum versions for each column
   * @return this
   * @deprecated since 2.0.0 and will be removed in 3.0.0. It is easy to confuse with a column
   *   family's max versions, so use {@link #readVersions(int)} instead.
   * @see #readVersions(int)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17125">HBASE-17125</a>
   */
  @Deprecated
  public Scan setMaxVersions(int maxVersions) {
    return readVersions(maxVersions);
  }

  /**
   * Get all available versions.
   * @return this
   */
  public Scan readAllVersions() {
    this.maxVersions = Integer.MAX_VALUE;
    return this;
  }

  /**
   * Get up to the specified number of versions of each column.
   * @param versions specified number of versions for each column
   * @return this
   */
  public Scan readVersions(int versions) {
    this.maxVersions = versions;
    return this;
  }

  /**
   * Set the maximum number of cells to return for each call to next(). Callers should be aware
   * that this is not equivalent to calling {@link #setAllowPartialResults(boolean)}.
   * If you don't allow partial results, the number of cells in each Result must be equal to your
   * batch setting unless it is the last Result for the current row. So this method is helpful in
   * paging queries. If you just want to prevent OOM at the client, using
   * setAllowPartialResults(true) is better.
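   * <p>
   * Illustrative sketch (hypothetical wide family {@code "cf"}): page through a wide row 100
   * cells at a time.
   * <pre>{@code
   * Scan scan = new Scan().addFamily(Bytes.toBytes("cf")).setBatch(100);
   * }</pre>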
   * @param batch the maximum number of values
   * @see Result#mayHaveMoreCellsInRow()
   */
  public Scan setBatch(int batch) {
    if (this.hasFilter() && this.filter.hasFilterRow()) {
      throw new IncompatibleFilterException(
        "Cannot set batch on a scan using a filter" +
        " that returns true for filter.hasFilterRow");
    }
    this.batch = batch;
    return this;
  }

  /**
   * Set the maximum number of values to return per row per Column Family
   * @param limit the maximum number of values returned / row / CF
   */
  public Scan setMaxResultsPerColumnFamily(int limit) {
    this.storeLimit = limit;
    return this;
  }

  /**
   * Set offset for the row per Column Family.
   * @param offset is the number of kvs that will be skipped.
   */
  public Scan setRowOffsetPerColumnFamily(int offset) {
    this.storeOffset = offset;
    return this;
  }

  /**
   * Set the number of rows for caching that will be passed to scanners.
   * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
   * apply.
   * Higher caching values will enable faster scanners but will use more memory.
   * @param caching the number of rows for caching
   */
  public Scan setCaching(int caching) {
    this.caching = caching;
    return this;
  }

  /**
   * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)}
   */
  public long getMaxResultSize() {
    return maxResultSize;
  }

  /**
   * Set the maximum result size. The default is -1; this means that no specific
   * maximum result size will be set for this scan, and the global configured
   * value will be used instead. (Defaults to unlimited).
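   * <p>
   * Illustrative sketch: cap the amount of data fetched per RPC at roughly 2 MB.
   * <pre>{@code
   * Scan scan = new Scan().setCaching(500).setMaxResultSize(2L * 1024 * 1024);
   * }</pre>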
   * @param maxResultSize The maximum result size in bytes.
   */
  public Scan setMaxResultSize(long maxResultSize) {
    this.maxResultSize = maxResultSize;
    return this;
  }

  @Override
  public Scan setFilter(Filter filter) {
    super.setFilter(filter);
    return this;
  }

  /**
   * Setting the familyMap
   * @param familyMap map of family to qualifier
   * @return this
   */
  public Scan setFamilyMap(Map<byte [], NavigableSet<byte []>> familyMap) {
    this.familyMap = familyMap;
    return this;
  }

  /**
   * Getting the familyMap
   * @return familyMap
   */
  public Map<byte [], NavigableSet<byte []>> getFamilyMap() {
    return this.familyMap;
  }

  /**
   * @return the number of families in familyMap
   */
  public int numFamilies() {
    if (hasFamilies()) {
      return this.familyMap.size();
    }
    return 0;
  }

  /**
   * @return true if familyMap is non empty, false otherwise
   */
  public boolean hasFamilies() {
    return !this.familyMap.isEmpty();
  }

  /**
   * @return the keys of the familyMap
   */
  public byte[][] getFamilies() {
    if (hasFamilies()) {
      return this.familyMap.keySet().toArray(new byte[0][0]);
    }
    return null;
  }

  /**
   * @return the startrow
   */
  public byte [] getStartRow() {
    return this.startRow;
  }

  /**
   * @return true if we should include the start row when scanning
   */
  public boolean includeStartRow() {
    return includeStartRow;
  }

  /**
   * @return the stoprow
   */
  public byte[] getStopRow() {
    return this.stopRow;
  }

  /**
   * @return true if we should include the stop row when scanning
   */
  public boolean includeStopRow() {
    return includeStopRow;
  }

  /**
   * @return the max number of versions to fetch
   */
  public int getMaxVersions() {
    return this.maxVersions;
  }

  /**
   * @return maximum number of values to return for a single call to next()
   */
  public int getBatch() {
    return this.batch;
  }

  /**
   * @return maximum number of values to return per row per CF
   */
  public int getMaxResultsPerColumnFamily() {
    return this.storeLimit;
  }

  /**
   * Method for retrieving the scan's offset per row per column
   * family (#kvs to be skipped)
   * @return row offset
   */
  public int getRowOffsetPerColumnFamily() {
    return this.storeOffset;
  }

  /**
   * @return caching the number of rows fetched when calling next on a scanner
   */
  public int getCaching() {
    return this.caching;
  }

  /**
   * @return TimeRange
   */
  public TimeRange getTimeRange() {
    return this.tr;
  }

  /**
   * @return RowFilter
   */
  @Override
  public Filter getFilter() {
    return filter;
  }

  /**
   * @return true if a filter has been specified, false if not
   */
  public boolean hasFilter() {
    return filter != null;
  }

  /**
   * Set whether blocks should be cached for this Scan.
   * <p>
   * This is true by default. When true, default settings of the table and
   * family are used (this will never override caching blocks if the block
   * cache is disabled for that family or entirely).
   *
   * @param cacheBlocks if false, default settings are overridden and blocks
   *   will not be cached
   */
  public Scan setCacheBlocks(boolean cacheBlocks) {
    this.cacheBlocks = cacheBlocks;
    return this;
  }

  /**
   * Get whether blocks should be cached for this Scan.
   * @return true if default caching should be used, false if blocks should not
   *   be cached
   */
  public boolean getCacheBlocks() {
    return cacheBlocks;
  }

  /**
   * Set whether this scan is a reversed one
   * <p>
   * This is false by default which means forward (normal) scan.
   *
   * @param reversed if true, scan will be in backward order
   * @return this
   */
  public Scan setReversed(boolean reversed) {
    this.reversed = reversed;
    return this;
  }

  /**
   * Get whether this scan is a reversed one.
   * @return true if backward scan, false if forward (default) scan
   */
  public boolean isReversed() {
    return reversed;
  }

  /**
   * Setting whether the caller wants to see the partial results when server returns
   * less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client.
   * By default this value is false and the complete results will be assembled client side
   * before being delivered to the caller.
   * @param allowPartialResults
   * @return this
   * @see Result#mayHaveMoreCellsInRow()
   * @see #setBatch(int)
   */
  public Scan setAllowPartialResults(final boolean allowPartialResults) {
    this.allowPartialResults = allowPartialResults;
    return this;
  }

  /**
   * @return true when the caller of this scan understands that the results they will see may
   *   only represent a partial portion of a row. The entire row would be retrieved by
   *   subsequent calls to {@link ResultScanner#next()}
   */
  public boolean getAllowPartialResults() {
    return allowPartialResults;
  }

  @Override
  public Scan setLoadColumnFamiliesOnDemand(boolean value) {
    return (Scan) super.setLoadColumnFamiliesOnDemand(value);
  }

  /**
   * Compile the table and column family (i.e. schema) information
   * into a String. Useful for parsing and aggregation by debugging,
   * logging, and administration tools.
   * @return Map
   */
  @Override
  public Map<String, Object> getFingerprint() {
    Map<String, Object> map = new HashMap<>();
    List<String> families = new ArrayList<>();
    if (this.familyMap.isEmpty()) {
      map.put("families", "ALL");
      return map;
    } else {
      map.put("families", families);
    }
    for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
        this.familyMap.entrySet()) {
      families.add(Bytes.toStringBinary(entry.getKey()));
    }
    return map;
  }

  /**
   * Compile the details beyond the scope of getFingerprint (row, columns,
   * timestamps, etc.) into a Map along with the fingerprinted information.
   * Useful for debugging, logging, and administration tools.
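   * <p>
   * Illustrative sketch: capture a bounded view of the scan's spec for logging or debugging.
   * <pre>{@code
   * Map<String, Object> details = scan.toMap(5); // list at most five columns before truncating
   * System.out.println(details.get("startRow") + " -> " + details.get("stopRow"));
   * }</pre>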
   * @param maxCols a limit on the number of columns output prior to truncation
   * @return Map
   */
  @Override
  public Map<String, Object> toMap(int maxCols) {
    // start with the fingerprint map and build on top of it
    Map<String, Object> map = getFingerprint();
    // map from families to column list replaces fingerprint's list of families
    Map<String, List<String>> familyColumns = new HashMap<>();
    map.put("families", familyColumns);
    // add scalar information first
    map.put("startRow", Bytes.toStringBinary(this.startRow));
    map.put("stopRow", Bytes.toStringBinary(this.stopRow));
    map.put("maxVersions", this.maxVersions);
    map.put("batch", this.batch);
    map.put("caching", this.caching);
    map.put("maxResultSize", this.maxResultSize);
    map.put("cacheBlocks", this.cacheBlocks);
    map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
    List<Long> timeRange = new ArrayList<>(2);
    timeRange.add(this.tr.getMin());
    timeRange.add(this.tr.getMax());
    map.put("timeRange", timeRange);
    int colCount = 0;
    // iterate through affected families and list out up to maxCols columns
    for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
        this.familyMap.entrySet()) {
      List<String> columns = new ArrayList<>();
      familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
      if (entry.getValue() == null) {
        colCount++;
        --maxCols;
        columns.add("ALL");
      } else {
        colCount += entry.getValue().size();
        if (maxCols <= 0) {
          continue;
        }
        for (byte [] column : entry.getValue()) {
          if (--maxCols <= 0) {
            continue;
          }
          columns.add(Bytes.toStringBinary(column));
        }
      }
    }
    map.put("totalColumns", colCount);
    if (this.filter != null) {
      map.put("filter", this.filter.toString());
    }
    // add the id if set
    if (getId() != null) {
      map.put("id", getId());
    }
    return map;
  }

  /**
   * Enable/disable "raw" mode for this scan.
   * If "raw" is enabled the scan will return all
   * delete markers and deleted rows that have not
   * yet been collected.
   * This is mostly useful for Scans on column families
   * that have KEEP_DELETED_CELLS enabled.
   * It is an error to specify any column when "raw" is set.
   * @param raw True/False to enable/disable "raw" mode.
   */
  public Scan setRaw(boolean raw) {
    setAttribute(RAW_ATTR, Bytes.toBytes(raw));
    return this;
  }

  /**
   * @return True if this Scan is in "raw" mode.
   */
  public boolean isRaw() {
    byte[] attr = getAttribute(RAW_ATTR);
    return attr == null ? false : Bytes.toBoolean(attr);
  }

  /**
   * Set whether this scan is a small scan
   * <p>
   * A small scan should use pread, while a big scan can use seek + read. Seek + read is fast but
   * can cause two problems: (1) resource contention and (2) too much network io. [89-fb] Using
   * pread for non-compaction read requests: https://issues.apache.org/jira/browse/HBASE-7266. On
   * the other hand, if set to true, openScanner, next and closeScanner are done in one RPC call,
   * which means better performance for small scans [HBASE-9488]. Generally, if the scan range is
   * within one data block (64KB), it can be considered a small scan.
   * @param small
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #setLimit(int)} and
   *   {@link #setReadType(ReadType)} instead.
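   *   For example, a former {@code scan.setSmall(true)} for a short, bounded read can instead be
   *   expressed as (illustrative sketch; {@code startRow} is a hypothetical row key):
   *   <pre>{@code
   *   Scan scan = new Scan()
   *       .withStartRow(startRow)
   *       .setLimit(10)
   *       .setReadType(ReadType.PREAD);
   *   }</pre>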
   *   Also, for the one-RPC optimization, data is now fetched when opening the scanner, and if
   *   the number of returned rows reaches the limit the scanner is closed automatically, which
   *   means we fall back to a single RPC.
   * @see #setLimit(int)
   * @see #setReadType(ReadType)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17045">HBASE-17045</a>
   */
  @Deprecated
  public Scan setSmall(boolean small) {
    this.small = small;
    this.readType = ReadType.PREAD;
    return this;
  }

  /**
   * Get whether this scan is a small scan
   * @return true if small scan
   * @deprecated since 2.0.0 and will be removed in 3.0.0. See the comment of
   *   {@link #setSmall(boolean)}
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-17045">HBASE-17045</a>
   */
  @Deprecated
  public boolean isSmall() {
    return small;
  }

  @Override
  public Scan setAttribute(String name, byte[] value) {
    return (Scan) super.setAttribute(name, value);
  }

  @Override
  public Scan setId(String id) {
    return (Scan) super.setId(id);
  }

  @Override
  public Scan setAuthorizations(Authorizations authorizations) {
    return (Scan) super.setAuthorizations(authorizations);
  }

  @Override
  public Scan setACL(Map<String, Permission> perms) {
    return (Scan) super.setACL(perms);
  }

  @Override
  public Scan setACL(String user, Permission perms) {
    return (Scan) super.setACL(user, perms);
  }

  @Override
  public Scan setConsistency(Consistency consistency) {
    return (Scan) super.setConsistency(consistency);
  }

  @Override
  public Scan setReplicaId(int Id) {
    return (Scan) super.setReplicaId(Id);
  }

  @Override
  public Scan setIsolationLevel(IsolationLevel level) {
    return (Scan) super.setIsolationLevel(level);
  }

  @Override
  public Scan setPriority(int priority) {
    return (Scan) super.setPriority(priority);
  }

  /**
   * Enable collection of {@link ScanMetrics}. For advanced users.
   * @param enabled Set to true to enable accumulating scan metrics
   */
  public Scan setScanMetricsEnabled(final boolean enabled) {
    setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
    return this;
  }

  /**
   * @return True if collection of scan metrics is enabled. For advanced users.
   */
  public boolean isScanMetricsEnabled() {
    byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
    return attr == null ? false : Bytes.toBoolean(attr);
  }

  /**
   * @return Metrics on this Scan, if metrics were enabled.
   * @see #setScanMetricsEnabled(boolean)
   * @deprecated Use {@link ResultScanner#getScanMetrics()} instead. Note that this method and
   *   {@link ResultScanner#getScanMetrics()} must not be used together, or the metrics
   *   will be messed up.
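   *   Illustrative sketch of the non-deprecated path (assumes a {@code Table} named
   *   {@code table}):
   *   <pre>{@code
   *   Scan scan = new Scan().setScanMetricsEnabled(true);
   *   try (ResultScanner scanner = table.getScanner(scan)) {
   *     for (Result r : scanner) {
   *       // consume results
   *     }
   *     ScanMetrics metrics = scanner.getScanMetrics();
   *   }
   *   }</pre>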
   */
  @Deprecated
  public ScanMetrics getScanMetrics() {
    byte[] bytes = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
    if (bytes == null) return null;
    return ProtobufUtil.toScanMetrics(bytes);
  }

  public Boolean isAsyncPrefetch() {
    return asyncPrefetch;
  }

  public Scan setAsyncPrefetch(boolean asyncPrefetch) {
    this.asyncPrefetch = asyncPrefetch;
    return this;
  }

  /**
   * @return the limit of rows for this scan
   */
  public int getLimit() {
    return limit;
  }

  /**
   * Set the limit of rows for this scan. We will terminate the scan if the number of returned rows
   * reaches this value.
   * <p>
   * This condition will be tested last, after all other conditions such as stopRow, filter, etc.
   * @param limit the limit of rows for this scan
   * @return this
   */
  public Scan setLimit(int limit) {
    this.limit = limit;
    return this;
  }

  /**
   * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
   * set {@code readType} to {@link ReadType#PREAD}.
   * @return this
   */
  public Scan setOneRowLimit() {
    return setLimit(1).setReadType(ReadType.PREAD);
  }

  @InterfaceAudience.Public
  public enum ReadType {
    DEFAULT, STREAM, PREAD
  }

  /**
   * @return the read type for this scan
   */
  public ReadType getReadType() {
    return readType;
  }

  /**
   * Set the read type for this scan.
   * <p>
   * Notice that we may choose to use pread even if you specify {@link ReadType#STREAM} here. For
   * example, we will always use pread if this is a get scan.
   * @return this
   */
  public Scan setReadType(ReadType readType) {
    this.readType = readType;
    return this;
  }

  /**
   * Get the mvcc read point used to open a scanner.
   */
  long getMvccReadPoint() {
    return mvccReadPoint;
  }

  /**
   * Set the mvcc read point used to open a scanner.
   */
  Scan setMvccReadPoint(long mvccReadPoint) {
    this.mvccReadPoint = mvccReadPoint;
    return this;
  }

  /**
   * Set the mvcc read point to -1 which means do not use it.
   */
  Scan resetMvccReadPoint() {
    return setMvccReadPoint(-1L);
  }

  /**
   * When the server is slow, or we scan a table with much deleted data, or we use a sparse filter,
   * the server will respond with heartbeats to prevent timeouts. However, the scanner can only
   * return a Result to the caller when there is real data to return. So if there are many
   * heartbeats, the blocking time of ResultScanner#next() may be very long, which is not friendly
   * to online services.
   *
   * Set this to true and you can get a special Result whose {@link Result#isCursor()} returns true
   * and which does not contain any real data. It only tells you where the server has scanned. You
   * can call next to continue scanning, or open a new scanner with this row key as the start row
   * whenever you want.
   *
   * Users get a cursor when and only when there is a response from the server but we cannot
   * return a Result to users, for example, when the response is a heartbeat or when there are
   * partial cells but the user does not allow partial results.
   *
   * Currently the cursor is at row level, which means the special Result will only contain a row
   * key.
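   * <p>
   * Illustrative sketch (assumes a {@code Table} named {@code table}):
   * <pre>{@code
   * Scan scan = new Scan().setNeedCursorResult(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     if (result.isCursor()) {
   *       // no data yet; result.getCursor().getRow() tells how far the server has scanned
   *       continue;
   *     }
   *     // process a real row
   *   }
   * }
   * }</pre>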
   * {@link Result#isCursor()}
   * {@link Result#getCursor()}
   * {@link Cursor}
   */
  public Scan setNeedCursorResult(boolean needCursorResult) {
    this.needCursorResult = needCursorResult;
    return this;
  }

  public boolean isNeedCursorResult() {
    return needCursorResult;
  }

  /**
   * Create a new Scan with a cursor. It only sets the position information, such as the start row
   * key. The others (like cfs, stop row, limit) should still be filled in by the user.
   * {@link Result#isCursor()}
   * {@link Result#getCursor()}
   * {@link Cursor}
   */
  public static Scan createScanFromCursor(Cursor cursor) {
    return new Scan().withStartRow(cursor.getRow());
  }
}