1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.client;
19  
20  import java.io.IOException;
21  import java.io.InterruptedIOException;
22  import java.util.ArrayList;
23  import java.util.Arrays;
24  import java.util.LinkedList;
25  import java.util.List;
26  import java.util.concurrent.ExecutorService;
27  
28  import org.apache.commons.logging.Log;
29  import org.apache.commons.logging.LogFactory;
30  import org.apache.hadoop.hbase.KeyValue.MetaComparator;
31  import org.apache.hadoop.hbase.classification.InterfaceAudience;
32  import org.apache.hadoop.conf.Configuration;
33  import org.apache.hadoop.hbase.Cell;
34  import org.apache.hadoop.hbase.CellComparator;
35  import org.apache.hadoop.hbase.CellUtil;
36  import org.apache.hadoop.hbase.DoNotRetryIOException;
37  import org.apache.hadoop.hbase.HBaseConfiguration;
38  import org.apache.hadoop.hbase.HConstants;
39  import org.apache.hadoop.hbase.HRegionInfo;
40  import org.apache.hadoop.hbase.NotServingRegionException;
41  import org.apache.hadoop.hbase.TableName;
42  import org.apache.hadoop.hbase.UnknownScannerException;
43  import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
44  import org.apache.hadoop.hbase.exceptions.ScannerResetException;
45  import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
46  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
47  import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
48  import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
49  import org.apache.hadoop.hbase.util.Bytes;
50  
51  import com.google.common.annotations.VisibleForTesting;
52  
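// Typical applications do not use this class directly; they obtain a scanner through the public
// client API, roughly like the following sketch (assuming an open Table instance named "table"):
//
//   try (ResultScanner scanner = table.getScanner(new Scan())) {
//     for (Result result : scanner) {
//       // process each Result
//     }
//   }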
53  /**
54   * Implements the scanner interface for the HBase client.
55   * If there are multiple regions in a table, this scanner will iterate
56   * through them all.
57   */
58  @InterfaceAudience.Private
59  public class ClientScanner extends AbstractClientScanner {
60      private final Log LOG = LogFactory.getLog(this.getClass());
61      // A byte array in which all elements are the max byte, and it is used to
62      // construct closest front row
63      static byte[] MAX_BYTE_ARRAY = Bytes.createMaxByteArray(9);
64      protected Scan scan;
65      protected boolean closed = false;
66      // Current region scanner is against.  Gets cleared if current region goes
67      // wonky: e.g. if it splits on us.
68      protected HRegionInfo currentRegion = null;
69      protected ScannerCallableWithReplicas callable = null;
70      protected final LinkedList<Result> cache = new LinkedList<Result>();
71      /**
72       * A list of partial results that have been returned from the server. This list should only
73       * contain results if this scanner does not have enough partial results to form the complete
74       * result.
75       */
76      protected final LinkedList<Result> partialResults = new LinkedList<Result>();
77      /**
78       * The row for which we are accumulating partial Results (i.e. the row of the Results stored
79       * inside partialResults). Changes to partialResultsRow and partialResults are kept in sync
80       * via the methods {@link #addToPartialResults(Result)} and {@link #clearPartialResults()}
81       */
82      protected byte[] partialResultsRow = null;
83      /**
84       * The last cell from a row that is not yet complete which was added to the cache
85       */
86      protected Cell lastCellLoadedToCache = null;
87      protected final int caching;
88      protected long lastNext;
89      // Keep lastResult returned successfully in case we have to reset scanner.
90      protected Result lastResult = null;
91      protected final long maxScannerResultSize;
92      private final ClusterConnection connection;
93      private final TableName tableName;
94      protected final int scannerTimeout;
95      protected boolean scanMetricsPublished = false;
96      protected RpcRetryingCaller<Result []> caller;
97      protected RpcControllerFactory rpcControllerFactory;
98      protected Configuration conf;
99      // The timeout on the primary. Applicable if there are multiple replicas for a region.
100     // In that case, we will only wait this long on the primary before going
101     // to the replicas and trying the same scan. Note that the retries will still happen
102     // on each replica and the first successful results will be taken. A timeout of 0 is
103     // disallowed.
104     protected final int primaryOperationTimeout;
105     private int retries;
106     protected final ExecutorService pool;
107     private static MetaComparator metaComparator = new MetaComparator();
108 
109   /**
110    * Create a new ClientScanner for the specified table. Note that the passed {@link Scan}'s start
111    * row may be changed.
112    * @param conf The {@link Configuration} to use.
113    * @param scan {@link Scan} to use in this scanner
114    * @param tableName The table that we wish to scan
115    * @param connection Connection identifying the cluster
116    * @throws IOException
117    */
118   public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName,
119       ClusterConnection connection, RpcRetryingCallerFactory rpcFactory,
120       RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout)
121       throws IOException {
122       if (LOG.isTraceEnabled()) {
123         LOG.trace("Scan table=" + tableName
124             + ", startRow=" + Bytes.toStringBinary(scan.getStartRow()));
125       }
126       this.scan = scan;
127       this.tableName = tableName;
128       this.lastNext = System.currentTimeMillis();
129       this.connection = connection;
130       this.pool = pool;
131       this.primaryOperationTimeout = primaryOperationTimeout;
132       this.retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
133           HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
134       if (scan.getMaxResultSize() > 0) {
135         this.maxScannerResultSize = scan.getMaxResultSize();
136       } else {
137         this.maxScannerResultSize = conf.getLong(
138           HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
139           HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
140       }
141       this.scannerTimeout = HBaseConfiguration.getInt(conf,
142         HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
143         HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY,
144         HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
145 
146       // check if application wants to collect scan metrics
147       initScanMetrics(scan);
148 
149       // Use the caching from the Scan.  If not set, use the default cache setting for this table.
150       if (this.scan.getCaching() > 0) {
151         this.caching = this.scan.getCaching();
152       } else {
153         this.caching = conf.getInt(
154             HConstants.HBASE_CLIENT_SCANNER_CACHING,
155             HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
156       }
157 
158       this.caller = rpcFactory.<Result[]> newCaller();
159       this.rpcControllerFactory = controllerFactory;
160 
161       this.conf = conf;
162       initializeScannerInConstruction();
163     }
164 
165     protected void initializeScannerInConstruction() throws IOException{
166       // initialize the scanner
167       nextScanner(this.caching, false);
168     }
169 
170     protected ClusterConnection getConnection() {
171       return this.connection;
172     }
173 
174     /**
175      * @return Table name
176      * @deprecated As of release 0.96
177      *             (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
178      *             This will be removed in HBase 2.0.0. Use {@link #getTable()}.
179      */
180     @Deprecated
181     protected byte [] getTableName() {
182       return this.tableName.getName();
183     }
184 
185     protected TableName getTable() {
186       return this.tableName;
187     }
188 
189     protected int getRetries() {
190       return this.retries;
191     }
192 
193     protected int getScannerTimeout() {
194       return this.scannerTimeout;
195     }
196 
197     protected Configuration getConf() {
198       return this.conf;
199     }
200 
201     protected Scan getScan() {
202       return scan;
203     }
204 
205     protected ExecutorService getPool() {
206       return pool;
207     }
208 
209     protected int getPrimaryOperationTimeout() {
210       return primaryOperationTimeout;
211     }
212 
213     protected int getCaching() {
214       return caching;
215     }
216 
217     protected long getTimestamp() {
218       return lastNext;
219     }
220 
221     @VisibleForTesting
222     protected long getMaxResultSize() {
223       return maxScannerResultSize;
224     }
225 
226     // Returns true if the passed region endKey is at or beyond the scan's stop row.
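    // For example, if the scan's stopRow is "row5" and the region's endKey is "row7", the stop
    // row falls within this region, so no scanner needs to be opened on any later region.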
227     protected boolean checkScanStopRow(final byte [] endKey) {
228       if (this.scan.getStopRow().length > 0) {
229         // there is a stop row, check to see if we are past it.
230         byte [] stopRow = scan.getStopRow();
231         int cmp = Bytes.compareTo(stopRow, 0, stopRow.length,
232           endKey, 0, endKey.length);
233         if (cmp <= 0) {
234           // stopRow <= endKey (endKey is equal to or larger than stopRow)
235           // This is a stop.
236           return true;
237         }
238       }
239       return false; //unlikely.
240     }
241 
242     private boolean possiblyNextScanner(int nbRows, final boolean done) throws IOException {
243       // If we have just switched replica, don't go to the next scanner yet. Rather, try
244       // the scanner operations on the new replica, from the right point in the scan
245       // Note that when we switched to a different replica we left it at a point
246       // where we just did the "openScanner" with the appropriate startrow
247       if (callable != null && callable.switchedToADifferentReplica()) return true;
248       return nextScanner(nbRows, done);
249     }
250 
251     /*
252      * Gets a scanner for the next region.  If this.currentRegion != null, then
253      * we will move to the endrow of this.currentRegion.  Else we will get a
254      * scanner at the scan.getStartRow().  We will go no further, just tidy
255      * up outstanding scanners, if <code>currentRegion != null</code> and
256      * <code>done</code> is true.
257      * @param nbRows
258      * @param done Server-side says we're done scanning.
259      */
260   protected boolean nextScanner(int nbRows, final boolean done)
261     throws IOException {
262       // Close the previous scanner if it's open
263       if (this.callable != null) {
264         this.callable.setClose();
265         call(callable, caller, scannerTimeout);
266         this.callable = null;
267       }
268 
269       // Where to start the next scanner
270       byte [] localStartKey;
271 
272       // if we're at end of table, close and return false to stop iterating
273       if (this.currentRegion != null) {
274         byte [] endKey = this.currentRegion.getEndKey();
275         if (endKey == null ||
276             Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY) ||
277             checkScanStopRow(endKey) ||
278             done) {
279           close();
280           if (LOG.isTraceEnabled()) {
281             LOG.trace("Finished " + this.currentRegion);
282           }
283           return false;
284         }
285         localStartKey = endKey;
286         if (LOG.isTraceEnabled()) {
287           LOG.trace("Finished " + this.currentRegion);
288         }
289       } else {
290         localStartKey = this.scan.getStartRow();
291       }
292 
293       if (LOG.isDebugEnabled() && this.currentRegion != null) {
294         // Only worth logging if NOT first region in scan.
295         LOG.debug("Advancing internal scanner to startKey at '" +
296           Bytes.toStringBinary(localStartKey) + "'");
297       }
298       try {
299         callable = getScannerCallable(localStartKey, nbRows);
300         // Open a scanner on the region server starting at the
301         // beginning of the region
302         call(callable, caller, scannerTimeout);
303         this.currentRegion = callable.getHRegionInfo();
304         if (this.scanMetrics != null) {
305           this.scanMetrics.countOfRegions.incrementAndGet();
306         }
307       } catch (IOException e) {
308         close();
309         throw e;
310       }
311       return true;
312     }
313 
314   @VisibleForTesting
315   boolean isAnyRPCcancelled() {
316     return callable.isAnyRPCcancelled();
317   }
318 
319   Result[] call(ScannerCallableWithReplicas callable,
320       RpcRetryingCaller<Result[]> caller, int scannerTimeout)
321       throws IOException, RuntimeException {
322     if (Thread.interrupted()) {
323       throw new InterruptedIOException();
324     }
325     // callWithoutRetries is at this layer. Within the ScannerCallableWithReplicas,
326     // we do a callWithRetries
327     return caller.callWithoutRetries(callable, scannerTimeout);
328   }
329 
330     @InterfaceAudience.Private
331     protected ScannerCallableWithReplicas getScannerCallable(byte [] localStartKey,
332         int nbRows) {
333       scan.setStartRow(localStartKey);
334       ScannerCallable s =
335           new ScannerCallable(getConnection(), getTable(), scan, this.scanMetrics,
336               this.rpcControllerFactory);
337       s.setCaching(nbRows);
338       ScannerCallableWithReplicas sr = new ScannerCallableWithReplicas(tableName, getConnection(),
339        s, pool, primaryOperationTimeout, scan,
340        retries, scannerTimeout, caching, conf, caller);
341       return sr;
342     }
343 
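    // For reference, an application can read the published metrics back from the Scan; a rough
    // sketch, assuming ProtobufUtil also provides the deserializing byte[] overload of
    // toScanMetrics:
    //   byte[] serializedMetrics = scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
    //   ScanMetrics scanMetrics = ProtobufUtil.toScanMetrics(serializedMetrics);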
344     /**
345      * Publish the scan metrics. For now, we use scan.setAttribute to pass the metrics back to the
346    * application or TableInputFormat. Later, we could push it to other systems. We don't use the
347    * metrics framework because it doesn't support multiple instances of the same metrics on the same
348      * machine; for scan/map reduce scenarios, we will have multiple scans running at the same time.
349      *
350      * By default, scan metrics are disabled; if the application wants to collect them, this
351    * behavior can be turned on by calling {@link Scan#setScanMetricsEnabled(boolean)}.
352      * 
353      * <p>This invocation clears the scan metrics. Metrics are aggregated in the Scan instance.
354      */
355     protected void writeScanMetrics() {
356       if (this.scanMetrics == null || scanMetricsPublished) {
357         return;
358       }
359       MapReduceProtos.ScanMetrics pScanMetrics = ProtobufUtil.toScanMetrics(scanMetrics);
360       scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA, pScanMetrics.toByteArray());
361       scanMetricsPublished = true;
362     }
363 
364     @Override
365     public Result next() throws IOException {
366       // If the scanner is closed and there's nothing left in the cache, next is a no-op.
367       if (cache.size() == 0 && this.closed) {
368         return null;
369       }
370       if (cache.size() == 0) {
371         loadCache();
372       }
373 
374       if (cache.size() > 0) {
375         return cache.poll();
376       }
377 
378       // if we exhausted this scanner before calling close, write out the scan metrics
379       writeScanMetrics();
380       return null;
381     }
382 
383   @VisibleForTesting
384   public int getCacheSize() {
385     return cache != null ? cache.size() : 0;
386   }
387 
388   /**
389    * Contact the servers to load more {@link Result}s in the cache.
390    */
391   protected void loadCache() throws IOException {
392     Result[] values = null;
393     long remainingResultSize = maxScannerResultSize;
394     int countdown = this.caching;
395     // We need to reset it if it's a new callable that was created with a countdown in nextScanner
396     callable.setCaching(this.caching);
397     // This flag is set when we want to skip the result returned. We do
398     // this when we reset scanner because it split under us.
399     boolean retryAfterOutOfOrderException = true;
400     // We don't expect that the server will have more results for us if
401     // it doesn't tell us otherwise. We rely on the size or count of results
402     boolean serverHasMoreResults = false;
403     boolean allResultsSkipped = false;
404     do {
405       allResultsSkipped = false;
406       try {
407         // Server returns null values if scanning is to stop. Else,
408         // returns an empty array if scanning is to go on and we've just
409         // exhausted current region.
410         values = call(callable, caller, scannerTimeout);
411         // When the replica switch happens, we need to do certain operations again.
412         // The callable will openScanner with the right startkey but we need to pick up
413         // from there. Bypass the rest of the loop and let the catch-up happen in the beginning
414         // of the loop as it happens for the cases where we see exceptions.
415         // Since only openScanner would have happened, values would be null
416         if (values == null && callable.switchedToADifferentReplica()) {
417           // Any accumulated partial results are no longer valid since the callable will
418           // openScanner with the correct startkey and we must pick up from there
419           clearPartialResults();
420           this.currentRegion = callable.getHRegionInfo();
421           continue;
422         }
423         retryAfterOutOfOrderException = true;
424       } catch (DoNotRetryIOException e) {
425         // An exception was thrown which makes any partial results that we were collecting
426         // invalid. The scanner will need to be reset to the beginning of a row.
427         clearPartialResults();
428         // DNRIOEs are thrown to make us break out of retries. Some types of DNRIOEs want us
429         // to reset the scanner and come back in again.
430 
431         // If the exception is anything other than those listed below, throw it back to the client;
432         // else set up the scanner and retry.
433         Throwable cause = e.getCause();
434         if ((cause != null && cause instanceof NotServingRegionException) ||
435             (cause != null && cause instanceof RegionServerStoppedException) ||
436             e instanceof OutOfOrderScannerNextException ||
437             e instanceof UnknownScannerException ||
438             e instanceof ScannerResetException) {
439           // Pass. It is easier writing the if test as a list of what is allowed rather than
440           // as a list of what is not allowed... so if we are in here, it means we do not throw.
441         } else {
442           throw e;
443         }
444 
445         // Else, it's a signal from the depths of ScannerCallable that we need to reset the scanner.
446         if (this.lastResult != null) {
447           // The region has moved. We need to open a brand new scanner at the new location.
448           // Reset the startRow to the row we've seen last so that the new scanner starts at
449           // the correct row. Otherwise we may see previously returned rows again.
450           // (ScannerCallable by now has "relocated" the correct region)
451           if (!this.lastResult.isPartial() && scan.getBatch() < 0 ) {
452             if (scan.isReversed()) {
453               scan.setStartRow(createClosestRowBefore(lastResult.getRow()));
454             } else {
455               scan.setStartRow(Bytes.add(lastResult.getRow(), new byte[1]));
456             }
457           } else {
458             // we need to rescan this row because we only loaded a partial row before
459             scan.setStartRow(lastResult.getRow());
460           }
461         }
462         if (e instanceof OutOfOrderScannerNextException) {
463           if (retryAfterOutOfOrderException) {
464             retryAfterOutOfOrderException = false;
465           } else {
466             // TODO: Why wrap this in a DNRIOE when it already is a DNRIOE?
467             throw new DoNotRetryIOException("Failed after retry of " +
468                 "OutOfOrderScannerNextException: was there a rpc timeout?", e);
469           }
470         }
471         // Clear region.
472         this.currentRegion = null;
473         // Set this to null so we don't try to do an RPC and close on the remote server when
474         // the exception we got was UnknownScanner or the Server is going down.
475         callable = null;
476         // This continue will take us to while at end of loop where we will set up new scanner.
477         continue;
478       }
479       long currentTime = System.currentTimeMillis();
480       if (this.scanMetrics != null) {
481         this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime - lastNext);
482       }
483       lastNext = currentTime;
484       // Groom the array of Results that we received back from the server before adding those
485       // Results to the scanner's cache. If partial results are not allowed to be seen by the
486       // caller, all book keeping will be performed within this method.
487       List<Result> resultsToAddToCache =
488           getResultsToAddToCache(values, callable.isHeartbeatMessage());
489       if (!resultsToAddToCache.isEmpty()) {
490         for (Result rs : resultsToAddToCache) {
491           rs = filterLoadedCell(rs);
492           if (rs == null) {
493             continue;
494           }
495           cache.add(rs);
496           for (Cell cell : rs.rawCells()) {
497             remainingResultSize -= CellUtil.estimatedHeapSizeOf(cell);
498           }
499           countdown--;
500           this.lastResult = rs;
501           if (this.lastResult.isPartial() || scan.getBatch() > 0 ) {
502             updateLastCellLoadedToCache(this.lastResult);
503           } else {
504             this.lastCellLoadedToCache = null;
505           }
506         }
507         if (cache.isEmpty()) {
508           // all results have been seen before; we need to scan more.
509           allResultsSkipped = true;
510           continue;
511         }
512       }
513       if (callable.isHeartbeatMessage()) {
514         if (cache.size() > 0) {
515           // Caller of this method just wants a Result. If we see a heartbeat message, it means
516           // processing of the scan is taking a long time server side. Rather than continue to
517           // loop until a limit (e.g. size or caching) is reached, break out early to avoid causing
518           // unnecessary delays to the caller
519           if (LOG.isTraceEnabled()) {
520             LOG.trace("Heartbeat message received and cache contains Results."
521                     + " Breaking out of scan loop");
522           }
523           break;
524         }
525         continue;
526       }
527 
528       // We expect that the server won't have more results for us when we exhaust
529       // the size (bytes or count) of the results returned. If the server *does* inform us that
530       // there are more results, we want to avoid possiblyNextScanner(...). Only when we actually
531       // get results is the moreResults context valid.
532       if (null != values && values.length > 0 && callable.hasMoreResultsContext()) {
533         // Only adhere to more server results when we don't have any partialResults
534         // as it keeps the outer loop logic the same.
535         serverHasMoreResults = callable.getServerHasMoreResults() && partialResults.isEmpty();
536       }
537       // Values == null means server-side filter has determined we must STOP
538       // !partialResults.isEmpty() means that we are still accumulating partial Results for a
539       // row. We should not change scanners before we receive all the partial Results for that
540       // row.
541     } while (allResultsSkipped || (callable != null && callable.isHeartbeatMessage())
542         || (doneWithRegion(remainingResultSize, countdown, serverHasMoreResults)
543         && (!partialResults.isEmpty() || possiblyNextScanner(countdown, values == null))));
544   }
545 
546   /**
547    * @param remainingResultSize the result-size budget (in heap bytes) remaining for this caching round
548    * @param remainingRows the number of rows still wanted in this caching round
549    * @param regionHasMoreResults true if the server indicated it has more results for the current region
550    * @return true when the current region has been exhausted. When the current region has been
551    *         exhausted, the region must be changed before scanning can continue
552    */
553   private boolean doneWithRegion(long remainingResultSize, int remainingRows,
554       boolean regionHasMoreResults) {
555     return remainingResultSize > 0 && remainingRows > 0 && !regionHasMoreResults;
556   }
557 
558   /**
559    * This method ensures all of our book keeping regarding partial results is kept up to date. This
560    * method should be called once we know that the results we received back from the RPC request do
561    * not contain errors. We return a list of results that should be added to the cache. In general,
562    * this list will contain all NON-partial results from the input array (unless the client has
563    * specified that they are okay with receiving partial results)
564    * @param resultsFromServer The array of {@link Result}s returned from the server
565    * @param heartbeatMessage Flag indicating whether or not the response received from the server
566    *          represented a complete response, or a heartbeat message that was sent to keep the
567    *          client-server connection alive
568    * @return the list of results that should be added to the cache.
569    * @throws IOException
570    */
571   protected List<Result>
572       getResultsToAddToCache(Result[] resultsFromServer, boolean heartbeatMessage)
573           throws IOException {
574     int resultSize = resultsFromServer != null ? resultsFromServer.length : 0;
575     List<Result> resultsToAddToCache = new ArrayList<Result>(resultSize);
576 
577     final boolean isBatchSet = scan != null && scan.getBatch() > 0;
578     final boolean allowPartials = scan != null && scan.getAllowPartialResults();
579 
580     // If the caller has indicated in their scan that they are okay with seeing partial results,
581     // then simply add all results to the list. Note that since scan batching also returns results
582     // for a row in pieces we treat batch being set as equivalent to allowing partials. The
583     // implication of treating batching as equivalent to partial results is that it is possible
584     // the caller will receive a result back where the number of cells in the result is less than
585     // the batch size even though it may not be the last group of cells for that row.
586     if (allowPartials || isBatchSet) {
587       addResultsToList(resultsToAddToCache, resultsFromServer, 0,
588           (null == resultsFromServer ? 0 : resultsFromServer.length));
589       return resultsToAddToCache;
590     }
591 
592     // If no results were returned, it indicates that either we have all the partial results
593     // necessary to construct the complete result or the server had to send a heartbeat message
594     // to the client to keep the client-server connection alive
595     if (resultsFromServer == null || resultsFromServer.length == 0) {
596       // If this response was an empty heartbeat message, then we have not exhausted the region
597       // and thus there may be more partials server side that still need to be added to the partial
598       // list before we form the complete Result
599       if (!partialResults.isEmpty() && !heartbeatMessage) {
600         resultsToAddToCache.add(Result.createCompleteResult(partialResults));
601         clearPartialResults();
602       }
603 
604       return resultsToAddToCache;
605     }
606 
607     // In every RPC response there should be at most a single partial result. Furthermore, if
608     // there is a partial result, it is guaranteed to be in the last position of the array.
609     Result last = resultsFromServer[resultsFromServer.length - 1];
610     Result partial = last.isPartial() ? last : null;
611 
612     if (LOG.isTraceEnabled()) {
613       StringBuilder sb = new StringBuilder();
614       sb.append("number results from RPC: ").append(resultsFromServer.length).append(",");
615       sb.append("partial != null: ").append(partial != null).append(",");
616       sb.append("number of partials so far: ").append(partialResults.size());
617       LOG.trace(sb.toString());
618     }
619 
620     // There are three possible cases that can occur while handling partial results
621     //
622     // 1. (partial != null && partialResults.isEmpty())
623     // This is the first partial result that we have received. It should be added to
624     // the list of partialResults and await the next RPC request at which point another
625     // portion of the complete result will be received
626     //
627     // 2. !partialResults.isEmpty()
628     // Since our partialResults list is not empty it means that we have been accumulating partial
629     // Results for a particular row. We cannot form the complete/whole Result for that row until
630     // all partials for the row have been received. Thus we loop through all of the Results
631     // returned from the server and determine whether or not all partial Results for the row have
632     // been received. We know that we have received all of the partial Results for the row when:
633     // i) We notice a row change in the Results
634     // ii) We see a Result for the partial row that is NOT marked as a partial Result
635     //
636     // 3. (partial == null && partialResults.isEmpty())
637     // Business as usual. We are not accumulating partial results and there wasn't a partial result
638     // in the RPC response. This means that all of the results we received from the server are
639     // complete and can be added directly to the cache
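    //
    // As a concrete illustration: if row 'r1' arrives as pieces P1 and P2 (both marked partial)
    // followed by P3 (not marked partial) across successive RPC responses, then P1 starts the
    // partialResults list (case 1 above), P2 is appended to it (case 2), and the arrival of P3
    // triggers Result.createCompleteResult(partialResults), handing a single complete Result
    // for 'r1' to the cache.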
640     if (partial != null && partialResults.isEmpty()) {
641       addToPartialResults(partial);
642 
643       // Exclude the last result, it's a partial
644       addResultsToList(resultsToAddToCache, resultsFromServer, 0, resultsFromServer.length - 1);
645     } else if (!partialResults.isEmpty()) {
646       for (int i = 0; i < resultsFromServer.length; i++) {
647         Result result = resultsFromServer[i];
648 
649         // This result is from the same row as the partial Results. Add it to the list of partials
650         // and check if it was the last partial Result for that row
651         if (Bytes.equals(partialResultsRow, result.getRow())) {
652           addToPartialResults(result);
653 
654           // If the result is not a partial, it is a signal to us that it is the last Result we
655           // need to form the complete Result client-side
656           if (!result.isPartial()) {
657             resultsToAddToCache.add(Result.createCompleteResult(partialResults));
658             clearPartialResults();
659           }
660         } else {
661           // The row of this result differs from the row of the partial results we have received so
662           // far. If our list of partials isn't empty, this is a signal to form the complete Result
663           // since the row has now changed
664           if (!partialResults.isEmpty()) {
665             resultsToAddToCache.add(Result.createCompleteResult(partialResults));
666             clearPartialResults();
667           }
668 
669           // It's possible that in one response from the server we receive the final partial for
670           // one row and receive a partial for a different row. Thus, make sure that all Results
671           // are added to the proper list
672           if (result.isPartial()) {
673             addToPartialResults(result);
674           } else {
675             resultsToAddToCache.add(result);
676           }
677         }
678       }
679     } else { // partial == null && partialResults.isEmpty() -- business as usual
680       addResultsToList(resultsToAddToCache, resultsFromServer, 0, resultsFromServer.length);
681     }
682 
683     return resultsToAddToCache;
684   }
685 
686   /**
687    * A convenience method for adding a Result to our list of partials. This method ensures that only
688    * Results that belong to the same row as the other partials can be added to the list.
689    * @param result The result that we want to add to our list of partial Results
690    * @throws IOException
691    */
692   private void addToPartialResults(final Result result) throws IOException {
693     final byte[] row = result.getRow();
694     if (partialResultsRow != null && !Bytes.equals(row, partialResultsRow)) {
695       throw new IOException("Partial result row does not match. All partial results must come "
696           + "from the same row. partialResultsRow: " + Bytes.toString(partialResultsRow) + " row: "
697           + Bytes.toString(row));
698     }
699     partialResultsRow = row;
700     partialResults.add(result);
701   }
702 
703   /**
704    * Convenience method for clearing the list of partials and resetting the partialResultsRow.
705    */
706   private void clearPartialResults() {
707     partialResults.clear();
708     partialResultsRow = null;
709   }
710 
711   /**
712    * Helper method for adding results between the indices [start, end) to the outputList
713    * @param outputList the list that results will be added to
714    * @param inputArray the array that results are taken from
715    * @param start beginning index (inclusive)
716    * @param end ending index (exclusive)
717    */
718   private void addResultsToList(List<Result> outputList, Result[] inputArray, int start, int end) {
719     if (inputArray == null || start < 0 || end > inputArray.length) return;
720 
721     for (int i = start; i < end; i++) {
722       outputList.add(inputArray[i]);
723     }
724   }
725 
726     @Override
727     public void close() {
728       if (!scanMetricsPublished) writeScanMetrics();
729       if (callable != null) {
730         callable.setClose();
731         try {
732           call(callable, caller, scannerTimeout);
733         } catch (UnknownScannerException e) {
734            // We used to catch this error, interpret, and rethrow. However, we
735            // have since decided that it's not nice for a scanner's close to
736            // throw exceptions. Chances are it was just due to lease time out.
737         } catch (IOException e) {
738            /* An exception other than UnknownScanner is unexpected. */
739            LOG.warn("scanner failed to close. Exception follows: " + e);
740         }
741         callable = null;
742       }
743       closed = true;
744     }
745 
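  // For illustration: for a row {0x01, 0x02} the closest front row is {0x01, 0x01} followed by
  // the nine 0xFF bytes of MAX_BYTE_ARRAY, while for a row that ends in 0x00, such as
  // {0x01, 0x00}, it is simply the row with the trailing zero byte dropped, i.e. {0x01}.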
746   /**
747    * Create the closest row before the specified row.
748    * @param row the row to create the closest front row for
749    * @return a new byte array which is the closest front row of the specified one
750    */
751   protected static byte[] createClosestRowBefore(byte[] row) {
752     if (row == null) {
753       throw new IllegalArgumentException("The passed row is null");
754     }
755     if (Bytes.equals(row, HConstants.EMPTY_BYTE_ARRAY)) {
756       return MAX_BYTE_ARRAY;
757     }
758     if (row[row.length - 1] == 0) {
759       return Arrays.copyOf(row, row.length - 1);
760     } else {
761       byte[] closestFrontRow = Arrays.copyOf(row, row.length);
762       closestFrontRow[row.length - 1] = (byte) ((closestFrontRow[row.length - 1] & 0xff) - 1);
763       closestFrontRow = Bytes.add(closestFrontRow, MAX_BYTE_ARRAY);
764       return closestFrontRow;
765     }
766   }
767 
768   @Override
769   public boolean renewLease() {
770     if (callable != null) {
771       // do not return any rows, do not advance the scanner
772       callable.setRenew(true);
773       try {
774         this.caller.callWithoutRetries(callable, this.scannerTimeout);
775       } catch (Exception e) {
776         return false;
777       } finally {
778         callable.setRenew(false);
779       }
780       return true;
781     }
782     return false;
783   }
784 
785   protected void updateLastCellLoadedToCache(Result result) {
786     if (result.rawCells().length == 0) {
787       return;
788     }
789     this.lastCellLoadedToCache = result.rawCells()[result.rawCells().length - 1];
790   }
791 
792   /**
793    * Compare two Cells considering reversed scanner.
794    * ReversedScanner only reverses rows, not columns.
795    */
796   private int compare(Cell a, Cell b) {
797     int r = 0;
798     if (currentRegion != null && currentRegion.isMetaRegion()) {
799       r = metaComparator.compareRows(a, b);
800     } else {
801       r = CellComparator.compareRows(a, b);
802     }
803     if (r != 0) {
804       return this.scan.isReversed() ? -r : r;
805     }
806     return CellComparator.compareWithoutRow(a, b);
807   }
808 
809   private Result filterLoadedCell(Result result) {
810     // We only filter the result when the last result was partial, so lastCellLoadedToCache
811     // and result should have the same row key.
812     // However, if we 1) read some cells, 1.1) the row is deleted concurrently, 2) the region moves,
813     // and 3) we read more cells, then lastCellLoadedToCache and result will not be on the same row.
814     if (lastCellLoadedToCache == null || result.rawCells().length == 0) {
815       return result;
816     }
817     if (compare(this.lastCellLoadedToCache, result.rawCells()[0]) < 0) {
818       // The first cell of this result is larger than the last cell loaded to the cache.
819       // If the user does not allow partial results, this is always the case.
820       return result;
821     }
822     if (compare(this.lastCellLoadedToCache, result.rawCells()[result.rawCells().length - 1]) >= 0) {
823       // The last cell of this result is not larger than the last cell loaded to the cache; skip all.
824       return null;
825     }
826 
827     // The first cell must not be included in the filtered result, so we start at the second.
828     int index = 1;
829     while (index < result.rawCells().length) {
830       if (compare(this.lastCellLoadedToCache, result.rawCells()[index]) < 0) {
831         break;
832       }
833       index++;
834     }
835     Cell[] list = Arrays.copyOfRange(result.rawCells(), index, result.rawCells().length);
836     return Result.create(list, result.getExists(), result.isStale(), result.isPartial());
837   }
838 }