1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.regionserver;
21  
22  import java.io.IOException;
23  import java.util.Arrays;
24  import java.util.NavigableSet;
25  
26  import org.apache.hadoop.hbase.KeyValue.Type;
27  import org.apache.hadoop.hbase.classification.InterfaceAudience;
28  import org.apache.hadoop.hbase.Cell;
29  import org.apache.hadoop.hbase.CellComparator;
30  import org.apache.hadoop.hbase.CellUtil;
31  import org.apache.hadoop.hbase.HConstants;
32  import org.apache.hadoop.hbase.KeepDeletedCells;
33  import org.apache.hadoop.hbase.KeyValue;
34  import org.apache.hadoop.hbase.KeyValueUtil;
35  import org.apache.hadoop.hbase.client.Scan;
36  import org.apache.hadoop.hbase.filter.Filter;
37  import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
38  import org.apache.hadoop.hbase.io.TimeRange;
39  import org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteResult;
40  import org.apache.hadoop.hbase.util.Bytes;
41  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
42  
43  import com.google.common.base.Preconditions;
44  
45  /**
46   * A query matcher that is specifically designed for the scan case.
47   */
48  @InterfaceAudience.Private
49  public class ScanQueryMatcher {
50    // Optimization so we can skip lots of compares when we decide to skip
51    // to the next row.
52    private boolean stickyNextRow;
53    private final byte[] stopRow;
54  
55    private final TimeRange tr;
56  
57    private final Filter filter;
58  
59    /** Keeps track of deletes */
60    private final DeleteTracker deletes;
61  
62    /*
63     * The following three flags define how we deal with deletes.
64     * There are three different aspects:
65     * 1. Whether to keep delete markers. This is used in compactions.
66     *    Minor compactions always keep delete markers.
67     * 2. Whether to keep deleted rows. This is also used in compactions,
68     *    if the store is set to keep deleted rows. This implies keeping
69     *    the delete markers as well.
70     *    In this case deleted rows are subject to the normal max version
71     *    and TTL/min version rules just like "normal" rows.
72     *    3. Whether a scan can do time-travel queries to reach deleted rows
73     *    that lie behind a delete marker.
74     */
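      //
      // Illustrative summary (not part of the original comments), derived from how the
      // constructor below sets these flags:
      //   - non-raw user scan:  keepDeletedCells=FALSE, retainDeletesInOutput=false;
      //                         seePastDeleteMarkers only if the family keeps deleted cells
      //   - raw scan:           keepDeletedCells=TRUE,  retainDeletesInOutput=true
      //   - minor compaction:   retainDeletesInOutput=true,  keepDeletedCells from the family
      //   - major compaction:   retainDeletesInOutput=false, keepDeletedCells from the family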
75    /** whether to retain delete markers */
76    private boolean retainDeletesInOutput;
77  
78    /** whether to return deleted rows */
79    private final KeepDeletedCells keepDeletedCells;
80    /** whether time range queries can see rows "behind" a delete */
81    private final boolean seePastDeleteMarkers;
82  
83  
84    /** Keeps track of columns and versions */
85    private final ColumnTracker columns;
86  
87    /** Key to seek to in memstore and StoreFiles */
88    private final Cell startKey;
89  
90    /** Row comparator for the region this query is for */
91    private final CellComparator rowComparator;
92  
93    /* row is not private for tests */
94    /** Row the query is on */
95    Cell curCell;
96  
97    /**
98     * Oldest put in any of the involved store files.
99     * Used to decide whether it is OK to delete
100    * a family delete marker when this store
101    * keeps deleted KVs.
102    */
103   private final long earliestPutTs;
104   private final long ttl;
105 
106   /** The oldest timestamp we are interested in, based on TTL */
107   private final long oldestUnexpiredTS;
108   private final long now;
109 
110   /** readPoint over which the KVs are unconditionally included */
111   protected long maxReadPointToTrackVersions;
112 
113   private byte[] dropDeletesFromRow = null, dropDeletesToRow = null;
114 
115   /**
116    * Whether there is a null column in the query. There is always a null
117    * column in a wildcard column query.
118    * An explicit column query may contain a null column, depending on its
119    * first column.
120    */
121   private boolean hasNullColumn = true;
122 
123   private RegionCoprocessorHost regionCoprocessorHost = null;
124 
125   // By default, when hbase.hstore.time.to.purge.deletes is 0ms, a delete
126   // marker is always removed during a major compaction. If set to non-zero
127   // value then major compaction will try to keep a delete marker around for
128   // the given number of milliseconds. We want to keep the delete markers
129   // around a bit longer because old puts might appear out-of-order. For
130   // example, during log replication between two clusters.
131   //
132   // If the delete marker has lived longer than its column-family's TTL then
133   // the delete marker will be removed even if time.to.purge.deletes has not
134   // passed. This is because all the Puts that this delete marker can influence
135   // would have also expired. (Removing of delete markers on col family TTL will
136   // not happen if min-versions is set to non-zero)
137   //
138   // But, if time.to.purge.deletes has not expired then a delete
139   // marker will not be removed just because there are no Puts that it is
140   // currently influencing. This is because Puts that this delete can
141   // influence may appear out of order.
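      //
      // Worked example (illustrative, not from the original source): with
      // hbase.hstore.time.to.purge.deletes = 300000 (five minutes), a delete marker whose
      // timestamp is T keeps being emitted by compactions while (currentTime - T) <= 300000;
      // only once the marker is older than that window does match() consider purging it.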
142   private final long timeToPurgeDeletes;
143 
144   private final boolean isUserScan;
145 
146   private final boolean isReversed;
147 
148   /**
149    * True if we are doing a 'Get' Scan. Every Get is actually a one-row Scan.
150    */
151   private final boolean get;
152 
153   /**
154    * Construct a QueryMatcher for a scan
155    * @param scanInfo The store's immutable scan info
156    * @param scanType Type of the scan
157    * @param earliestPutTs Earliest put seen in any of the store files.
158    * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL
159    */
160   public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> columns,
161       ScanType scanType, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS,
162       long now, RegionCoprocessorHost regionCoprocessorHost) throws IOException {
163     TimeRange timeRange = scan.getColumnFamilyTimeRange().get(scanInfo.getFamily());
164     if (timeRange == null) {
165       this.tr = scan.getTimeRange();
166     } else {
167       this.tr = timeRange;
168     }
169     this.get = scan.isGetScan();
170     this.rowComparator = scanInfo.getComparator();
171     this.regionCoprocessorHost = regionCoprocessorHost;
172     this.deletes =  instantiateDeleteTracker();
173     this.stopRow = scan.getStopRow();
174     this.startKey = CellUtil.createFirstDeleteFamilyCellOnRow(scan.getStartRow(),
175         scanInfo.getFamily());
176     this.filter = scan.getFilter();
177     this.earliestPutTs = earliestPutTs;
178     this.oldestUnexpiredTS = oldestUnexpiredTS;
179     this.now = now;
180 
181     this.maxReadPointToTrackVersions = readPointToUse;
182     this.timeToPurgeDeletes = scanInfo.getTimeToPurgeDeletes();
183     this.ttl = oldestUnexpiredTS;
184 
185     /* how to deal with deletes */
186     this.isUserScan = scanType == ScanType.USER_SCAN;
187     // keep deleted cells: if compaction or raw scan
188     this.keepDeletedCells = scan.isRaw() ? KeepDeletedCells.TRUE :
189       isUserScan ? KeepDeletedCells.FALSE : scanInfo.getKeepDeletedCells();
190     // retain deletes: if minor compaction or raw scan
191     this.retainDeletesInOutput = scanType == ScanType.COMPACT_RETAIN_DELETES || scan.isRaw();
192     // seePastDeleteMarker: user initiated scans
193     this.seePastDeleteMarkers =
194         scanInfo.getKeepDeletedCells() != KeepDeletedCells.FALSE && isUserScan;
195 
196     int maxVersions =
197         scan.isRaw() ? scan.getMaxVersions() : Math.min(scan.getMaxVersions(),
198           scanInfo.getMaxVersions());
199 
200     // Single branch to deal with two types of reads (columns vs all in family)
201     if (columns == null || columns.size() == 0) {
202       // there is always a null column in the wildcard column query.
203       hasNullColumn = true;
204 
205       // use a specialized scan for wildcard column tracker.
206       this.columns = new ScanWildcardColumnTracker(
207           scanInfo.getMinVersions(), maxVersions, oldestUnexpiredTS);
208     } else {
209       // whether there is null column in the explicit column query
210       hasNullColumn = (columns.first().length == 0);
211 
212       // We can share the ExplicitColumnTracker, diff is we reset
213       // between rows, not between storefiles.
214       this.columns = new ExplicitColumnTracker(columns, scanInfo.getMinVersions(), maxVersions,
215           oldestUnexpiredTS);
216     }
217     this.isReversed = scan.isReversed();
218   }
219 
220   private DeleteTracker instantiateDeleteTracker() throws IOException {
221     DeleteTracker tracker = new ScanDeleteTracker();
222     if (regionCoprocessorHost != null) {
223       tracker = regionCoprocessorHost.postInstantiateDeleteTracker(tracker);
224     }
225     return tracker;
226   }
227 
228   /**
229    * Construct a QueryMatcher for a scan that drops deletes from a limited range of rows.
230    * @param scan
231    * @param scanInfo The store's immutable scan info
232    * @param columns
233    * @param earliestPutTs Earliest put seen in any of the store files.
234    * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL
235    * @param now the current server time
236    * @param dropDeletesFromRow The inclusive left bound of the range; can be EMPTY_START_ROW.
237    * @param dropDeletesToRow The exclusive right bound of the range; can be EMPTY_END_ROW.
238    * @param regionCoprocessorHost
239    * @throws IOException
240    */
241   public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> columns,
242       long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, long now,
243       byte[] dropDeletesFromRow, byte[] dropDeletesToRow,
244       RegionCoprocessorHost regionCoprocessorHost) throws IOException {
245     this(scan, scanInfo, columns, ScanType.COMPACT_RETAIN_DELETES, readPointToUse, earliestPutTs,
246         oldestUnexpiredTS, now, regionCoprocessorHost);
247     Preconditions.checkArgument((dropDeletesFromRow != null) && (dropDeletesToRow != null));
248     this.dropDeletesFromRow = dropDeletesFromRow;
249     this.dropDeletesToRow = dropDeletesToRow;
250   }
251 
252   /*
253    * Constructor for tests
254    */
255   ScanQueryMatcher(Scan scan, ScanInfo scanInfo,
256       NavigableSet<byte[]> columns, long oldestUnexpiredTS, long now) throws IOException {
257     this(scan, scanInfo, columns, ScanType.USER_SCAN,
258           Long.MAX_VALUE, /* max Readpoint to track versions */
259         HConstants.LATEST_TIMESTAMP, oldestUnexpiredTS, now, null);
260   }
261 
262   /**
263    *
264    * @return whether there is a null column in the query
265    */
266   public boolean hasNullColumnInQuery() {
267     return hasNullColumn;
268   }
269 
270   /**
271    * Determines if the caller should do one of several things:
272    * - seek/skip to the next row (MatchCode.SEEK_NEXT_ROW)
273    * - seek/skip to the next column (MatchCode.SEEK_NEXT_COL)
274    * - include the current KeyValue (MatchCode.INCLUDE)
275    * - ignore the current KeyValue (MatchCode.SKIP)
276    * - go to the next row (MatchCode.DONE)
277    *
278    * @param cell KeyValue to check
279    * @return The match code instance.
280    * @throws IOException in case there is an internal consistency problem
281    *      caused by data corruption.
282    */
283   public MatchCode match(Cell cell) throws IOException {
284     if (filter != null && filter.filterAllRemaining()) {
285       return MatchCode.DONE_SCAN;
286     }
287     if (curCell != null) {
288       int ret = this.rowComparator.compareRows(curCell, cell);
289       if (!this.isReversed) {
290         if (ret <= -1) {
291           return MatchCode.DONE;
292         } else if (ret >= 1) {
293           // could optimize this, if necessary?
294           // Could also be called SEEK_TO_CURRENT_ROW, but this
295           // should be rare/never happens.
296           return MatchCode.SEEK_NEXT_ROW;
297         }
298       } else {
299         if (ret <= -1) {
300           return MatchCode.SEEK_NEXT_ROW;
301         } else if (ret >= 1) {
302           return MatchCode.DONE;
303         }
304       }
305     } else {
306       // Since curCell is null, we are already sure that we have moved on to the next row
307       return MatchCode.DONE;
308     }
309 
310     // optimize case.
311     if (this.stickyNextRow) {
312       return MatchCode.SEEK_NEXT_ROW;
313     }
314 
315     if (this.columns.done()) {
316       stickyNextRow = true;
317       return MatchCode.SEEK_NEXT_ROW;
318     }
319 
320     long timestamp = cell.getTimestamp();
321     // check for early out based on timestamp alone
322     if (columns.isDone(timestamp)) {
323       return columns.getNextRowOrNextColumn(cell);
324     }
325     // check if the cell is expired by cell TTL
326     if (HStore.isCellTTLExpired(cell, this.oldestUnexpiredTS, this.now)) {
327       return MatchCode.SKIP;
328     }
329 
330     /*
331      * The delete logic is pretty complicated now.
332      * This complexity comes from the following:
333      * 1. The store might be instructed to keep deleted rows around.
334      * 2. A scan can optionally see past a delete marker now.
335      * 3. If deleted rows are kept, we have to find out when we can
336      *    remove the delete markers.
337      * 4. Family delete markers are always first (regardless of their TS).
338      * 5. Delete markers should not be counted as versions.
339      * 6. Delete markers affect puts of the *same* TS.
340      * 7. Delete markers need to be version counted together with the puts
341      *    they affect.
342      */
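        // Illustrative example of rules 5 and 6 (not in the original comments): a DeleteColumn
        // marker at ts=5 masks puts of that column with ts <= 5, while a plain Delete (version
        // delete) at ts=5 masks only the put with exactly ts=5; neither marker is itself
        // counted as a version of the column.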
343     byte typeByte = cell.getTypeByte();
344     long mvccVersion = cell.getSequenceId();
345     if (CellUtil.isDelete(cell)) {
346       if (keepDeletedCells == KeepDeletedCells.FALSE
347           || (keepDeletedCells == KeepDeletedCells.TTL && timestamp < ttl)) {
348         // first ignore delete markers if the scanner can do so, and the
349         // range does not include the marker
350         //
351         // during flushes and compactions also ignore delete markers newer
352         // than the readpoint of any open scanner, this prevents deleted
353         // rows that could still be seen by a scanner from being collected
354         boolean includeDeleteMarker = seePastDeleteMarkers ?
355             tr.withinTimeRange(timestamp) :
356             tr.withinOrAfterTimeRange(timestamp);
357         if (includeDeleteMarker
358             && mvccVersion <= maxReadPointToTrackVersions) {
359           this.deletes.add(cell);
360         }
361         // Can't early out now, because DelFam come before any other keys
362       }
363 
364       if ((!isUserScan)
365           && timeToPurgeDeletes > 0
366           && (EnvironmentEdgeManager.currentTime() - timestamp)
367             <= timeToPurgeDeletes) {
368         return MatchCode.INCLUDE;
369       } else if (retainDeletesInOutput || mvccVersion > maxReadPointToTrackVersions) {
370         // always include or it is not time yet to check whether it is OK
371         // to purge deletes or not
372         if (!isUserScan) {
373           // if this is not a user scan (compaction), we can filter this delete marker right here
374           // otherwise (i.e. a "raw" scan) we fall through to normal version and time range checking
375           return MatchCode.INCLUDE;
376         }
377       } else if (keepDeletedCells == KeepDeletedCells.TRUE
378           || (keepDeletedCells == KeepDeletedCells.TTL && timestamp >= ttl)) {
379         if (timestamp < earliestPutTs) {
380           // keeping deleted rows, but there are no puts older than
381           // this delete in the store files.
382           return columns.getNextRowOrNextColumn(cell);
383         }
384         // else: fall through and do version counting on the
385         // delete markers
386       } else {
387         return MatchCode.SKIP;
388       }
389       // note the following next else if...
390       // delete markers are not subject to other delete markers
391     } else if (!this.deletes.isEmpty()) {
392       DeleteResult deleteResult = deletes.isDeleted(cell);
393       switch (deleteResult) {
394         case FAMILY_DELETED:
395         case COLUMN_DELETED:
396           return columns.getNextRowOrNextColumn(cell);
397         case VERSION_DELETED:
398         case FAMILY_VERSION_DELETED:
399           return MatchCode.SKIP;
400         case NOT_DELETED:
401           break;
402         default:
403           throw new RuntimeException("UNEXPECTED");
404         }
405     }
406 
407     int timestampComparison = tr.compare(timestamp);
408     if (timestampComparison >= 1) {
409       return MatchCode.SKIP;
410     } else if (timestampComparison <= -1) {
411       return columns.getNextRowOrNextColumn(cell);
412     }
413 
414     // STEP 1: Check if the column is part of the requested columns
415     MatchCode colChecker = columns.checkColumn(cell, typeByte);
416     if (colChecker == MatchCode.INCLUDE) {
417       ReturnCode filterResponse = ReturnCode.SKIP;
418       // STEP 2: Yes, the column is part of the requested columns. Check if filter is present
419       if (filter != null) {
420         // STEP 3: Filter the key value and return if it filters out
421         filterResponse = filter.filterKeyValue(cell);
422         switch (filterResponse) {
423         case SKIP:
424           return MatchCode.SKIP;
425         case NEXT_COL:
426           return columns.getNextRowOrNextColumn(cell);
427         case NEXT_ROW:
428           stickyNextRow = true;
429           return MatchCode.SEEK_NEXT_ROW;
430         case SEEK_NEXT_USING_HINT:
431           return MatchCode.SEEK_NEXT_USING_HINT;
432         default:
433           // The filter returned INCLUDE or one of the INCLUDE_AND_* codes; fall through
434           break;
435         }
436       }
437       /*
438        * STEP 4: Reaching this step means the column is part of the requested columns and either
439        * the filter is null or the filter has returned INCLUDE or INCLUDE_AND_NEXT_COL response.
440        * Now check the number of versions needed. This method call returns SKIP, INCLUDE,
441        * INCLUDE_AND_SEEK_NEXT_ROW, INCLUDE_AND_SEEK_NEXT_COL.
442        *
443        * FilterResponse            ColumnChecker               Desired behavior
444        * INCLUDE                   SKIP                        row has already been included, SKIP.
445        * INCLUDE                   INCLUDE                     INCLUDE
446        * INCLUDE                   INCLUDE_AND_SEEK_NEXT_COL   INCLUDE_AND_SEEK_NEXT_COL
447        * INCLUDE                   INCLUDE_AND_SEEK_NEXT_ROW   INCLUDE_AND_SEEK_NEXT_ROW
448        * INCLUDE_AND_SEEK_NEXT_COL SKIP                        row has already been included, SKIP.
449        * INCLUDE_AND_SEEK_NEXT_COL INCLUDE                     INCLUDE_AND_SEEK_NEXT_COL
450        * INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_SEEK_NEXT_COL   INCLUDE_AND_SEEK_NEXT_COL
451        * INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_SEEK_NEXT_ROW   INCLUDE_AND_SEEK_NEXT_ROW
452        *
453        * In all the above scenarios, we return the column checker return value except for
454        * FilterResponse (INCLUDE_AND_SEEK_NEXT_COL) and ColumnChecker(INCLUDE)
455        */
456       colChecker = columns.checkVersions(cell, timestamp, typeByte,
457           mvccVersion > maxReadPointToTrackVersions);
458       //Optimize with stickyNextRow
459       boolean seekNextRowFromEssential = filterResponse == ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW &&
460           filter.isFamilyEssential(cell.getFamilyArray());
461       if (colChecker == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW || seekNextRowFromEssential) {
462         stickyNextRow = true;
463       }
464       if (filterResponse == ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) {
465         if (colChecker != MatchCode.SKIP) {
466           return MatchCode.INCLUDE_AND_SEEK_NEXT_ROW;
467         }
468         return MatchCode.SEEK_NEXT_ROW;
469       }
470       return (filterResponse == ReturnCode.INCLUDE_AND_NEXT_COL &&
471           colChecker == MatchCode.INCLUDE) ? MatchCode.INCLUDE_AND_SEEK_NEXT_COL
472           : colChecker;
473     }
474     stickyNextRow = (colChecker == MatchCode.SEEK_NEXT_ROW) ? true
475         : stickyNextRow;
476     return colChecker;
477   }
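      // Illustrative sketch of how a caller typically reacts to the MatchCode values returned
      // by match(). This is a hypothetical, simplified loop (the names 'heap' and 'results'
      // are assumptions, and the real StoreScanner logic differs in details):
      //
      //   Cell cell = heap.peek();
      //   switch (matcher.match(cell)) {
      //     case INCLUDE:                   results.add(cell); heap.next();                 break;
      //     case INCLUDE_AND_SEEK_NEXT_COL: results.add(cell); // fall through to the seek
      //     case SEEK_NEXT_COL:             heap.reseek(matcher.getKeyForNextColumn(cell)); break;
      //     case INCLUDE_AND_SEEK_NEXT_ROW: results.add(cell); // fall through to the seek
      //     case SEEK_NEXT_ROW:             heap.reseek(matcher.getKeyForNextRow(cell));    break;
      //     case SKIP:                      heap.next();                                    break;
      //     case SEEK_NEXT_USING_HINT:      heap.reseek(matcher.getNextKeyHint(cell));      break;
      //     case DONE:                      break;  // current row finished, hand back results
      //     case DONE_SCAN:                 break;  // whole scan finished
      //   }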
478 
479   /** Handle partial-drop-deletes. As we match keys in order, when we have a range from which
480    * we can drop deletes, we can set retainDeletesInOutput to false for the duration of this
481    * range only, and maintain consistency. */
482   private void checkPartialDropDeleteRange(Cell curCell) {
483     // If partial-drop-deletes are used, initially, dropDeletesFromRow and dropDeletesToRow
484     // are both set, and the matcher is set to retain deletes. We assume ordered keys. When
485     // dropDeletesFromRow is leq current kv, we start dropping deletes and reset
486     // dropDeletesFromRow; thus the 2nd "if" starts to apply.
487     if ((dropDeletesFromRow != null)
488         && (Arrays.equals(dropDeletesFromRow, HConstants.EMPTY_START_ROW) ||
489             (CellComparator.COMPARATOR.compareRows(curCell, dropDeletesFromRow, 0,
490                 dropDeletesFromRow.length) >= 0))) {
491       retainDeletesInOutput = false;
492       dropDeletesFromRow = null;
493     }
494     // If dropDeletesFromRow is null and dropDeletesToRow is set, we are inside the partial-
495     // drop-deletes range. When dropDeletesToRow is leq current kv, we stop dropping deletes,
496     // and reset dropDeletesToRow so that we don't do any more compares.
497     if ((dropDeletesFromRow == null)
498         && (dropDeletesToRow != null)
499         && !Arrays.equals(dropDeletesToRow, HConstants.EMPTY_END_ROW)
500         && (CellComparator.COMPARATOR
501             .compareRows(curCell, dropDeletesToRow, 0, dropDeletesToRow.length) >= 0)) {
502       retainDeletesInOutput = true;
503       dropDeletesToRow = null;
504     }
505   }
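      // Illustrative example (not in the original comments): with dropDeletesFromRow = "b" and
      // dropDeletesToRow = "d", delete markers are retained until the matcher reaches row "b",
      // dropped for rows in ["b", "d"), and retained again from row "d" onwards.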
506 
507   /**
508    * @return Returns false if we know there are no more rows to be scanned (We've reached the
509    * <code>stopRow</code> or we are scanning one row only because this Scan is for a Get, etc.).
510    */
511   public boolean moreRowsMayExistAfter(Cell kv) {
512     // If a 'get' Scan -- we are doing a Get (every Get is a single-row Scan in implementation) --
513     // then we are looking at one row only, the one specified in the Get coordinate, so we know
514     // for sure that there are no more rows on this Scan
515     if (this.get) {
516       return false;
517     }
518     // If no stopRow, return that there may be more rows. The tests that follow depend on a
519     // non-empty, non-default stopRow so this little test below short-circuits out doing the
520     // following compares.
521     if (this.stopRow == null || this.stopRow == HConstants.EMPTY_BYTE_ARRAY) {
522       return true;
523     }
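        // Illustrative example (not in the original comments): on a forward scan with
        // stopRow = "m", a cell in row "l" sorts before the stop row, so more rows may exist;
        // a cell in row "m" or beyond returns false because the stop row is exclusive.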
524     return this.isReversed?
525       rowComparator.compareRows(kv, stopRow, 0, stopRow.length) > 0:
526       Bytes.equals(stopRow, HConstants.EMPTY_END_ROW) ||
527         rowComparator.compareRows(kv, stopRow, 0, stopRow.length) < 0;
528   }
529 
530   /**
531    * Set the row when there is a change in row
532    * @param curCell a cell from the new row
533    */
534   public void setToNewRow(Cell curCell) {
535     checkPartialDropDeleteRange(curCell);
536     this.curCell = curCell;
537     reset();
538   }
539 
540   public void reset() {
541     this.deletes.reset();
542     this.columns.reset();
543 
544     stickyNextRow = false;
545   }
546 
547   /**
548    *
549    * @return the start key
550    */
551   public Cell getStartKey() {
552     return this.startKey;
553   }
554 
555   /**
556    *
557    * @return the Filter
558    */
559   Filter getFilter() {
560     return this.filter;
561   }
562 
563   public Cell getNextKeyHint(Cell kv) throws IOException {
564     if (filter == null) {
565       return null;
566     } else {
567       return filter.getNextCellHint(kv);
568     }
569   }
570 
571   public Cell getKeyForNextColumn(Cell kv) {
572     ColumnCount nextColumn = columns.getColumnHint();
573     if (nextColumn == null) {
574       return CellUtil.createLastOnRowCol(kv);
575     } else {
576       return CellUtil.createFirstOnRowCol(kv, nextColumn.getBuffer(), nextColumn.getOffset(),
577           nextColumn.getLength());
578     }
579   }
580 
581   public Cell getKeyForNextRow(Cell c) {
582     return CellUtil.createLastOnRow(c);
583   }
584 
585   /**
586    * @param nextIndexed the key of the next entry in the block index (if any)
587    * @param kv The Cell we're using to calculate the seek key
588    * @return result of the compare between the indexed key and the key portion of the passed cell
589    */
590   public int compareKeyForNextRow(Cell nextIndexed, Cell kv) {
591     return rowComparator.compareKeyBasedOnColHint(nextIndexed, kv, 0, 0, null, 0, 0,
592         HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode());
593   }
594 
595   /**
596    * @param nextIndexed the key of the next entry in the block index (if any)
597    * @param currentCell The Cell we're using to calculate the seek key
598    * @return result of the compare between the indexed key and the key portion of the passed cell
599    */
600   public int compareKeyForNextColumn(Cell nextIndexed, Cell currentCell) {
601     ColumnCount nextColumn = columns.getColumnHint();
602     if (nextColumn == null) {
603       return rowComparator.compareKeyBasedOnColHint(nextIndexed, currentCell, 0, 0, null, 0, 0,
604           HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode());
605     } else {
606       return rowComparator.compareKeyBasedOnColHint(nextIndexed, currentCell,
607           currentCell.getFamilyOffset(), currentCell.getFamilyLength(), nextColumn.getBuffer(),
608           nextColumn.getOffset(), nextColumn.getLength(), HConstants.LATEST_TIMESTAMP,
609           Type.Maximum.getCode());
610     }
611   }
612 
613   boolean isUserScan() {
614     return this.isUserScan;
615   }
616 
617   //Used only for testing purposes
618   static MatchCode checkColumn(ColumnTracker columnTracker, byte[] bytes, int offset,
619       int length, long ttl, byte type, boolean ignoreCount) throws IOException {
620     KeyValue kv = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY, 0, 0,
621         HConstants.EMPTY_BYTE_ARRAY, 0, 0, bytes, offset, length);
622     MatchCode matchCode = columnTracker.checkColumn(kv, type);
623     if (matchCode == MatchCode.INCLUDE) {
624       return columnTracker.checkVersions(kv, ttl, type, ignoreCount);
625     }
626     return matchCode;
627   }
628 
629   /**
630    * {@link #match} return codes.  These instruct the scanner moving through
631    * memstores and StoreFiles what to do with the current KeyValue.
632    * <p>
633    * Additionally, this contains "early-out" language to tell the scanner to
634    * move on to the next File (memstore or Storefile), or to return immediately.
635    */
636   public static enum MatchCode {
637     /**
638      * Include KeyValue in the returned result
639      */
640     INCLUDE,
641 
642     /**
643      * Do not include KeyValue in the returned result
644      */
645     SKIP,
646 
647     /**
648      * Do not include, jump to next StoreFile or memstore (in time order)
649      */
650     NEXT,
651 
652     /**
653      * Do not include, return current result
654      */
655     DONE,
656 
657     /**
658      * These codes are used by the ScanQueryMatcher
659      */
660 
661     /**
662      * Done with the row, seek there.
663      */
664     SEEK_NEXT_ROW,
665     /**
666      * Done with column, seek to next.
667      */
668     SEEK_NEXT_COL,
669 
670     /**
671      * Done with scan, thanks to the row filter.
672      */
673     DONE_SCAN,
674 
675     /*
676      * Seek to next key which is given as hint.
677      */
678     SEEK_NEXT_USING_HINT,
679 
680     /**
681      * Include KeyValue and done with column, seek to next.
682      */
683     INCLUDE_AND_SEEK_NEXT_COL,
684 
685     /**
686      * Include KeyValue and done with row, seek to next.
687      */
688     INCLUDE_AND_SEEK_NEXT_ROW,
689   }
690 }