View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.regionserver;
21  
22  import java.io.IOException;
23  import java.util.NavigableSet;
24  
25  import org.apache.hadoop.classification.InterfaceAudience;
26  import org.apache.hadoop.hbase.Cell;
27  import org.apache.hadoop.hbase.HConstants;
28  import org.apache.hadoop.hbase.KeyValue;
29  import org.apache.hadoop.hbase.client.Scan;
30  import org.apache.hadoop.hbase.filter.Filter;
31  import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
32  import org.apache.hadoop.hbase.io.TimeRange;
33  import org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteResult;
34  import org.apache.hadoop.hbase.util.Bytes;
35  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
36  
37  import com.google.common.base.Preconditions;
38  
/**
 * A query matcher that is specifically designed for the scan case.
 * <p>
 * Given the current row the scan is positioned on (via {@link #setRow}), the
 * {@link #match(KeyValue)} method classifies each KeyValue encountered by the
 * scanner: include it, skip it, or seek past the current column/row entirely.
 * The matcher also encapsulates the delete-marker semantics used by both user
 * scans and compactions.
 */
@InterfaceAudience.Private
public class ScanQueryMatcher {
  // Optimization so we can skip lots of compares when we decide to skip
  // to the next row.
  private boolean stickyNextRow;
  private final byte[] stopRow;

  /** Timestamp range of interest for this scan. */
  private final TimeRange tr;

  /** Optional user filter; may be null. */
  private final Filter filter;

  /** Keeps track of deletes */
  private final DeleteTracker deletes;

  /*
   * The following three booleans define how we deal with deletes.
   * There are three different aspects:
   * 1. Whether to keep delete markers. This is used in compactions.
   *    Minor compactions always keep delete markers.
   * 2. Whether to keep deleted rows. This is also used in compactions,
   *    if the store is set to keep deleted rows. This implies keeping
   *    the delete markers as well.
   *    In this case deleted rows are subject to the normal max version
   *    and TTL/min version rules just like "normal" rows.
   * 3. Whether a scan can do time travel queries even before deleted
   *    marker to reach deleted rows.
   */
  /** whether to retain delete markers */
  private boolean retainDeletesInOutput;

  /** whether to return deleted rows */
  private final boolean keepDeletedCells;
  /** whether time range queries can see rows "behind" a delete */
  private final boolean seePastDeleteMarkers;


  /** Keeps track of columns and versions */
  private final ColumnTracker columns;

  /** Key to seek to in memstore and StoreFiles */
  private final KeyValue startKey;

  /** Row comparator for the region this query is for */
  private final KeyValue.KVComparator rowComparator;

  /* row is not private for tests */
  /** Row the query is on */
  byte [] row;
  int rowOffset;
  short rowLength;
  
  /**
   * Oldest put in any of the involved store files
   * Used to decide whether it is ok to delete
   * family delete marker of this store keeps
   * deleted KVs.
   */
  private final long earliestPutTs;

  /** readPoint over which the KVs are unconditionally included */
  protected long maxReadPointToTrackVersions;

  // Bounds of the optional partial-drop-deletes range used during compaction;
  // see checkPartialDropDeleteRange(). Both null unless the range constructor
  // was used.
  private byte[] dropDeletesFromRow = null, dropDeletesToRow = null;

  /**
   * This variable shows whether there is a null column in the query. There
   * always exists a null column in the wildcard column query.
   * A null column may also exist in an explicit column query, depending on
   * the first requested column.
   * */
  private boolean hasNullColumn = true;

  // By default, when hbase.hstore.time.to.purge.deletes is 0ms, a delete
  // marker is always removed during a major compaction. If set to non-zero
  // value then major compaction will try to keep a delete marker around for
  // the given number of milliseconds. We want to keep the delete markers
  // around a bit longer because old puts might appear out-of-order. For
  // example, during log replication between two clusters.
  //
  // If the delete marker has lived longer than its column-family's TTL then
  // the delete marker will be removed even if time.to.purge.deletes has not
  // passed. This is because all the Puts that this delete marker can influence
  // would have also expired. (Removing of delete markers on col family TTL will
  // not happen if min-versions is set to non-zero)
  //
  // But, if time.to.purge.deletes has not expired then a delete
  // marker will not be removed just because there are no Puts that it is
  // currently influencing. This is because Puts, that this delete can
  // influence may appear out of order.
  private final long timeToPurgeDeletes;
  
  /** true for scans initiated by a user (as opposed to flush/compaction scans) */
  private final boolean isUserScan;

  /** true if this scan walks rows in reverse order */
  private final boolean isReversed;

  /**
   * Construct a QueryMatcher for a scan
   * @param scan
   * @param scanInfo The store's immutable scan info
   * @param columns
   * @param scanType Type of the scan
   * @param readPointToUse readpoint over which KVs are unconditionally included
   * @param earliestPutTs Earliest put seen in any of the store files.
   * @param oldestUnexpiredTS the oldest timestamp we are interested in,
   *  based on TTL
   */
  public ScanQueryMatcher(Scan scan, ScanInfo scanInfo,
      NavigableSet<byte[]> columns, ScanType scanType,
      long readPointToUse, long earliestPutTs, long oldestUnexpiredTS) {
    this.tr = scan.getTimeRange();
    this.rowComparator = scanInfo.getComparator();
    this.deletes =  new ScanDeleteTracker();
    this.stopRow = scan.getStopRow();
    this.startKey = KeyValue.createFirstDeleteFamilyOnRow(scan.getStartRow(),
        scanInfo.getFamily());
    this.filter = scan.getFilter();
    this.earliestPutTs = earliestPutTs;
    this.maxReadPointToTrackVersions = readPointToUse;
    this.timeToPurgeDeletes = scanInfo.getTimeToPurgeDeletes();

    /* how to deal with deletes */
    this.isUserScan = scanType == ScanType.USER_SCAN;
    // keep deleted cells: if compaction or raw scan
    this.keepDeletedCells = (scanInfo.getKeepDeletedCells() && !isUserScan) || scan.isRaw();
    // retain deletes: if minor compaction or raw scan
    this.retainDeletesInOutput = scanType == ScanType.COMPACT_RETAIN_DELETES || scan.isRaw();
    // seePastDeleteMarker: user initiated scans
    this.seePastDeleteMarkers = scanInfo.getKeepDeletedCells() && isUserScan;

    // Raw scans honor only the scan's max versions; otherwise the store's
    // configured max versions caps the scan's request.
    int maxVersions =
        scan.isRaw() ? scan.getMaxVersions() : Math.min(scan.getMaxVersions(),
          scanInfo.getMaxVersions());

    // Single branch to deal with two types of reads (columns vs all in family)
    if (columns == null || columns.size() == 0) {
      // there is always a null column in the wildcard column query.
      hasNullColumn = true;

      // use a specialized scan for wildcard column tracker.
      this.columns = new ScanWildcardColumnTracker(
          scanInfo.getMinVersions(), maxVersions, oldestUnexpiredTS);
    } else {
      // whether there is null column in the explicit column query
      hasNullColumn = (columns.first().length == 0);

      // We can share the ExplicitColumnTracker, diff is we reset
      // between rows, not between storefiles.
      byte[] attr = scan.getAttribute(Scan.HINT_LOOKAHEAD);
      this.columns = new ExplicitColumnTracker(columns, scanInfo.getMinVersions(), maxVersions,
          oldestUnexpiredTS, attr == null ? 0 : Bytes.toInt(attr));
    }
    this.isReversed = scan.isReversed();
  }

  /**
   * Construct a QueryMatcher for a scan that drop deletes from a limited range of rows.
   * @param scan
   * @param scanInfo The store's immutable scan info
   * @param columns
   * @param readPointToUse readpoint over which KVs are unconditionally included
   * @param earliestPutTs Earliest put seen in any of the store files.
   * @param oldestUnexpiredTS the oldest timestamp we are interested in,
   *  based on TTL
   * @param dropDeletesFromRow The inclusive left bound of the range; can be EMPTY_START_ROW.
   * @param dropDeletesToRow The exclusive right bound of the range; can be EMPTY_END_ROW.
   */
  public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> columns,
      long readPointToUse, long earliestPutTs, long oldestUnexpiredTS,
      byte[] dropDeletesFromRow, byte[] dropDeletesToRow) {
    this(scan, scanInfo, columns, ScanType.COMPACT_RETAIN_DELETES, readPointToUse, earliestPutTs,
        oldestUnexpiredTS);
    Preconditions.checkArgument((dropDeletesFromRow != null) && (dropDeletesToRow != null));
    this.dropDeletesFromRow = dropDeletesFromRow;
    this.dropDeletesToRow = dropDeletesToRow;
  }

  /*
   * Constructor for tests
   */
  ScanQueryMatcher(Scan scan, ScanInfo scanInfo,
      NavigableSet<byte[]> columns, long oldestUnexpiredTS) {
    this(scan, scanInfo, columns, ScanType.USER_SCAN,
          Long.MAX_VALUE, /* max Readpoint to track versions */
        HConstants.LATEST_TIMESTAMP, oldestUnexpiredTS);
  }

  /**
   *
   * @return  whether there is a null column in the query
   */
  public boolean hasNullColumnInQuery() {
    return hasNullColumn;
  }

  /**
   * Determines if the caller should do one of several things:
   * - seek/skip to the next row (MatchCode.SEEK_NEXT_ROW)
   * - seek/skip to the next column (MatchCode.SEEK_NEXT_COL)
   * - include the current KeyValue (MatchCode.INCLUDE)
   * - ignore the current KeyValue (MatchCode.SKIP)
   * - go to the next row (MatchCode.DONE)
   *
   * @param kv KeyValue to check
   * @return The match code instance.
   * @throws IOException in case there is an internal consistency problem
   *      caused by a data corruption.
   */
  public MatchCode match(KeyValue kv) throws IOException {
    if (filter != null && filter.filterAllRemaining()) {
      return MatchCode.DONE_SCAN;
    }

    // Parse the key directly out of the KeyValue's backing buffer to avoid
    // object allocation: [keyLength][rowLength][row][famLen][family]
    // [qualifier][timestamp][type].
    byte [] bytes = kv.getBuffer();
    int offset = kv.getOffset();

    int keyLength = Bytes.toInt(bytes, offset, Bytes.SIZEOF_INT);
    offset += KeyValue.ROW_OFFSET;

    int initialOffset = offset;

    short rowLength = Bytes.toShort(bytes, offset, Bytes.SIZEOF_SHORT);
    offset += Bytes.SIZEOF_SHORT;

    // Compare the KV's row against the row the matcher is positioned on.
    int ret = this.rowComparator.compareRows(row, this.rowOffset, this.rowLength,
        bytes, offset, rowLength);
    if (!this.isReversed) {
      if (ret <= -1) {
        // KV is past the current row: the current row is finished.
        return MatchCode.DONE;
      } else if (ret >= 1) {
        // could optimize this, if necessary?
        // Could also be called SEEK_TO_CURRENT_ROW, but this
        // should be rare/never happens.
        return MatchCode.SEEK_NEXT_ROW;
      }
    } else {
      // Reversed scan: the row ordering (and hence DONE vs SEEK) flips.
      if (ret <= -1) {
        return MatchCode.SEEK_NEXT_ROW;
      } else if (ret >= 1) {
        return MatchCode.DONE;
      }
    }


    // optimize case.
    if (this.stickyNextRow)
        return MatchCode.SEEK_NEXT_ROW;

    if (this.columns.done()) {
      stickyNextRow = true;
      return MatchCode.SEEK_NEXT_ROW;
    }

    //Passing rowLength
    offset += rowLength;

    //Skipping family
    byte familyLength = bytes [offset];
    offset += familyLength + 1;

    // Qualifier length is whatever remains of the key after row, family,
    // timestamp and type are accounted for.
    int qualLength = keyLength -
      (offset - initialOffset) - KeyValue.TIMESTAMP_TYPE_SIZE;

    long timestamp = Bytes.toLong(bytes, initialOffset + keyLength - KeyValue.TIMESTAMP_TYPE_SIZE);
    // check for early out based on timestamp alone
    if (columns.isDone(timestamp)) {
        return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
    }

    /*
     * The delete logic is pretty complicated now.
     * This is corroborated by the following:
     * 1. The store might be instructed to keep deleted rows around.
     * 2. A scan can optionally see past a delete marker now.
     * 3. If deleted rows are kept, we have to find out when we can
     *    remove the delete markers.
     * 4. Family delete markers are always first (regardless of their TS)
     * 5. Delete markers should not be counted as version
     * 6. Delete markers affect puts of the *same* TS
     * 7. Delete marker need to be version counted together with puts
     *    they affect
     */
    byte type = bytes[initialOffset + keyLength - 1];
    if (kv.isDelete()) {
      if (!keepDeletedCells) {
        // first ignore delete markers if the scanner can do so, and the
        // range does not include the marker
        //
        // during flushes and compactions also ignore delete markers newer
        // than the readpoint of any open scanner, this prevents deleted
        // rows that could still be seen by a scanner from being collected
        boolean includeDeleteMarker = seePastDeleteMarkers ?
            tr.withinTimeRange(timestamp) :
            tr.withinOrAfterTimeRange(timestamp);
        if (includeDeleteMarker
            && kv.getMvccVersion() <= maxReadPointToTrackVersions) {
          this.deletes.add(bytes, offset, qualLength, timestamp, type);
        }
        // Can't early out now, because DelFam come before any other keys
      }
      if (retainDeletesInOutput
          || (!isUserScan && (EnvironmentEdgeManager.currentTimeMillis() - timestamp) <= timeToPurgeDeletes)
          || kv.getMvccVersion() > maxReadPointToTrackVersions) {
        // always include or it is not time yet to check whether it is OK
        // to purge deletes or not
        if (!isUserScan) {
          // if this is not a user scan (compaction), we can filter this deletemarker right here
          // otherwise (i.e. a "raw" scan) we fall through to normal version and timerange checking
          return MatchCode.INCLUDE;
        }
      } else if (keepDeletedCells) {
        if (timestamp < earliestPutTs) {
          // keeping delete rows, but there are no puts older than
          // this delete in the store files.
          return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
        }
        // else: fall through and do version counting on the
        // delete markers
      } else {
        return MatchCode.SKIP;
      }
      // note the following next else if...
      // delete marker are not subject to other delete markers
    } else if (!this.deletes.isEmpty()) {
      // Not a delete marker: check whether a previously-seen delete marker
      // masks this KV.
      DeleteResult deleteResult = deletes.isDeleted(bytes, offset, qualLength,
          timestamp);
      switch (deleteResult) {
        case FAMILY_DELETED:
        case COLUMN_DELETED:
          return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
        case VERSION_DELETED:
        case FAMILY_VERSION_DELETED:
          return MatchCode.SKIP;
        case NOT_DELETED:
          break;
        default:
          throw new RuntimeException("UNEXPECTED");
        }
    }

    // Timestamp filtering: too new => SKIP (an older version may still match);
    // too old => no further version in this column can match, so seek on.
    int timestampComparison = tr.compare(timestamp);
    if (timestampComparison >= 1) {
      return MatchCode.SKIP;
    } else if (timestampComparison <= -1) {
      return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
    }

    // STEP 1: Check if the column is part of the requested columns
    MatchCode colChecker = columns.checkColumn(bytes, offset, qualLength, type);
    if (colChecker == MatchCode.INCLUDE) {
      ReturnCode filterResponse = ReturnCode.SKIP;
      // STEP 2: Yes, the column is part of the requested columns. Check if filter is present
      if (filter != null) {
        // STEP 3: Filter the key value and return if it filters out
        filterResponse = filter.filterKeyValue(kv);
        switch (filterResponse) {
        case SKIP:
          return MatchCode.SKIP;
        case NEXT_COL:
          return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
        case NEXT_ROW:
          stickyNextRow = true;
          return MatchCode.SEEK_NEXT_ROW;
        case SEEK_NEXT_USING_HINT:
          return MatchCode.SEEK_NEXT_USING_HINT;
        default:
          //It means it is either include or include and seek next
          break;
        }
      }
      /*
       * STEP 4: Reaching this step means the column is part of the requested columns and either
       * the filter is null or the filter has returned INCLUDE or INCLUDE_AND_NEXT_COL response.
       * Now check the number of versions needed. This method call returns SKIP, INCLUDE,
       * INCLUDE_AND_SEEK_NEXT_ROW, INCLUDE_AND_SEEK_NEXT_COL.
       *
       * FilterResponse            ColumnChecker               Desired behavior
       * INCLUDE                   SKIP                        row has already been included, SKIP.
       * INCLUDE                   INCLUDE                     INCLUDE
       * INCLUDE                   INCLUDE_AND_SEEK_NEXT_COL   INCLUDE_AND_SEEK_NEXT_COL
       * INCLUDE                   INCLUDE_AND_SEEK_NEXT_ROW   INCLUDE_AND_SEEK_NEXT_ROW
       * INCLUDE_AND_SEEK_NEXT_COL SKIP                        row has already been included, SKIP.
       * INCLUDE_AND_SEEK_NEXT_COL INCLUDE                     INCLUDE_AND_SEEK_NEXT_COL
       * INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_SEEK_NEXT_COL   INCLUDE_AND_SEEK_NEXT_COL
       * INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_SEEK_NEXT_ROW   INCLUDE_AND_SEEK_NEXT_ROW
       *
       * In all the above scenarios, we return the column checker return value except for
       * FilterResponse (INCLUDE_AND_SEEK_NEXT_COL) and ColumnChecker(INCLUDE)
       */
      colChecker =
          columns.checkVersions(bytes, offset, qualLength, timestamp, type,
            kv.getMvccVersion() > maxReadPointToTrackVersions);
      //Optimize with stickyNextRow
      stickyNextRow = colChecker == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW ? true : stickyNextRow;
      return (filterResponse == ReturnCode.INCLUDE_AND_NEXT_COL &&
          colChecker == MatchCode.INCLUDE) ? MatchCode.INCLUDE_AND_SEEK_NEXT_COL
          : colChecker;
    }
    stickyNextRow = (colChecker == MatchCode.SEEK_NEXT_ROW) ? true
        : stickyNextRow;
    return colChecker;
  }

  /** Handle partial-drop-deletes. As we match keys in order, when we have a range from which
   * we can drop deletes, we can set retainDeletesInOutput to false for the duration of this
   * range only, and maintain consistency. */
  private void checkPartialDropDeleteRange(byte [] row, int offset, short length) {
    // If partial-drop-deletes are used, initially, dropDeletesFromRow and dropDeletesToRow
    // are both set, and the matcher is set to retain deletes. We assume ordered keys. When
    // dropDeletesFromRow is leq current kv, we start dropping deletes and reset
    // dropDeletesFromRow; thus the 2nd "if" starts to apply.
    if ((dropDeletesFromRow != null)
        && ((dropDeletesFromRow == HConstants.EMPTY_START_ROW)
          || (Bytes.compareTo(row, offset, length,
              dropDeletesFromRow, 0, dropDeletesFromRow.length) >= 0))) {
      retainDeletesInOutput = false;
      dropDeletesFromRow = null;
    }
    // If dropDeletesFromRow is null and dropDeletesToRow is set, we are inside the partial-
    // drop-deletes range. When dropDeletesToRow is leq current kv, we stop dropping deletes,
    // and reset dropDeletesToRow so that we don't do any more compares.
    if ((dropDeletesFromRow == null)
        && (dropDeletesToRow != null) && (dropDeletesToRow != HConstants.EMPTY_END_ROW)
        && (Bytes.compareTo(row, offset, length,
            dropDeletesToRow, 0, dropDeletesToRow.length) >= 0)) {
      retainDeletesInOutput = true;
      dropDeletesToRow = null;
    }
  }

  /**
   * @param kv the KeyValue the scanner is currently positioned at
   * @return false if the given KeyValue is at or beyond the scan's stop row
   *   (respecting scan direction), i.e. no further rows can match; true
   *   otherwise.
   */
  public boolean moreRowsMayExistAfter(KeyValue kv) {
    if (this.isReversed) {
      // Reversed scan: done once the current row sorts at or before stopRow.
      if (rowComparator.compareRows(kv.getRowArray(), kv.getRowOffset(),
          kv.getRowLength(), stopRow, 0, stopRow.length) <= 0) {
        return false;
      } else {
        return true;
      }
    }
    if (!Bytes.equals(stopRow , HConstants.EMPTY_END_ROW) &&
        rowComparator.compareRows(kv.getRowArray(),kv.getRowOffset(),
            kv.getRowLength(), stopRow, 0, stopRow.length) >= 0) {
      // KV >= STOPROW
      // then NO there is nothing left.
      return false;
    } else {
      return true;
    }
  }

  /**
   * Set current row
   * @param row
   */
  public void setRow(byte [] row, int offset, short length) {
    // Update the drop-deletes state before repositioning, since the range
    // boundaries are row-based.
    checkPartialDropDeleteRange(row, offset, length);
    this.row = row;
    this.rowOffset = offset;
    this.rowLength = length;
    reset();
  }

  /** Reset per-row state: delete tracking, column tracking, and the
   *  skip-to-next-row optimization flag. */
  public void reset() {
    this.deletes.reset();
    this.columns.reset();

    stickyNextRow = false;
  }

  /**
   *
   * @return the start key
   */
  public KeyValue getStartKey() {
    return this.startKey;
  }

  /**
   *
   * @return the Filter
   */
  Filter getFilter() {
    return this.filter;
  }

  /**
   * Delegates to the filter's next-cell hint, if a filter is set.
   * @param kv the current cell
   * @return the cell to seek to next, or null if there is no filter
   * @throws IOException if the filter fails to produce a hint
   */
  public Cell getNextKeyHint(Cell kv) throws IOException {
    if (filter == null) {
      return null;
    } else {
      return filter.getNextCellHint(kv);
    }
  }

  /**
   * @param kv the current KeyValue
   * @return a fake key to seek to: the first key of the next interesting
   *   column (per the column tracker's hint), or the last possible key on the
   *   current column when no hint is available.
   */
  public KeyValue getKeyForNextColumn(KeyValue kv) {
    ColumnCount nextColumn = columns.getColumnHint();
    if (nextColumn == null) {
      return KeyValue.createLastOnRow(
          kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
          kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(),
          kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength());
    } else {
      return KeyValue.createFirstOnRow(
          kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
          kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(),
          nextColumn.getBuffer(), nextColumn.getOffset(), nextColumn.getLength());
    }
  }

  /**
   * @param kv the current KeyValue
   * @return a fake key sorting after everything on the current row, used to
   *   seek the scanner to the next row.
   */
  public KeyValue getKeyForNextRow(KeyValue kv) {
    return KeyValue.createLastOnRow(
        kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
        null, 0, 0,
        null, 0, 0);
  }

  //Used only for testing purposes: runs the column check followed by the
  //version check, mirroring the two-step flow in match().
  static MatchCode checkColumn(ColumnTracker columnTracker, byte[] bytes, int offset,
      int length, long ttl, byte type, boolean ignoreCount) throws IOException {
    MatchCode matchCode = columnTracker.checkColumn(bytes, offset, length, type);
    if (matchCode == MatchCode.INCLUDE) {
      return columnTracker.checkVersions(bytes, offset, length, ttl, type, ignoreCount);
    }
    return matchCode;
  }

  /**
   * {@link #match} return codes.  These instruct the scanner moving through
   * memstores and StoreFiles what to do with the current KeyValue.
   * <p>
   * Additionally, this contains "early-out" language to tell the scanner to
   * move on to the next File (memstore or Storefile), or to return immediately.
   */
  public static enum MatchCode {
    /**
     * Include KeyValue in the returned result
     */
    INCLUDE,

    /**
     * Do not include KeyValue in the returned result
     */
    SKIP,

    /**
     * Do not include, jump to next StoreFile or memstore (in time order)
     */
    NEXT,

    /**
     * Do not include, return current result
     */
    DONE,

    /**
     * These codes are used by the ScanQueryMatcher
     */

    /**
     * Done with the row, seek there.
     */
    SEEK_NEXT_ROW,
    /**
     * Done with column, seek to next.
     */
    SEEK_NEXT_COL,

    /**
     * Done with scan, thanks to the row filter.
     */
    DONE_SCAN,

    /**
     * Seek to next key which is given as hint.
     */
    SEEK_NEXT_USING_HINT,

    /**
     * Include KeyValue and done with column, seek to next.
     */
    INCLUDE_AND_SEEK_NEXT_COL,

    /**
     * Include KeyValue and done with row, seek to next.
     */
    INCLUDE_AND_SEEK_NEXT_ROW,
  }
}