
1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.regionserver;
21  
22  import java.io.IOException;
23  import java.util.NavigableSet;
24  
25  import org.apache.hadoop.classification.InterfaceAudience;
26  import org.apache.hadoop.hbase.Cell;
27  import org.apache.hadoop.hbase.CellUtil;
28  import org.apache.hadoop.hbase.HConstants;
29  import org.apache.hadoop.hbase.KeyValue;
30  import org.apache.hadoop.hbase.KeyValueUtil;
31  import org.apache.hadoop.hbase.client.Scan;
32  import org.apache.hadoop.hbase.filter.Filter;
33  import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
34  import org.apache.hadoop.hbase.io.TimeRange;
35  import org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteResult;
36  import org.apache.hadoop.hbase.util.Bytes;
37  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
38  
39  import com.google.common.base.Preconditions;
40  
41  /**
42   * A query matcher that is specifically designed for the scan case.
43   */
44  @InterfaceAudience.Private
45  public class ScanQueryMatcher {
46    // Optimization so we can skip lots of compares when we decide to skip
47    // to the next row.
48    private boolean stickyNextRow;
49    private final byte[] stopRow;
50  
51    private final TimeRange tr;
52  
53    private final Filter filter;
54  
55    /** Keeps track of deletes */
56    private final DeleteTracker deletes;
57  
58    /*
59     * The following three booleans define how we deal with deletes.
60     * There are three different aspects:
61     * 1. Whether to keep delete markers. This is used in compactions.
62     *    Minor compactions always keep delete markers.
63     * 2. Whether to keep deleted rows. This is also used in compactions,
64     *    if the store is set to keep deleted rows. This implies keeping
65     *    the delete markers as well.
66     *    In this case deleted rows are subject to the normal max version
67     *    and TTL/min version rules just like "normal" rows.
68    * 3. Whether a scan can do time-travel queries to a point before a
69    *    delete marker, and thereby see the deleted rows.
70     */
71    /** whether to retain delete markers */
72    private boolean retainDeletesInOutput;
73  
74    /** whether to return deleted rows */
75    private final boolean keepDeletedCells;
76    /** whether time range queries can see rows "behind" a delete */
77    private final boolean seePastDeleteMarkers;
78  
79  
80    /** Keeps track of columns and versions */
81    private final ColumnTracker columns;
82  
83    /** Key to seek to in memstore and StoreFiles */
84    private final Cell startKey;
85  
86    /** Row comparator for the region this query is for */
87    private final KeyValue.KVComparator rowComparator;
88  
89    /* row is not private for tests */
90    /** Row the query is on */
91    byte [] row;
92    int rowOffset;
93    short rowLength;
94    
95   /**
96    * Oldest put in any of the involved store files.
97    * Used to decide whether it is OK to delete
98    * a family delete marker when this store keeps
99    * deleted KVs.
100   */
101   private final long earliestPutTs;
102 
103   /** readPoint over which the KVs are unconditionally included */
104   protected long maxReadPointToTrackVersions;
105 
106   private byte[] dropDeletesFromRow = null, dropDeletesToRow = null;
107 
108   /**
109    * Whether there is a null column in the query. There is always
110    * a null column in a wildcard column query.
111    * There may also be a null column in an explicit column query, depending on
112    * the first column.
113    */
114   private boolean hasNullColumn = true;
115   
116   private RegionCoprocessorHost regionCoprocessorHost = null;
117 
118   // By default, when hbase.hstore.time.to.purge.deletes is 0ms, a delete
119   // marker is always removed during a major compaction. If set to non-zero
120   // value then major compaction will try to keep a delete marker around for
121   // the given number of milliseconds. We want to keep the delete markers
122   // around a bit longer because old puts might appear out-of-order. For
123   // example, during log replication between two clusters.
124   //
125   // If the delete marker has lived longer than its column-family's TTL then
126   // the delete marker will be removed even if time.to.purge.deletes has not
127   // passed. This is because all the Puts that this delete marker can influence
128   // would have also expired. (Removing of delete markers on col family TTL will
129   // not happen if min-versions is set to non-zero)
130   //
131   // But, if time.to.purge.deletes has not expired then a delete
132   // marker will not be removed just because there are no Puts that it is
133   // currently influencing. This is because Puts that this delete can
134   // influence may appear out of order.
135   private final long timeToPurgeDeletes;
136   
137   private final boolean isUserScan;
138 
139   private final boolean isReversed;
140 
141   /**
142    * Construct a QueryMatcher for a scan.
143    * @param scan the Scan being executed
144    * @param scanInfo The store's immutable scan info
145    * @param columns the requested columns; null or empty means all columns in the family
146    * @param scanType Type of the scan
147    * @param readPointToUse readpoint over which the KVs are unconditionally included
148    * @param earliestPutTs Earliest put seen in any of the store files.
149    * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL
150    * @param regionCoprocessorHost the coprocessor host of the region, if any
151    * @throws IOException if the delete tracker cannot be instantiated
152    */
153   public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> columns,
154       ScanType scanType, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS,
155       RegionCoprocessorHost regionCoprocessorHost) throws IOException {
156     this.tr = scan.getTimeRange();
157     this.rowComparator = scanInfo.getComparator();
158     this.regionCoprocessorHost = regionCoprocessorHost;
159     this.deletes =  instantiateDeleteTracker();
160     this.stopRow = scan.getStopRow();
161     this.startKey = KeyValueUtil.createFirstDeleteFamilyOnRow(scan.getStartRow(),
162         scanInfo.getFamily());
163     this.filter = scan.getFilter();
164     this.earliestPutTs = earliestPutTs;
165     this.maxReadPointToTrackVersions = readPointToUse;
166     this.timeToPurgeDeletes = scanInfo.getTimeToPurgeDeletes();
167 
168     /* how to deal with deletes */
169     this.isUserScan = scanType == ScanType.USER_SCAN;
170     // keep deleted cells: if compaction or raw scan
171     this.keepDeletedCells = (scanInfo.getKeepDeletedCells() && !isUserScan) || scan.isRaw();
172     // retain deletes: if minor compaction or raw scan
173     this.retainDeletesInOutput = scanType == ScanType.COMPACT_RETAIN_DELETES || scan.isRaw();
174     // seePastDeleteMarker: user initiated scans
175     this.seePastDeleteMarkers = scanInfo.getKeepDeletedCells() && isUserScan;
176 
177     int maxVersions =
178         scan.isRaw() ? scan.getMaxVersions() : Math.min(scan.getMaxVersions(),
179           scanInfo.getMaxVersions());
180 
181     // Single branch to deal with two types of reads (columns vs all in family)
182     if (columns == null || columns.size() == 0) {
183       // there is always a null column in the wildcard column query.
184       hasNullColumn = true;
185 
186       // use a specialized scan for wildcard column tracker.
187       this.columns = new ScanWildcardColumnTracker(
188           scanInfo.getMinVersions(), maxVersions, oldestUnexpiredTS);
189     } else {
190       // whether there is null column in the explicit column query
191       hasNullColumn = (columns.first().length == 0);
192 
193       // We can share the ExplicitColumnTracker, diff is we reset
194       // between rows, not between storefiles.
195       byte[] attr = scan.getAttribute(Scan.HINT_LOOKAHEAD);
196       this.columns = new ExplicitColumnTracker(columns, scanInfo.getMinVersions(), maxVersions,
197           oldestUnexpiredTS, attr == null ? 0 : Bytes.toInt(attr));
198     }
199     this.isReversed = scan.isReversed();
200   }
201 
202   private DeleteTracker instantiateDeleteTracker() throws IOException {
203     DeleteTracker tracker = new ScanDeleteTracker();
204     if (regionCoprocessorHost != null) {
205       tracker = regionCoprocessorHost.postInstantiateDeleteTracker(tracker);
206     }
207     return tracker;
208   }
209 
210   /**
211    * Construct a QueryMatcher for a scan that drops deletes from a limited range of rows.
212    * @param scan the Scan being executed
213    * @param scanInfo The store's immutable scan info
214    * @param columns the requested columns; null or empty means all columns in the family
215    * @param readPointToUse readpoint over which the KVs are unconditionally included
216    * @param earliestPutTs Earliest put seen in any of the store files.
217    * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL
218    * @param dropDeletesFromRow The inclusive left bound of the range; can be EMPTY_START_ROW.
219    * @param dropDeletesToRow The exclusive right bound of the range; can be EMPTY_END_ROW.
220    * @param regionCoprocessorHost the coprocessor host of the region, if any
221    * @throws IOException if the delete tracker cannot be instantiated
222    */
223   public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> columns,
224       long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, byte[] dropDeletesFromRow,
225       byte[] dropDeletesToRow, RegionCoprocessorHost regionCoprocessorHost) throws IOException {
226     this(scan, scanInfo, columns, ScanType.COMPACT_RETAIN_DELETES, readPointToUse, earliestPutTs,
227         oldestUnexpiredTS, regionCoprocessorHost);
228     Preconditions.checkArgument((dropDeletesFromRow != null) && (dropDeletesToRow != null));
229     this.dropDeletesFromRow = dropDeletesFromRow;
230     this.dropDeletesToRow = dropDeletesToRow;
231   }
232 
233   /*
234    * Constructor for tests
235    */
236   ScanQueryMatcher(Scan scan, ScanInfo scanInfo,
237       NavigableSet<byte[]> columns, long oldestUnexpiredTS) throws IOException {
238     this(scan, scanInfo, columns, ScanType.USER_SCAN,
239           Long.MAX_VALUE, /* max Readpoint to track versions */
240         HConstants.LATEST_TIMESTAMP, oldestUnexpiredTS, null);
241   }
242 
243   /**
244    *
245    * @return whether there is a null column in the query
246    */
247   public boolean hasNullColumnInQuery() {
248     return hasNullColumn;
249   }
250 
251   /**
252    * Determines if the caller should do one of several things:
253    * - seek/skip to the next row (MatchCode.SEEK_NEXT_ROW)
254    * - seek/skip to the next column (MatchCode.SEEK_NEXT_COL)
255    * - include the current KeyValue (MatchCode.INCLUDE)
256    * - ignore the current KeyValue (MatchCode.SKIP)
257    * - the next row has been reached, return the current result (MatchCode.DONE)
258    *
259    * @param cell the Cell to check
260    * @return The match code instance.
261    * @throws IOException in case there is an internal consistency problem
262    *      caused by data corruption.
263    */
264   public MatchCode match(Cell cell) throws IOException {
265     if (filter != null && filter.filterAllRemaining()) {
266       return MatchCode.DONE_SCAN;
267     }
268     int ret = this.rowComparator.compareRows(row, this.rowOffset, this.rowLength,
269         cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
270     if (!this.isReversed) {
271       if (ret <= -1) {
272         return MatchCode.DONE;
273       } else if (ret >= 1) {
274         // could optimize this, if necessary?
275         // Could also be called SEEK_TO_CURRENT_ROW, but this
276         // should be rare/never happens.
277         return MatchCode.SEEK_NEXT_ROW;
278       }
279     } else {
280       if (ret <= -1) {
281         return MatchCode.SEEK_NEXT_ROW;
282       } else if (ret >= 1) {
283         return MatchCode.DONE;
284       }
285     }
286 
287     // Optimization: we have already decided to skip to the next row.
288     if (this.stickyNextRow)
289       return MatchCode.SEEK_NEXT_ROW;
290 
291     if (this.columns.done()) {
292       stickyNextRow = true;
293       return MatchCode.SEEK_NEXT_ROW;
294     }
295 
296     int qualifierOffset = cell.getQualifierOffset();
297     int qualifierLength = cell.getQualifierLength();
298     long timestamp = cell.getTimestamp();
299     // check for early out based on timestamp alone
300     if (columns.isDone(timestamp)) {
301       return columns.getNextRowOrNextColumn(cell.getQualifierArray(), qualifierOffset,
302           qualifierLength);
303     }
304 
305     /*
306      * The delete logic is pretty complicated now.
307      * This is corroborated by the following:
308      * 1. The store might be instructed to keep deleted rows around.
309      * 2. A scan can optionally see past a delete marker now.
310      * 3. If deleted rows are kept, we have to find out when we can
311      *    remove the delete markers.
312      * 4. Family delete markers are always first (regardless of their TS)
313      * 5. Delete markers should not be counted as a version
314      * 6. Delete markers affect puts of the *same* TS
315      * 7. Delete markers need to be version counted together with the puts
316      *    they affect
317      */
318     byte typeByte = cell.getTypeByte();
319     long mvccVersion = cell.getMvccVersion();
320     if (CellUtil.isDelete(cell)) {
321       if (!keepDeletedCells) {
322         // first ignore delete markers if the scanner can do so, and the
323         // range does not include the marker
324         //
325         // during flushes and compactions also ignore delete markers newer
326         // than the readpoint of any open scanner, this prevents deleted
327         // rows that could still be seen by a scanner from being collected
328         boolean includeDeleteMarker = seePastDeleteMarkers ?
329             tr.withinTimeRange(timestamp) :
330             tr.withinOrAfterTimeRange(timestamp);
331         if (includeDeleteMarker
332             && mvccVersion <= maxReadPointToTrackVersions) {
333           this.deletes.add(cell);
334         }
335         // Can't early out now, because DelFam come before any other keys
336       }
337      
338       if ((!isUserScan)
339           && timeToPurgeDeletes > 0
340           && (EnvironmentEdgeManager.currentTimeMillis() - timestamp) 
341             <= timeToPurgeDeletes) {
342         return MatchCode.INCLUDE;
343       } else if (retainDeletesInOutput || mvccVersion > maxReadPointToTrackVersions) {
344         // always include or it is not time yet to check whether it is OK
345         // to purge deletes or not
346         if (!isUserScan) {
347           // if this is not a user scan (compaction), we can filter this deletemarker right here
348           // otherwise (i.e. a "raw" scan) we fall through to normal version and timerange checking
349           return MatchCode.INCLUDE;
350         }
351       } else if (keepDeletedCells) {
352         if (timestamp < earliestPutTs) {
353           // keeping deleted rows, but there are no puts older than
354           // this delete in the store files.
355           return columns.getNextRowOrNextColumn(cell.getQualifierArray(),
356               qualifierOffset, qualifierLength);
357         }
358         // else: fall through and do version counting on the
359         // delete markers
360       } else {
361         return MatchCode.SKIP;
362       }
363       // note the following "else if":
364       // delete markers are not subject to other delete markers
365     } else if (!this.deletes.isEmpty()) {
366       DeleteResult deleteResult = deletes.isDeleted(cell);
367       switch (deleteResult) {
368         case FAMILY_DELETED:
369         case COLUMN_DELETED:
370           return columns.getNextRowOrNextColumn(cell.getQualifierArray(),
371               qualifierOffset, qualifierLength);
372         case VERSION_DELETED:
373         case FAMILY_VERSION_DELETED:
374           return MatchCode.SKIP;
375         case NOT_DELETED:
376           break;
377         default:
378           throw new RuntimeException("UNEXPECTED");
379       }
380     }
381 
382     int timestampComparison = tr.compare(timestamp);
383     if (timestampComparison >= 1) {
384       return MatchCode.SKIP;
385     } else if (timestampComparison <= -1) {
386       return columns.getNextRowOrNextColumn(cell.getQualifierArray(), qualifierOffset,
387           qualifierLength);
388     }
389 
390     // STEP 1: Check if the column is part of the requested columns
391     MatchCode colChecker = columns.checkColumn(cell.getQualifierArray(), 
392         qualifierOffset, qualifierLength, typeByte);
393     if (colChecker == MatchCode.INCLUDE) {
394       ReturnCode filterResponse = ReturnCode.SKIP;
395       // STEP 2: Yes, the column is part of the requested columns. Check if filter is present
396       if (filter != null) {
397         // STEP 3: Filter the key value and return if it filters out
398         filterResponse = filter.filterKeyValue(cell);
399         switch (filterResponse) {
400         case SKIP:
401           return MatchCode.SKIP;
402         case NEXT_COL:
403           return columns.getNextRowOrNextColumn(cell.getQualifierArray(), 
404               qualifierOffset, qualifierLength);
405         case NEXT_ROW:
406           stickyNextRow = true;
407           return MatchCode.SEEK_NEXT_ROW;
408         case SEEK_NEXT_USING_HINT:
409           return MatchCode.SEEK_NEXT_USING_HINT;
410         default:
411           // The filter returned either INCLUDE or INCLUDE_AND_NEXT_COL
412           break;
413         }
414       }
415       /*
416        * STEP 4: Reaching this step means the column is part of the requested columns and either
417        * the filter is null or the filter has returned INCLUDE or INCLUDE_AND_NEXT_COL response.
418        * Now check the number of versions needed. This method call returns SKIP, INCLUDE,
419        * INCLUDE_AND_SEEK_NEXT_ROW, INCLUDE_AND_SEEK_NEXT_COL.
420        *
421        * FilterResponse            ColumnChecker               Desired behavior
422        * INCLUDE                   SKIP                        row has already been included, SKIP.
423        * INCLUDE                   INCLUDE                     INCLUDE
424        * INCLUDE                   INCLUDE_AND_SEEK_NEXT_COL   INCLUDE_AND_SEEK_NEXT_COL
425        * INCLUDE                   INCLUDE_AND_SEEK_NEXT_ROW   INCLUDE_AND_SEEK_NEXT_ROW
426        * INCLUDE_AND_SEEK_NEXT_COL SKIP                        row has already been included, SKIP.
427        * INCLUDE_AND_SEEK_NEXT_COL INCLUDE                     INCLUDE_AND_SEEK_NEXT_COL
428        * INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_SEEK_NEXT_COL   INCLUDE_AND_SEEK_NEXT_COL
429        * INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_SEEK_NEXT_ROW   INCLUDE_AND_SEEK_NEXT_ROW
430        *
431        * In all the above scenarios, we return the column checker return value except for
432        * FilterResponse (INCLUDE_AND_SEEK_NEXT_COL) and ColumnChecker(INCLUDE)
433        */
434       colChecker =
435           columns.checkVersions(cell.getQualifierArray(), qualifierOffset,
436               qualifierLength, timestamp, typeByte,
437             mvccVersion > maxReadPointToTrackVersions);
438       //Optimize with stickyNextRow
439       stickyNextRow = colChecker == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW ? true : stickyNextRow;
440       return (filterResponse == ReturnCode.INCLUDE_AND_NEXT_COL &&
441           colChecker == MatchCode.INCLUDE) ? MatchCode.INCLUDE_AND_SEEK_NEXT_COL
442           : colChecker;
443     }
444     stickyNextRow = (colChecker == MatchCode.SEEK_NEXT_ROW) ? true
445         : stickyNextRow;
446     return colChecker;
447   }
448 
449   /** Handle partial-drop-deletes. As we match keys in order, when we have a range from which
450    * we can drop deletes, we can set retainDeletesInOutput to false for the duration of this
451    * range only, and maintain consistency. */
452   private void checkPartialDropDeleteRange(byte [] row, int offset, short length) {
453     // If partial-drop-deletes are used, initially, dropDeletesFromRow and dropDeletesToRow
454     // are both set, and the matcher is set to retain deletes. We assume ordered keys. When
455     // dropDeletesFromRow is leq current kv, we start dropping deletes and reset
456     // dropDeletesFromRow; thus the 2nd "if" starts to apply.
457     if ((dropDeletesFromRow != null)
458         && ((dropDeletesFromRow == HConstants.EMPTY_START_ROW)
459           || (Bytes.compareTo(row, offset, length,
460               dropDeletesFromRow, 0, dropDeletesFromRow.length) >= 0))) {
461       retainDeletesInOutput = false;
462       dropDeletesFromRow = null;
463     }
464     // If dropDeletesFromRow is null and dropDeletesToRow is set, we are inside the partial-
465     // drop-deletes range. When dropDeletesToRow is leq current kv, we stop dropping deletes,
466     // and reset dropDeletesToRow so that we don't do any more compares.
467     if ((dropDeletesFromRow == null)
468         && (dropDeletesToRow != null) && (dropDeletesToRow != HConstants.EMPTY_END_ROW)
469         && (Bytes.compareTo(row, offset, length,
470             dropDeletesToRow, 0, dropDeletesToRow.length) >= 0)) {
471       retainDeletesInOutput = true;
472       dropDeletesToRow = null;
473     }
474   }
475 
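  /**
   * @param kv the Cell currently being matched
   * @return false if the stop row has been reached in the direction of this scan
   *   (so no more matching rows can exist after the given Cell), true otherwise
   */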
476   public boolean moreRowsMayExistAfter(Cell kv) {
477     if (this.isReversed) {
478       if (rowComparator.compareRows(kv.getRowArray(), kv.getRowOffset(),
479           kv.getRowLength(), stopRow, 0, stopRow.length) <= 0) {
480         return false;
481       } else {
482         return true;
483       }
484     }
485     if (!Bytes.equals(stopRow, HConstants.EMPTY_END_ROW) &&
486         rowComparator.compareRows(kv.getRowArray(), kv.getRowOffset(),
487             kv.getRowLength(), stopRow, 0, stopRow.length) >= 0) {
488       // KV >= STOPROW
489       // then NO there is nothing left.
490       return false;
491     } else {
492       return true;
493     }
494   }
495 
496   /**
497    * Set the row the matcher is currently working on.
498    * @param row byte array holding the row key; offset and length delimit the key within it
499    */
500   public void setRow(byte [] row, int offset, short length) {
501     checkPartialDropDeleteRange(row, offset, length);
502     this.row = row;
503     this.rowOffset = offset;
504     this.rowLength = length;
505     reset();
506   }
507 
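  /**
   * Clears the per-row state: the delete tracker, the column tracker and the
   * sticky next-row flag.
   */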
508   public void reset() {
509     this.deletes.reset();
510     this.columns.reset();
511 
512     stickyNextRow = false;
513   }
514 
515   /**
516    *
517    * @return the start key
518    */
519   public Cell getStartKey() {
520     return this.startKey;
521   }
522 
523   /**
524    *
525    * @return the Filter
526    */
527   Filter getFilter() {
528     return this.filter;
529   }
530 
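  /**
   * @param kv the Cell for which a seek hint is requested
   * @return the next key hint from the filter, or null if no filter is set
   * @throws IOException if the filter fails while computing the hint
   */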
531   public Cell getNextKeyHint(Cell kv) throws IOException {
532     if (filter == null) {
533       return null;
534     } else {
535       return filter.getNextCellHint(kv);
536     }
537   }
538 
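  /**
   * @param kv the current Cell
   * @return a fake key positioned at the first Cell of the next column the tracker is
   *   interested in, or at the last Cell of the current row/column when no hint is available
   */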
539   public Cell getKeyForNextColumn(Cell kv) {
540     ColumnCount nextColumn = columns.getColumnHint();
541     if (nextColumn == null) {
542       return KeyValueUtil.createLastOnRow(
543           kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
544           kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(),
545           kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength());
546     } else {
547       return KeyValueUtil.createFirstOnRow(
548           kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
549           kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(),
550           nextColumn.getBuffer(), nextColumn.getOffset(), nextColumn.getLength());
551     }
552   }
553 
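  /**
   * @param kv the current Cell
   * @return a fake key that sorts after every Cell of the given Cell's row, used to seek
   *   to the next row
   */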
554   public Cell getKeyForNextRow(Cell kv) {
555     return KeyValueUtil.createLastOnRow(
556         kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
557         null, 0, 0,
558         null, 0, 0);
559   }
560 
561   //Used only for testing purposes
562   static MatchCode checkColumn(ColumnTracker columnTracker, byte[] bytes, int offset,
563       int length, long ttl, byte type, boolean ignoreCount) throws IOException {
564     MatchCode matchCode = columnTracker.checkColumn(bytes, offset, length, type);
565     if (matchCode == MatchCode.INCLUDE) {
566       return columnTracker.checkVersions(bytes, offset, length, ttl, type, ignoreCount);
567     }
568     return matchCode;
569   }
570 
571   /**
572    * {@link #match} return codes.  These instruct the scanner moving through
573    * memstores and StoreFiles what to do with the current KeyValue.
574    * <p>
575    * Additionally, this contains "early-out" language to tell the scanner to
576    * move on to the next File (memstore or Storefile), or to return immediately.
577    */
578   public static enum MatchCode {
579     /**
580      * Include KeyValue in the returned result
581      */
582     INCLUDE,
583 
584     /**
585      * Do not include KeyValue in the returned result
586      */
587     SKIP,
588 
589     /**
590      * Do not include, jump to next StoreFile or memstore (in time order)
591      */
592     NEXT,
593 
594     /**
595      * Do not include, return current result
596      */
597     DONE,
598 
599     /*
600      * These codes are used by the ScanQueryMatcher
601      */
602 
603     /**
604      * Done with the row, seek there.
605      */
606     SEEK_NEXT_ROW,
607     /**
608      * Done with column, seek to next.
609      */
610     SEEK_NEXT_COL,
611 
612     /**
613      * Done with scan, thanks to the row filter.
614      */
615     DONE_SCAN,
616 
617     /**
618      * Seek to next key which is given as hint.
619      */
620     SEEK_NEXT_USING_HINT,
621 
622     /**
623      * Include KeyValue and done with column, seek to next.
624      */
625     INCLUDE_AND_SEEK_NEXT_COL,
626 
627     /**
628      * Include KeyValue and done with row, seek to next.
629      */
630     INCLUDE_AND_SEEK_NEXT_ROW,
631   }
632 }