/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;

import com.google.common.collect.ImmutableCollection;

/**
 * Manages the store files and the basic metadata about them that determines the logical
 * structure (e.g. what files to return for a scan, how to determine the split point, and so
 * on). Does NOT affect the physical structure of files in HDFS.
 * Example alternative structures: the default, a single list of files ordered by seqNum;
 * or a LevelDB-like structure sorted by level and seqNum.
 *
 * Implementations are not expected to be thread-safe.
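 *
 * <p>A rough sketch of the call sequence a store might drive against an implementation (the
 * collection variables here are hypothetical, not part of this interface):
 * <pre>{@code
 * manager.loadFiles(initialFiles);                        // once, when the store opens
 * manager.insertNewFiles(flushedFiles);                   // after a flush or bulk load
 * manager.addCompactionResults(inputFiles, outputFiles);  // after a compaction
 * ImmutableCollection<StoreFile> remaining = manager.clearFiles(); // when the store closes
 * }</pre>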
 */
@InterfaceAudience.Private
public interface StoreFileManager {
  /**
   * Loads the initial store files into an empty StoreFileManager.
   * @param storeFiles The files to load.
   */
  void loadFiles(List<StoreFile> storeFiles);

  /**
   * Adds new files, either from a MemStore flush or a bulk load, into the structure.
   * @param sfs New store files.
   */
  void insertNewFiles(Collection<StoreFile> sfs) throws IOException;

  /**
   * Adds compaction results into the structure.
   * @param compactedFiles The input files for the compaction.
   * @param results The resulting files for the compaction.
   */
  void addCompactionResults(
      Collection<StoreFile> compactedFiles, Collection<StoreFile> results) throws IOException;

  /**
   * Clears all the files currently in use and returns them.
   * @return The files previously in use.
   */
  ImmutableCollection<StoreFile> clearFiles();

  /**
   * Gets a snapshot of the store files currently in use. Can be used for things like metrics
   * and checks; callers should not assume anything about the relations between the store files
   * in the list.
   * @return The list of StoreFiles.
   */
  Collection<StoreFile> getStorefiles();

  /**
   * Returns the number of files currently in use.
   * @return The number of files.
   */
  int getStorefileCount();

  /**
   * Gets the store files to scan for a Scan or Get request.
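   * <p>A minimal usage sketch (the row-key byte arrays are hypothetical):
   * <pre>{@code
   * // A Get is a point lookup: both bounds are the requested row.
   * Collection<StoreFile> forGet = manager.getFilesForScanOrGet(true, row, row);
   * // A Scan is bounded by the request's start and stop rows.
   * Collection<StoreFile> forScan = manager.getFilesForScanOrGet(false, startRow, stopRow);
   * }</pre>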
   * @param isGet Whether it's a get.
   * @param startRow Start row of the request.
   * @param stopRow Stop row of the request.
   * @return The list of files that are to be read for this request.
   */
  Collection<StoreFile> getFilesForScanOrGet(
    boolean isGet, byte[] startRow, byte[] stopRow
  );

  /**
   * Gets the initial, full list of candidate store files to check for a row-key-before query.
   * @param targetKey The key that is the basis of the search.
   * @return The files that may contain a key less than or equal to targetKey, in reverse
   *         order of newness, with preference for the target key.
   */
  Iterator<StoreFile> getCandidateFilesForRowKeyBefore(
    KeyValue targetKey
  );

  /**
   * Updates the candidate list for finding the row key before. Based on the list of candidates
   * remaining to check from getCandidateFilesForRowKeyBefore, the targetKey and the current
   * candidate, this may trim and reorder the list to remove files in which a better candidate
   * cannot be found.
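   * <p>A sketch of the seek loop these two methods are designed for; checkFileForCandidate
   * is a hypothetical helper that seeks one file and returns the best candidate seen so far:
   * <pre>{@code
   * Iterator<StoreFile> files = manager.getCandidateFilesForRowKeyBefore(targetKey);
   * Cell candidate = null;
   * while (files.hasNext()) {
   *   candidate = checkFileForCandidate(files.next(), targetKey, candidate);
   *   if (candidate != null) {
   *     files = manager.updateCandidateFilesForRowKeyBefore(files, targetKey, candidate);
   *   }
   * }
   * }</pre>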
   * @param candidateFiles The candidate files not yet checked for better candidates - the
   *                       return value from {@link #getCandidateFilesForRowKeyBefore(KeyValue)},
   *                       with some files already removed.
   * @param targetKey The key to search for.
   * @param candidate The current best candidate found.
   * @return The list to replace candidateFiles.
   */
  Iterator<StoreFile> updateCandidateFilesForRowKeyBefore(
    Iterator<StoreFile> candidateFiles, KeyValue targetKey, Cell candidate
  );

  /**
   * Gets the split point for the split of this set of store files (approximately the middle).
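   * <p>A brief usage sketch (what the caller does with the returned key is up to the
   * surrounding split logic, which is not part of this interface):
   * <pre>{@code
   * byte[] splitPoint = manager.getSplitPoint();
   * if (splitPoint != null) {
   *   // e.g. use splitPoint as the boundary between the two daughter regions
   * }
   * }</pre>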
   * @return The mid-point, or null if no split is possible.
   * @throws IOException if the underlying store files cannot be read.
   */
  byte[] getSplitPoint() throws IOException;

  /**
   * @return The store compaction priority.
   */
  int getStoreCompactionPriority();

  /**
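   * Finds files that no longer hold any needed data, e.g. files whose cells have all passed
   * their TTL. A sketch of how a caller might derive maxTs from a column family TTL
   * (ttlMillis and filesCompacting are hypothetical here):
   * <pre>{@code
   * long maxTs = System.currentTimeMillis() - ttlMillis;
   * Collection<StoreFile> unneeded = manager.getUnneededFiles(maxTs, filesCompacting);
   * }</pre>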
   * @param maxTs Maximum expired timestamp.
   * @param filesCompacting Files that are currently being compacted.
   * @return The files which no longer contain any necessary data, according to TTL and other
   *         criteria.
   */
  Collection<StoreFile> getUnneededFiles(long maxTs, List<StoreFile> filesCompacting);

  /**
   * @return The compaction pressure used for compaction throughput tuning.
   * @see Store#getCompactionPressure()
   */
  double getCompactionPressure();
}