/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection;

/**
 * Manages the store files and basic metadata about them that determines the logical structure
 * (e.g. what files to return for a scan, how to determine the split point, and so on).
 * Does NOT affect the physical structure of files in HDFS.
 * Example alternative structures: the default list of files ordered by seqNum; a levelDB-style
 * structure sorted by level and seqNum.
 *
 * Implementations are assumed to be not thread-safe.
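 *
 * <p>A minimal sketch of a typical lifecycle, with illustrative variable names (this is a
 * caller-side illustration, not a normative sequence):
 * <pre>
 * manager.loadFiles(initialFiles);                // at region open
 * manager.insertNewFiles(flushedFiles);           // after a memstore flush
 * manager.addCompactionResults(inputs, outputs);  // after a compaction
 * manager.removeCompactedFiles(inputs);           // once readers on the inputs are done
 * </pre>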
 */
@InterfaceAudience.Private
public interface StoreFileManager {
  /**
   * Loads the initial store files into an empty StoreFileManager.
   * @param storeFiles The files to load.
   */
  void loadFiles(List<HStoreFile> storeFiles);

  /**
   * Adds new files, either from a MemStore flush or a bulk load, into the structure.
   * @param sfs New store files.
   */
  void insertNewFiles(Collection<HStoreFile> sfs) throws IOException;

  /**
   * Adds only the new compaction results into the structure.
   * @param compactedFiles The input files for the compaction.
   * @param results The resulting files from the compaction.
   */
  void addCompactionResults(
      Collection<HStoreFile> compactedFiles, Collection<HStoreFile> results) throws IOException;

  /**
   * Removes the compacted files.
   * @param compactedFiles The list of compacted files.
   * @throws IOException if an error occurs while removing the files.
   */
  void removeCompactedFiles(Collection<HStoreFile> compactedFiles) throws IOException;

  /**
   * Clears all the files currently in use and returns them.
   * @return The files previously in use.
   */
  ImmutableCollection<HStoreFile> clearFiles();

  /**
   * Clears all the compacted files and returns them. This method is expected to be
   * called from a single thread.
   * @return The previously compacted files.
   */
  Collection<HStoreFile> clearCompactedFiles();

  /**
   * Gets a snapshot of the store files currently in use. Can be used for things like metrics
   * and checks; callers should not assume anything about relations between the store files
   * in the list.
   * @return The list of StoreFiles.
   */
  Collection<HStoreFile> getStorefiles();

  /**
   * List of compacted files inside this store that need to be excluded from reads,
   * because new reads will use only the files newly created by compaction. These
   * compacted files will be deleted/cleared once all existing readers on them are done.
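   * <p>A simplified cleanup sketch with illustrative names; the reader-count check is the
   * caller's responsibility and not part of this interface:
   * <pre>
   * Collection&lt;HStoreFile&gt; compacted = manager.getCompactedfiles();
   * // once no scanner holds a reference to any of these files:
   * manager.removeCompactedFiles(compacted);
   * </pre>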
   * @return the list of compacted files
   */
  Collection<HStoreFile> getCompactedfiles();

  /**
   * Returns the number of files currently in use.
   * @return The number of files.
   */
  int getStorefileCount();

  /**
   * Returns the number of compacted files.
   * @return The number of files.
   */
  int getCompactedFilesCount();

  /**
   * Gets the store files to scan for a Scan or Get request.
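   * <p>A caller-side sketch; the {@code scan} accessors shown are those of
   * {@code org.apache.hadoop.hbase.client.Scan}, and variable names are illustrative:
   * <pre>
   * Collection&lt;HStoreFile&gt; files = manager.getFilesForScan(
   *     scan.getStartRow(), scan.includeStartRow(),
   *     scan.getStopRow(), scan.includeStopRow());
   * </pre>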
   * @param startRow Start row of the request.
   * @param includeStartRow Whether the start row is included in the range to read.
   * @param stopRow Stop row of the request.
   * @param includeStopRow Whether the stop row is included in the range to read.
   * @return The list of files that are to be read for this request.
   */
  Collection<HStoreFile> getFilesForScan(byte[] startRow, boolean includeStartRow, byte[] stopRow,
      boolean includeStopRow);

  /**
   * Gets the initial, full list of candidate store files to check for row-key-before.
   * @param targetKey The key that is the basis of the search.
   * @return The files that may have the key less than or equal to targetKey, in reverse
   *         order of newness, with preference for the target key.
   */
  Iterator<HStoreFile> getCandidateFilesForRowKeyBefore(KeyValue targetKey);

  /**
   * Updates the candidate list for finding the row key before. Based on the list of candidates
   * remaining to check from getCandidateFilesForRowKeyBefore, the target key and the current
   * candidate, this may trim and reorder the list to remove files where a better candidate
   * cannot be found.
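   * <p>A sketch of the expected caller loop; {@code seekBefore} stands in for a hypothetical
   * per-file lookup and is not part of this interface:
   * <pre>
   * Iterator&lt;HStoreFile&gt; candidates = manager.getCandidateFilesForRowKeyBefore(targetKey);
   * Cell best = null;
   * while (candidates.hasNext()) {
   *   Cell found = seekBefore(candidates.next(), targetKey);
   *   if (found != null) {
   *     best = found;
   *     candidates = manager.updateCandidateFilesForRowKeyBefore(candidates, targetKey, best);
   *   }
   * }
   * </pre>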
   * @param candidateFiles The candidate files not yet checked for better candidates - return
   *                       value from {@link #getCandidateFilesForRowKeyBefore(KeyValue)},
   *                       with some files already removed.
   * @param targetKey The key to search for.
   * @param candidate The current best candidate found.
   * @return The list to replace candidateFiles.
   */
  Iterator<HStoreFile> updateCandidateFilesForRowKeyBefore(Iterator<HStoreFile> candidateFiles,
      KeyValue targetKey, Cell candidate);

  /**
   * Gets the split point for the split of this set of store files (approx. middle).
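   * <p>A caller-side sketch of consuming the optional result (names are illustrative):
   * <pre>
   * Optional&lt;byte[]&gt; mid = manager.getSplitPoint();
   * if (mid.isPresent()) {
   *   byte[] splitRow = mid.get(); // boundary row between the two daughter regions
   * }
   * </pre>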
   * @return The mid-point if possible.
   * @throws IOException if an error occurs while computing the split point.
   */
  Optional<byte[]> getSplitPoint() throws IOException;

  /**
   * @return The store compaction priority.
   */
  int getStoreCompactionPriority();

  /**
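   * <p>A sketch of computing an expiration cutoff for {@code maxTs}; the {@code cf} TTL
   * accessor is an assumption about the caller's column family descriptor:
   * <pre>
   * long maxTs = EnvironmentEdgeManager.currentTime() - cf.getTimeToLive() * 1000L;
   * Collection&lt;HStoreFile&gt; unneeded = manager.getUnneededFiles(maxTs, filesCompacting);
   * </pre>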
   * @param maxTs Maximum expired timestamp.
   * @param filesCompacting Files that are currently compacting.
   * @return The files which don't have any necessary data according to TTL and other criteria.
   */
  Collection<HStoreFile> getUnneededFiles(long maxTs, List<HStoreFile> filesCompacting);

  /**
   * @return the compaction pressure used for compaction throughput tuning.
   * @see HStore#getCompactionPressure()
   */
  double getCompactionPressure();

  /**
   * @return the comparator used to sort storefiles. Usually,
   *         {@link HStoreFile#getMaxSequenceId()} is the first sort priority.
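   *         <p>A usage sketch ({@code manager} is illustrative):
   * <pre>
   * List&lt;HStoreFile&gt; sorted = new ArrayList&lt;&gt;(manager.getStorefiles());
   * sorted.sort(manager.getStoreFileComparator());
   * </pre>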
   */
  Comparator<HStoreFile> getStoreFileComparator();
}