/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Collection;
import java.util.OptionalDouble;
import java.util.OptionalLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;

/**
 * Interface for objects that hold a column family in a Region. It's a memstore and a set of zero
 * or more StoreFiles, which stretch backwards over time.
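 * <p>
 * A minimal usage sketch (illustrative only; it assumes a {@code Region} handle obtained
 * elsewhere, for example from a coprocessor environment):
 *
 * <pre>{@code
 * for (Store store : region.getStores()) {
 *   String family = store.getColumnFamilyName();         // column family this store holds
 *   int storeFiles = store.getStorefilesCount();         // number of StoreFiles on disk
 *   MemStoreSize memStoreSize = store.getMemStoreSize(); // current in-memory size
 *   // e.g. export these figures per column family, or decide whether a compaction is needed
 * }
 * }</pre>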
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface Store {

  /**
   * The default priority for user-specified compaction requests. The user gets top priority unless
   * we have blocking compactions (which have priority <= 0).
   */
  int PRIORITY_USER = 1;
  int NO_PRIORITY = Integer.MIN_VALUE;

  // General Accessors
  CellComparator getComparator();

  Collection<? extends StoreFile> getStorefiles();

  Collection<? extends StoreFile> getCompactedFiles();

  /**
   * When was the oldest edit done in the memstore.
   */
  long timeOfOldestEdit();

  FileSystem getFileSystem();

  /**
   * Tests whether we should run a major compaction, for example because the configured major
   * compaction interval has been reached.
   * @return true if we should run a major compaction.
   */
  boolean shouldPerformMajorCompaction() throws IOException;

  /**
   * See if there are too many store files in this store
   * @return <code>true</code> if the number of store files is greater than the number defined in
   *         minFilesToCompact
   */
  boolean needsCompaction();

  int getCompactPriority();

  /**
   * Returns whether this store is splittable, i.e., there are no reference files in this store.
   */
  boolean canSplit();

  /** Returns <code>true</code> if the store has any underlying reference files to older HFiles */
  boolean hasReferences();

  /** Returns The size of this store's memstore. */
  MemStoreSize getMemStoreSize();

  /**
   * @return The amount of memory we could flush from this memstore; usually this is equal to
   *         {@link #getMemStoreSize()} unless we are carrying snapshots, in which case it will be
   *         the size of the outstanding snapshots.
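   *         <p>
   *         A rough illustration (sketch only; it assumes the {@code MemStoreSize} accessors
   *         {@code getDataSize()} and {@code getHeapSize()} available in current HBase versions):
   *
   *         <pre>{@code
   *         MemStoreSize flushable = store.getFlushableSize();
   *         long dataBytes = flushable.getDataSize(); // bytes of cell data that would be flushed
   *         long heapBytes = flushable.getHeapSize(); // on-heap footprint of that data
   *         }</pre>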
   */
  MemStoreSize getFlushableSize();

  /** Returns size of the memstore snapshot */
  MemStoreSize getSnapshotSize();

  ColumnFamilyDescriptor getColumnFamilyDescriptor();

  /** Returns The maximum sequence id in all store files. */
  OptionalLong getMaxSequenceId();

  /** Returns The maximum memstoreTS in all store files. */
  OptionalLong getMaxMemStoreTS();

  /** Returns aggregate size of the store files used in the last compaction */
  long getLastCompactSize();

  /** Returns aggregate size of this store */
  long getSize();

  /** Returns Count of store files */
  int getStorefilesCount();

  /** Returns Count of compacted store files */
  int getCompactedFilesCount();

  /** Returns Max age of store files in this store */
  OptionalLong getMaxStoreFileAge();

  /** Returns Min age of store files in this store */
  OptionalLong getMinStoreFileAge();

  /** Returns Average age of store files in this store */
  OptionalDouble getAvgStoreFileAge();

  /** Returns Number of reference files in this store */
  long getNumReferenceFiles();

  /** Returns Number of HFiles in this store */
  long getNumHFiles();

  /** Returns The size of the store files, in bytes, uncompressed. */
  long getStoreSizeUncompressed();

  /** Returns The size of the store files, in bytes. */
  long getStorefilesSize();

  /** Returns The size of only the store files which are HFiles, in bytes. */
  long getHFilesSize();

  /** Returns The size of the store file root-level indexes, in bytes. */
  long getStorefilesRootLevelIndexSize();

  /**
   * Returns the total size of all index blocks in the data block indexes, including the root level,
   * intermediate levels, and the leaf level for multi-level indexes, or just the root level for
   * single-level indexes.
   * @return the total size of block indexes in the store
   */
  long getTotalStaticIndexSize();

  /**
   * Returns the total byte size of all Bloom filter bit arrays. For compound Bloom filters, even
   * the Bloom blocks currently not loaded into the block cache are counted.
   * @return the total size of all Bloom filters in the store
   */
  long getTotalStaticBloomSize();

  /** Returns the parent region info hosting this store */
  RegionInfo getRegionInfo();

  boolean areWritesEnabled();

  /**
   * @return The smallest mvcc readPoint across all the scanners in this region. Writes older than
   *         this readPoint are included in every read operation.
   */
  long getSmallestReadPoint();

  String getColumnFamilyName();

  TableName getTableName();

  /** Returns The number of cells flushed to disk */
  long getFlushedCellsCount();

  /** Returns The total size of data flushed to disk, in bytes */
  long getFlushedCellsSize();

  /** Returns The total size of the output files flushed to disk, in bytes */
  long getFlushedOutputFileSize();

  /** Returns The number of cells processed during minor compactions */
  long getCompactedCellsCount();

  /** Returns The total amount of data processed during minor compactions, in bytes */
  long getCompactedCellsSize();

  /** Returns The number of cells processed during major compactions */
  long getMajorCompactedCellsCount();

  /** Returns The total amount of data processed during major compactions, in bytes */
  long getMajorCompactedCellsSize();

  /** Returns Whether this store has too many store files. */
  boolean hasTooManyStoreFiles();

  /**
   * Checks the underlying store files, opens the files that have not yet been opened, and removes
   * the store file readers for store files that are no longer available. Mainly used by secondary
   * region replicas to keep up to date with the primary region's files.
   */
  void refreshStoreFiles() throws IOException;

  /**
   * This value represents the degree of urgency of compaction for this store. It should be greater
   * than or equal to 0.0; any value greater than 1.0 means we have too many store files.
   * <ul>
   * <li>if getStorefilesCount &lt;= getMinFilesToCompact, return 0.0</li>
   * <li>otherwise, return (getStorefilesCount - getMinFilesToCompact) / (blockingFileCount -
   * getMinFilesToCompact)</li>
   * </ul>
   * <p>
   * For striped stores, this value is calculated from the files in each stripe separately, and the
   * maximum value is returned.
   * <p>
   * It is similar to {@link #getCompactPriority()} except that it is more suitable for use in a
   * linear formula.
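   * <p>
   * For example, with {@code minFilesToCompact = 3}, {@code blockingFileCount = 10} and 7 store
   * files in this store, the pressure would be (7 - 3) / (10 - 3) = 4 / 7 (about 0.57); the
   * numbers here are purely illustrative. A caller might use the value roughly like this (a
   * sketch, not part of any shipped policy):
   *
   * <pre>{@code
   * double pressure = store.getCompactionPressure();
   * if (pressure > 1.0) {
   *   // more store files than the blocking threshold allows; consider throttling writes/flushes
   * }
   * }</pre>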
   */
  double getCompactionPressure();

  boolean isPrimaryReplicaStore();

  /** Returns true if the memstore may need some extra memory space */
  boolean isSloppyMemStore();

  int getCurrentParallelPutCount();

  /** Returns the number of read requests purely from the memstore. */
  long getMemstoreOnlyRowReadsCount();

  /** Returns the number of read requests from the files under this store. */
  long getMixedRowReadsCount();

  /**
   * @return a read-only configuration of this store; throws {@link UnsupportedOperationException}
   *         if you try to set a configuration property.
   */
  Configuration getReadOnlyConfiguration();

  /** Returns count of bloom filter requests for this store. */
  long getBloomFilterRequestsCount();

  /** Returns count of negative results for bloom filter requests for this store. */
  long getBloomFilterNegativeResultsCount();

  /**
   * Returns count of requests which could have used bloom filters, but they weren't configured or
   * loaded.
   */
  long getBloomFilterEligibleRequestsCount();
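
  /*
   * A rough illustration (hypothetical monitoring code, not part of this interface) of how the
   * Bloom filter counters above could be combined into a negative-result ratio:
   *
   *   long requests = store.getBloomFilterRequestsCount();
   *   long negatives = store.getBloomFilterNegativeResultsCount();
   *   double negativeRatio = requests == 0 ? 0.0 : (double) negatives / requests;
   */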
}