/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Collection;
import java.util.OptionalDouble;
import java.util.OptionalLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;

/**
 * Interface for objects that hold a column family in a Region. It's a memstore and a set of zero
 * or more StoreFiles, which stretch backwards over time.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface Store {

  /**
   * The default priority for user-specified compaction requests. The user gets top priority unless
   * we have blocking compactions (Pri &lt;= 0).
   */
  int PRIORITY_USER = 1;

  /** Sentinel priority meaning "no priority assigned". */
  int NO_PRIORITY = Integer.MIN_VALUE;

  // General Accessors

  /** @return the comparator used to order cells in this store */
  CellComparator getComparator();

  /** @return the store files currently in this store */
  Collection<? extends StoreFile> getStorefiles();

  /** @return the store files that have been compacted but not yet archived */
  Collection<? extends StoreFile> getCompactedFiles();

  /**
   * When was the last edit done in the memstore
   */
  long timeOfOldestEdit();

  /** @return the file system used by this store */
  FileSystem getFileSystem();

  /**
   * Tests whether we should run a major compaction. For example, if the configured major compaction
   * interval is reached.
   * @return true if we should run a major compaction.
   * @throws IOException if the check against the underlying store files fails
   */
  boolean shouldPerformMajorCompaction() throws IOException;

  /**
   * See if there are too many store files in this store
   * @return <code>true</code> if number of store files is greater than the number defined in
   *         minFilesToCompact
   */
  boolean needsCompaction();

  /** @return the priority with which this store should be compacted */
  int getCompactPriority();

  /**
   * Returns whether this store is splittable, i.e., no reference file in this store.
   */
  boolean canSplit();

  /**
   * @return <code>true</code> if the store has any underlying reference files to older HFiles
   */
  boolean hasReferences();

  /**
   * @return The size of this store's memstore.
   */
  MemStoreSize getMemStoreSize();

  /**
   * @return The amount of memory we could flush from this memstore; usually this is equal to
   *         {@link #getMemStoreSize()} unless we are carrying snapshots and then it will be the
   *         size of outstanding snapshots.
   */
  MemStoreSize getFlushableSize();

  /**
   * @return size of the memstore snapshot
   */
  MemStoreSize getSnapshotSize();

  /** @return the descriptor of the column family this store holds */
  ColumnFamilyDescriptor getColumnFamilyDescriptor();

  /**
   * @return The maximum sequence id in all store files.
   */
  OptionalLong getMaxSequenceId();

  /**
   * @return The maximum memstoreTS in all store files.
   */
  OptionalLong getMaxMemStoreTS();

  /** @return aggregate size of all HStores used in the last compaction */
  long getLastCompactSize();

  /** @return aggregate size of HStore */
  long getSize();

  /**
   * @return Count of store files
   */
  int getStorefilesCount();

  /**
   * @return Count of compacted store files
   */
  int getCompactedFilesCount();

  /**
   * @return Max age of store files in this store
   */
  OptionalLong getMaxStoreFileAge();

  /**
   * @return Min age of store files in this store
   */
  OptionalLong getMinStoreFileAge();

  /**
   * @return Average age of store files in this store
   */
  OptionalDouble getAvgStoreFileAge();

  /**
   * @return Number of reference files in this store
   */
  long getNumReferenceFiles();

  /**
   * @return Number of HFiles in this store
   */
  long getNumHFiles();

  /**
   * @return The size of the store files, in bytes, uncompressed.
   */
  long getStoreSizeUncompressed();

  /**
   * @return The size of the store files, in bytes.
   */
  long getStorefilesSize();

  /**
   * @return The size of only the store files which are HFiles, in bytes.
   */
  long getHFilesSize();

  /**
   * @return The size of the store file root-level indexes, in bytes.
   */
  long getStorefilesRootLevelIndexSize();

  /**
   * Returns the total size of all index blocks in the data block indexes, including the root level,
   * intermediate levels, and the leaf level for multi-level indexes, or just the root level for
   * single-level indexes.
   * @return the total size of block indexes in the store
   */
  long getTotalStaticIndexSize();

  /**
   * Returns the total byte size of all Bloom filter bit arrays. For compound Bloom filters even the
   * Bloom blocks currently not loaded into the block cache are counted.
   * @return the total size of all Bloom filters in the store
   */
  long getTotalStaticBloomSize();

  /**
   * @return the parent region info hosting this store
   */
  RegionInfo getRegionInfo();

  /** @return whether writes to this store are currently enabled */
  boolean areWritesEnabled();

  /**
   * @return The smallest mvcc readPoint across all the scanners in this region. Writes older than
   *         this readPoint, are included in every read operation.
   */
  long getSmallestReadPoint();

  /** @return the name of the column family this store holds */
  String getColumnFamilyName();

  /** @return the name of the table this store belongs to */
  TableName getTableName();

  /**
   * @return The number of cells flushed to disk
   */
  long getFlushedCellsCount();

  /**
   * @return The total size of data flushed to disk, in bytes
   */
  long getFlushedCellsSize();

  /**
   * @return The total size of the output files on disk, in bytes
   */
  long getFlushedOutputFileSize();

  /**
   * @return The number of cells processed during minor compactions
   */
  long getCompactedCellsCount();

  /**
   * @return The total amount of data processed during minor compactions, in bytes
   */
  long getCompactedCellsSize();

  /**
   * @return The number of cells processed during major compactions
   */
  long getMajorCompactedCellsCount();

  /**
   * @return The total amount of data processed during major compactions, in bytes
   */
  long getMajorCompactedCellsSize();

  /**
   * @return Whether this store has too many store files.
   */
  boolean hasTooManyStoreFiles();

  /**
   * Checks the underlying store files, and opens the files that have not been opened, and removes
   * the store file readers for store files no longer available. Mainly used by secondary region
   * replicas to keep up to date with the primary region files.
   * @throws IOException if an error occurs while refreshing the store files
   */
  void refreshStoreFiles() throws IOException;

  /**
   * This value can represent the degree of emergency of compaction for this store. It should be
   * greater than or equal to 0.0, any value greater than 1.0 means we have too many store files.
   * <ul>
   * <li>if getStorefilesCount &lt;= getMinFilesToCompact, return 0.0</li>
   * <li>return (getStorefilesCount - getMinFilesToCompact) / (blockingFileCount -
   * getMinFilesToCompact)</li>
   * </ul>
   * <p>
   * And for striped stores, we should calculate this value by the files in each stripe separately
   * and return the maximum value.
   * <p>
   * It is similar to {@link #getCompactPriority()} except that it is more suitable to use in a
   * linear formula.
   */
  double getCompactionPressure();

  /** @return whether this store belongs to the primary replica of its region */
  boolean isPrimaryReplicaStore();

  /**
   * @return true if the memstore may need some extra memory space
   */
  boolean isSloppyMemStore();

  /** @return the number of put operations currently executing in parallel against this store */
  int getCurrentParallelPutCount();

  /**
   * @return the number of read requests purely from the memstore.
   */
  long getMemstoreOnlyRowReadsCount();

  /**
   * @return the number of read requests from the files under this store.
   */
  long getMixedRowReadsCount();

  /**
   * @return a read only configuration of this store; throws {@link UnsupportedOperationException}
   *         if you try to set a configuration.
   */
  Configuration getReadOnlyConfiguration();
}