/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.NavigableSet;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController;
import org.apache.hadoop.hbase.security.User;
/**
 * Interface for objects that hold a column family in a Region. It's a memstore and a set of zero
 * or more StoreFiles, which stretch backwards over time.
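 * <p>
 * A minimal usage sketch from a region coprocessor; the {@code env} and {@code FAMILY}
 * names are illustrative and assume access via a coprocessor environment:
 * <pre>{@code
 * Store store = env.getRegion().getStore(FAMILY);
 * long memstoreBytes = store.getMemStoreSize();
 * }</pre>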
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface Store extends HeapSize, StoreConfigInformation, PropagatingConfigurationObserver {

  /* The default priority for user-specified compaction requests.
   * The user gets top priority unless we have blocking compactions. (Pri <= 0)
   */
  int PRIORITY_USER = 1;
  int NO_PRIORITY = Integer.MIN_VALUE;

  // General Accessors
  KeyValue.KVComparator getComparator();

  Collection<StoreFile> getStorefiles();

  /**
   * Close all the readers. We don't need to worry about subsequent requests because the Region
   * holds a write lock that will prevent any more reads or writes.
   * @return the {@link StoreFile StoreFiles} that were previously being used.
   * @throws IOException on failure
   */
  Collection<StoreFile> close() throws IOException;

  /**
   * Return a scanner for both the memstore and the HStore files. Assumes we are not in a
   * compaction.
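   * <p>
   * A hedged sketch; {@code store} and {@code readPt} are assumed to be obtained elsewhere
   * (e.g. the read point from {@link #getSmallestReadPoint()}):
   * <pre>{@code
   * NavigableSet<byte[]> cols = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
   * cols.add(Bytes.toBytes("qual"));
   * KeyValueScanner scanner = store.getScanner(new Scan(), cols, readPt);
   * }</pre>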
   * @param scan Scan to apply when scanning the stores
   * @param targetCols columns to scan
   * @param readPt the read point of the ongoing scan
   * @return a scanner over the current key values
   * @throws IOException on failure
   */
  KeyValueScanner getScanner(Scan scan, final NavigableSet<byte[]> targetCols, long readPt)
      throws IOException;

  /**
   * Get all scanners with no filtering based on TTL (that happens further down
   * the line).
   * @param cacheBlocks whether blocks read for these scanners should be cached
   * @param isGet whether the scanners serve a Get rather than a Scan
   * @param usePread whether to use positional read (pread) instead of streaming read
   * @param isCompaction whether the scanners are used for a compaction
   * @param matcher the query matcher to apply, or null for none
   * @param startRow the start row of the scan
   * @param stopRow the stop row of the scan
   * @param readPt the read point of the ongoing scan
   * @return all scanners for this store
   */
  List<KeyValueScanner> getScanners(
    boolean cacheBlocks,
    boolean isGet,
    boolean usePread,
    boolean isCompaction,
    ScanQueryMatcher matcher,
    byte[] startRow,
    byte[] stopRow,
    long readPt
  ) throws IOException;

  ScanInfo getScanInfo();

  /**
   * Adds or replaces the specified KeyValues.
   * <p>
   * For each KeyValue specified, if a cell with the same row, family, and qualifier exists in
   * the MemStore, it will be replaced. Otherwise, it will just be inserted into the MemStore.
   * <p>
   * This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic
   * across all of them.
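   * <p>
   * Illustrative only; the row/family/qualifier/value byte arrays are assumed to be in scope:
   * <pre>{@code
   * List<Cell> cells = new ArrayList<Cell>();
   * cells.add(new KeyValue(row, family, qualifier, value));
   * long sizeDelta = store.upsert(cells, store.getSmallestReadPoint());
   * }</pre>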
   * @param cells the cells to add or replace
   * @param readpoint readpoint below which we can safely remove duplicate KVs
   * @return memstore size delta
   * @throws IOException on failure
   */
  long upsert(Iterable<Cell> cells, long readpoint) throws IOException;

  /**
   * Adds a value to the memstore.
   * @param cell the cell to add
   * @return memstore size delta
   */
  long add(Cell cell);

  /**
   * When was the oldest edit done in the memstore.
   */
  long timeOfOldestEdit();

  /**
   * Removes a Cell from the memstore. The Cell is removed only if its key
   * &amp; memstoreTS match the key &amp; memstoreTS value of the cell
   * parameter.
   * @param cell the cell to remove
   */
  void rollback(final Cell cell);

  /**
   * Find the key that matches <i>row</i> exactly, or the one that immediately precedes it.
   * WARNING: Only use this method on a table where writes occur with strictly increasing
   * timestamps. This method assumes that pattern of writes in order to make it reasonably
   * performant. The search also relies on the axiom that deletes live in the container
   * (memstore snapshot or storefile) that follows the one holding the cells they delete,
   * never in the same container: i.e. we will see a delete before we come across the cells
   * it deletes. The presumption is that the memstore#kvset is processed before
   * memstore#snapshot, and so on.
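   * <p>
   * A small illustrative call:
   * <pre>{@code
   * Cell closest = store.getRowKeyAtOrBefore(Bytes.toBytes("row-0042"));
   * }</pre>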
   * @param row The row key of the targeted row.
   * @return Found Cell or null if none found.
   * @throws IOException on failure
   */
  Cell getRowKeyAtOrBefore(final byte[] row) throws IOException;

  FileSystem getFileSystem();

  /**
   * Creates a writer for a new StoreFile in the store's tmp directory.
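   * <p>
   * A hedged sketch; {@code estimatedKeys} is an illustrative estimate supplied by the caller:
   * <pre>{@code
   * StoreFile.Writer writer = store.createWriterInTmp(
   *     estimatedKeys, Compression.Algorithm.NONE, false, true, false);
   * }</pre>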
   * @param maxKeyCount estimated maximum number of keys to be written
   * @param compression Compression algorithm to use
   * @param isCompaction whether we are creating a new file in a compaction
   * @param includeMVCCReadpoint whether we should write out the MVCC readpoint
   * @param includesTags whether tags may be written to the file
   * @return Writer for a new StoreFile in the tmp dir.
   */
  StoreFile.Writer createWriterInTmp(
      long maxKeyCount,
      Compression.Algorithm compression,
      boolean isCompaction,
      boolean includeMVCCReadpoint,
      boolean includesTags
  ) throws IOException;

  /**
   * Creates a writer for a new StoreFile in the store's tmp directory.
   * @param maxKeyCount estimated maximum number of keys to be written
   * @param compression Compression algorithm to use
   * @param isCompaction whether we are creating a new file in a compaction
   * @param includeMVCCReadpoint whether we should write out the MVCC readpoint
   * @param includesTags whether tags may be written to the file
   * @param shouldDropBehind should the writer drop caches behind writes
   * @return Writer for a new StoreFile in the tmp dir.
   */
  StoreFile.Writer createWriterInTmp(
    long maxKeyCount,
    Compression.Algorithm compression,
    boolean isCompaction,
    boolean includeMVCCReadpoint,
    boolean includesTags,
    boolean shouldDropBehind
  ) throws IOException;

  // Compaction oriented methods

  boolean throttleCompaction(long compactionSize);

  /**
   * Getter for the CompactionProgress object.
   * @return CompactionProgress object; can be null
   */
  CompactionProgress getCompactionProgress();

  CompactionContext requestCompaction() throws IOException;

  /**
   * @deprecated Use {@link #requestCompaction(int, CompactionRequest, User)} instead.
   */
  @Deprecated
  CompactionContext requestCompaction(int priority, CompactionRequest baseRequest)
      throws IOException;

  CompactionContext requestCompaction(int priority, CompactionRequest baseRequest, User user)
      throws IOException;

  void cancelRequestedCompaction(CompactionContext compaction);

  /**
   * @deprecated Use {@link #compact(CompactionContext, CompactionThroughputController, User)}
   *             instead.
   */
  @Deprecated
  List<StoreFile> compact(CompactionContext compaction,
      CompactionThroughputController throughputController) throws IOException;

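  /**
   * Runs the given compaction on behalf of the given user. A hedged end-to-end sketch
   * (error handling elided; the null base request and the {@code throughputController}
   * reference are illustrative):
   * <pre>{@code
   * CompactionContext ctx = store.requestCompaction(Store.PRIORITY_USER, null, user);
   * if (ctx != null) {
   *   List<StoreFile> newFiles = store.compact(ctx, throughputController, user);
   * }
   * }</pre>
   */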
  List<StoreFile> compact(CompactionContext compaction,
      CompactionThroughputController throughputController, User user) throws IOException;

  /**
   * @return true if we should run a major compaction.
   */
  boolean isMajorCompaction() throws IOException;

  void triggerMajorCompaction();

  /**
   * See if there are too many store files in this store
   * @return true if number of store files is greater than the number defined in minFilesToCompact
   */
  boolean needsCompaction();

  int getCompactPriority();

  StoreFlushContext createFlushContext(long cacheFlushId);

  /**
   * Call to complete a compaction. It's for the case where we find in the WAL a compaction
   * that was not finished. We could find one when recovering a WAL after a region server
   * crash. See HBASE-2331.
   * @param compaction the descriptor for the compaction
   * @param pickCompactionFiles whether or not to pick up the new compaction output files and
   * add them to the store
   * @param removeFiles whether to remove/archive files from the filesystem
   */
  void replayCompactionMarker(CompactionDescriptor compaction, boolean pickCompactionFiles,
      boolean removeFiles)
      throws IOException;

  // Split oriented methods

  boolean canSplit();

  /**
   * Determines if the Store should be split.
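   * <p>
   * A typical (illustrative) check-then-split probe:
   * <pre>{@code
   * byte[] splitRow = store.canSplit() ? store.getSplitPoint() : null;
   * }</pre>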
   * @return the split point row key if the store should be split, null otherwise.
   */
  byte[] getSplitPoint();

  // Bulk Load methods

  /**
   * This throws a WrongRegionException if the HFile does not fit in this region, or an
   * InvalidHFileException if the HFile is not valid.
   */
  void assertBulkLoadHFileOk(Path srcPath) throws IOException;

  /**
   * This method should only be called from Region. It is assumed that the ranges of values in
   * the HFile fit within the store's assigned region. (assertBulkLoadHFileOk checks this.)
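   * <p>
   * A typical (illustrative) sequence from Region code:
   * <pre>{@code
   * store.assertBulkLoadHFileOk(new Path(srcPathStr));
   * Path committedPath = store.bulkLoadHFile(srcPathStr, seqId);
   * }</pre>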
   *
   * @param srcPathStr path to the HFile to bulk load
   * @param sequenceId sequence Id associated with the HFile
   */
  Path bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException;

  // General accessors into the state of the store
  // TODO abstract some of this out into a metrics class

  /**
   * @return <tt>true</tt> if the store has any underlying reference files to older HFiles
   */
  boolean hasReferences();

  /**
   * @return The size of this store's memstore, in bytes
   */
  long getMemStoreSize();

  /**
   * @return The amount of memory we could flush from this memstore; usually this is equal to
   * {@link #getMemStoreSize()} unless we are carrying snapshots, in which case it is the size
   * of the outstanding snapshots.
   */
  long getFlushableSize();

  /**
   * Returns the memstore snapshot size
   * @return size of the memstore snapshot
   */
  long getSnapshotSize();

  HColumnDescriptor getFamily();

  /**
   * @return The maximum sequence id in all store files.
   */
  long getMaxSequenceId();

  /**
   * @return The maximum memstoreTS in all store files.
   */
  long getMaxMemstoreTS();

  /**
   * @return the data block encoder
   */
  HFileDataBlockEncoder getDataBlockEncoder();

  /** @return aggregate size of the store files used in the last compaction */
  long getLastCompactSize();

  /** @return aggregate size of HStore */
  long getSize();

  /**
   * @return Count of store files
   */
  int getStorefilesCount();

  /**
   * @return The size of the store files, in bytes, uncompressed.
   */
  long getStoreSizeUncompressed();

  /**
   * @return The size of the store files, in bytes.
   */
  long getStorefilesSize();

  /**
   * @return The size of the store file indexes, in bytes.
   */
  long getStorefilesIndexSize();

  /**
   * Returns the total size of all index blocks in the data block indexes, including the root
   * level, intermediate levels, and the leaf level for multi-level indexes, or just the root
   * level for single-level indexes.
   * @return the total size of block indexes in the store
   */
  long getTotalStaticIndexSize();

  /**
   * Returns the total byte size of all Bloom filter bit arrays. For compound Bloom filters even
   * the Bloom blocks currently not loaded into the block cache are counted.
   * @return the total size of all Bloom filters in the store
   */
  long getTotalStaticBloomSize();

  // Test-helper methods

  /**
   * Used for tests.
   * @return cache configuration for this Store.
   */
  CacheConfig getCacheConfig();

  /**
   * @return the parent region info hosting this store
   */
  HRegionInfo getRegionInfo();

  RegionCoprocessorHost getCoprocessorHost();

  boolean areWritesEnabled();

  /**
   * @return The smallest mvcc readPoint across all the scanners in this region. Writes older
   * than this readPoint are included in every read operation.
   */
  long getSmallestReadPoint();

  String getColumnFamilyName();

  TableName getTableName();

  /**
   * @return The number of cells flushed to disk
   */
  long getFlushedCellsCount();

  /**
   * @return The total size of data flushed to disk, in bytes
   */
  long getFlushedCellsSize();

  /**
   * @return The number of cells processed during minor compactions
   */
  long getCompactedCellsCount();

  /**
   * @return The total amount of data processed during minor compactions, in bytes
   */
  long getCompactedCellsSize();

  /**
   * @return The number of cells processed during major compactions
   */
  long getMajorCompactedCellsCount();

  /**
   * @return The total amount of data processed during major compactions, in bytes
   */
  long getMajorCompactedCellsSize();

  /**
   * @param o Observer who wants to know about changes in set of Readers
   */
  void addChangedReaderObserver(ChangedReadersObserver o);

  /**
   * @param o Observer no longer interested in changes in set of Readers.
   */
  void deleteChangedReaderObserver(ChangedReadersObserver o);

  /**
   * @return Whether this store has too many store files.
   */
  boolean hasTooManyStoreFiles();

  /**
   * Checks the underlying store files, opens the files that have not yet been opened, and
   * removes the store file readers for store files that are no longer available. Mainly used
   * by secondary region replicas to keep up to date with the primary region files.
   * @throws IOException on failure
   */
  void refreshStoreFiles() throws IOException;

  /**
   * This value can represent the degree of emergency of compaction for this store. It should
   * be greater than or equal to 0.0, and any value greater than 1.0 means we have too many
   * store files.
   * <ul>
   * <li>if getStorefilesCount &lt;= getMinFilesToCompact, return 0.0</li>
   * <li>return (getStorefilesCount - getMinFilesToCompact) / (blockingFileCount -
   * getMinFilesToCompact)</li>
   * </ul>
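   * <p>
   * For example, with 7 store files, a {@code minFilesToCompact} of 3, and a blocking file
   * count of 10, the pressure is (7 - 3) / (10 - 3) = 4.0 / 7.0 &#8776; 0.57.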
   * <p>
   * For striped stores, we calculate this value for the files in each stripe separately and
   * return the maximum.
   * <p>
   * It is similar to {@link #getCompactPriority()} except that it is more suitable for use in
   * a linear formula.
   */
  double getCompactionPressure();

  /**
   * Replaces the store files that the store has with the given files. Mainly used by secondary
   * region replicas to keep up to date with the primary region files.
   * @throws IOException on failure
   */
  void refreshStoreFiles(Collection<String> newFiles) throws IOException;

  void bulkLoadHFile(StoreFileInfo fileInfo) throws IOException;

  boolean isPrimaryReplicaStore();
}