@InterfaceAudience.LimitedPrivate(value="Coprocessor") @InterfaceStability.Evolving public interface Store extends HeapSize, StoreConfigInformation, PropagatingConfigurationObserver
Modifier and Type | Field and Description |
---|---|
static int | NO_PRIORITY |
static int | PRIORITY_USER |
Modifier and Type | Method and Description |
---|---|
Pair<Long,Cell> | add(Cell cell): Adds a value to the memstore. |
void | addChangedReaderObserver(ChangedReadersObserver o) |
boolean | areWritesEnabled() |
void | assertBulkLoadHFileOk(org.apache.hadoop.fs.Path srcPath): This throws a WrongRegionException if the HFile does not fit in this region, or an InvalidHFileException if the HFile is not valid. |
void | bulkLoadHFile(StoreFileInfo fileInfo) |
org.apache.hadoop.fs.Path | bulkLoadHFile(String srcPathStr, long sequenceId): This method should only be called from Region. |
void | cancelRequestedCompaction(CompactionContext compaction) |
boolean | canSplit() |
Collection<StoreFile> | close(): Close all the readers. We don't need to worry about subsequent requests because the Region holds a write lock that will prevent any more reads or writes. |
List<StoreFile> | compact(CompactionContext compaction, CompactionThroughputController throughputController): Deprecated. See compact(CompactionContext, CompactionThroughputController, User). |
List<StoreFile> | compact(CompactionContext compaction, CompactionThroughputController throughputController, User user) |
org.apache.hadoop.hbase.regionserver.StoreFlushContext | createFlushContext(long cacheFlushId) |
StoreFile.Writer | createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTags) |
void | deleteChangedReaderObserver(ChangedReadersObserver o) |
CacheConfig | getCacheConfig(): Used for tests. |
String | getColumnFamilyName() |
long | getCompactedCellsCount() |
long | getCompactedCellsSize() |
double | getCompactionPressure(): This value can represent the degree of emergency of compaction for this store. |
CompactionProgress | getCompactionProgress(): Getter for the CompactionProgress object. |
int | getCompactPriority() |
KeyValue.KVComparator | getComparator() |
RegionCoprocessorHost | getCoprocessorHost() |
HFileDataBlockEncoder | getDataBlockEncoder() |
HColumnDescriptor | getFamily() |
org.apache.hadoop.fs.FileSystem | getFileSystem() |
long | getFlushableSize() |
long | getFlushedCellsCount() |
long | getFlushedCellsSize() |
long | getLastCompactSize() |
long | getMajorCompactedCellsCount() |
long | getMajorCompactedCellsSize() |
long | getMaxMemstoreTS() |
long | getMaxSequenceId() |
long | getMemStoreSize() |
HRegionInfo | getRegionInfo() |
Cell | getRowKeyAtOrBefore(byte[] row): Find the key that matches row exactly, or the one that immediately precedes it. |
ScanInfo | getScanInfo() |
KeyValueScanner | getScanner(Scan scan, NavigableSet<byte[]> targetCols, long readPt): Return a scanner for both the memstore and the HStore files. |
List<KeyValueScanner> | getScanners(boolean cacheBlocks, boolean isGet, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, byte[] stopRow, long readPt): Get all scanners with no filtering based on TTL (that happens further down the line). |
long | getSize() |
long | getSmallestReadPoint() |
long | getSnapshotSize(): Returns the memstore snapshot size. |
byte[] | getSplitPoint(): Returns the split point if this Store should be split. |
Collection<StoreFile> | getStorefiles() |
int | getStorefilesCount() |
long | getStorefilesIndexSize() |
long | getStorefilesSize() |
long | getStoreSizeUncompressed() |
TableName | getTableName() |
long | getTotalStaticBloomSize(): Returns the total byte size of all Bloom filter bit arrays. |
long | getTotalStaticIndexSize(): Returns the total size of all index blocks in the data block indexes, including the root level, intermediate levels, and the leaf level for multi-level indexes, or just the root level for single-level indexes. |
boolean | hasReferences() |
boolean | hasTooManyStoreFiles() |
boolean | isMajorCompaction() |
boolean | needsCompaction(): Checks whether there are too many store files in this store. |
void | refreshStoreFiles(): Checks the underlying store files, opens any that have not been opened, and removes the store file readers for store files no longer available. |
void | refreshStoreFiles(Collection<String> newFiles): Replaces the store files that the store has with the given files. |
void | replayCompactionMarker(WALProtos.CompactionDescriptor compaction, boolean pickCompactionFiles, boolean removeFiles): Call to complete a compaction. |
CompactionContext | requestCompaction() |
CompactionContext | requestCompaction(int priority, CompactionRequest baseRequest): Deprecated. See requestCompaction(int, CompactionRequest, User). |
CompactionContext | requestCompaction(int priority, CompactionRequest baseRequest, User user) |
void | rollback(Cell cell): Removes a Cell from the memstore. |
boolean | throttleCompaction(long compactionSize) |
long | timeOfOldestEdit(): When the last edit was done in the memstore. |
void | triggerMajorCompaction() |
long | upsert(Iterable<Cell> cells, long readpoint): Adds or replaces the specified KeyValues. |
Methods inherited from interface org.apache.hadoop.hbase.regionserver.StoreConfigInformation: getBlockingFileCount, getCompactionCheckMultiplier, getMemstoreFlushSize, getStoreFileTtl
Methods inherited from interface org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver: deregisterChildren, registerChildren
Methods inherited from interface org.apache.hadoop.hbase.conf.ConfigurationObserver: onConfigurationChange
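Taken together, the accessors in the summary above make a Store easy to inspect. Below is a minimal sketch, assuming a Store reference is already in hand (for example, one passed into a RegionObserver coprocessor hook); the helper class itself and printing to stdout are illustrative stand-ins for a real metrics sink.

```java
import org.apache.hadoop.hbase.regionserver.Store;

// Hypothetical helper: every Store method called here is documented on this page.
public final class StoreMetricsLogger {
  private StoreMetricsLogger() {}

  /** Print a few size and health indicators for the given store. */
  public static void logMetrics(Store store) {
    System.out.println("table=" + store.getTableName()
        + " family=" + store.getColumnFamilyName()
        + " memstoreSize=" + store.getMemStoreSize()
        + " storefiles=" + store.getStorefilesCount()
        + " storefilesSize=" + store.getStorefilesSize()
        + " needsCompaction=" + store.needsCompaction()
        + " compactionPressure=" + store.getCompactionPressure());
  }
}
```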
static final int PRIORITY_USER
static final int NO_PRIORITY
KeyValue.KVComparator getComparator()
Collection<StoreFile> getStorefiles()
Collection<StoreFile> close() throws IOException
Close all the readers. We don't need to worry about subsequent requests because the Region holds a write lock that will prevent any more reads or writes.
Returns: the StoreFiles that were previously being used
Throws: IOException - on failure

KeyValueScanner getScanner(Scan scan, NavigableSet<byte[]> targetCols, long readPt) throws IOException
Return a scanner for both the memstore and the HStore files.
Parameters:
scan - Scan to apply when scanning the stores
targetCols - columns to scan
Throws: IOException - on failure

List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean isGet, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, byte[] stopRow, long readPt) throws IOException
Get all scanners with no filtering based on TTL (that happens further down the line).
Throws: IOException
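A minimal sketch of the scanner entry point above, assuming a Store reference and using getSmallestReadPoint() as the read point. Iterating a KeyValueScanner by calling next() until it returns null is an assumption about that interface, not something this page guarantees.

```java
import java.io.IOException;
import java.util.NavigableSet;
import java.util.TreeSet;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;

public final class StoreScanExample {
  /** Scan a single qualifier across both the memstore and the HStore files. */
  public static void scanQualifier(Store store, byte[] qualifier) throws IOException {
    NavigableSet<byte[]> cols = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    cols.add(qualifier);
    KeyValueScanner scanner = store.getScanner(new Scan(), cols, store.getSmallestReadPoint());
    try {
      for (Cell cell = scanner.next(); cell != null; cell = scanner.next()) {
        // ... process the cell ...
      }
    } finally {
      scanner.close();
    }
  }
}
```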
ScanInfo getScanInfo()
long upsert(Iterable<Cell> cells, long readpoint) throws IOException
Adds or replaces the specified KeyValues. For each KeyValue specified, if a cell with the same row, family, and qualifier exists in MemStore, it will be replaced. Otherwise, it will just be inserted into MemStore.
This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic across all of them.
Parameters:
readpoint - readpoint below which we can safely remove duplicate KVs
Throws: IOException

Pair<Long,Cell> add(Cell cell)
Adds a value to the memstore.

long timeOfOldestEdit()
When the last edit was done in the memstore.

void rollback(Cell cell)
Removes a Cell from the memstore.

Cell getRowKeyAtOrBefore(byte[] row) throws IOException
Find the key that matches row exactly, or the one that immediately precedes it.
Parameters:
row - The row key of the targeted row.
Throws: IOException
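add(Cell) and rollback(Cell) are designed to pair up on a write path that may need to undo a memstore insert. A minimal sketch of that pairing; syncWal() is a hypothetical placeholder for the durability step, not an HBase API.

```java
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.Store;

public final class MemstoreRollbackExample {
  public static void addWithRollback(Store store, Cell cell) {
    store.add(cell); // returns a Pair of the memstore size delta and the stored cell
    boolean durable = false;
    try {
      durable = syncWal(); // hypothetical stand-in for making the edit durable
    } finally {
      if (!durable) {
        store.rollback(cell); // remove the cell we just added from the memstore
      }
    }
  }

  private static boolean syncWal() { return true; } // placeholder
}
```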
org.apache.hadoop.fs.FileSystem getFileSystem()
StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTags) throws IOException
Throws: IOException
boolean throttleCompaction(long compactionSize)
CompactionProgress getCompactionProgress()
CompactionContext requestCompaction() throws IOException
Throws: IOException
@Deprecated CompactionContext requestCompaction(int priority, CompactionRequest baseRequest) throws IOException
Deprecated. See requestCompaction(int, CompactionRequest, User).
Throws: IOException
CompactionContext requestCompaction(int priority, CompactionRequest baseRequest, User user) throws IOException
Throws: IOException
void cancelRequestedCompaction(CompactionContext compaction)
@Deprecated List<StoreFile> compact(CompactionContext compaction, CompactionThroughputController throughputController) throws IOException
Deprecated. See compact(CompactionContext, CompactionThroughputController, User).
Throws: IOException
List<StoreFile> compact(CompactionContext compaction, CompactionThroughputController throughputController, User user) throws IOException
Throws: IOException
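The request/compact/cancel methods above form a lifecycle. A minimal sketch, assuming the Store, CompactionThroughputController, and User instances come from region server internals or a test; passing null as the base request, treating a null CompactionContext as "nothing selected", and the exact 1.x import paths are all assumptions about common usage.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController;
import org.apache.hadoop.hbase.security.User;

public final class CompactionLifecycle {
  public static void compactIfNeeded(Store store, CompactionThroughputController controller,
      User user) throws IOException {
    if (!store.needsCompaction()) {
      return; // not enough store files to bother
    }
    CompactionContext ctx = store.requestCompaction(Store.PRIORITY_USER, null, user);
    if (ctx == null) {
      return; // assumed: no files were selected for compaction
    }
    boolean ran = false;
    try {
      store.compact(ctx, controller, user); // returns the newly written StoreFiles
      ran = true;
    } finally {
      if (!ran) {
        store.cancelRequestedCompaction(ctx); // release the selection if we never compacted
      }
    }
  }
}
```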
boolean isMajorCompaction() throws IOException
Throws: IOException
void triggerMajorCompaction()
boolean needsCompaction()
int getCompactPriority()
org.apache.hadoop.hbase.regionserver.StoreFlushContext createFlushContext(long cacheFlushId)
void replayCompactionMarker(WALProtos.CompactionDescriptor compaction, boolean pickCompactionFiles, boolean removeFiles) throws IOException
Call to complete a compaction.
Parameters:
compaction - the descriptor for compaction
pickCompactionFiles - whether or not to pick up the new compaction output files and add them to the store
removeFiles - whether to remove/archive files from the filesystem
Throws: IOException
boolean canSplit()
byte[] getSplitPoint()
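A minimal sketch pairing canSplit() with getSplitPoint(); treating a null return as "no usable split point" is an assumption.

```java
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;

public final class SplitCheck {
  /** Returns the suggested split row, or null if this store should not be split. */
  public static byte[] suggestSplitRow(Store store) {
    if (!store.canSplit()) {
      return null; // e.g. the store still carries reference files (see hasReferences())
    }
    byte[] splitPoint = store.getSplitPoint();
    if (splitPoint != null) {
      System.out.println("split at row " + Bytes.toStringBinary(splitPoint));
    }
    return splitPoint;
  }
}
```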
void assertBulkLoadHFileOk(org.apache.hadoop.fs.Path srcPath) throws IOException
This throws a WrongRegionException if the HFile does not fit in this region, or an InvalidHFileException if the HFile is not valid.
Throws: IOException
org.apache.hadoop.fs.Path bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException
This method should only be called from Region.
Parameters:
sequenceId - sequence id associated with the HFile
Throws: IOException
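A minimal sketch of the bulk-load pair documented above: validate the HFile against this store and region, then load it. The path string and sequence id are illustrative, and per the summary, bulkLoadHFile(String, long) should only be called from Region.

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.Store;

public final class BulkLoadExample {
  public static Path loadHFile(Store store, String srcPathStr, long seqId) throws IOException {
    // Throws WrongRegionException if the HFile does not fit in this region,
    // or InvalidHFileException if the HFile is not valid.
    store.assertBulkLoadHFileOk(new Path(srcPathStr));
    // Load the validated file; returns the resulting store file path (assumed).
    return store.bulkLoadHFile(srcPathStr, seqId);
  }
}
```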
boolean hasReferences()
long getMemStoreSize()
long getFlushableSize()
Returns: the same as getMemStoreSize(), unless we are carrying snapshots, and then it will be the size of outstanding snapshots

long getSnapshotSize()
Returns the memstore snapshot size.
HColumnDescriptor getFamily()
long getMaxSequenceId()
long getMaxMemstoreTS()
HFileDataBlockEncoder getDataBlockEncoder()
long getLastCompactSize()
long getSize()
int getStorefilesCount()
long getStoreSizeUncompressed()
long getStorefilesSize()
long getStorefilesIndexSize()
long getTotalStaticIndexSize()
long getTotalStaticBloomSize()
CacheConfig getCacheConfig()
HRegionInfo getRegionInfo()
RegionCoprocessorHost getCoprocessorHost()
boolean areWritesEnabled()
long getSmallestReadPoint()
String getColumnFamilyName()
TableName getTableName()
long getFlushedCellsCount()
long getFlushedCellsSize()
long getCompactedCellsCount()
long getCompactedCellsSize()
long getMajorCompactedCellsCount()
long getMajorCompactedCellsSize()
void addChangedReaderObserver(ChangedReadersObserver o)
void deleteChangedReaderObserver(ChangedReadersObserver o)
boolean hasTooManyStoreFiles()
void refreshStoreFiles() throws IOException
Checks the underlying store files, opens any that have not been opened, and removes the store file readers for store files no longer available.
Throws: IOException
double getCompactionPressure()
This value can represent the degree of emergency of compaction for this store. For striped stores, we should calculate this value by the files in each stripe separately and return the maximum value. It is similar to getCompactPriority() except that it is more suitable to use in a linear formula.
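Because getCompactionPressure() is meant for linear formulas, a caller can scale back-pressure directly with it. A minimal sketch under the assumption that values of 1.0 and above mean the store has too many files; the delay formula itself is illustrative, not an HBase mechanism.

```java
import org.apache.hadoop.hbase.regionserver.Store;

public final class PressureThrottle {
  /** Suggest a delay, in milliseconds, before accepting more writes to this store. */
  public static long suggestedDelayMs(Store store) {
    double pressure = store.getCompactionPressure();
    if (pressure < 1.0) {
      return 0L; // assumed: compaction is keeping up, no throttling needed
    }
    // Scale the delay linearly with pressure, capped at one second (illustrative).
    return Math.min(1000L, (long) (pressure * 100.0));
  }
}
```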
void refreshStoreFiles(Collection<String> newFiles) throws IOException
Replaces the store files that the store has with the given files.
Throws: IOException
void bulkLoadHFile(StoreFileInfo fileInfo) throws IOException
Throws: IOException