@InterfaceAudience.Private public class HStore extends Object implements Store
There's no reason to consider append-logging at this level; all logging and locking is handled at the HRegion level. Store just provides services to manage sets of StoreFiles. One of the most important of those services is compaction, where store files are aggregated once their number passes a configurable threshold.
Locking and transactions are handled at a higher level. This API should not be called directly but by an HRegion manager.
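As a hedged illustration (not taken from this page), a caller is expected to reach a store through its owning HRegion rather than constructing an HStore directly; the column family name "cf" below is an assumption for the example.

```java
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch: obtain the Store for a column family from its region and
// read a simple metric. The family name "cf" is illustrative only.
static long memstoreSizeOf(HRegion region) {
  Store store = region.getStore(Bytes.toBytes("cf"));
  return store == null ? 0L : store.getMemStoreSize();
}
```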
Modifier and Type | Class and Description |
---|---|
private class |
HStore.StoreFlusherImpl |
Fields inherited from interface org.apache.hadoop.hbase.regionserver.Store: NO_PRIORITY, PRIORITY_USER
Modifier | Constructor and Description |
---|---|
protected |
HStore(HRegion region,
HColumnDescriptor family,
org.apache.hadoop.conf.Configuration confParam)
Constructor
|
Modifier and Type | Method and Description |
---|---|
long |
add(Cell cell)
Adds a value to the memstore
|
void |
addChangedReaderObserver(ChangedReadersObserver o) |
private void |
addToCompactingFiles(Collection<StoreFile> filesToAdd)
Adds the files to compacting files.
|
boolean |
areWritesEnabled() |
void |
assertBulkLoadHFileOk(org.apache.hadoop.fs.Path srcPath)
This throws a WrongRegionException if the HFile does not fit in this region, or an
InvalidHFileException if the HFile is not valid.
|
private void |
bulkLoadHFile(StoreFile sf) |
void |
bulkLoadHFile(StoreFileInfo fileInfo) |
org.apache.hadoop.fs.Path |
bulkLoadHFile(String srcPathStr,
long seqNum)
This method should only be called from Region.
|
void |
cancelRequestedCompaction(CompactionContext compaction) |
boolean |
canSplit() |
com.google.common.collect.ImmutableCollection<StoreFile> |
close()
Close all the readers. We don't need to worry about subsequent requests because the Region holds a write lock that will prevent any more reads or writes.
|
private StoreFile |
commitFile(org.apache.hadoop.fs.Path path,
long logCacheFlushId,
MonitoredTask status) |
List<StoreFile> |
compact(CompactionContext compaction,
CompactionThroughputController throughputController)
Compact the StoreFiles.
|
List<StoreFile> |
compact(CompactionContext compaction,
CompactionThroughputController throughputController,
User user) |
void |
compactRecentForTestingAssumingDefaultPolicy(int N)
This method tries to compact N recent files for testing.
|
protected void |
completeCompaction(Collection<StoreFile> compactedFiles)
It works by processing a compaction that's been written to disk.
|
protected void |
completeCompaction(Collection<StoreFile> compactedFiles,
boolean removeFiles)
It works by processing a compaction that's been written to disk.
|
private HFileContext |
createFileContext(Compression.Algorithm compression,
boolean includeMVCCReadpoint,
boolean includesTag,
Encryption.Context cryptoContext) |
StoreFlushContext |
createFlushContext(long cacheFlushId) |
private StoreFile |
createStoreFileAndReader(org.apache.hadoop.fs.Path p) |
private StoreFile |
createStoreFileAndReader(StoreFileInfo info) |
StoreFile.Writer |
createWriterInTmp(long maxKeyCount,
Compression.Algorithm compression,
boolean isCompaction,
boolean includeMVCCReadpoint,
boolean includesTag) |
StoreFile.Writer |
createWriterInTmp(long maxKeyCount,
Compression.Algorithm compression,
boolean isCompaction,
boolean includeMVCCReadpoint,
boolean includesTag,
boolean shouldDropBehind) |
protected long |
delete(KeyValue kv)
Adds a delete KeyValue to the memstore
|
void |
deleteChangedReaderObserver(ChangedReadersObserver o) |
void |
deregisterChildren(ConfigurationManager manager)
Needs to be called to deregister the children from the manager.
|
private static long |
determineTTLFromFamily(HColumnDescriptor family) |
private void |
finishCompactionRequest(CompactionRequest cr) |
protected List<org.apache.hadoop.fs.Path> |
flushCache(long logCacheFlushId,
MemStoreSnapshot snapshot,
MonitoredTask status)
Write out current snapshot.
|
long |
getBlockingFileCount()
The number of files required before flushes for this store will be blocked.
|
static int |
getBytesPerChecksum(org.apache.hadoop.conf.Configuration conf)
Returns the configured bytesPerChecksum value.
|
CacheConfig |
getCacheConfig()
Used for tests.
|
static ChecksumType |
getChecksumType(org.apache.hadoop.conf.Configuration conf)
Returns the configured checksum algorithm.
|
static int |
getCloseCheckInterval() |
String |
getColumnFamilyName() |
long |
getCompactedCellsCount() |
long |
getCompactedCellsSize() |
long |
getCompactionCheckMultiplier() |
double |
getCompactionPressure()
This value can represent the degree of emergency of compaction for this store.
|
CompactionProgress |
getCompactionProgress()
getter for CompactionProgress object
|
int |
getCompactPriority() |
KeyValue.KVComparator |
getComparator() |
RegionCoprocessorHost |
getCoprocessorHost() |
HFileDataBlockEncoder |
getDataBlockEncoder() |
HColumnDescriptor |
getFamily() |
org.apache.hadoop.fs.FileSystem |
getFileSystem() |
long |
getFlushableSize() |
long |
getFlushedCellsCount() |
long |
getFlushedCellsSize() |
HRegion |
getHRegion() |
long |
getLastCompactSize() |
long |
getMajorCompactedCellsCount() |
long |
getMajorCompactedCellsSize() |
long |
getMaxMemstoreTS() |
long |
getMaxSequenceId() |
long |
getMemstoreFlushSize() |
long |
getMemStoreSize() |
protected OffPeakHours |
getOffPeakHours() |
HRegionFileSystem |
getRegionFileSystem() |
HRegionInfo |
getRegionInfo() |
Cell |
getRowKeyAtOrBefore(byte[] row)
Find the key that matches row exactly, or the one that immediately precedes it.
|
ScanInfo |
getScanInfo() |
KeyValueScanner |
getScanner(Scan scan,
NavigableSet<byte[]> targetCols,
long readPt)
Return a scanner for both the memstore and the HStore files.
|
List<KeyValueScanner> |
getScanners(boolean cacheBlocks,
boolean isGet,
boolean usePread,
boolean isCompaction,
ScanQueryMatcher matcher,
byte[] startRow,
byte[] stopRow,
long readPt)
Get all scanners with no filtering based on TTL (that happens further down
the line).
|
long |
getSize() |
long |
getSmallestReadPoint() |
long |
getSnapshotSize()
Returns the memstore snapshot size
|
byte[] |
getSplitPoint()
Determines if the store should be split and, if so, returns the split point
|
StoreEngine<?,?,?,?> |
getStoreEngine()
Returns the StoreEngine that is backing this concrete implementation of Store.
|
Collection<StoreFile> |
getStorefiles() |
int |
getStorefilesCount() |
long |
getStorefilesIndexSize() |
long |
getStorefilesSize() |
long |
getStoreFileTtl() |
static org.apache.hadoop.fs.Path |
getStoreHomedir(org.apache.hadoop.fs.Path tabledir,
HRegionInfo hri,
byte[] family)
Deprecated.
|
static org.apache.hadoop.fs.Path |
getStoreHomedir(org.apache.hadoop.fs.Path tabledir,
String encodedName,
byte[] family)
Deprecated.
|
long |
getStoreSizeUncompressed() |
TableName |
getTableName() |
long |
getTotalStaticBloomSize()
Returns the total byte size of all Bloom filter bit arrays.
|
long |
getTotalStaticIndexSize()
Returns the total size of all index blocks in the data block indexes, including the root level,
intermediate levels, and the leaf level for multi-level indexes, or just the root level for
single-level indexes.
|
boolean |
hasReferences() |
boolean |
hasTooManyStoreFiles() |
long |
heapSize() |
(package private) static boolean |
isCellTTLExpired(Cell cell,
long oldestTimestamp,
long now) |
boolean |
isMajorCompaction() |
boolean |
isPrimaryReplicaStore() |
private List<StoreFile> |
loadStoreFiles()
Creates an unsorted list of StoreFile loaded in parallel
from the given directory.
|
private void |
logCompactionEndMessage(CompactionRequest cr,
List<StoreFile> sfs,
long compactionStartTime)
Log a very elaborate compaction completion message.
|
private List<StoreFile> |
moveCompatedFilesIntoPlace(CompactionRequest cr,
List<org.apache.hadoop.fs.Path> newFiles,
User user) |
(package private) StoreFile |
moveFileIntoPlace(org.apache.hadoop.fs.Path newFile) |
boolean |
needsCompaction()
See if there are too many store files in this store
|
private void |
notifyChangedReadersObservers() |
void |
onConfigurationChange(org.apache.hadoop.conf.Configuration conf)
This method would be called by the
ConfigurationManager
object when the Configuration object is reloaded from disk. |
private List<StoreFile> |
openStoreFiles(Collection<StoreFileInfo> files) |
void |
refreshStoreFiles()
Checks the underlying store files, and opens the files that have not
been opened, and removes the store file readers for store files no longer
available.
|
void |
refreshStoreFiles(Collection<String> newFiles)
Replaces the store files that the store has with the given files.
|
private void |
refreshStoreFilesInternal(Collection<StoreFileInfo> newFiles)
Checks the underlying store files, and opens the files that have not
been opened, and removes the store file readers for store files no longer
available.
|
void |
registerChildren(ConfigurationManager manager)
Needs to be called to register the children to the manager.
|
private void |
removeUnneededFiles() |
(package private) void |
replaceStoreFiles(Collection<StoreFile> compactedFiles,
Collection<StoreFile> result) |
void |
replayCompactionMarker(org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor compaction,
boolean pickCompactionFiles,
boolean removeFiles)
Call to complete a compaction.
|
CompactionContext |
requestCompaction() |
CompactionContext |
requestCompaction(int priority,
CompactionRequest baseRequest) |
CompactionContext |
requestCompaction(int priority,
CompactionRequest baseRequest,
User user) |
void |
rollback(Cell cell)
Removes a Cell from the memstore.
|
private boolean |
rowAtOrBeforeFromStoreFile(StoreFile f,
GetClosestRowBeforeTracker state) |
private boolean |
seekToScanner(HFileScanner scanner,
KeyValue firstOnRow,
KeyValue firstKV) |
(package private) void |
setDataBlockEncoderInTest(HFileDataBlockEncoder blockEncoder)
Should be used only in tests.
|
(package private) void |
setScanInfo(ScanInfo scanInfo)
Set scan info, used by test
|
(package private) void |
snapshot()
Snapshot this store's memstore.
|
boolean |
throttleCompaction(long compactionSize) |
long |
timeOfOldestEdit()
When was the last edit done in the memstore
|
String |
toString() |
void |
triggerMajorCompaction() |
long |
updateColumnValue(byte[] row,
byte[] f,
byte[] qualifier,
long newValue)
Used in tests.
|
private boolean |
updateStorefiles(List<StoreFile> sfs,
long snapshotId) |
long |
upsert(Iterable<Cell> cells,
long readpoint)
Adds or replaces the specified KeyValues.
|
private void |
validateStoreFile(org.apache.hadoop.fs.Path path)
Validates a store file by opening and closing it.
|
(package private) int |
versionsToReturn(int wantedVersions) |
private boolean |
walkForwardInSingleRow(HFileScanner scanner,
KeyValue firstOnRow,
GetClosestRowBeforeTracker state) |
private void |
writeCompactionWalRecord(Collection<StoreFile> filesCompacted,
Collection<StoreFile> newFiles)
Writes the compaction WAL record.
|
private static final String MEMSTORE_CLASS_NAME
public static final String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
public static final String BLOCKING_STOREFILES_KEY
public static final int DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER
public static final int DEFAULT_BLOCKING_STOREFILE_COUNT
private static final org.apache.commons.logging.Log LOG
protected final MemStore memstore
private final HRegion region
private final HColumnDescriptor family
private final HRegionFileSystem fs
private org.apache.hadoop.conf.Configuration conf
private final CacheConfig cacheConf
private long lastCompactSize
volatile boolean forceMajor
static int closeCheckInterval
private volatile long storeSize
private volatile long totalUncompressedBytes
final ReentrantReadWriteLock lock
private final boolean verifyBulkLoads
private ScanInfo scanInfo
private final Set<ChangedReadersObserver> changedReaderObservers
private final int blocksize
private HFileDataBlockEncoder dataBlockEncoder
private ChecksumType checksumType
private int bytesPerChecksum
private final KeyValue.KVComparator comparator
final StoreEngine<?,?,?,?> storeEngine
private static final AtomicBoolean offPeakCompactionTracker
private volatile OffPeakHours offPeakHours
private static final int DEFAULT_FLUSH_RETRIES_NUMBER
private int flushRetriesNumber
private int pauseTime
private long blockingFileCount
private int compactionCheckMultiplier
private Encryption.Context cryptoContext
private volatile long flushedCellsCount
private volatile long compactedCellsCount
private volatile long majorCompactedCellsCount
private volatile long flushedCellsSize
private volatile long compactedCellsSize
private volatile long majorCompactedCellsSize
public static final long FIXED_OVERHEAD
public static final long DEEP_OVERHEAD
protected HStore(HRegion region, HColumnDescriptor family, org.apache.hadoop.conf.Configuration confParam) throws IOException
region -
family - HColumnDescriptor for this column
confParam - configuration object. Can be null.
IOException
private static long determineTTLFromFamily(HColumnDescriptor family)
family -

public String getColumnFamilyName()
getColumnFamilyName
in interface Store
public TableName getTableName()
getTableName
in interface Store
public org.apache.hadoop.fs.FileSystem getFileSystem()
getFileSystem
in interface Store
public HRegionFileSystem getRegionFileSystem()
public long getStoreFileTtl()
getStoreFileTtl
in interface StoreConfigInformation
public long getMemstoreFlushSize()
getMemstoreFlushSize
in interface StoreConfigInformation
public long getFlushableSize()
getFlushableSize
in interface Store
Returns: Store.getMemStoreSize() unless we are carrying snapshots, in which case it will be the size of the outstanding snapshots.

public long getSnapshotSize()
Store
getSnapshotSize
in interface Store
public long getCompactionCheckMultiplier()
getCompactionCheckMultiplier
in interface StoreConfigInformation
public long getBlockingFileCount()
StoreConfigInformation
getBlockingFileCount
in interface StoreConfigInformation
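As a rough, hypothetical illustration (not from this page), a monitoring check might compare the current store file count against this blocking threshold; the store also exposes hasTooManyStoreFiles() for the same purpose, and the margin of 1 below is an arbitrary choice.

```java
import org.apache.hadoop.hbase.regionserver.Store;

// Hedged sketch: warn when a store is close to the blocking store file count
// (configured via hbase.hstore.blockingStoreFiles / BLOCKING_STOREFILES_KEY).
static boolean nearBlocking(Store store) {
  long blocking = store.getBlockingFileCount();
  int current = store.getStorefilesCount();
  return current >= blocking - 1; // "one file away" is an arbitrary margin
}
```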
public static int getBytesPerChecksum(org.apache.hadoop.conf.Configuration conf)
conf - The configuration

public static ChecksumType getChecksumType(org.apache.hadoop.conf.Configuration conf)
conf - The configuration

public static int getCloseCheckInterval()
public HColumnDescriptor getFamily()
public long getMaxSequenceId()
getMaxSequenceId
in interface Store
public long getMaxMemstoreTS()
getMaxMemstoreTS
in interface Store
@Deprecated public static org.apache.hadoop.fs.Path getStoreHomedir(org.apache.hadoop.fs.Path tabledir, HRegionInfo hri, byte[] family)
tabledir - Path to where the table is being stored
hri - HRegionInfo for the region
family - HColumnDescriptor describing the column family

@Deprecated public static org.apache.hadoop.fs.Path getStoreHomedir(org.apache.hadoop.fs.Path tabledir, String encodedName, byte[] family)
tabledir - Path to where the table is being stored
encodedName - Encoded region name
family - HColumnDescriptor describing the column family

public HFileDataBlockEncoder getDataBlockEncoder()
getDataBlockEncoder
in interface Store
void setDataBlockEncoderInTest(HFileDataBlockEncoder blockEncoder)
blockEncoder - the block delta encoder to use

private List<StoreFile> loadStoreFiles() throws IOException
IOException
private List<StoreFile> openStoreFiles(Collection<StoreFileInfo> files) throws IOException
IOException
public void refreshStoreFiles() throws IOException
refreshStoreFiles
in interface Store
IOException
public void refreshStoreFiles(Collection<String> newFiles) throws IOException
Store
refreshStoreFiles
in interface Store
IOException
private void refreshStoreFilesInternal(Collection<StoreFileInfo> newFiles) throws IOException
IOException
private StoreFile createStoreFileAndReader(org.apache.hadoop.fs.Path p) throws IOException
IOException
private StoreFile createStoreFileAndReader(StoreFileInfo info) throws IOException
IOException
public long add(Cell cell)
Store
public long timeOfOldestEdit()
Store
timeOfOldestEdit
in interface Store
protected long delete(KeyValue kv)
kv -

public void rollback(Cell cell)
Store
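A hedged sketch of how the add/rollback pair might be driven by the owning region: a cell is applied to the memstore and rolled back if a later durability step fails. The DurableStep hook is a hypothetical placeholder, not part of this API.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.Store;

// Hypothetical callback standing in for whatever must succeed after the
// memstore update (e.g. a WAL sync performed by the region).
interface DurableStep {
  void run() throws IOException;
}

static long applyOrRollback(Store store, Cell cell, DurableStep durableStep) throws IOException {
  long sizeDelta = store.add(cell);   // memstore size delta, per add(Cell)
  try {
    durableStep.run();
  } catch (IOException e) {
    store.rollback(cell);             // undo the memstore change on failure
    throw e;
  }
  return sizeDelta;
}
```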
public Collection<StoreFile> getStorefiles()
getStorefiles
in interface Store
public void assertBulkLoadHFileOk(org.apache.hadoop.fs.Path srcPath) throws IOException
Store
assertBulkLoadHFileOk
in interface Store
IOException
public org.apache.hadoop.fs.Path bulkLoadHFile(String srcPathStr, long seqNum) throws IOException
Store
bulkLoadHFile
in interface Store
seqNum - sequence Id associated with the HFile
IOException
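A hedged sketch (not from this page) of the validate-then-load sequence the owning Region is expected to drive; srcPath and seqNum are assumed inputs for illustration.

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.Store;

static void bulkLoadInto(Store store, Path srcPath, long seqNum) throws IOException {
  // Rejects HFiles that do not fit this region or that are not valid HFiles
  // (WrongRegionException / InvalidHFileException).
  store.assertBulkLoadHFileOk(srcPath);
  // Moves the file into the store; per the javadoc above, this should only
  // be called from Region.
  store.bulkLoadHFile(srcPath.toString(), seqNum);
}
```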
public void bulkLoadHFile(StoreFileInfo fileInfo) throws IOException
bulkLoadHFile
in interface Store
IOException
private void bulkLoadHFile(StoreFile sf) throws IOException
IOException
public com.google.common.collect.ImmutableCollection<StoreFile> close() throws IOException
Store
close
in interface Store
Returns: the StoreFiles that were previously being used.
IOException - on failure

void snapshot()
Snapshot this store's memstore. Call before running flushCache(long, MemStoreSnapshot, MonitoredTask) so it has some work to do.

protected List<org.apache.hadoop.fs.Path> flushCache(long logCacheFlushId, MemStoreSnapshot snapshot, MonitoredTask status) throws IOException
Write out current snapshot. Presumes that a snapshot() has been called previously.
logCacheFlushId - flush sequence number
snapshot -
status -
IOException
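The snapshot/flushCache/commitFile path above is internal and driven by HStore.StoreFlusherImpl; externally, the region goes through createFlushContext(long). The prepare/flushCache/commit methods on StoreFlushContext shown below are assumptions about that interface, sketched for illustration only.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFlushContext;

// Hedged sketch of the region-side flush sequence; the StoreFlushContext
// method names are assumptions, not confirmed by this page.
static void flushStore(Store store, long flushSeqId, MonitoredTask status) throws IOException {
  StoreFlushContext flush = store.createFlushContext(flushSeqId);
  flush.prepare();            // snapshot the memstore (assumed)
  flush.flushCache(status);   // write the snapshot out to temporary files (assumed)
  flush.commit(status);       // move files into place and update readers (assumed)
}
```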
private StoreFile commitFile(org.apache.hadoop.fs.Path path, long logCacheFlushId, MonitoredTask status) throws IOException
IOException
public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag) throws IOException
createWriterInTmp
in interface Store
compression - Compression algorithm to use
isCompaction - whether we are creating a new file in a compaction
includeMVCCReadpoint - whether we should write out the MVCC readpoint
IOException
public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag, boolean shouldDropBehind) throws IOException
createWriterInTmp
in interface Store
compression - Compression algorithm to use
isCompaction - whether we are creating a new file in a compaction
includeMVCCReadpoint - whether we should write out the MVCC readpoint
shouldDropBehind - should the writer drop caches behind writes
IOException
private HFileContext createFileContext(Compression.Algorithm compression, boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext)
private boolean updateStorefiles(List<StoreFile> sfs, long snapshotId) throws IOException
IOException
private void notifyChangedReadersObservers() throws IOException
IOException
public List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean isGet, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, byte[] stopRow, long readPt) throws IOException
getScanners
in interface Store
IOException
public void addChangedReaderObserver(ChangedReadersObserver o)
addChangedReaderObserver
in interface Store
public void deleteChangedReaderObserver(ChangedReadersObserver o)
deleteChangedReaderObserver
in interface Store
public List<StoreFile> compact(CompactionContext compaction, CompactionThroughputController throughputController) throws IOException
During this time, the Store can work as usual, getting values from StoreFiles and writing new StoreFiles from the memstore. Existing StoreFiles are not destroyed until the new compacted StoreFile is completely written-out to disk.
The compactLock prevents multiple simultaneous compactions. The structureLock prevents us from interfering with other write operations.
We don't want to hold the structureLock for the whole time, as a compact() can be lengthy and we want to allow cache-flushes during this period.
Compaction events should be idempotent, since there is no IO fencing for the region directory in HDFS. A region server might still try to complete the compaction after it lost the region. That is why the following events are carefully ordered for a compaction:
1. Compaction writes new files under the region/.tmp directory (compaction output).
2. Compaction atomically moves the temporary file under the region directory.
3. Compaction appends a WAL edit containing the compaction input and output files, and forces a sync on the WAL.
4. Compaction deletes the input files from the region directory.
Failure conditions are handled like this:
- If the RS fails before 2, the compaction won't complete. Even if the RS lives on and finishes the compaction later, it will only write the new data file to the region directory. Since we already have this data, this is idempotent, but we will have a redundant copy of the data.
- If the RS fails between 2 and 3, the region will have a redundant copy of the data. The RS that failed won't be able to finish sync() for the WAL because of lease recovery in the WAL.
- If the RS fails after 3, the region server that opens the region will pick up the compaction marker from the WAL and replay it by removing the compaction input files. The failed RS can also attempt to delete those files, but the operation will be idempotent.
See HBASE-2231 for details.
compact
in interface Store
compaction - compaction details obtained from requestCompaction()
IOException
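A hedged sketch (not from this page) of how the request/compact/cancel calls fit together inside the region server; the throughput controller is passed in as an assumption rather than constructed here.

```java
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController;

static List<StoreFile> compactOnce(Store store, CompactionThroughputController controller)
    throws IOException {
  CompactionContext context = store.requestCompaction();
  if (context == null) {
    return null;                                 // nothing selected for compaction
  }
  try {
    return store.compact(context, controller);   // the new, compacted store files
  } catch (IOException e) {
    store.cancelRequestedCompaction(context);    // release the selected files
    throw e;
  }
}
```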
public List<StoreFile> compact(CompactionContext compaction, CompactionThroughputController throughputController, User user) throws IOException
compact
in interface Store
IOException
private List<StoreFile> moveCompatedFilesIntoPlace(CompactionRequest cr, List<org.apache.hadoop.fs.Path> newFiles, User user) throws IOException
IOException
StoreFile moveFileIntoPlace(org.apache.hadoop.fs.Path newFile) throws IOException
IOException
private void writeCompactionWalRecord(Collection<StoreFile> filesCompacted, Collection<StoreFile> newFiles) throws IOException
filesCompacted - Files compacted (input).
newFiles - Files from compaction.
IOException
void replaceStoreFiles(Collection<StoreFile> compactedFiles, Collection<StoreFile> result) throws IOException
IOException
private void logCompactionEndMessage(CompactionRequest cr, List<StoreFile> sfs, long compactionStartTime)
cr - Request.
sfs - Resulting files.
compactionStartTime - Start time.

public void replayCompactionMarker(org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor compaction, boolean pickCompactionFiles, boolean removeFiles) throws IOException
replayCompactionMarker
in interface Store
compaction -
pickCompactionFiles - whether or not to pick up the new compaction output files and add them to the store
removeFiles - whether to remove/archive files from the filesystem
IOException
public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException
N - Number of files.
IOException
public boolean hasReferences()
hasReferences
in interface Store
public CompactionProgress getCompactionProgress()
Store
getCompactionProgress
in interface Store
public boolean isMajorCompaction() throws IOException
isMajorCompaction
in interface Store
IOException
public CompactionContext requestCompaction() throws IOException
requestCompaction
in interface Store
IOException
public CompactionContext requestCompaction(int priority, CompactionRequest baseRequest) throws IOException
requestCompaction
in interface Store
IOException
public CompactionContext requestCompaction(int priority, CompactionRequest baseRequest, User user) throws IOException
requestCompaction
in interface Store
IOException
private void addToCompactingFiles(Collection<StoreFile> filesToAdd)
private void removeUnneededFiles() throws IOException
IOException
public void cancelRequestedCompaction(CompactionContext compaction)
cancelRequestedCompaction
in interface Store
private void finishCompactionRequest(CompactionRequest cr)
private void validateStoreFile(org.apache.hadoop.fs.Path path) throws IOException
path - the path to the store file
IOException
protected void completeCompaction(Collection<StoreFile> compactedFiles) throws IOException
It works by processing a compaction that's been written to disk.
It is usually invoked at the end of a compaction, but might also be invoked at HStore startup, if the prior execution died midway through.
Moving the compacted TreeMap into place means:
1) Unload all replaced StoreFiles, close them, and collect the list to delete.
2) Compute the new store size.
compactedFiles - list of files that were compacted
IOException
protected void completeCompaction(Collection<StoreFile> compactedFiles, boolean removeFiles) throws IOException
It works by processing a compaction that's been written to disk.
It is usually invoked at the end of a compaction, but might also be invoked at HStore startup, if the prior execution died midway through.
Moving the compacted TreeMap into place means:
1) Unload all replaced StoreFiles, close them, and collect the list to delete.
2) Compute the new store size.
compactedFiles - list of files that were compacted
IOException
int versionsToReturn(int wantedVersions)
static boolean isCellTTLExpired(Cell cell, long oldestTimestamp, long now)
cell -
oldestTimestamp -

public Cell getRowKeyAtOrBefore(byte[] row) throws IOException
Store
getRowKeyAtOrBefore
in interface Store
row - The row key of the targeted row.
IOException
private boolean rowAtOrBeforeFromStoreFile(StoreFile f, GetClosestRowBeforeTracker state) throws IOException
IOException
private boolean seekToScanner(HFileScanner scanner, KeyValue firstOnRow, KeyValue firstKV) throws IOException
IOException
private boolean walkForwardInSingleRow(HFileScanner scanner, KeyValue firstOnRow, GetClosestRowBeforeTracker state) throws IOException
IOException
public byte[] getSplitPoint()
Store
getSplitPoint
in interface Store
public long getLastCompactSize()
getLastCompactSize
in interface Store
public long getSize()
public void triggerMajorCompaction()
triggerMajorCompaction
in interface Store
public KeyValueScanner getScanner(Scan scan, NavigableSet<byte[]> targetCols, long readPt) throws IOException
Store
getScanner
in interface Store
scan - Scan to apply when scanning the stores
targetCols - columns to scan
IOException - on failure

public int getStorefilesCount()
getStorefilesCount
in interface Store
public long getStoreSizeUncompressed()
getStoreSizeUncompressed
in interface Store
public long getStorefilesSize()
getStorefilesSize
in interface Store
public long getStorefilesIndexSize()
getStorefilesIndexSize
in interface Store
public long getTotalStaticIndexSize()
Store
getTotalStaticIndexSize
in interface Store
public long getTotalStaticBloomSize()
Store
getTotalStaticBloomSize
in interface Store
public long getMemStoreSize()
getMemStoreSize
in interface Store
public int getCompactPriority()
getCompactPriority
in interface Store
public boolean throttleCompaction(long compactionSize)
throttleCompaction
in interface Store
public HRegion getHRegion()
public RegionCoprocessorHost getCoprocessorHost()
getCoprocessorHost
in interface Store
public HRegionInfo getRegionInfo()
getRegionInfo
in interface Store
public boolean areWritesEnabled()
areWritesEnabled
in interface Store
public long getSmallestReadPoint()
getSmallestReadPoint
in interface Store
public long updateColumnValue(byte[] row, byte[] f, byte[] qualifier, long newValue) throws IOException
row - row to update
f - family to update
qualifier - qualifier to update
newValue - the new value to set into memstore
IOException
public long upsert(Iterable<Cell> cells, long readpoint) throws IOException
Store
For each KeyValue specified, if a cell with the same row, family, and qualifier exists in MemStore, it will be replaced. Otherwise, it will just be inserted to MemStore.
This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic across all of them.
upsert
in interface Store
readpoint - readpoint below which we can safely remove duplicate KVs
IOException
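A hedged sketch (not from this page) of a read-modify-write path using upsert, assuming the caller has already built the replacement cells; the read point comes from getSmallestReadPoint().

```java
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.Store;

static long upsertCells(Store store, List<Cell> replacementCells) throws IOException {
  // Duplicate memstore entries older than this read point can be removed safely.
  long readpoint = store.getSmallestReadPoint();
  // Replaces any memstore cell with the same row/family/qualifier; atomic per
  // cell but not across the whole batch, per the description above.
  return store.upsert(replacementCells, readpoint);
}
```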
public StoreFlushContext createFlushContext(long cacheFlushId)
createFlushContext
in interface Store
public boolean needsCompaction()
Store
needsCompaction
in interface Store
public CacheConfig getCacheConfig()
Store
getCacheConfig
in interface Store
public long heapSize()
public KeyValue.KVComparator getComparator()
getComparator
in interface Store
public ScanInfo getScanInfo()
getScanInfo
in interface Store
void setScanInfo(ScanInfo scanInfo)
scanInfo - new scan info to use for test

public boolean hasTooManyStoreFiles()
hasTooManyStoreFiles
in interface Store
public long getFlushedCellsCount()
getFlushedCellsCount
in interface Store
public long getFlushedCellsSize()
getFlushedCellsSize
in interface Store
public long getCompactedCellsCount()
getCompactedCellsCount
in interface Store
public long getCompactedCellsSize()
getCompactedCellsSize
in interface Store
public long getMajorCompactedCellsCount()
getMajorCompactedCellsCount
in interface Store
public long getMajorCompactedCellsSize()
getMajorCompactedCellsSize
in interface Store
public StoreEngine<?,?,?,?> getStoreEngine()
Returns: the StoreEngine object used internally inside this HStore object.

protected OffPeakHours getOffPeakHours()
public void onConfigurationChange(org.apache.hadoop.conf.Configuration conf)
This method would be called by the ConfigurationManager object when the Configuration object is reloaded from disk.
onConfigurationChange
in interface ConfigurationObserver
public void registerChildren(ConfigurationManager manager)
registerChildren
in interface PropagatingConfigurationObserver
manager - to register to

public void deregisterChildren(ConfigurationManager manager)
deregisterChildren
in interface PropagatingConfigurationObserver
manager - to deregister from

public double getCompactionPressure()
Store
This value can represent the degree of emergency of compaction for this store. For striped stores, we should calculate this value for the files in each stripe separately and return the maximum value.
It is similar to Store.getCompactPriority()
except that it is more suitable to use in a
linear formula.
getCompactionPressure
in interface Store
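A hedged, hypothetical example of using this value in a linear formula, as the description suggests, to interpolate a compaction throughput limit; minThroughput and maxThroughput are assumed configuration values, not part of this API.

```java
import org.apache.hadoop.hbase.regionserver.Store;

// Hedged sketch: map compaction pressure onto a throughput bound linearly.
static double tunedThroughput(Store store, double minThroughput, double maxThroughput) {
  // Clamp to [0, 1]; larger pressure means compaction is more urgent.
  double pressure = Math.min(1.0, Math.max(0.0, store.getCompactionPressure()));
  return minThroughput + (maxThroughput - minThroughput) * pressure;
}
```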
public boolean isPrimaryReplicaStore()
isPrimaryReplicaStore
in interface Store
Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.