Package | Description
---|---
org.apache.hadoop.hbase.mob |
org.apache.hadoop.hbase.regionserver |
org.apache.hadoop.hbase.regionserver.compactions |
org.apache.hadoop.hbase.regionserver.throttle |
Modifier and Type | Method and Description
---|---
protected void | MobStoreEngine.createCompactor(org.apache.hadoop.conf.Configuration conf, HStore store) - Creates the DefaultMobStoreCompactor.
protected void | MobStoreEngine.createStoreFlusher(org.apache.hadoop.conf.Configuration conf, HStore store)
Constructor and Description |
---|
DefaultMobStoreCompactor(org.apache.hadoop.conf.Configuration conf, HStore store) |
DefaultMobStoreFlusher(org.apache.hadoop.conf.Configuration conf, HStore store) |
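
The two tables above show how the MOB store engine plugs MOB-aware components into the default engine. Below is a minimal sketch of that override pattern; it assumes DefaultStoreEngine exposes protected `compactor` and `storeFlusher` fields that subclass overrides may assign, and the class name is illustrative rather than the actual MobStoreEngine source.

```java
package org.apache.hadoop.hbase.mob;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HStore;

// Hypothetical engine illustrating the createCompactor/createStoreFlusher overrides
// listed in the tables above.
public class ExampleMobStoreEngine extends DefaultStoreEngine {

  @Override
  protected void createCompactor(Configuration conf, HStore store) throws IOException {
    // Assumption: the inherited "compactor" field is assignable here,
    // so the MOB-aware compactor replaces the default one.
    this.compactor = new DefaultMobStoreCompactor(conf, store);
  }

  @Override
  protected void createStoreFlusher(Configuration conf, HStore store) throws IOException {
    // Assumption: the inherited "storeFlusher" field is assignable here,
    // so flushes go through the MOB-aware flusher.
    this.storeFlusher = new DefaultMobStoreFlusher(conf, store);
  }
}
```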
Modifier and Type | Class and Description
---|---
class | HMobStore - The store implementation to save MOBs (medium objects); it extends HStore.
Modifier and Type | Field and Description
---|---
private HStore | CompactingMemStore.store
private HStore | CompactedHFilesDischargeHandler.store
private HStore | CompactSplit.CompactionRunner.store
protected HStore | StoreScanner.store
protected HStore | StoreFlusher.store
Modifier and Type | Field and Description
---|---
private ConcurrentMap<HStore,Long> | HRegion.lastStoreFlushTimeMap
private Collection<HStore> | FlushNonSloppyStoresFirstPolicy.regularStores
private Collection<HStore> | FlushNonSloppyStoresFirstPolicy.sloppyStores
protected Map<byte[],HStore> | HRegion.stores
Modifier and Type | Method and Description
---|---
HStore | CompactingMemStore.getStore()
HStore | HRegion.getStore(byte[] column)
private HStore | HRegion.getStore(Cell cell) - Return HStore instance.
private static HStore | CompactionTool.CompactionWorker.getStore(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path tableDir, TableDescriptor htd, RegionInfo hri, String familyName)
protected HStore | HRegion.instantiateHStore(ColumnFamilyDescriptor family, boolean warmup)
Modifier and Type | Method and Description
---|---
private Collection<HStore> | HRegion.getSpecificStores(List<byte[]> families) - Gets the stores that match the specified families.
List<HStore> | HRegion.getStores()
Collection<HStore> | FlushAllStoresPolicy.selectStoresToFlush()
abstract Collection<HStore> | FlushPolicy.selectStoresToFlush() - Returns the stores that need to be flushed.
Collection<HStore> | FlushAllLargeStoresPolicy.selectStoresToFlush()
Collection<HStore> | FlushNonSloppyStoresFirstPolicy.selectStoresToFlush() - Returns the stores that need to be flushed.
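
The selectStoresToFlush() entries above describe the pluggable flush-policy contract. Below is a minimal sketch of a custom policy; it assumes FlushPolicy gives subclasses a protected `region` field (as the built-in policies appear to rely on) and that HStore exposes a memstore-size accessor, and both the class name and the non-empty-memstore heuristic are illustrative rather than part of HBase.

```java
package org.apache.hadoop.hbase.regionserver;

import java.util.ArrayList;
import java.util.Collection;

// Hypothetical policy: flush only the stores whose memstore currently holds data.
public class FlushNonEmptyStoresPolicy extends FlushPolicy {

  @Override
  public Collection<HStore> selectStoresToFlush() {
    Collection<HStore> toFlush = new ArrayList<>();
    // Assumption: the protected "region" field is populated before this is called.
    for (HStore store : region.getStores()) {
      // Assumption: HStore.getMemStoreSize() reports the current memstore footprint.
      if (store.getMemStoreSize().getDataSize() > 0) {
        toFlush.add(store);
      }
    }
    return toFlush;
  }
}
```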
Modifier and Type | Method and Description
---|---
private void | HRegion.applyToMemStore(HStore store, Cell cell, MemStoreSizing memstoreAccounting)
private void | HRegion.applyToMemStore(HStore store, List<Cell> cells, boolean delta, MemStoreSizing memstoreAccounting)
private void | BrokenStoreFileCleaner.cleanFileIfNeeded(org.apache.hadoop.fs.FileStatus file, HStore store, AtomicLong deletedFiles, AtomicLong failedDeletes)
boolean | HRegion.compact(CompactionContext compaction, HStore store, ThroughputController throughputController) - Called by the compaction thread and after region open to compact the HStores if necessary.
boolean | HRegion.compact(CompactionContext compaction, HStore store, ThroughputController throughputController, User user)
static StoreEngine<?,?,?,?> | StoreEngine.create(HStore store, org.apache.hadoop.conf.Configuration conf, CellComparator cellComparator) - Create the StoreEngine configured for the given Store.
protected void | DefaultStoreEngine.createCompactionPolicy(org.apache.hadoop.conf.Configuration conf, HStore store)
protected void | DefaultStoreEngine.createCompactor(org.apache.hadoop.conf.Configuration conf, HStore store)
protected void | StripeStoreEngine.createComponents(org.apache.hadoop.conf.Configuration conf, HStore store, CellComparator comparator)
protected void | DefaultStoreEngine.createComponents(org.apache.hadoop.conf.Configuration conf, HStore store, CellComparator kvComparator)
protected abstract void | StoreEngine.createComponents(org.apache.hadoop.conf.Configuration conf, HStore store, CellComparator cellComparator) - Create the StoreEngine's components.
protected void | DateTieredStoreEngine.createComponents(org.apache.hadoop.conf.Configuration conf, HStore store, CellComparator kvComparator)
protected void | StoreEngine.createComponentsOnce(org.apache.hadoop.conf.Configuration conf, HStore store, CellComparator cellComparator)
private InternalScanner | MemStoreCompactorSegmentsIterator.createScanner(HStore store, List<KeyValueScanner> scanners) - Creates the scanner for compacting the pipeline.
protected StoreEngine<?,?,?,?> | HMobStore.createStoreEngine(HStore store, org.apache.hadoop.conf.Configuration conf, CellComparator cellComparator) - Creates the mob store engine.
protected StoreEngine<?,?,?,?> | HStore.createStoreEngine(HStore store, org.apache.hadoop.conf.Configuration conf, CellComparator kvComparator) - Creates the store engine configured for the given Store.
private StoreFileTracker | StoreEngine.createStoreFileTracker(org.apache.hadoop.conf.Configuration conf, HStore store)
protected void | DefaultStoreEngine.createStoreFlusher(org.apache.hadoop.conf.Configuration conf, HStore store)
private void | BrokenStoreFileCleaner.deleteFile(org.apache.hadoop.fs.FileStatus file, HStore store, AtomicLong deletedFiles, AtomicLong failedDeletes)
private MemStoreSize | HRegion.doDropStoreMemStoreContentsForSeqId(HStore s, long currentSeqId)
private MemStoreSize | HRegion.dropMemStoreContentsForSeqId(long seqId, HStore store) - Drops the memstore contents after replaying a flush descriptor or a region open event, if the memstore edits have seqNums smaller than the given seq id.
private String | CompactSplit.getStoreNameForUnderCompaction(HStore store)
private boolean | BrokenStoreFileCleaner.isActiveStorefile(org.apache.hadoop.fs.FileStatus file, HStore store)
private boolean | BrokenStoreFileCleaner.isCompactedFile(org.apache.hadoop.fs.FileStatus file, HStore store)
private boolean | BrokenStoreFileCleaner.isCompactionResultFile(org.apache.hadoop.fs.FileStatus file, HStore store)
boolean | CompactSplit.isUnderCompaction(HStore s) - Check if this store is under compaction.
void | RegionCoprocessorHost.postCompact(HStore store, HStoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request, User user) - Called after the store compaction has completed.
void | RegionCoprocessorHost.postCompactSelection(HStore store, List<HStoreFile> selected, CompactionLifeCycleTracker tracker, CompactionRequest request, User user) - Called after the HStoreFiles to be compacted have been selected from the available candidates.
void | RegionCoprocessorHost.postFlush(HStore store, HStoreFile storeFile, FlushLifeCycleTracker tracker) - Invoked after a memstore flush.
void | RegionCoprocessorHost.postMemStoreCompaction(HStore store) - Invoked after in-memory compaction.
InternalScanner | RegionCoprocessorHost.preCompact(HStore store, InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, CompactionRequest request, User user) - Called prior to rewriting the store files selected for compaction.
ScanInfo | RegionCoprocessorHost.preCompactScannerOpen(HStore store, ScanType scanType, CompactionLifeCycleTracker tracker, CompactionRequest request, User user) - Called prior to opening a store scanner for compaction.
boolean | RegionCoprocessorHost.preCompactSelection(HStore store, List<HStoreFile> candidates, CompactionLifeCycleTracker tracker, User user) - Called prior to selecting the HStoreFiles for compaction from the list of currently available candidates.
InternalScanner | RegionCoprocessorHost.preFlush(HStore store, InternalScanner scanner, FlushLifeCycleTracker tracker) - Invoked before a memstore flush.
ScanInfo | RegionCoprocessorHost.preFlushScannerOpen(HStore store, FlushLifeCycleTracker tracker) - Invoked before creating a StoreScanner for flush.
void | RegionCoprocessorHost.preMemStoreCompaction(HStore store) - Invoked before in-memory compaction.
InternalScanner | RegionCoprocessorHost.preMemStoreCompactionCompact(HStore store, InternalScanner scanner) - Invoked before compacting the memstore.
ScanInfo | RegionCoprocessorHost.preMemStoreCompactionCompactScannerOpen(HStore store) - Invoked before creating a StoreScanner for in-memory compaction.
ScanInfo | RegionCoprocessorHost.preStoreScannerOpen(HStore store, Scan scan) - Called before opening a store scanner for a user scan.
private List<Cell> | HRegion.MutationBatchOperation.reckonDeltasByStore(HStore store, Mutation mutation, long now, List<Cell> deltas, List<Cell> results) - Reckon the Cells to apply to the WAL and memstore, and to return to the client, for the passed column family/Store.
void | CompactSplit.requestCompaction(HRegion region, HStore store, String why, int priority, CompactionLifeCycleTracker tracker, User user)
protected void | CompactSplit.requestCompactionInternal(HRegion region, HStore store, String why, int priority, boolean selectNow, CompactionLifeCycleTracker tracker, CompactSplit.CompactionCompleteTracker completeTracker, User user)
void | CompactSplit.requestSystemCompaction(HRegion region, HStore store, String why)
void | CompactSplit.requestSystemCompaction(HRegion region, HStore store, String why, boolean giveUpIfRequestedOrCompacting)
protected void | HRegion.restoreEdit(HStore s, Cell cell, MemStoreSizing memstoreAccounting) - Used by tests.
private Optional<CompactionContext> | CompactSplit.selectCompaction(HRegion region, HStore store, int priority, CompactionLifeCycleTracker tracker, CompactSplit.CompactionCompleteTracker completeTracker, User user)
protected List<KeyValueScanner> | StoreScanner.selectScannersFrom(HStore store, List<? extends KeyValueScanner> allScanners) - Filters the given list of scanners using Bloom filter, time range, and TTL.
protected boolean | FlushLargeStoresPolicy.shouldFlush(HStore store)
protected boolean | FlushAllLargeStoresPolicy.shouldFlush(HStore store)
(package private) boolean | HRegion.shouldFlushStore(HStore store) - Should the store be flushed because it is old enough.
boolean | StoreFileScanner.shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS)
boolean | NonLazyKeyValueScanner.shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS)
boolean | KeyValueScanner.shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) - Allows filtering out scanners (both StoreFile and memstore) that we don't want to use, based on criteria such as Bloom filters and timestamp ranges.
boolean | SegmentScanner.shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) - This functionality should be resolved in the higher level, which is MemStoreScanner; currently returns true by default.
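
The CompactSplit entries above are the region server's entry points for scheduling compactions on a store. Below is a minimal sketch of that request flow; it assumes you already hold the server's CompactSplit instance along with a region and one of its stores, and the class, method, and "why" string are illustrative, using only the CompactSplit methods listed above.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.CompactSplit;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;

// Hypothetical caller showing isUnderCompaction + requestSystemCompaction together.
final class CompactionRequestExample {
  static void maybeRequest(CompactSplit compactSplit, HRegion region, HStore store)
      throws IOException {
    // Don't pile a second request onto a store that is already compacting.
    if (compactSplit.isUnderCompaction(store)) {
      return;
    }
    // Ask for a system (non-user) compaction; the "why" string ends up in the logs.
    compactSplit.requestSystemCompaction(region, store, "example: periodic maintenance");
  }
}
```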
Modifier and Type | Method and Description
---|---
private HRegion.FlushResultImpl | HRegion.internalFlushcache(Collection<HStore> storesToFlush, MonitoredTask status, boolean writeFlushWalMarker, FlushLifeCycleTracker tracker) - Flushes the given stores.
protected HRegion.FlushResultImpl | HRegion.internalFlushcache(WAL wal, long myseqid, Collection<HStore> storesToFlush, MonitoredTask status, boolean writeFlushWalMarker, FlushLifeCycleTracker tracker) - Flush the memstore.
(package private) HRegion.FlushResultImpl | HRegion.internalFlushCacheAndCommit(WAL wal, MonitoredTask status, HRegion.PrepareFlushResult prepareResult, Collection<HStore> storesToFlush)
protected HRegion.PrepareFlushResult | HRegion.internalPrepareFlushCache(WAL wal, long myseqid, Collection<HStore> storesToFlush, MonitoredTask status, boolean writeFlushWalMarker, FlushLifeCycleTracker tracker)
private boolean | HRegion.isAllFamilies(Collection<HStore> families) - Returns true if the passed collection contains all families in the region.
private long | HRegion.loadRecoveredHFilesIfAny(Collection<HStore> stores)
private void | HRegion.logFatLineOnFlush(Collection<HStore> storesToFlush, long sequenceId) - Utility method broken out of internalPrepareFlushCache so that method is smaller.
Constructor and Description |
---|
CompactedHFilesDischargeHandler(Server server, EventType eventType, HStore store) |
CompactingMemStore(org.apache.hadoop.conf.Configuration conf, CellComparator c, HStore store, RegionServicesForStores regionServices, MemoryCompactionPolicy compactionPolicy) |
CompactionRunner(HStore store, HRegion region, CompactionContext compaction, CompactionLifeCycleTracker tracker, CompactSplit.CompactionCompleteTracker completeTracker, ThreadPoolExecutor parent, User user) |
DefaultStoreFlusher(org.apache.hadoop.conf.Configuration conf, HStore store) |
MemStoreCompactorSegmentsIterator(List<ImmutableSegment> segments, CellComparator comparator, int compactionKVMax, HStore store) |
MobStoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet<byte[]> columns, long readPt) |
ReversedMobStoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet<byte[]> columns, long readPt) |
ReversedStoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet<byte[]> columns, long readPt) - Opens a scanner across memstore, snapshot, and all StoreFiles. |
StoreFlusher(org.apache.hadoop.conf.Configuration conf, HStore store) |
StoreScanner(HStore store, ScanInfo scanInfo, List<? extends KeyValueScanner> scanners, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) - Used for compactions that drop deletes from a limited range of rows. |
StoreScanner(HStore store, ScanInfo scanInfo, List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint, long earliestPutTs) - Used for store file compaction and memstore compaction. |
StoreScanner(HStore store, ScanInfo scanInfo, List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) |
StoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet<byte[]> columns, long readPt) - Opens a scanner across memstore, snapshot, and all StoreFiles. |
StoreScanner(HStore store, Scan scan, ScanInfo scanInfo, int numColumns, long readPt, boolean cacheBlocks, ScanType scanType) - An internal constructor. |
StripeStoreFlusher(org.apache.hadoop.conf.Configuration conf, HStore store, StripeCompactionPolicy policy, StripeStoreFileManager stripes) |
Modifier and Type | Field and Description
---|---
protected HStore | Compactor.store
Modifier and Type | Method and Description
---|---
protected InternalScanner | Compactor.createScanner(HStore store, ScanInfo scanInfo, List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow)
protected InternalScanner | Compactor.createScanner(HStore store, ScanInfo scanInfo, List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint, long earliestPutTs)
void | CompactionRequester.requestCompaction(HRegion region, HStore store, String why, int priority, CompactionLifeCycleTracker tracker, User user) - Request compaction on the given store.
void | CompactionRequester.requestSystemCompaction(HRegion region, HStore store, String why, boolean giveUpIfRequestedOrCompacting) - Request system compaction on the given store.
Constructor and Description |
---|
AbstractMultiOutputCompactor(org.apache.hadoop.conf.Configuration conf, HStore store) |
Compactor(org.apache.hadoop.conf.Configuration conf, HStore store) |
DateTieredCompactor(org.apache.hadoop.conf.Configuration conf, HStore store) |
DefaultCompactor(org.apache.hadoop.conf.Configuration conf, HStore store) |
StripeCompactor(org.apache.hadoop.conf.Configuration conf, HStore store) |
Modifier and Type | Method and Description
---|---
static String | ThroughputControlUtil.getNameForThrottling(HStore store, String opName) - Generate a name for throttling, to prevent name conflicts when multiple IO operations run in parallel on the same store.
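
A minimal sketch of how that helper might be called; it assumes an HStore handle obtained elsewhere (for example via HRegion.getStores()), and the wrapper class and the "flush" operation name are illustrative.

```java
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputControlUtil;

// Hypothetical wrapper around the static helper listed above.
final class ThrottleNameExample {
  static String flushThrottleName(HStore store) {
    // Each (store, operation) pair gets a distinct name, so parallel flushes and
    // compactions on the same store don't collide in the throughput controller.
    return ThroughputControlUtil.getNameForThrottling(store, "flush");
  }
}
```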
Copyright © 2007–2020 The Apache Software Foundation. All rights reserved.