Package | Description |
---|---|
org.apache.hadoop.hbase.mob | |
org.apache.hadoop.hbase.regionserver | |
org.apache.hadoop.hbase.regionserver.compactions | |
Modifier and Type | Method and Description |
---|---|
protected List<org.apache.hadoop.fs.Path> |
DefaultMobStoreCompactor.commitWriter(StoreFileWriter writer,
Compactor.FileDetails fd,
CompactionRequestImpl request) |
List<org.apache.hadoop.fs.Path> |
DefaultMobStoreCompactor.compact(CompactionRequestImpl request,
ThroughputController throughputController,
User user) |
protected boolean |
DefaultMobStoreCompactor.performCompaction(Compactor.FileDetails fd,
InternalScanner scanner,
CellSink writer,
long smallestReadPoint,
boolean cleanSeqId,
ThroughputController throughputController,
CompactionRequestImpl request,
CompactionProgress progress)
Performs compaction on a column family with the mob flag enabled.
|
Modifier and Type | Method and Description |
---|---|
protected List<HStoreFile> |
HStore.doCompaction(CompactionRequestImpl cr,
Collection<HStoreFile> filesToCompact,
User user,
long compactionStartTime,
List<org.apache.hadoop.fs.Path> newFiles) |
protected void |
HStore.finishCompactionRequest(CompactionRequestImpl cr) |
void |
StripeStoreEngine.StripeCompaction.forceSelect(CompactionRequestImpl request) |
void |
DateTieredStoreEngine.DateTieredCompactionContext.forceSelect(CompactionRequestImpl request) |
private void |
HStore.logCompactionEndMessage(CompactionRequestImpl cr,
List<HStoreFile> sfs,
long now,
long compactionStartTime)
Log a very elaborate compaction completion message.
|
Modifier and Type | Class and Description |
---|---|
class |
DateTieredCompactionRequest |
Modifier and Type | Field and Description |
---|---|
protected CompactionRequestImpl |
StripeCompactionPolicy.StripeCompactionRequest.request |
protected CompactionRequestImpl |
CompactionContext.request |
Modifier and Type | Method and Description |
---|---|
protected CompactionRequestImpl |
DateTieredCompactionPolicy.createCompactionRequest(ArrayList<HStoreFile> candidateSelection,
boolean tryingMajor,
boolean mayUseOffPeak,
boolean mayBeStuck) |
protected CompactionRequestImpl |
RatioBasedCompactionPolicy.createCompactionRequest(ArrayList<HStoreFile> candidateSelection,
boolean tryingMajor,
boolean mayUseOffPeak,
boolean mayBeStuck) |
protected abstract CompactionRequestImpl |
SortedCompactionPolicy.createCompactionRequest(ArrayList<HStoreFile> candidateSelection,
boolean tryingMajor,
boolean mayUseOffPeak,
boolean mayBeStuck) |
CompactionRequestImpl |
StripeCompactionPolicy.StripeCompactionRequest.getRequest() |
CompactionRequestImpl |
CompactionContext.getRequest() |
CompactionRequestImpl |
SortedCompactionPolicy.selectCompaction(Collection<HStoreFile> candidateFiles,
List<HStoreFile> filesCompacting,
boolean isUserCompaction,
boolean mayUseOffPeak,
boolean forceMajor) |
CompactionRequestImpl |
FIFOCompactionPolicy.selectCompaction(Collection<HStoreFile> candidateFiles,
List<HStoreFile> filesCompacting,
boolean isUserCompaction,
boolean mayUseOffPeak,
boolean forceMajor) |
CompactionRequestImpl |
DateTieredCompactionPolicy.selectMajorCompaction(ArrayList<HStoreFile> candidateSelection) |
CompactionRequestImpl |
DateTieredCompactionPolicy.selectMinorCompaction(ArrayList<HStoreFile> candidateSelection,
boolean mayUseOffPeak,
boolean mayBeStuck)
We receive store files sorted in ascending order by seqId, then scan the list of files.
|
Modifier and Type | Method and Description |
---|---|
protected List<org.apache.hadoop.fs.Path> |
DateTieredCompactor.commitWriter(DateTieredMultiFileWriter writer,
Compactor.FileDetails fd,
CompactionRequestImpl request) |
protected List<org.apache.hadoop.fs.Path> |
DefaultCompactor.commitWriter(StoreFileWriter writer,
Compactor.FileDetails fd,
CompactionRequestImpl request) |
protected List<org.apache.hadoop.fs.Path> |
StripeCompactor.commitWriter(StripeMultiFileWriter writer,
Compactor.FileDetails fd,
CompactionRequestImpl request) |
protected abstract List<org.apache.hadoop.fs.Path> |
Compactor.commitWriter(T writer,
Compactor.FileDetails fd,
CompactionRequestImpl request) |
protected List<org.apache.hadoop.fs.Path> |
Compactor.compact(CompactionRequestImpl request,
Compactor.InternalScannerFactory scannerFactory,
Compactor.CellSinkFactory<T> sinkFactory,
ThroughputController throughputController,
User user) |
List<org.apache.hadoop.fs.Path> |
StripeCompactor.compact(CompactionRequestImpl request,
int targetCount,
long targetSize,
byte[] left,
byte[] right,
byte[] majorRangeFromRow,
byte[] majorRangeToRow,
ThroughputController throughputController,
User user) |
List<org.apache.hadoop.fs.Path> |
StripeCompactor.compact(CompactionRequestImpl request,
List<byte[]> targetBoundaries,
byte[] majorRangeFromRow,
byte[] majorRangeToRow,
ThroughputController throughputController,
User user) |
List<org.apache.hadoop.fs.Path> |
DateTieredCompactor.compact(CompactionRequestImpl request,
List<Long> lowerBoundaries,
Map<Long,String> lowerBoundariesPolicies,
ThroughputController throughputController,
User user) |
List<org.apache.hadoop.fs.Path> |
DefaultCompactor.compact(CompactionRequestImpl request,
ThroughputController throughputController,
User user)
Do a minor/major compaction on an explicit set of storefiles from a Store.
|
StripeCompactionPolicy.StripeCompactionRequest |
StripeCompactionPolicy.createEmptyRequest(StripeCompactionPolicy.StripeInformationProvider si,
CompactionRequestImpl request) |
void |
CompactionContext.forceSelect(CompactionRequestImpl request)
Forces external selection to be applied for this compaction.
|
ScanType |
StripeCompactor.StripeInternalScannerFactory.getScanType(CompactionRequestImpl request) |
ScanType |
Compactor.InternalScannerFactory.getScanType(CompactionRequestImpl request) |
private boolean |
DateTieredCompactor.needEmptyFile(CompactionRequestImpl request) |
protected boolean |
Compactor.performCompaction(Compactor.FileDetails fd,
InternalScanner scanner,
CellSink writer,
long smallestReadPoint,
boolean cleanSeqId,
ThroughputController throughputController,
CompactionRequestImpl request,
CompactionProgress progress)
Performs the compaction.
|
private InternalScanner |
Compactor.postCompactScannerOpen(CompactionRequestImpl request,
ScanType scanType,
InternalScanner scanner,
User user)
Calls coprocessor, if any, to create scanners - after normal scanner creation.
|
private ScanInfo |
Compactor.preCompactScannerOpen(CompactionRequestImpl request,
ScanType scanType,
User user) |
void |
StripeCompactionPolicy.StripeCompactionRequest.setRequest(CompactionRequestImpl request) |
Constructor and Description |
---|
BoundaryStripeCompactionRequest(CompactionRequestImpl request,
List<byte[]> targetBoundaries) |
SplitStripeCompactionRequest(CompactionRequestImpl request,
byte[] startRow,
byte[] endRow,
int targetCount,
long targetKvs) |
StripeCompactionRequest(CompactionRequestImpl request) |
Copyright © 2007–2020 The Apache Software Foundation. All rights reserved.