Package | Description |
---|---|
org.apache.hadoop.hbase.mob | |
org.apache.hadoop.hbase.regionserver.compactions | |
Modifier and Type | Method and Description |
---|---|
protected List<org.apache.hadoop.fs.Path> | DefaultMobStoreCompactor.commitWriter(StoreFileWriter writer, Compactor.FileDetails fd, CompactionRequestImpl request) |
private StoreFileWriter | DefaultMobStoreCompactor.newMobWriter(Compactor.FileDetails fd, boolean major, Consumer<org.apache.hadoop.fs.Path> writerCreationTracker) |
protected boolean | DefaultMobStoreCompactor.performCompaction(Compactor.FileDetails fd, InternalScanner scanner, CellSink writer, long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, CompactionRequestImpl request, CompactionProgress progress). Performs compaction on a column family with the mob flag enabled. |
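The commitWriter hook above pairs the FileDetails gathered for a compaction with the finished writer. As a rough illustration (not the actual DefaultMobStoreCompactor code), a single-output commitWriter of this shape typically stamps the writer with metadata taken from fd and the request, closes it, and returns the new file path; appendMetadata(long, boolean), getPath(), isMajor(), the maxSeqId field and the throws IOException clause are assumptions here, since they do not appear in the table.

```java
// A minimal sketch of a commitWriter override inside a Compactor<StoreFileWriter>
// subclass. Names not shown in the table above (appendMetadata, getPath, isMajor,
// maxSeqId) are assumptions, not verified DefaultMobStoreCompactor source.
@Override
protected List<org.apache.hadoop.fs.Path> commitWriter(StoreFileWriter writer,
    Compactor.FileDetails fd, CompactionRequestImpl request) throws IOException {
  // Record the highest sequence id seen across the input files and whether this
  // was a major compaction, then seal the new store file.
  writer.appendMetadata(fd.maxSeqId, request.isMajor());
  writer.close();
  return java.util.Collections.singletonList(writer.getPath());
}
```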
Modifier and Type | Method and Description |
---|---|
private Compactor.FileDetails | Compactor.getFileDetails(Collection<HStoreFile> filesToCompact, boolean allFiles, boolean major). Extracts some details about the files to compact that are commonly needed by compactors. |
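getFileDetails is private to Compactor, so subclasses receive its result rather than calling it; conceptually it folds per-file statistics into one FileDetails object that the later stages listed below (scanner and writer creation, performCompaction, commitWriter) consume. The sketch below uses invented stand-in types (FileSummary, FileDetailsSketch) purely to illustrate that aggregation; it is not the HBase implementation.

```java
// Conceptual sketch with hypothetical types, not HBase source: aggregate the
// per-file statistics that a compactor commonly needs before it starts writing.
record FileSummary(long entryCount, long maxSequenceId, long earliestPutTs) {}

record FileDetailsSketch(long maxKeyCount, long maxSeqId, long earliestPutTs) {}

static FileDetailsSketch summarize(java.util.Collection<FileSummary> filesToCompact) {
  long maxKeyCount = 0;                 // upper bound on cells the output may hold
  long maxSeqId = 0;                    // newest sequence id across all inputs
  long earliestPutTs = Long.MAX_VALUE;  // oldest put timestamp across all inputs
  for (FileSummary f : filesToCompact) {
    maxKeyCount += f.entryCount();
    maxSeqId = Math.max(maxSeqId, f.maxSequenceId());
    earliestPutTs = Math.min(earliestPutTs, f.earliestPutTs());
  }
  return new FileDetailsSketch(maxKeyCount, maxSeqId, earliestPutTs);
}
```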
Modifier and Type | Method and Description |
---|---|
protected List<org.apache.hadoop.fs.Path> | DateTieredCompactor.commitWriter(DateTieredMultiFileWriter writer, Compactor.FileDetails fd, CompactionRequestImpl request) |
protected List<org.apache.hadoop.fs.Path> | DefaultCompactor.commitWriter(StoreFileWriter writer, Compactor.FileDetails fd, CompactionRequestImpl request) |
protected List<org.apache.hadoop.fs.Path> | StripeCompactor.commitWriter(StripeMultiFileWriter writer, Compactor.FileDetails fd, CompactionRequestImpl request) |
protected abstract List<org.apache.hadoop.fs.Path> | Compactor.commitWriter(T writer, Compactor.FileDetails fd, CompactionRequestImpl request) |
protected CreateStoreFileWriterParams | Compactor.createParams(Compactor.FileDetails fd, boolean shouldDropBehind, boolean major, Consumer<org.apache.hadoop.fs.Path> writerCreationTracker) |
InternalScanner | StripeCompactor.StripeInternalScannerFactory.createScanner(ScanInfo scanInfo, List<StoreFileScanner> scanners, ScanType scanType, Compactor.FileDetails fd, long smallestReadPoint) |
InternalScanner | Compactor.InternalScannerFactory.createScanner(ScanInfo scanInfo, List<StoreFileScanner> scanners, ScanType scanType, Compactor.FileDetails fd, long smallestReadPoint) |
protected StoreFileWriter | Compactor.createWriter(Compactor.FileDetails fd, boolean shouldDropBehind, boolean major, Consumer<org.apache.hadoop.fs.Path> writerCreationTracker). Creates a writer for a new file. |
protected StoreFileWriter | Compactor.createWriter(Compactor.FileDetails fd, boolean shouldDropBehind, String fileStoragePolicy, boolean major, Consumer<org.apache.hadoop.fs.Path> writerCreationTracker) |
S | Compactor.CellSinkFactory.createWriter(InternalScanner scanner, Compactor.FileDetails fd, boolean shouldDropBehind, boolean major, Consumer<org.apache.hadoop.fs.Path> writerCreationTracker) |
protected void | AbstractMultiOutputCompactor.initMultiWriter(AbstractMultiFileWriter writer, InternalScanner scanner, Compactor.FileDetails fd, boolean shouldDropBehind, boolean major, Consumer<org.apache.hadoop.fs.Path> writerCreationTracker) |
protected boolean | Compactor.performCompaction(Compactor.FileDetails fd, InternalScanner scanner, CellSink writer, long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, CompactionRequestImpl request, CompactionProgress progress). Performs the compaction. |
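The Compactor.CellSinkFactory.createWriter(...) and protected Compactor.createWriter(...) rows above fit together as a delegation pattern: the factory forwards the per-compaction FileDetails to the protected writer-creation hook. A minimal sketch of that wiring inside a hypothetical Compactor<StoreFileWriter> subclass follows (ExampleCompactor is an invented name, and the throws IOException clause is an assumption); it is not the actual DefaultCompactor source.

```java
// Sketch only: a writer factory that hands the aggregated FileDetails straight to
// Compactor.createWriter(FileDetails, boolean, boolean, Consumer) from the table above.
private final Compactor.CellSinkFactory<StoreFileWriter> writerFactory =
    new Compactor.CellSinkFactory<StoreFileWriter>() {
      @Override
      public StoreFileWriter createWriter(InternalScanner scanner, Compactor.FileDetails fd,
          boolean shouldDropBehind, boolean major,
          Consumer<org.apache.hadoop.fs.Path> writerCreationTracker) throws IOException {
        // Qualify with the enclosing class so the call resolves to the protected
        // Compactor.createWriter overload rather than this factory method.
        return ExampleCompactor.this.createWriter(fd, shouldDropBehind, major,
            writerCreationTracker);
      }
    };
```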
Copyright © 2007–2020 The Apache Software Foundation. All rights reserved.