@InterfaceAudience.Private public class HMobStore extends HStore
Modifier and Type | Field and Description
---|---
private AtomicLong | cellsCountCompactedFromMob
private AtomicLong | cellsCountCompactedToMob
private AtomicLong | cellsSizeCompactedFromMob
private AtomicLong | cellsSizeCompactedToMob
private ColumnFamilyDescriptor | family
private org.apache.hadoop.fs.Path | homePath
private IdLock | keyLock
private static org.slf4j.Logger | LOG
private Map<String,List<org.apache.hadoop.fs.Path>> | map
private MobCacheConfig | mobCacheConfig
private org.apache.hadoop.fs.Path | mobFamilyPath
private AtomicLong | mobFlushCount
private AtomicLong | mobFlushedCellsCount
private AtomicLong | mobFlushedCellsSize
private AtomicLong | mobScanCellsCount
private AtomicLong | mobScanCellsSize
private byte[] | refCellTags
Fields inherited from class HStore: archiveLock, BLOCK_STORAGE_POLICY_KEY, BLOCKING_STOREFILES_KEY, blocksize, bytesPerChecksum, cacheConf, checksumType, closeCheckInterval, COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY, comparator, conf, cryptoContext, DEEP_OVERHEAD, DEFAULT_BLOCK_STORAGE_POLICY, DEFAULT_BLOCKING_STOREFILE_COUNT, DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER, FIXED_OVERHEAD, forceMajor, lock, memstore, MEMSTORE_CLASS_NAME, region, storeEngine
Fields inherited from interface Store: NO_PRIORITY, PRIORITY_USER
Constructor and Description
---
HMobStore(HRegion region, ColumnFamilyDescriptor family, org.apache.hadoop.conf.Configuration confParam)
Modifier and Type | Method and Description
---|---
void | commitFile(org.apache.hadoop.fs.Path sourceFile, org.apache.hadoop.fs.Path targetPath) - Commits the mob file.
protected void | createCacheConf(ColumnFamilyDescriptor family) - Creates the mob cache config.
StoreFileWriter | createDelFileWriterInTmp(Date date, long maxKeyCount, Compression.Algorithm compression, byte[] startKey) - Creates the writer for the del file in the temp directory.
protected KeyValueScanner | createScanner(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> targetCols, long readPt) - Gets the MobStoreScanner or MobReversedStoreScanner.
protected StoreEngine<?,?,?,?> | createStoreEngine(HStore store, org.apache.hadoop.conf.Configuration conf, CellComparator cellComparator) - Creates the mob store engine.
StoreFileWriter | createWriterInTmp(Date date, long maxKeyCount, Compression.Algorithm compression, byte[] startKey, boolean isCompaction) - Creates the writer for the mob file in the temp directory.
StoreFileWriter | createWriterInTmp(MobFileName mobFileName, org.apache.hadoop.fs.Path basePath, long maxKeyCount, Compression.Algorithm compression, boolean isCompaction) - Creates the writer for the mob file in the temp directory.
StoreFileWriter | createWriterInTmp(String date, org.apache.hadoop.fs.Path basePath, long maxKeyCount, Compression.Algorithm compression, byte[] startKey, boolean isCompaction) - Creates the writer for the mob file in the temp directory.
long | getCellsCountCompactedFromMob()
long | getCellsCountCompactedToMob()
long | getCellsSizeCompactedFromMob()
long | getCellsSizeCompactedToMob()
org.apache.hadoop.conf.Configuration | getConfiguration() - Gets the current config.
long | getMobFlushCount()
long | getMobFlushedCellsCount()
long | getMobFlushedCellsSize()
long | getMobScanCellsCount()
long | getMobScanCellsSize()
org.apache.hadoop.fs.Path | getPath() - Gets the mob file path.
byte[] | getRefCellTags()
private org.apache.hadoop.fs.Path | getTempDir() - Gets the temp directory.
private Cell | readCell(List<org.apache.hadoop.fs.Path> locations, String fileName, Cell search, boolean cacheMobBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) - Reads the cell from a mob file.
Cell | resolve(Cell reference, boolean cacheBlocks) - Reads the cell from the mob file, ignoring the read point.
Cell | resolve(Cell reference, boolean cacheBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) - Reads the cell from the mob file.
void | updateCellsCountCompactedFromMob(long count)
void | updateCellsCountCompactedToMob(long count)
void | updateCellsSizeCompactedFromMob(long size)
void | updateCellsSizeCompactedToMob(long size)
void | updateMobFlushCount()
void | updateMobFlushedCellsCount(long count)
void | updateMobFlushedCellsSize(long size)
void | updateMobScanCellsCount(long count)
void | updateMobScanCellsSize(long size)
private void | validateMobFile(org.apache.hadoop.fs.Path path) - Validates a mob file by opening and closing it.
Methods inherited from class HStore: add, add, addChangedReaderObserver, areWritesEnabled, assertBulkLoadHFileOk, bulkLoadHFile, bulkLoadHFile, cancelRequestedCompaction, canSplit, close, closeAndArchiveCompactedFiles, closeAndArchiveCompactedFiles, compact, compactRecentForTestingAssumingDefaultPolicy, completeCompaction, createFlushContext, createStoreFileAndReader, createWriterInTmp, deleteChangedReaderObserver, deregisterChildren, determineTTLFromFamily, doCompaction, flushCache, getAvgStoreFileAge, getBlockingFileCount, getBytesPerChecksum, getCacheConfig, getChecksumType, getCloseCheckInterval, getColumnFamilyDescriptor, getColumnFamilyName, getCompactedCellsCount, getCompactedCellsSize, getCompactedFiles, getCompactedFilesCount, getCompactionCheckMultiplier, getCompactionPressure, getCompactionProgress, getCompactPriority, getComparator, getCoprocessorHost, getDataBlockEncoder, getFileSystem, getFlushableSize, getFlushedCellsCount, getFlushedCellsSize, getFlushedOutputFileSize, getHFilesSize, getHRegion, getLastCompactSize, getMajorCompactedCellsCount, getMajorCompactedCellsSize, getMaxMemStoreTS, getMaxSequenceId, getMaxStoreFileAge, getMemStoreFlushSize, getMemStoreSize, getMinStoreFileAge, getNumHFiles, getNumReferenceFiles, getOffPeakHours, getRegionFileSystem, getRegionInfo, getScanInfo, getScanner, getScanners, getScanners, getScanners, getScanners, getSize, getSmallestReadPoint, getSnapshotSize, getSplitPoint, getStoreEngine, getStorefiles, getStorefilesCount, getStorefilesRootLevelIndexSize, getStorefilesSize, getStoreFileTtl, getStoreHomedir, getStoreHomedir, getStoreSizeUncompressed, getTableName, getTotalStaticBloomSize, getTotalStaticIndexSize, hasReferences, hasTooManyStoreFiles, heapSize, isPrimaryReplicaStore, isSloppyMemStore, moveFileIntoPlace, needsCompaction, onConfigurationChange, postSnapshotOperation, preBulkLoadHFile, preFlushSeqIDEstimation, preSnapshotOperation, recreateScanners, refreshStoreFiles, refreshStoreFiles, registerChildren, replaceStoreFiles, replayCompactionMarker, requestCompaction, requestCompaction, setDataBlockEncoderInTest, setScanInfo, shouldPerformMajorCompaction, snapshot, startReplayingFromWAL, stopReplayingFromWAL, throttleCompaction, timeOfOldestEdit, toString, triggerMajorCompaction, upsert, versionsToReturn
private static final org.slf4j.Logger LOG
private MobCacheConfig mobCacheConfig
private org.apache.hadoop.fs.Path homePath
private org.apache.hadoop.fs.Path mobFamilyPath
private AtomicLong cellsCountCompactedToMob
private AtomicLong cellsCountCompactedFromMob
private AtomicLong cellsSizeCompactedToMob
private AtomicLong cellsSizeCompactedFromMob
private AtomicLong mobFlushCount
private AtomicLong mobFlushedCellsCount
private AtomicLong mobFlushedCellsSize
private AtomicLong mobScanCellsCount
private AtomicLong mobScanCellsSize
private ColumnFamilyDescriptor family
private final byte[] refCellTags
public HMobStore(HRegion region, ColumnFamilyDescriptor family, org.apache.hadoop.conf.Configuration confParam) throws IOException
Throws: IOException
protected void createCacheConf(ColumnFamilyDescriptor family)
Creates the mob cache config.
Overrides: createCacheConf in class HStore
Parameters:
family - The current column family.
public org.apache.hadoop.conf.Configuration getConfiguration()
Gets the current config.
protected KeyValueScanner createScanner(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> targetCols, long readPt) throws IOException
Gets the MobStoreScanner or MobReversedStoreScanner.
Overrides: createScanner in class HStore
Throws: IOException
protected StoreEngine<?,?,?,?> createStoreEngine(HStore store, org.apache.hadoop.conf.Configuration conf, CellComparator cellComparator) throws IOException
Creates the mob store engine.
Overrides: createStoreEngine in class HStore
Parameters:
store - The store. An unfortunate dependency needed due to it being passed to coprocessors via the compactor.
conf - Store configuration.
cellComparator - KVComparator for storeFileManager.
Throws: IOException
private org.apache.hadoop.fs.Path getTempDir()
public StoreFileWriter createWriterInTmp(Date date, long maxKeyCount, Compression.Algorithm compression, byte[] startKey, boolean isCompaction) throws IOException
Parameters:
date - The latest date of written cells.
maxKeyCount - The key count.
compression - The compression algorithm.
startKey - The start key.
isCompaction - Whether the writer is used in a compaction.
Throws: IOException
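For orientation, here is a minimal sketch of how a mob flush might use this writer. It assumes an HMobStore instance (mobStore), a MOB cell (mobCell), an estimated key count, and a region start key supplied by the surrounding flush code; those names are illustrative placeholders, not part of this API.

```java
// Hypothetical flush fragment: create a mob writer in the temp directory,
// append the large cell, and close the writer.
StoreFileWriter mobWriter = mobStore.createWriterInTmp(
    new java.util.Date(),           // latest date of the written cells
    estimatedKeyCount,              // maxKeyCount (placeholder)
    Compression.Algorithm.NONE,     // compression algorithm for the mob file
    regionStartKey,                 // start key (placeholder)
    false);                         // isCompaction: this writer serves a flush
try {
  mobWriter.append(mobCell);        // write the MOB cell into the mob file
} finally {
  mobWriter.close();
}
```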
public StoreFileWriter createDelFileWriterInTmp(Date date, long maxKeyCount, Compression.Algorithm compression, byte[] startKey) throws IOException
Parameters:
date - The latest date of written cells.
maxKeyCount - The key count.
compression - The compression algorithm.
startKey - The start key.
Throws: IOException
public StoreFileWriter createWriterInTmp(String date, org.apache.hadoop.fs.Path basePath, long maxKeyCount, Compression.Algorithm compression, byte[] startKey, boolean isCompaction) throws IOException
Parameters:
date - The date string; its format is yyyymmmdd.
basePath - The base path for the temp directory.
maxKeyCount - The key count.
compression - The compression algorithm.
startKey - The start key.
isCompaction - Whether the writer is used in a compaction.
Throws: IOException
public StoreFileWriter createWriterInTmp(MobFileName mobFileName, org.apache.hadoop.fs.Path basePath, long maxKeyCount, Compression.Algorithm compression, boolean isCompaction) throws IOException
Parameters:
mobFileName - The mob file name.
basePath - The base path for the temp directory.
maxKeyCount - The key count.
compression - The compression algorithm.
isCompaction - Whether the writer is used in a compaction.
Throws: IOException
public void commitFile(org.apache.hadoop.fs.Path sourceFile, org.apache.hadoop.fs.Path targetPath) throws IOException
Parameters:
sourceFile - The source file.
targetPath - The directory path where the source file is renamed to.
Throws: IOException
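Continuing the hedged flush sketch above, the temp file is then moved into the mob family directory with commitFile; mobStore and mobWriter are the same illustrative placeholders.

```java
// Hypothetical commit fragment: once the writer is closed, rename the file
// from the temp directory into the mob family directory reported by getPath().
org.apache.hadoop.fs.Path tmpFile = mobWriter.getPath();      // file written in tmp
org.apache.hadoop.fs.Path mobFamilyDir = mobStore.getPath();  // mob family directory
mobStore.commitFile(tmpFile, mobFamilyDir);
```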
private void validateMobFile(org.apache.hadoop.fs.Path path) throws IOException
Parameters:
path - The path to the mob file.
Throws: IOException
public Cell resolve(Cell reference, boolean cacheBlocks) throws IOException
Parameters:
reference - The cell found in HBase; its value is the path to a mob file.
cacheBlocks - Whether the scanner should cache blocks.
Throws: IOException
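A hedged sketch of resolving a reference cell during a read; refCell and mobStore are illustrative placeholders supplied by the scan path, not part of this class.

```java
// Hypothetical read fragment: the cell returned by the regular store files only
// carries a reference to the mob file; resolve() loads the actual value.
Cell resolved = mobStore.resolve(refCell, true /* cacheBlocks */);
byte[] mobValue = CellUtil.cloneValue(resolved);  // the real MOB value bytes
```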
public Cell resolve(Cell reference, boolean cacheBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) throws IOException
Parameters:
reference - The cell found in HBase; its value is the path to a mob file.
cacheBlocks - Whether the scanner should cache blocks.
readPt - The read point.
readEmptyValueOnMobCellMiss - Whether to return a null value when the mob file is missing or corrupt.
Throws: IOException
private Cell readCell(List<org.apache.hadoop.fs.Path> locations, String fileName, Cell search, boolean cacheMobBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) throws IOException
Parameters:
locations - The possible locations where the mob files are saved.
fileName - The file to be read.
search - The cell to be searched for.
cacheMobBlocks - Whether the scanner should cache blocks.
readPt - The read point.
readEmptyValueOnMobCellMiss - Whether to return a null value when the mob file is missing or corrupt.
Throws: IOException
public org.apache.hadoop.fs.Path getPath()
public void updateCellsCountCompactedToMob(long count)
public long getCellsCountCompactedToMob()
public void updateCellsCountCompactedFromMob(long count)
public long getCellsCountCompactedFromMob()
public void updateCellsSizeCompactedToMob(long size)
public long getCellsSizeCompactedToMob()
public void updateCellsSizeCompactedFromMob(long size)
public long getCellsSizeCompactedFromMob()
public void updateMobFlushCount()
public long getMobFlushCount()
public void updateMobFlushedCellsCount(long count)
public long getMobFlushedCellsCount()
public void updateMobFlushedCellsSize(long size)
public long getMobFlushedCellsSize()
public void updateMobScanCellsCount(long count)
public long getMobScanCellsCount()
public void updateMobScanCellsSize(long size)
public long getMobScanCellsSize()
public byte[] getRefCellTags()
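To illustrate the update/get counter pairs above, a hedged sketch of how mob compaction code might account for a cell that was rewritten into a mob file; mobStore and mobCell are placeholders.

```java
// Hypothetical accounting fragment for a mob-enabled compaction: record that
// one cell of mobCell.getValueLength() bytes was compacted to a mob file,
// then read the counters back for metrics reporting.
mobStore.updateCellsCountCompactedToMob(1);
mobStore.updateCellsSizeCompactedToMob(mobCell.getValueLength());
long cellsMovedToMob = mobStore.getCellsCountCompactedToMob();
long bytesMovedToMob = mobStore.getCellsSizeCompactedToMob();
```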