@InterfaceAudience.Private public class MockHStoreFile extends org.apache.hadoop.hbase.regionserver.HStoreFile
| Modifier and Type | Field and Description |
|---|---|
(package private) long |
ageInDisk |
(package private) boolean |
compactedAway |
(package private) long |
entryCount |
(package private) org.apache.hadoop.hbase.HDFSBlocksDistribution |
hdfsBlocksDistribution |
(package private) boolean |
isMajor |
(package private) boolean |
isRef |
(package private) long |
length |
private Map<byte[],byte[]> |
metadata |
(package private) long |
modificationTime |
(package private) long |
sequenceid |
(package private) byte[] |
splitPoint |
(package private) org.apache.hadoop.hbase.regionserver.TimeRangeTracker |
timeRangeTracker |
BLOOM_FILTER_PARAM_KEY, BLOOM_FILTER_TYPE_KEY, BULKLOAD_TASK_KEY, BULKLOAD_TIME_KEY, COMPACTION_EVENT_KEY, DELETE_FAMILY_COUNT, EARLIEST_PUT_TS, EXCLUDE_FROM_MINOR_COMPACTION_KEY, LAST_BLOOM_KEY, MAJOR_COMPACTION_KEY, MAX_SEQ_ID_KEY, MOB_CELLS_COUNT, SKIP_RESET_SEQ_ID, STORE_FILE_READER_NO_READAHEAD, TIMERANGE_KEY

| Constructor and Description |
|---|
MockHStoreFile(HBaseTestingUtility testUtil,
org.apache.hadoop.fs.Path testPath,
long length,
long ageInDisk,
boolean isRef,
long sequenceid) |
| Modifier and Type | Method and Description |
|---|---|
OptionalLong |
getBulkLoadTimestamp() |
org.apache.hadoop.hbase.HDFSBlocksDistribution |
getHDFSBlockDistribution() |
OptionalLong |
getMaximumTimestamp() |
long |
getMaxSequenceId() |
byte[] |
getMetadataValue(byte[] key) |
OptionalLong |
getMinimumTimestamp() |
long |
getModificationTimestamp() |
long |
getModificationTimeStamp() |
org.apache.hadoop.hbase.regionserver.StoreFileScanner |
getPreadScanner(boolean cacheBlocks,
long readPt,
long scannerOrder,
boolean canOptimizeForNonNullColumn) |
org.apache.hadoop.hbase.regionserver.StoreFileReader |
getReader() |
org.apache.hadoop.hbase.regionserver.StoreFileScanner |
getStreamScanner(boolean canUseDropBehind,
boolean cacheBlocks,
boolean isCompaction,
long readPt,
long scannerOrder,
boolean canOptimizeForNonNullColumn) |
void |
initReader() |
boolean |
isBulkLoadResult() |
boolean |
isCompactedAway() |
boolean |
isMajorCompactionResult() |
boolean |
isReference() |
void |
markCompactedAway() |
(package private) void |
setEntries(long entryCount) |
void |
setIsMajor(boolean isMajor) |
(package private) void |
setLength(long newLen) |
void |
setMetadataValue(byte[] key,
byte[] value) |
(package private) void |
setTimeRangeTracker(org.apache.hadoop.hbase.regionserver.TimeRangeTracker timeRangeTracker) |
closeStoreFile, deleteStoreFile, excludeFromMinorCompaction, getCacheConf, getCompactedStoreFiles, getComparator, getEncodedPath, getFileInfo, getFirstKey, getLastKey, getMaxMemStoreTS, getPath, getQualifiedPath, getRefCount, isHFile, isReferencedInReads, toString, toStringDetailed

long length
boolean isRef
long ageInDisk
long sequenceid
byte[] splitPoint
org.apache.hadoop.hbase.regionserver.TimeRangeTracker timeRangeTracker
long entryCount
boolean isMajor
org.apache.hadoop.hbase.HDFSBlocksDistribution hdfsBlocksDistribution
long modificationTime
boolean compactedAway
MockHStoreFile(HBaseTestingUtility testUtil, org.apache.hadoop.fs.Path testPath, long length, long ageInDisk, boolean isRef, long sequenceid) throws IOException
Throws:
IOException

void setLength(long newLen)
public long getMaxSequenceId()
getMaxSequenceId in interface org.apache.hadoop.hbase.regionserver.StoreFile
getMaxSequenceId in class org.apache.hadoop.hbase.regionserver.HStoreFile

public boolean isMajorCompactionResult()
isMajorCompactionResult in interface org.apache.hadoop.hbase.regionserver.StoreFile
isMajorCompactionResult in class org.apache.hadoop.hbase.regionserver.HStoreFile

public void setIsMajor(boolean isMajor)
public boolean isReference()
isReference in interface org.apache.hadoop.hbase.regionserver.StoreFile
isReference in class org.apache.hadoop.hbase.regionserver.HStoreFile

public boolean isBulkLoadResult()
isBulkLoadResult in interface org.apache.hadoop.hbase.regionserver.StoreFile
isBulkLoadResult in class org.apache.hadoop.hbase.regionserver.HStoreFile

public byte[] getMetadataValue(byte[] key)
getMetadataValue in class org.apache.hadoop.hbase.regionserver.HStoreFile

public void setMetadataValue(byte[] key, byte[] value)
void setTimeRangeTracker(org.apache.hadoop.hbase.regionserver.TimeRangeTracker timeRangeTracker)
void setEntries(long entryCount)
public OptionalLong getMinimumTimestamp()
getMinimumTimestamp in interface org.apache.hadoop.hbase.regionserver.StoreFile
getMinimumTimestamp in class org.apache.hadoop.hbase.regionserver.HStoreFile

public OptionalLong getMaximumTimestamp()
getMaximumTimestamp in interface org.apache.hadoop.hbase.regionserver.StoreFile
getMaximumTimestamp in class org.apache.hadoop.hbase.regionserver.HStoreFile

public void markCompactedAway()
markCompactedAway in class org.apache.hadoop.hbase.regionserver.HStoreFile

public boolean isCompactedAway()
isCompactedAway in class org.apache.hadoop.hbase.regionserver.HStoreFile

public long getModificationTimeStamp()
getModificationTimeStamp in interface org.apache.hadoop.hbase.regionserver.StoreFile
getModificationTimeStamp in class org.apache.hadoop.hbase.regionserver.HStoreFile

public long getModificationTimestamp()
getModificationTimestamp in interface org.apache.hadoop.hbase.regionserver.StoreFile
getModificationTimestamp in class org.apache.hadoop.hbase.regionserver.HStoreFile

public org.apache.hadoop.hbase.HDFSBlocksDistribution getHDFSBlockDistribution()
getHDFSBlockDistribution in class org.apache.hadoop.hbase.regionserver.HStoreFile

public void initReader() throws IOException
initReader in class org.apache.hadoop.hbase.regionserver.HStoreFile
Throws:
IOException

public org.apache.hadoop.hbase.regionserver.StoreFileScanner getPreadScanner(boolean cacheBlocks, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn)
getPreadScanner in class org.apache.hadoop.hbase.regionserver.HStoreFile

public org.apache.hadoop.hbase.regionserver.StoreFileScanner getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks, boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) throws IOException
getStreamScanner in class org.apache.hadoop.hbase.regionserver.HStoreFile
Throws:
IOException

public org.apache.hadoop.hbase.regionserver.StoreFileReader getReader()
getReader in class org.apache.hadoop.hbase.regionserver.HStoreFile

public OptionalLong getBulkLoadTimestamp()
getBulkLoadTimestamp in interface org.apache.hadoop.hbase.regionserver.StoreFile
getBulkLoadTimestamp in class org.apache.hadoop.hbase.regionserver.HStoreFile

Copyright © 2007–2020 The Apache Software Foundation. All rights reserved.