@InterfaceAudience.Private
public class MockHStoreFile extends org.apache.hadoop.hbase.regionserver.HStoreFile
Field Summary

Modifier and Type | Field and Description |
---|---|
(package private) long | ageInDisk |
(package private) boolean | compactedAway |
(package private) long | entryCount |
(package private) org.apache.hadoop.hbase.HDFSBlocksDistribution | hdfsBlocksDistribution |
(package private) boolean | isMajor |
(package private) boolean | isRef |
(package private) long | length |
private Map<byte[],byte[]> | metadata |
(package private) long | modificationTime |
(package private) long | sequenceid |
(package private) byte[] | splitPoint |
(package private) org.apache.hadoop.hbase.regionserver.TimeRangeTracker | timeRangeTracker |
Fields inherited from class org.apache.hadoop.hbase.regionserver.HStoreFile:
BLOOM_FILTER_PARAM_KEY, BLOOM_FILTER_TYPE_KEY, BULKLOAD_TASK_KEY, BULKLOAD_TIME_KEY, COMPACTION_EVENT_KEY, DELETE_FAMILY_COUNT, EARLIEST_PUT_TS, EXCLUDE_FROM_MINOR_COMPACTION_KEY, LAST_BLOOM_KEY, MAJOR_COMPACTION_KEY, MAX_SEQ_ID_KEY, MOB_CELLS_COUNT, MOB_FILE_REFS, NULL_VALUE, SKIP_RESET_SEQ_ID, TIMERANGE_KEY
Constructor Summary

Constructor and Description |
---|
MockHStoreFile(HBaseTestingUtility testUtil, org.apache.hadoop.fs.Path testPath, long length, long ageInDisk, boolean isRef, long sequenceid) |
Method Summary

Modifier and Type | Method and Description |
---|---|
OptionalLong | getBulkLoadTimestamp() |
org.apache.hadoop.hbase.HDFSBlocksDistribution | getHDFSBlockDistribution() |
OptionalLong | getMaximumTimestamp() |
long | getMaxSequenceId() |
byte[] | getMetadataValue(byte[] key) |
OptionalLong | getMinimumTimestamp() |
long | getModificationTimestamp() |
long | getModificationTimeStamp() |
org.apache.hadoop.hbase.regionserver.StoreFileScanner | getPreadScanner(boolean cacheBlocks, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) |
org.apache.hadoop.hbase.regionserver.StoreFileReader | getReader() |
org.apache.hadoop.hbase.regionserver.StoreFileScanner | getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks, boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) |
void | initReader() |
boolean | isBulkLoadResult() |
boolean | isCompactedAway() |
boolean | isMajorCompactionResult() |
boolean | isReference() |
void | markCompactedAway() |
(package private) void | setEntries(long entryCount) |
void | setIsMajor(boolean isMajor) |
(package private) void | setLength(long newLen) |
void | setMetadataValue(byte[] key, byte[] value) |
(package private) void | setTimeRangeTracker(org.apache.hadoop.hbase.regionserver.TimeRangeTracker timeRangeTracker) |
Methods inherited from class org.apache.hadoop.hbase.regionserver.HStoreFile:
closeStoreFile, deleteStoreFile, excludeFromMinorCompaction, getCacheConf, getCompactedStoreFiles, getComparator, getEncodedPath, getFileInfo, getFirstKey, getLastKey, getMaxMemStoreTS, getPath, getQualifiedPath, getRefCount, isHFile, isReferencedInReads, toString, toStringDetailed
Field Detail

long length
boolean isRef
long ageInDisk
long sequenceid
byte[] splitPoint
org.apache.hadoop.hbase.regionserver.TimeRangeTracker timeRangeTracker
long entryCount
boolean isMajor
org.apache.hadoop.hbase.HDFSBlocksDistribution hdfsBlocksDistribution
long modificationTime
boolean compactedAway
Constructor Detail

MockHStoreFile(HBaseTestingUtility testUtil, org.apache.hadoop.fs.Path testPath, long length, long ageInDisk, boolean isRef, long sequenceid) throws IOException
Throws: IOException
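For orientation, a minimal construction sketch; it is assumed to live in a test class inside the org.apache.hadoop.hbase.regionserver package, and the variable names and literal argument values are illustrative, not part of this API:

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;

// Sketch only: builds a MockHStoreFile with arbitrary test values.
static MockHStoreFile newMockFile() throws IOException {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();          // throwaway test utility
  Path testPath = testUtil.getDataTestDir("MockHStoreFileExample");  // any local test path
  // length = 1024, ageInDisk = 0, isRef = false, sequenceid = 42 -- arbitrary values for the mock.
  return new MockHStoreFile(testUtil, testPath, 1024L, 0L, false, 42L);
}
```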
Method Detail

void setLength(long newLen)
public long getMaxSequenceId()
Specified by: getMaxSequenceId in interface org.apache.hadoop.hbase.regionserver.StoreFile
Overrides: getMaxSequenceId in class org.apache.hadoop.hbase.regionserver.HStoreFile
public boolean isMajorCompactionResult()
Specified by: isMajorCompactionResult in interface org.apache.hadoop.hbase.regionserver.StoreFile
Overrides: isMajorCompactionResult in class org.apache.hadoop.hbase.regionserver.HStoreFile
public void setIsMajor(boolean isMajor)
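A short usage sketch for the major-compaction flag, assuming setIsMajor backs isMajorCompactionResult through the isMajor field and that sf is a mock built as in the construction sketch above:

```java
// Toggle the major-compaction marker on the mock and read it back.
sf.setIsMajor(true);
assert sf.isMajorCompactionResult();
sf.setIsMajor(false);
assert !sf.isMajorCompactionResult();
```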
public boolean isReference()
Specified by: isReference in interface org.apache.hadoop.hbase.regionserver.StoreFile
Overrides: isReference in class org.apache.hadoop.hbase.regionserver.HStoreFile
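The isRef constructor argument presumably drives isReference(); a hypothetical check, reusing testUtil and testPath from the construction sketch:

```java
// A mock built with isRef = true should report itself as a reference file.
MockHStoreFile refFile = new MockHStoreFile(testUtil, testPath, 512L, 0L, /* isRef */ true, 7L);
assert refFile.isReference();
```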
public boolean isBulkLoadResult()
Specified by: isBulkLoadResult in interface org.apache.hadoop.hbase.regionserver.StoreFile
Overrides: isBulkLoadResult in class org.apache.hadoop.hbase.regionserver.HStoreFile
public byte[] getMetadataValue(byte[] key)
Overrides: getMetadataValue in class org.apache.hadoop.hbase.regionserver.HStoreFile
public void setMetadataValue(byte[] key, byte[] value)
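A small sketch of the metadata accessors, assuming the mock's metadata map round-trips values set through setMetadataValue; the key and value below are illustrative, not HStoreFile metadata constants:

```java
import org.apache.hadoop.hbase.util.Bytes;

// Store an arbitrary metadata entry on the mock file and read it back.
byte[] key = Bytes.toBytes("example-key");
sf.setMetadataValue(key, Bytes.toBytes("example-value"));
byte[] value = sf.getMetadataValue(key);
System.out.println(Bytes.toString(value));   // expected to print "example-value"
```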
void setTimeRangeTracker(org.apache.hadoop.hbase.regionserver.TimeRangeTracker timeRangeTracker)
void setEntries(long entryCount)
public OptionalLong getMinimumTimestamp()
Specified by: getMinimumTimestamp in interface org.apache.hadoop.hbase.regionserver.StoreFile
Overrides: getMinimumTimestamp in class org.apache.hadoop.hbase.regionserver.HStoreFile
public OptionalLong getMaximumTimestamp()
Specified by: getMaximumTimestamp in interface org.apache.hadoop.hbase.regionserver.StoreFile
Overrides: getMaximumTimestamp in class org.apache.hadoop.hbase.regionserver.HStoreFile
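The package-private setters above feed the timestamp accessors; a sketch assuming the TimeRangeTracker set here is what getMinimumTimestamp()/getMaximumTimestamp() report. The tracker's construction is omitted because the TimeRangeTracker factory API varies across HBase versions:

```java
import java.util.OptionalLong;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;

// Must live in the org.apache.hadoop.hbase.regionserver package:
// setTimeRangeTracker and setEntries are package-private.
static void applyTimeRange(MockHStoreFile sf, TimeRangeTracker tracker, long entries) {
  sf.setTimeRangeTracker(tracker);  // assumed to back the min/max timestamp accessors
  sf.setEntries(entries);           // entry count reported by the mock
  OptionalLong min = sf.getMinimumTimestamp();
  OptionalLong max = sf.getMaximumTimestamp();
  System.out.println("time range: " + min.orElse(-1L) + " .. " + max.orElse(-1L));
}
```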
public void markCompactedAway()
Overrides: markCompactedAway in class org.apache.hadoop.hbase.regionserver.HStoreFile
public boolean isCompactedAway()
Overrides: isCompactedAway in class org.apache.hadoop.hbase.regionserver.HStoreFile
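A sketch of the compaction lifecycle flag, assuming a freshly built mock starts out not compacted away:

```java
// Mark the mock as compacted away and verify the flag flips.
assert !sf.isCompactedAway();   // assumed initial state
sf.markCompactedAway();
assert sf.isCompactedAway();
```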
public long getModificationTimeStamp()
Specified by: getModificationTimeStamp in interface org.apache.hadoop.hbase.regionserver.StoreFile
Overrides: getModificationTimeStamp in class org.apache.hadoop.hbase.regionserver.HStoreFile
public long getModificationTimestamp()
Specified by: getModificationTimestamp in interface org.apache.hadoop.hbase.regionserver.StoreFile
Overrides: getModificationTimestamp in class org.apache.hadoop.hbase.regionserver.HStoreFile
public org.apache.hadoop.hbase.HDFSBlocksDistribution getHDFSBlockDistribution()
Overrides: getHDFSBlockDistribution in class org.apache.hadoop.hbase.regionserver.HStoreFile
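A brief sketch reading the mock's block distribution, assuming the hdfsBlocksDistribution field is initialised to a non-null HDFSBlocksDistribution:

```java
import org.apache.hadoop.hbase.HDFSBlocksDistribution;

// Inspect the (mock) HDFS block distribution attached to the file.
HDFSBlocksDistribution dist = sf.getHDFSBlockDistribution();
System.out.println("unique blocks total weight: " + dist.getUniqueBlocksTotalWeight());
```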
public void initReader() throws IOException
Overrides: initReader in class org.apache.hadoop.hbase.regionserver.HStoreFile
Throws: IOException
public org.apache.hadoop.hbase.regionserver.StoreFileScanner getPreadScanner(boolean cacheBlocks, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn)
Overrides: getPreadScanner in class org.apache.hadoop.hbase.regionserver.HStoreFile
public org.apache.hadoop.hbase.regionserver.StoreFileScanner getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks, boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) throws IOException
Overrides: getStreamScanner in class org.apache.hadoop.hbase.regionserver.HStoreFile
Throws: IOException
public org.apache.hadoop.hbase.regionserver.StoreFileReader getReader()
Overrides: getReader in class org.apache.hadoop.hbase.regionserver.HStoreFile
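Call shapes for the reader and scanner accessors; what the mock actually returns (stubbed objects, possibly null) is an implementation detail of MockHStoreFile, and the argument values below are placeholders. The snippet assumes it runs inside a method that declares throws IOException:

```java
import org.apache.hadoop.hbase.regionserver.StoreFileReader;
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;

sf.initReader();                                 // may be a lightweight no-op in the mock
StoreFileReader reader = sf.getReader();
StoreFileScanner pread = sf.getPreadScanner(
    false,            // cacheBlocks
    Long.MAX_VALUE,   // readPt
    0L,               // scannerOrder
    false);           // canOptimizeForNonNullColumn
StoreFileScanner stream = sf.getStreamScanner(
    false,            // canUseDropBehind
    false,            // cacheBlocks
    false,            // isCompaction
    Long.MAX_VALUE,   // readPt
    0L,               // scannerOrder
    false);           // canOptimizeForNonNullColumn
```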
public OptionalLong getBulkLoadTimestamp()
Specified by: getBulkLoadTimestamp in interface org.apache.hadoop.hbase.regionserver.StoreFile
Overrides: getBulkLoadTimestamp in class org.apache.hadoop.hbase.regionserver.HStoreFile
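Finally, a sketch of the remaining accessors; getBulkLoadTimestamp() returns an OptionalLong that may well be empty for a mock with no bulk-load metadata, and getMaxSequenceId() presumably reports the sequenceid passed to the constructor:

```java
// Inspect the bulk-load timestamp (may be absent) and the maximum sequence id.
long bulkLoadTs = sf.getBulkLoadTimestamp().orElse(-1L);   // -1 stands in for "not set"
long maxSeqId = sf.getMaxSequenceId();
System.out.println("bulk-load ts: " + bulkLoadTs + ", max seq id: " + maxSeqId);
```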