@InterfaceAudience.LimitedPrivate(value={"Coprocesssor","Phoenix"}) public static class StoreFile.Reader extends Object
Modifier and Type | Field and Description |
---|---|
protected BloomType | bloomFilterType |
protected BloomFilter | deleteFamilyBloomFilter |
protected BloomFilter | generalBloomFilter |
protected long | sequenceID |
protected TimeRange | timeRange |
Constructor and Description |
---|
StoreFile.Reader(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path, CacheConfig cacheConf, org.apache.hadoop.conf.Configuration conf) |
StoreFile.Reader(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, org.apache.hadoop.conf.Configuration conf) |
Modifier and Type | Method and Description |
---|---|
void | close(boolean evictOnClose) |
BloomType | getBloomFilterType() |
KeyValue.KVComparator | getComparator() |
long | getDeleteFamilyCnt() |
long | getEntries() |
long | getFilterEntries() The number of Bloom filter entries in this store file, or an estimate thereof, if the Bloom filter is not loaded. |
byte[] | getFirstKey() |
int | getHFileMinorVersion() |
HFile.Reader | getHFileReader() |
int | getHFileVersion() |
byte[] | getLastKey() |
byte[] | getLastRowKey() |
long | getMaxTimestamp() |
HFileScanner | getScanner(boolean cacheBlocks, boolean pread) Deprecated. Do not write further code that depends on this call. Instead use getStoreFileScanner(), which uses the StoreFileScanner class/interface, the preferred way to scan a store with higher-level concepts. |
HFileScanner | getScanner(boolean cacheBlocks, boolean pread, boolean isCompaction) Deprecated. Do not write further code that depends on this call. Instead use getStoreFileScanner(), which uses the StoreFileScanner class/interface, the preferred way to scan a store with higher-level concepts. |
long | getSequenceID() |
StoreFileScanner | getStoreFileScanner(boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) Get a scanner to scan over this StoreFile. |
long | getTotalBloomSize() |
long | getTotalUncompressedBytes() |
long | indexSize() |
boolean | isBulkLoaded() |
boolean | isCompactedAway() |
boolean | isPrimaryReplicaReader() |
boolean | isReferencedInReads() |
long | length() |
void | loadBloomfilter() |
Map<byte[],byte[]> | loadFileInfo() |
byte[] | midkey() |
boolean | passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, int rowLen) |
boolean | passesGeneralBloomFilter(byte[] row, int rowOffset, int rowLen, byte[] col, int colOffset, int colLen) A method for checking Bloom filters. |
boolean | passesKeyRangeFilter(Scan scan) Checks whether the given scan rowkey range overlaps with the current store file's key range. |
void | setBulkLoaded(boolean bulkLoadResult) |
void | setDeleteFamilyBloomFilterFaulty() |
void | setGeneralBloomFilterFaulty() |
void | setReplicaStoreFile(boolean isPrimaryReplicaStoreFile) |
void | setSequenceID(long sequenceID) |
protected BloomFilter generalBloomFilter
protected BloomFilter deleteFamilyBloomFilter
protected BloomType bloomFilterType
protected TimeRange timeRange
protected long sequenceID
public StoreFile.Reader(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path, CacheConfig cacheConf, org.apache.hadoop.conf.Configuration conf) throws IOException
Throws: IOException
public StoreFile.Reader(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, org.apache.hadoop.conf.Configuration conf) throws IOException
Throws: IOException
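By way of illustration, a minimal sketch of opening a reader with the first constructor and releasing it with close(boolean). The class name and the HFile path are assumptions made for this example, and since StoreFile.Reader is LimitedPrivate this pattern only belongs in coprocessor- or Phoenix-level code:

```java
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.StoreFile;

public class StoreFileReaderOpenSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path hfilePath = new Path("/hbase/data/default/t1/r1/cf/hfile1"); // hypothetical HFile path
    CacheConfig cacheConf = new CacheConfig(conf);

    StoreFile.Reader reader = new StoreFile.Reader(fs, hfilePath, cacheConf, conf);
    try {
      Map<byte[], byte[]> fileInfo = reader.loadFileInfo(); // HFile metadata entries
      System.out.println("entries=" + reader.getEntries()
          + " length=" + reader.length()
          + " fileInfo keys=" + fileInfo.size());
    } finally {
      reader.close(true); // evictOnClose: drop this file's blocks from the block cache
    }
  }
}
```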
public void setReplicaStoreFile(boolean isPrimaryReplicaStoreFile)
public boolean isPrimaryReplicaReader()
public KeyValue.KVComparator getComparator()
public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn)
Parameters:
cacheBlocks - should this scanner cache blocks?
pread - use pread (for highly concurrent small readers)
isCompaction - is scanner being used for compaction?
scannerOrder - order of this scanner relative to other scanners. See KeyValueScanner.getScannerOrder().
canOptimizeForNonNullColumn - true if we can make sure there is no null column, otherwise false. This is a hint for optimization.
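A usage sketch for getStoreFileScanner(); the wrapper class, the dumpStoreFile helper, and the caller-supplied readPt are assumptions made for the example, and the flag values are simply one reasonable choice for a single full-file scan:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;

public class StoreFileScanSketch {
  /** Prints every cell in the file that is visible at the given read point. */
  static void dumpStoreFile(StoreFile.Reader reader, long readPt) throws IOException {
    StoreFileScanner scanner = reader.getStoreFileScanner(
        true,   // cacheBlocks: cache the blocks this scanner reads
        false,  // pread: positional reads are meant for many small concurrent readers
        false,  // isCompaction: this is not a compaction scan
        readPt, // read point supplied by the caller
        0,      // scannerOrder: only one scanner in this example
        false); // canOptimizeForNonNullColumn: no guarantee here, so pass false
    try {
      if (scanner.seek(KeyValue.LOWESTKEY)) { // position at the first cell in the file
        for (Cell cell = scanner.next(); cell != null; cell = scanner.next()) {
          System.out.println(cell);
        }
      }
    } finally {
      scanner.close();
    }
  }
}
```

In real region server code the read point comes from the region's MVCC and scannerOrder distinguishes multiple store files that may hold identical cells; both are simplified here.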
public boolean isReferencedInReads()
public boolean isCompactedAway()
@Deprecated public HFileScanner getScanner(boolean cacheBlocks, boolean pread)
Parameters:
cacheBlocks - should we cache the blocks?
pread - use pread (for concurrent small readers)
@Deprecated public HFileScanner getScanner(boolean cacheBlocks, boolean pread, boolean isCompaction)
Parameters:
cacheBlocks - should we cache the blocks?
pread - use pread (for concurrent small readers)
isCompaction - is scanner being used for compaction?
public void close(boolean evictOnClose) throws IOException
Throws: IOException
public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, int rowLen)
public boolean passesGeneralBloomFilter(byte[] row, int rowOffset, int rowLen, byte[] col, int colOffset, int colLen)
Parameters: row, rowOffset, rowLen, col, colOffset, colLen
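To show how a caller might combine this check with passesKeyRangeFilter(Scan) (documented just below) to decide whether a file is worth reading at all, here is a sketch; the class and method names are invented for the example, and it assumes a single-row read whose row and qualifier are passed in separately:

```java
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.StoreFile;

public class StoreFilePrecheckSketch {
  /**
   * Returns true if the file might contain data for the given single-row read:
   * first a cheap rowkey-range overlap test, then the general Bloom filter.
   * A true result only means "possibly present"; Bloom filters can return
   * false positives but never false negatives.
   */
  static boolean worthReading(StoreFile.Reader reader, Scan scan,
      byte[] row, byte[] qualifier) {
    if (!reader.passesKeyRangeFilter(scan)) {
      return false; // the scan's rowkey range does not overlap this file's key range
    }
    reader.loadBloomfilter(); // load the Bloom filter data if it is not loaded yet
    return reader.passesGeneralBloomFilter(
        row, 0, row.length,              // row key with its offset and length
        qualifier, 0, qualifier.length); // column qualifier (used for ROWCOL Bloom filters)
  }
}
```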
public boolean passesKeyRangeFilter(Scan scan)
Parameters: scan - the scan specification. Used to determine the rowkey range.
public Map<byte[],byte[]> loadFileInfo() throws IOException
Throws: IOException
public void loadBloomfilter()
public long getFilterEntries()
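The method summary above notes that getFilterEntries() returns the number of Bloom filter entries in the file, or an estimate thereof if the Bloom filter is not loaded. A small sketch (class and method names are assumptions) that prints the count before and after loadBloomfilter():

```java
import org.apache.hadoop.hbase.regionserver.StoreFile;

public class BloomStatsSketch {
  static void printBloomStats(StoreFile.Reader reader) {
    System.out.println("filter entries (possibly estimated): " + reader.getFilterEntries());
    reader.loadBloomfilter(); // after this, the count reflects the loaded Bloom filter rather than an estimate
    System.out.println("filter entries (filter loaded): " + reader.getFilterEntries());
    System.out.println("bloom size=" + reader.getTotalBloomSize()
        + " type=" + reader.getBloomFilterType());
  }
}
```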
public void setGeneralBloomFilterFaulty()
public void setDeleteFamilyBloomFilterFaulty()
public byte[] getLastKey()
public byte[] getLastRowKey()
public byte[] midkey() throws IOException
Throws: IOException
public long length()
public long getTotalUncompressedBytes()
public long getEntries()
public long getDeleteFamilyCnt()
public byte[] getFirstKey()
public long indexSize()
public BloomType getBloomFilterType()
public long getSequenceID()
public void setSequenceID(long sequenceID)
public void setBulkLoaded(boolean bulkLoadResult)
public boolean isBulkLoaded()
public long getTotalBloomSize()
public int getHFileVersion()
public int getHFileMinorVersion()
public HFile.Reader getHFileReader()
public long getMaxTimestamp()