Package org.apache.hadoop.hbase.io.hfile
Class TestHFile
java.lang.Object
org.apache.hadoop.hbase.io.hfile.TestHFile
Tests HFile features.
-
Field Summary
FieldsModifier and TypeFieldDescriptionprivate static org.apache.hadoop.hbase.io.hfile.CacheConfigstatic final HBaseClassTestRuleprivate static org.apache.hadoop.conf.Configurationprivate static org.apache.hadoop.fs.FileSystemprivate static Stringprivate static final org.slf4j.Loggerprivate final intprivate static final intprivate static Stringprivate static final HBaseTestingUtilityorg.junit.rules.TestName -
Constructor Summary
Constructors -
Method Summary
Modifier and TypeMethodDescription(package private) voidbasicWithSomeCodec(String codec, boolean useTags) test none codecsprivate org.apache.hadoop.fs.FSDataOutputStreamcreateFSOutput(org.apache.hadoop.fs.Path name) static org.apache.hadoop.hbase.io.hfile.HFile.ReadercreateReaderFromStream(org.apache.hadoop.hbase.io.hfile.ReaderContext context, org.apache.hadoop.hbase.io.hfile.CacheConfig cacheConf, org.apache.hadoop.conf.Configuration conf) private voidfillByteBuffAllocator(org.apache.hadoop.hbase.io.ByteBuffAllocator alloc, int bufCount) static org.apache.hadoop.hbase.KeyValue.TypegenerateKeyType(Random rand) private org.apache.hadoop.hbase.CellgetCell(byte[] row, byte[] family, byte[] qualifier) private byte[]getSomeKey(int rowId) private org.apache.hadoop.hbase.io.ByteBuffAllocatorinitAllocator(boolean reservoirEnabled, int bufSize, int bufCount, int minAllocSize) private org.apache.hadoop.hbase.io.hfile.BlockCacheinitCombinedBlockCache(String l1CachePolicy) private voidmetablocks(String compress) private voidreadAllRecords(org.apache.hadoop.hbase.io.hfile.HFileScanner scanner) private intreadAndCheckbytes(org.apache.hadoop.hbase.io.hfile.HFileScanner scanner, int start, int n) private longreadAtOffsetWithAllocationAsserts(org.apache.hadoop.hbase.io.ByteBuffAllocator alloc, org.apache.hadoop.hbase.io.hfile.HFile.Reader reader, long offset, boolean cacheBlock, org.apache.hadoop.hbase.io.hfile.BlockType blockType, boolean expectSharedMem) private voidreadNumMetablocks(org.apache.hadoop.hbase.io.hfile.HFile.Reader reader, int n) private voidreadStoreFile(org.apache.hadoop.fs.Path storeFilePath, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.io.ByteBuffAllocator alloc) static voidsetUp()private voidsomeReadingWithMetaBlock(org.apache.hadoop.hbase.io.hfile.HFile.Reader reader) private voidsomeTestingWithMetaBlock(org.apache.hadoop.hbase.io.hfile.HFile.Writer writer) voidMake sure the ordinals for our compression algorithms do not change on 
us.voidCreate 0-length hfile and show that it failsvoidvoidCreate a truncated hfile and verify that exception thrown.voidvoidTest empty HFile.voidvoidprotected voidtestHFilefeaturesInternals(boolean useTags) voidvoidvoidTests that we properly allocate from the off-heap or on-heap when CombinedCache is configured.private voidtestReaderBlockAllocationWithCombinedCache(boolean cacheConfigCacheBlockOnRead, boolean cacheBlock, org.apache.hadoop.hbase.io.hfile.BlockType blockType, boolean expectSharedMem) voidTests that we properly allocate from the off-heap or on-heap when LRUCache is configured.private voidtestReaderBlockAllocationWithLRUCache(boolean cacheConfigCacheBlockOnRead, boolean cacheBlock, org.apache.hadoop.hbase.io.hfile.BlockType blockType, boolean expectSharedMem) private voidtestReaderCombinedCache(String l1CachePolicy) voidTest case for CombinedBlockCache with AdaptiveLRU as L1 cachevoidTest case for HBASE-22127 in CombinedBlockCachevoidTest case for HBASE-22127 in LruBlockCache.voidTest case for CombinedBlockCache with AdaptiveLRU as L1 cachevoidvoidTest case for CombinedBlockCache with TinyLfu as L1 cachevoidvoidstatic voidtruncateFile(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst) private voidwriteNumMetablocks(org.apache.hadoop.hbase.io.hfile.HFile.Writer writer, int n) private voidwriteRecords(org.apache.hadoop.hbase.io.hfile.HFile.Writer writer, boolean useTags) private intwriteSomeRecords(org.apache.hadoop.hbase.io.hfile.HFile.Writer writer, int start, int n, boolean useTags) private org.apache.hadoop.fs.Path
-
Field Details
-
CLASS_RULE
-
testName
-
LOG
-
NUM_VALID_KEY_TYPES
-
TEST_UTIL
-
ROOT_DIR
-
minBlockSize
- See Also:
-
localFormatter
-
cacheConf
-
conf
-
fs
-
-
Constructor Details
-
TestHFile
public TestHFile()
-
-
Method Details
-
setUp
- Throws:
Exception
-
createReaderFromStream
public static org.apache.hadoop.hbase.io.hfile.HFile.Reader createReaderFromStream(org.apache.hadoop.hbase.io.hfile.ReaderContext context, org.apache.hadoop.hbase.io.hfile.CacheConfig cacheConf, org.apache.hadoop.conf.Configuration conf) throws IOException - Throws:
IOException
-
initAllocator
private org.apache.hadoop.hbase.io.ByteBuffAllocator initAllocator(boolean reservoirEnabled, int bufSize, int bufCount, int minAllocSize) -
fillByteBuffAllocator
private void fillByteBuffAllocator(org.apache.hadoop.hbase.io.ByteBuffAllocator alloc, int bufCount) -
testReaderWithoutBlockCache
- Throws:
Exception
-
testReaderWithLRUBlockCache
Test case for HBASE-22127 in LruBlockCache.- Throws:
Exception
-
initCombinedBlockCache
-
testReaderWithCombinedBlockCache
Test case for HBASE-22127 in CombinedBlockCache- Throws:
Exception
-
testReaderBlockAllocationWithLRUCache
Tests that we properly allocate from the off-heap or on-heap when LRUCache is configured. In this case, the determining factor is whether we end up caching the block or not. So the below test cases try different permutations of enabling/disabling via CacheConfig and via user request (cacheblocks), along with different expected block types.- Throws:
IOException
-
testReaderBlockAllocationWithLRUCache
private void testReaderBlockAllocationWithLRUCache(boolean cacheConfigCacheBlockOnRead, boolean cacheBlock, org.apache.hadoop.hbase.io.hfile.BlockType blockType, boolean expectSharedMem) throws IOException - Throws:
IOException
-
testReaderBlockAllocationWithCombinedCache
Tests that we properly allocate from the off-heap or on-heap when CombinedCache is configured. In this case, we should always use off-heap unless the block is an INDEX (which always goes to L1 cache which is on-heap)- Throws:
IOException
-
testReaderBlockAllocationWithCombinedCache
private void testReaderBlockAllocationWithCombinedCache(boolean cacheConfigCacheBlockOnRead, boolean cacheBlock, org.apache.hadoop.hbase.io.hfile.BlockType blockType, boolean expectSharedMem) throws IOException - Throws:
IOException
-
readAtOffsetWithAllocationAsserts
private long readAtOffsetWithAllocationAsserts(org.apache.hadoop.hbase.io.ByteBuffAllocator alloc, org.apache.hadoop.hbase.io.hfile.HFile.Reader reader, long offset, boolean cacheBlock, org.apache.hadoop.hbase.io.hfile.BlockType blockType, boolean expectSharedMem) throws IOException - Throws:
IOException
-
readStoreFile
private void readStoreFile(org.apache.hadoop.fs.Path storeFilePath, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.io.ByteBuffAllocator alloc) throws Exception - Throws:
Exception
-
writeStoreFile
- Throws:
IOException
-
generateKeyType
-
testEmptyHFile
Test empty HFile. Test that all features work reasonably when the HFile contains no entries.- Throws:
IOException
-
testCorrupt0LengthHFile
Create 0-length hfile and show that it fails- Throws:
IOException
-
testCorruptOutOfOrderHFileWrite
- Throws:
IOException
-
truncateFile
public static void truncateFile(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst) throws IOException - Throws:
IOException
-
testCorruptTruncatedHFile
Create a truncated hfile and verify that an exception is thrown.- Throws:
IOException
-
writeSomeRecords
private int writeSomeRecords(org.apache.hadoop.hbase.io.hfile.HFile.Writer writer, int start, int n, boolean useTags) throws IOException - Throws:
IOException
-
readAllRecords
private void readAllRecords(org.apache.hadoop.hbase.io.hfile.HFileScanner scanner) throws IOException - Throws:
IOException
-
readAndCheckbytes
private int readAndCheckbytes(org.apache.hadoop.hbase.io.hfile.HFileScanner scanner, int start, int n) throws IOException - Throws:
IOException
-
getSomeKey
-
writeRecords
private void writeRecords(org.apache.hadoop.hbase.io.hfile.HFile.Writer writer, boolean useTags) throws IOException - Throws:
IOException
-
createFSOutput
private org.apache.hadoop.fs.FSDataOutputStream createFSOutput(org.apache.hadoop.fs.Path name) throws IOException - Throws:
IOException
-
basicWithSomeCodec
Tests the NONE compression codec.- Throws:
IOException
-
testTFileFeatures
- Throws:
IOException
-
testHFilefeaturesInternals
- Throws:
IOException
-
writeNumMetablocks
-
someTestingWithMetaBlock
-
readNumMetablocks
private void readNumMetablocks(org.apache.hadoop.hbase.io.hfile.HFile.Reader reader, int n) throws IOException - Throws:
IOException
-
someReadingWithMetaBlock
private void someReadingWithMetaBlock(org.apache.hadoop.hbase.io.hfile.HFile.Reader reader) throws IOException - Throws:
IOException
-
metablocks
- Throws:
Exception
-
testMetaBlocks
- Throws:
Exception
-
testNullMetaBlocks
- Throws:
Exception
-
testCompressionOrdinance
Make sure the ordinals for our compression algorithms do not change on us. -
testShortMidpointSameQual
-
getCell
-
testGetShortMidpoint
-
testDBEShipped
- Throws:
IOException
-
testReaderWithTinyLfuCombinedBlockCache
Test case for CombinedBlockCache with TinyLfu as L1 cache- Throws:
Exception
-
testReaderWithAdaptiveLruCombinedBlockCache
Test case for CombinedBlockCache with AdaptiveLRU as L1 cache- Throws:
Exception
-
testReaderWithLruCombinedBlockCache
Test case for CombinedBlockCache with LruBlockCache as L1 cache- Throws:
Exception
-
testReaderCombinedCache
- Throws:
Exception
-
testHFileContextBuilderWithIndexEncoding
- Throws:
IOException
-