001/** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.io.hfile; 019 020import static org.junit.Assert.assertEquals; 021import static org.junit.Assert.assertFalse; 022import static org.junit.Assert.assertNotEquals; 023import static org.junit.Assert.assertTrue; 024 025import java.io.IOException; 026import java.util.ArrayList; 027import java.util.Collection; 028import java.util.EnumMap; 029import java.util.HashMap; 030import java.util.Iterator; 031import java.util.List; 032import java.util.Map; 033import java.util.Random; 034import org.apache.hadoop.conf.Configuration; 035import org.apache.hadoop.fs.FileSystem; 036import org.apache.hadoop.fs.Path; 037import org.apache.hadoop.hbase.ArrayBackedTag; 038import org.apache.hadoop.hbase.CellComparatorImpl; 039import org.apache.hadoop.hbase.HBaseClassTestRule; 040import org.apache.hadoop.hbase.HBaseCommonTestingUtility; 041import org.apache.hadoop.hbase.HBaseTestingUtility; 042import org.apache.hadoop.hbase.HColumnDescriptor; 043import org.apache.hadoop.hbase.HConstants; 044import org.apache.hadoop.hbase.KeyValue; 045import org.apache.hadoop.hbase.Tag; 046import org.apache.hadoop.hbase.client.Durability; 047import org.apache.hadoop.hbase.client.Put; 048import org.apache.hadoop.hbase.fs.HFileSystem; 049import org.apache.hadoop.hbase.io.compress.Compression; 050import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; 051import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; 052import org.apache.hadoop.hbase.regionserver.BloomType; 053import org.apache.hadoop.hbase.regionserver.HRegion; 054import org.apache.hadoop.hbase.regionserver.StoreFileWriter; 055import org.apache.hadoop.hbase.testclassification.IOTests; 056import org.apache.hadoop.hbase.testclassification.LargeTests; 057import org.apache.hadoop.hbase.util.BloomFilterFactory; 058import org.apache.hadoop.hbase.util.Bytes; 059import org.apache.hadoop.hbase.util.ChecksumType; 060import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; 061import org.junit.After; 062import org.junit.AfterClass; 063import org.junit.Before; 064import org.junit.ClassRule; 065import org.junit.Test; 066import org.junit.experimental.categories.Category; 067import org.junit.runner.RunWith; 068import org.junit.runners.Parameterized; 069import org.junit.runners.Parameterized.Parameters; 070import org.slf4j.Logger; 071import org.slf4j.LoggerFactory; 072 073import org.apache.hbase.thirdparty.com.google.common.collect.Lists; 074 075/** 076 * Tests {@link HFile} cache-on-write functionality for the following block 077 * types: data blocks, non-root index blocks, and Bloom filter blocks. 078 */ 079@RunWith(Parameterized.class) 080@Category({IOTests.class, LargeTests.class}) 081public class TestCacheOnWrite { 082 083 @ClassRule 084 public static final HBaseClassTestRule CLASS_RULE = 085 HBaseClassTestRule.forClass(TestCacheOnWrite.class); 086 087 private static final Logger LOG = LoggerFactory.getLogger(TestCacheOnWrite.class); 088 089 private static final HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU(); 090 private Configuration conf; 091 private CacheConfig cacheConf; 092 private FileSystem fs; 093 private Random rand = new Random(12983177L); 094 private Path storeFilePath; 095 private BlockCache blockCache; 096 private String testDescription; 097 098 private final CacheOnWriteType cowType; 099 private final Compression.Algorithm compress; 100 private final boolean cacheCompressedData; 101 102 private static final int DATA_BLOCK_SIZE = 2048; 103 private static final int NUM_KV = 25000; 104 private static final int INDEX_BLOCK_SIZE = 512; 105 private static final int BLOOM_BLOCK_SIZE = 4096; 106 private static final BloomType BLOOM_TYPE = BloomType.ROWCOL; 107 private static final int CKBYTES = 512; 108 109 /** The number of valid key types possible in a store file */ 110 private static final int NUM_VALID_KEY_TYPES = 111 KeyValue.Type.values().length - 2; 112 113 private static enum CacheOnWriteType { 114 DATA_BLOCKS(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, 115 BlockType.DATA, BlockType.ENCODED_DATA), 116 BLOOM_BLOCKS(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, 117 BlockType.BLOOM_CHUNK), 118 INDEX_BLOCKS(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, 119 BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX); 120 121 private final String confKey; 122 private final BlockType blockType1; 123 private final BlockType blockType2; 124 125 private CacheOnWriteType(String confKey, BlockType blockType) { 126 this(confKey, blockType, blockType); 127 } 128 129 private CacheOnWriteType(String confKey, BlockType blockType1, 130 BlockType blockType2) { 131 this.blockType1 = blockType1; 132 this.blockType2 = blockType2; 133 this.confKey = confKey; 134 } 135 136 public boolean shouldBeCached(BlockType blockType) { 137 return blockType == blockType1 || blockType == blockType2; 138 } 139 140 public void modifyConf(Configuration conf) { 141 for (CacheOnWriteType cowType : CacheOnWriteType.values()) { 142 conf.setBoolean(cowType.confKey, cowType == this); 143 } 144 } 145 } 146 147 public TestCacheOnWrite(CacheOnWriteType cowType, Compression.Algorithm compress, 148 boolean cacheCompressedData, BlockCache blockCache) { 149 this.cowType = cowType; 150 this.compress = compress; 151 this.cacheCompressedData = cacheCompressedData; 152 this.blockCache = blockCache; 153 testDescription = "[cacheOnWrite=" + cowType + ", compress=" + compress + 154 ", cacheCompressedData=" + cacheCompressedData + "]"; 155 LOG.info(testDescription); 156 } 157 158 private static List<BlockCache> getBlockCaches() throws IOException { 159 Configuration conf = TEST_UTIL.getConfiguration(); 160 List<BlockCache> blockcaches = new ArrayList<>(); 161 // default 162 CacheConfig.instantiateBlockCache(conf); 163 blockcaches.add(new CacheConfig(conf).getBlockCache()); 164 165 //set LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME to 2.0f due to HBASE-16287 166 TEST_UTIL.getConfiguration().setFloat(LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, 2.0f); 167 // memory 168 BlockCache lru = new LruBlockCache(128 * 1024 * 1024, 64 * 1024, TEST_UTIL.getConfiguration()); 169 blockcaches.add(lru); 170 171 // bucket cache 172 FileSystem.get(conf).mkdirs(TEST_UTIL.getDataTestDir()); 173 int[] bucketSizes = 174 { INDEX_BLOCK_SIZE, DATA_BLOCK_SIZE, BLOOM_BLOCK_SIZE, 64 * 1024, 128 * 1024 }; 175 BlockCache bucketcache = 176 new BucketCache("offheap", 128 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null); 177 blockcaches.add(bucketcache); 178 return blockcaches; 179 } 180 181 @Parameters 182 public static Collection<Object[]> getParameters() throws IOException { 183 List<Object[]> params = new ArrayList<>(); 184 for (BlockCache blockCache : getBlockCaches()) { 185 for (CacheOnWriteType cowType : CacheOnWriteType.values()) { 186 for (Compression.Algorithm compress : HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) { 187 for (boolean cacheCompressedData : new boolean[] { false, true }) { 188 params.add(new Object[] { cowType, compress, cacheCompressedData, blockCache }); 189 } 190 } 191 } 192 } 193 return params; 194 } 195 196 private void clearBlockCache(BlockCache blockCache) throws InterruptedException { 197 if (blockCache instanceof LruBlockCache) { 198 ((LruBlockCache) blockCache).clearCache(); 199 } else { 200 // BucketCache may not return all cached blocks(blocks in write queue), so check it here. 201 for (int clearCount = 0; blockCache.getBlockCount() > 0; clearCount++) { 202 if (clearCount > 0) { 203 LOG.warn("clear block cache " + blockCache + " " + clearCount + " times, " 204 + blockCache.getBlockCount() + " blocks remaining"); 205 Thread.sleep(10); 206 } 207 for (CachedBlock block : Lists.newArrayList(blockCache)) { 208 BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset()); 209 // CombinedBucketCache may need evict two times. 210 for (int evictCount = 0; blockCache.evictBlock(key); evictCount++) { 211 if (evictCount > 1) { 212 LOG.warn("evict block " + block + " in " + blockCache + " " + evictCount 213 + " times, maybe a bug here"); 214 } 215 } 216 } 217 } 218 } 219 } 220 221 @Before 222 public void setUp() throws IOException { 223 conf = TEST_UTIL.getConfiguration(); 224 this.conf.set("dfs.datanode.data.dir.perm", "700"); 225 conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE); 226 conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, 227 BLOOM_BLOCK_SIZE); 228 conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData); 229 cowType.modifyConf(conf); 230 fs = HFileSystem.get(conf); 231 cacheConf = 232 new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA), 233 cowType.shouldBeCached(BlockType.LEAF_INDEX), 234 cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData, 235 false, false); 236 } 237 238 @After 239 public void tearDown() throws IOException, InterruptedException { 240 clearBlockCache(blockCache); 241 } 242 243 @AfterClass 244 public static void afterClass() throws IOException { 245 TEST_UTIL.cleanupTestDir(); 246 } 247 248 private void testStoreFileCacheOnWriteInternals(boolean useTags) throws IOException { 249 writeStoreFile(useTags); 250 readStoreFile(useTags); 251 } 252 253 private void readStoreFile(boolean useTags) throws IOException { 254 HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, true, conf); 255 LOG.info("HFile information: " + reader); 256 HFileContext meta = new HFileContextBuilder().withCompression(compress) 257 .withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL) 258 .withBlockSize(DATA_BLOCK_SIZE) 259 .withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()) 260 .withIncludesTags(useTags).build(); 261 final boolean cacheBlocks = false; 262 final boolean pread = false; 263 HFileScanner scanner = reader.getScanner(cacheBlocks, pread); 264 assertTrue(testDescription, scanner.seekTo()); 265 266 long offset = 0; 267 EnumMap<BlockType, Integer> blockCountByType = new EnumMap<>(BlockType.class); 268 269 DataBlockEncoding encodingInCache = NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding(); 270 List<Long> cachedBlocksOffset = new ArrayList<>(); 271 Map<Long, HFileBlock> cachedBlocks = new HashMap<>(); 272 while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) { 273 // Flags: don't cache the block, use pread, this is not a compaction. 274 // Also, pass null for expected block type to avoid checking it. 275 HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, 276 encodingInCache); 277 BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), 278 offset); 279 HFileBlock fromCache = (HFileBlock) blockCache.getBlock(blockCacheKey, true, false, true); 280 boolean isCached = fromCache != null; 281 cachedBlocksOffset.add(offset); 282 cachedBlocks.put(offset, fromCache); 283 boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType()); 284 assertTrue("shouldBeCached: " + shouldBeCached+ "\n" + 285 "isCached: " + isCached + "\n" + 286 "Test description: " + testDescription + "\n" + 287 "block: " + block + "\n" + 288 "encodingInCache: " + encodingInCache + "\n" + 289 "blockCacheKey: " + blockCacheKey, 290 shouldBeCached == isCached); 291 if (isCached) { 292 if (cacheConf.shouldCacheCompressed(fromCache.getBlockType().getCategory())) { 293 if (compress != Compression.Algorithm.NONE) { 294 assertFalse(fromCache.isUnpacked()); 295 } 296 fromCache = fromCache.unpack(meta, reader.getUncachedBlockReader()); 297 } else { 298 assertTrue(fromCache.isUnpacked()); 299 } 300 // block we cached at write-time and block read from file should be identical 301 assertEquals(block.getChecksumType(), fromCache.getChecksumType()); 302 assertEquals(block.getBlockType(), fromCache.getBlockType()); 303 assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType()); 304 assertEquals(block.getOnDiskSizeWithHeader(), fromCache.getOnDiskSizeWithHeader()); 305 assertEquals(block.getOnDiskSizeWithoutHeader(), fromCache.getOnDiskSizeWithoutHeader()); 306 assertEquals( 307 block.getUncompressedSizeWithoutHeader(), fromCache.getUncompressedSizeWithoutHeader()); 308 } 309 offset += block.getOnDiskSizeWithHeader(); 310 BlockType bt = block.getBlockType(); 311 Integer count = blockCountByType.get(bt); 312 blockCountByType.put(bt, (count == null ? 0 : count) + 1); 313 } 314 315 LOG.info("Block count by type: " + blockCountByType); 316 String countByType = blockCountByType.toString(); 317 if (useTags) { 318 assertEquals("{" + BlockType.DATA 319 + "=2663, LEAF_INDEX=297, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=32}", countByType); 320 } else { 321 assertEquals("{" + BlockType.DATA 322 + "=2498, LEAF_INDEX=278, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=31}", countByType); 323 } 324 325 // iterate all the keyvalue from hfile 326 while (scanner.next()) { 327 scanner.getCell(); 328 } 329 Iterator<Long> iterator = cachedBlocksOffset.iterator(); 330 while(iterator.hasNext()) { 331 Long entry = iterator.next(); 332 BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), 333 entry); 334 HFileBlock hFileBlock = cachedBlocks.get(entry); 335 if (hFileBlock != null) { 336 // call return twice because for the isCache cased the counter would have got incremented 337 // twice 338 blockCache.returnBlock(blockCacheKey, hFileBlock); 339 if(cacheCompressedData) { 340 if (this.compress == Compression.Algorithm.NONE 341 || cowType == CacheOnWriteType.INDEX_BLOCKS 342 || cowType == CacheOnWriteType.BLOOM_BLOCKS) { 343 blockCache.returnBlock(blockCacheKey, hFileBlock); 344 } 345 } else { 346 blockCache.returnBlock(blockCacheKey, hFileBlock); 347 } 348 } 349 } 350 scanner.shipped(); 351 reader.close(); 352 } 353 354 public static KeyValue.Type generateKeyType(Random rand) { 355 if (rand.nextBoolean()) { 356 // Let's make half of KVs puts. 357 return KeyValue.Type.Put; 358 } else { 359 KeyValue.Type keyType = KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)]; 360 if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) { 361 throw new RuntimeException("Generated an invalid key type: " + keyType + ". " 362 + "Probably the layout of KeyValue.Type has changed."); 363 } 364 return keyType; 365 } 366 } 367 368 private void writeStoreFile(boolean useTags) throws IOException { 369 Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), 370 "test_cache_on_write"); 371 HFileContext meta = new HFileContextBuilder().withCompression(compress) 372 .withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL) 373 .withBlockSize(DATA_BLOCK_SIZE) 374 .withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()) 375 .withIncludesTags(useTags).build(); 376 StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs) 377 .withOutputDir(storeFileParentDir).withComparator(CellComparatorImpl.COMPARATOR) 378 .withFileContext(meta) 379 .withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build(); 380 byte[] cf = Bytes.toBytes("fam"); 381 for (int i = 0; i < NUM_KV; ++i) { 382 byte[] row = RandomKeyValueUtil.randomOrderedKey(rand, i); 383 byte[] qualifier = RandomKeyValueUtil.randomRowOrQualifier(rand); 384 byte[] value = RandomKeyValueUtil.randomValue(rand); 385 KeyValue kv; 386 if(useTags) { 387 Tag t = new ArrayBackedTag((byte) 1, "visibility"); 388 List<Tag> tagList = new ArrayList<>(); 389 tagList.add(t); 390 Tag[] tags = new Tag[1]; 391 tags[0] = t; 392 kv = 393 new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length, 394 Math.abs(rand.nextLong()), generateKeyType(rand), value, 0, value.length, tagList); 395 } else { 396 kv = 397 new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length, 398 Math.abs(rand.nextLong()), generateKeyType(rand), value, 0, value.length); 399 } 400 sfw.append(kv); 401 } 402 403 sfw.close(); 404 storeFilePath = sfw.getPath(); 405 } 406 407 private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags) 408 throws IOException, InterruptedException { 409 // TODO: need to change this test if we add a cache size threshold for 410 // compactions, or if we implement some other kind of intelligent logic for 411 // deciding what blocks to cache-on-write on compaction. 412 final String table = "CompactionCacheOnWrite"; 413 final String cf = "myCF"; 414 final byte[] cfBytes = Bytes.toBytes(cf); 415 final int maxVersions = 3; 416 HRegion region = TEST_UTIL.createTestRegion(table, 417 new HColumnDescriptor(cf) 418 .setCompressionType(compress) 419 .setBloomFilterType(BLOOM_TYPE) 420 .setMaxVersions(maxVersions) 421 .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()) 422 ); 423 int rowIdx = 0; 424 long ts = EnvironmentEdgeManager.currentTime(); 425 for (int iFile = 0; iFile < 5; ++iFile) { 426 for (int iRow = 0; iRow < 500; ++iRow) { 427 String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + 428 iRow; 429 Put p = new Put(Bytes.toBytes(rowStr)); 430 ++rowIdx; 431 for (int iCol = 0; iCol < 10; ++iCol) { 432 String qualStr = "col" + iCol; 433 String valueStr = "value_" + rowStr + "_" + qualStr; 434 for (int iTS = 0; iTS < 5; ++iTS) { 435 if (useTags) { 436 Tag t = new ArrayBackedTag((byte) 1, "visibility"); 437 Tag[] tags = new Tag[1]; 438 tags[0] = t; 439 KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr), 440 HConstants.LATEST_TIMESTAMP, Bytes.toBytes(valueStr), tags); 441 p.add(kv); 442 } else { 443 p.addColumn(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr)); 444 } 445 } 446 } 447 p.setDurability(Durability.ASYNC_WAL); 448 region.put(p); 449 } 450 region.flush(true); 451 } 452 clearBlockCache(blockCache); 453 assertEquals(0, blockCache.getBlockCount()); 454 region.compact(false); 455 LOG.debug("compactStores() returned"); 456 457 for (CachedBlock block: blockCache) { 458 assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType()); 459 assertNotEquals(BlockType.DATA, block.getBlockType()); 460 } 461 ((HRegion)region).close(); 462 } 463 464 @Test 465 public void testStoreFileCacheOnWrite() throws IOException { 466 testStoreFileCacheOnWriteInternals(false); 467 testStoreFileCacheOnWriteInternals(true); 468 } 469 470 @Test 471 public void testNotCachingDataBlocksDuringCompaction() throws IOException, InterruptedException { 472 testNotCachingDataBlocksDuringCompactionInternals(false); 473 testNotCachingDataBlocksDuringCompactionInternals(true); 474 } 475}