/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;
import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests that {@link CacheConfig} does as expected.
 */
// This test is marked as a large test though it runs in a short amount of time
// (seconds). It is large because it depends on being able to reset the global
// blockcache instance which is in a global variable. Experience has it that
// tests clash on the global variable if this test is run as small sized test.
@Tag(IOTests.TAG)
@Tag(MediumTests.TAG)
public class TestCacheConfig {

  private static final Logger LOG = LoggerFactory.getLogger(TestCacheConfig.class);
  private Configuration conf;

  /**
   * Trivial deserializer that ignores the serialized bytes and hands back the
   * fixed {@link Cacheable} it was constructed with. Registers itself with
   * {@link CacheableDeserializerIdManager} so block caches can look it up by id.
   */
  static class Deserializer implements CacheableDeserializer<Cacheable> {
    private final Cacheable cacheable;
    private int deserializedIdentifier = 0;

    Deserializer(final Cacheable c) {
      deserializedIdentifier = CacheableDeserializerIdManager.registerDeserializer(this);
      this.cacheable = c;
    }

    @Override
    public int getDeserializerIdentifier() {
      return deserializedIdentifier;
    }

    @Override
    public Cacheable deserialize(ByteBuff b, ByteBuffAllocator alloc) throws IOException {
      LOG.info("Deserialized " + b);
      return cacheable;
    }
  }

  /** A {@link DataCacheEntry} that reports itself as a ROOT_INDEX block. */
  static class IndexCacheEntry extends DataCacheEntry {
    // final singleton shared by all instances as their deserialization result
    private static final IndexCacheEntry SINGLETON = new IndexCacheEntry();

    public IndexCacheEntry() {
      super(SINGLETON);
    }

    @Override
    public BlockType getBlockType() {
      return BlockType.ROOT_INDEX;
    }
  }

  /**
   * Minimal {@link Cacheable} of size 1 reporting itself as a DATA block.
   * NOTE(review): the SINGLETON is built via the no-arg constructor while the
   * static field is still null, so the singleton's deserializer wraps null —
   * harmless here because tests never deserialize the singleton itself.
   */
  static class DataCacheEntry implements Cacheable {
    private static final int SIZE = 1;
    private static final DataCacheEntry SINGLETON = new DataCacheEntry();
    final CacheableDeserializer<Cacheable> deserializer;

    DataCacheEntry() {
      this(SINGLETON);
    }

    DataCacheEntry(final Cacheable c) {
      this.deserializer = new Deserializer(c);
    }

    @Override
    public String toString() {
      return "size=" + SIZE + ", type=" + getBlockType();
    }

    @Override
    public long heapSize() {
      return SIZE;
    }

    @Override
    public int getSerializedLength() {
      return SIZE;
    }

    @Override
    public void serialize(ByteBuffer destination, boolean includeNextBlockMetadata) {
      LOG.info("Serialized " + this + " to " + destination);
    }

    @Override
    public CacheableDeserializer<Cacheable> getDeserializer() {
      return this.deserializer;
    }

    @Override
    public BlockType getBlockType() {
      return BlockType.DATA;
    }
  }

  /** A {@link DataCacheEntry} that reports itself as an INTERMEDIATE_INDEX block. */
  static class MetaCacheEntry extends DataCacheEntry {
    @Override
    public BlockType getBlockType() {
      return BlockType.INTERMEDIATE_INDEX;
    }
  }

  @BeforeEach
  public void setUp() throws Exception {
    // Fresh configuration per test so conf mutations don't leak between tests.
    this.conf = HBaseConfiguration.create();
  }

  /**
   * Exercises basic cache/evict round-trips against the given block cache.
   * @param bc The block cache instance.
   * @param cc Cache config.
   * @param doubling If true, addition of element ups counter by 2, not 1, because element added to
   *          onheap and offheap caches.
   * @param sizing True if we should run sizing test (doesn't always apply).
   */
  void basicBlockCacheOps(final BlockCache bc, final CacheConfig cc, final boolean doubling,
    final boolean sizing) {
    assertEquals(CacheConfig.DEFAULT_IN_MEMORY, cc.isInMemory());
    BlockCacheKey bck = new BlockCacheKey("f", 0);
    Cacheable c = new DataCacheEntry();
    // Do asserts on block counting.
    long initialBlockCount = bc.getBlockCount();
    bc.cacheBlock(bck, c, cc.isInMemory());
    assertEquals(doubling ? 2 : 1, bc.getBlockCount() - initialBlockCount);
    bc.evictBlock(bck);
    assertEquals(initialBlockCount, bc.getBlockCount());
    // Do size accounting. Do it after the above 'warm-up' because it looks like some
    // buffers do lazy allocation so sizes are off on first go around.
    if (sizing) {
      long originalSize = bc.getCurrentSize();
      bc.cacheBlock(bck, c, cc.isInMemory());
      assertTrue(bc.getCurrentSize() > originalSize);
      bc.evictBlock(bck);
      long size = bc.getCurrentSize();
      assertEquals(originalSize, size);
    }
  }

  /**
   * Verifies the cache-on-read/cache-on-write flags under default config, with the
   * write-cache keys enabled, with read/write keys disabled, and finally with the
   * block cache disabled at the column-family level.
   */
  @Test
  public void testDisableCacheDataBlock() throws IOException {
    // First tests the default configs behaviour and block cache enabled
    Configuration conf = HBaseConfiguration.create();
    CacheConfig cacheConfig = new CacheConfig(conf);
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheDataCompressed());
    assertFalse(cacheConfig.shouldCacheDataOnWrite());
    assertFalse(cacheConfig.shouldCacheCompactedBlocksOnWrite());
    assertTrue(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertFalse(cacheConfig.shouldCacheBloomsOnWrite());
    assertFalse(cacheConfig.shouldCacheIndexesOnWrite());

    // Tests block cache enabled and related cache on write flags enabled
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, true);

    cacheConfig = new CacheConfig(conf);
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertTrue(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertTrue(cacheConfig.shouldCacheDataCompressed());
    assertTrue(cacheConfig.shouldCacheDataOnWrite());
    assertTrue(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());
    assertTrue(cacheConfig.shouldCacheCompactedBlocksOnWrite());

    // Tests block cache enabled but related cache on read/write properties disabled
    conf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, false);

    cacheConfig = new CacheConfig(conf);
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheDataCompressed());
    assertFalse(cacheConfig.shouldCacheDataOnWrite());
    assertFalse(cacheConfig.shouldCacheDataOnRead());
    assertFalse(cacheConfig.shouldCacheCompactedBlocksOnWrite());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());

    // Finally tests block cache disabled in the column family but all cache on read/write
    // properties enabled in the config.
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, true);

    ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder
      .newBuilder(Bytes.toBytes("testDisableCacheDataBlock")).setBlockCacheEnabled(false).build();

    cacheConfig = new CacheConfig(conf, columnFamilyDescriptor, null, ByteBuffAllocator.HEAP);
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheDataCompressed());
    assertFalse(cacheConfig.shouldCacheDataOnWrite());
    assertFalse(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertFalse(cacheConfig.shouldCacheBloomsOnWrite());
    assertFalse(cacheConfig.shouldCacheIndexesOnWrite());
  }

  /** Default configuration must deploy an on-heap {@link LruBlockCache}. */
  @Test
  public void testCacheConfigDefaultLRUBlockCache() {
    CacheConfig cc = new CacheConfig(this.conf);
    assertEquals(CacheConfig.DEFAULT_IN_MEMORY, cc.isInMemory());
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    basicBlockCacheOps(blockCache, cc, false, true);
    assertTrue(blockCache instanceof LruBlockCache);
  }

  /**
   * Assert that the caches are deployed with CombinedBlockCache and of the appropriate sizes.
   */
  @Test
  public void testOffHeapBucketCacheConfig() {
    this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    doBucketCacheConfigTest();
  }

  /** Same as the offheap case but with a file-backed bucket cache ioengine. */
  @Test
  public void testFileBucketCacheConfig() throws IOException {
    HBaseTestingUtil htu = new HBaseTestingUtil(this.conf);
    try {
      Path p = new Path(htu.getDataTestDir(), "bc.txt");
      FileSystem fs = FileSystem.get(this.conf);
      fs.create(p).close();
      this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "file:" + p);
      doBucketCacheConfigTest();
    } finally {
      htu.cleanupTestDir();
    }
  }

  /**
   * Shared body for the bucket-cache deploy tests: expects a CombinedBlockCache
   * with an LruBlockCache L1 and a BucketCache L2 of the configured size.
   */
  private void doBucketCacheConfigTest() {
    final int bcSize = 100; // bucket cache size in MB
    this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
    CacheConfig cc = new CacheConfig(this.conf);
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    basicBlockCacheOps(blockCache, cc, false, false);
    assertTrue(blockCache instanceof CombinedBlockCache);
    // TODO: Assert sizes allocated are right and proportions.
    CombinedBlockCache cbc = (CombinedBlockCache) blockCache;
    BlockCache[] bcs = cbc.getBlockCaches();
    assertTrue(bcs[0] instanceof LruBlockCache);
    LruBlockCache lbc = (LruBlockCache) bcs[0];
    assertEquals(MemorySizeUtil.getOnHeapCacheSize(this.conf), lbc.getMaxSize());
    assertTrue(bcs[1] instanceof BucketCache);
    BucketCache bc = (BucketCache) bcs[1];
    // getMaxSize comes back in bytes but we specified size in MB
    assertEquals(bcSize, bc.getMaxSize() / (1024 * 1024));
  }

  /**
   * Assert that when BUCKET_CACHE_COMBINED_KEY is false, the non-default, that we deploy
   * LruBlockCache as L1 with a BucketCache for L2.
   */
  @Test
  public void testBucketCacheConfigL1L2Setup() {
    this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    // Make lru size is smaller than bcSize for sure. Need this to be true so when eviction
    // from L1 happens, it does not fail because L2 can't take the eviction because block too big.
    this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.001f);
    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    long lruExpectedSize = MemorySizeUtil.getOnHeapCacheSize(this.conf);
    final int bcSize = 100; // bucket cache size in MB
    long bcExpectedSize = 100 * 1024 * 1024; // same size expressed in bytes
    assertTrue(lruExpectedSize < bcExpectedSize);
    this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
    CacheConfig cc = new CacheConfig(this.conf);
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    basicBlockCacheOps(blockCache, cc, false, false);
    assertTrue(blockCache instanceof CombinedBlockCache);
    // TODO: Assert sizes allocated are right and proportions.
    CombinedBlockCache cbc = (CombinedBlockCache) blockCache;
    FirstLevelBlockCache lbc = cbc.l1Cache;
    assertEquals(lruExpectedSize, lbc.getMaxSize());
    BlockCache bc = cbc.l2Cache;
    // getMaxSize comes back in bytes but we specified size in MB
    assertEquals(bcExpectedSize, ((BucketCache) bc).getMaxSize());
    // Test the L1+L2 deploy works as we'd expect with blocks evicted from L1 going to L2.
    long initialL1BlockCount = lbc.getBlockCount();
    long initialL2BlockCount = bc.getBlockCount();
    Cacheable c = new DataCacheEntry();
    BlockCacheKey bck = new BlockCacheKey("bck", 0);
    lbc.cacheBlock(bck, c, false);
    assertEquals(initialL1BlockCount + 1, lbc.getBlockCount());
    assertEquals(initialL2BlockCount, bc.getBlockCount());
    // Force evictions by putting in a block too big.
    final long justTooBigSize = ((LruBlockCache) lbc).acceptableSize() + 1;
    lbc.cacheBlock(new BlockCacheKey("bck2", 0), new DataCacheEntry() {
      @Override
      public long heapSize() {
        return justTooBigSize;
      }

      @Override
      public int getSerializedLength() {
        return (int) heapSize();
      }
    });
    // The eviction thread in lrublockcache needs to run.
    while (initialL1BlockCount != lbc.getBlockCount()) {
      Threads.sleep(10);
    }
    assertEquals(initialL1BlockCount, lbc.getBlockCount());
  }

  /** An illegal bucket size list (4000 among the entries) must be rejected. */
  @Test
  public void testL2CacheWithInvalidBucketSize() {
    Configuration c = new Configuration(this.conf);
    c.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    c.set(BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY, "256,512,1024,2048,4000,4096");
    c.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 1024);
    // Should throw IllegalArgumentException when passing illegal value for bucket size
    assertThrows(IllegalArgumentException.class, () -> BlockCacheFactory.createBlockCache(c));
  }

  /** IndexOnlyLRU policy: data blocks are rejected, index blocks are accepted. */
  @Test
  public void testIndexOnlyLruBlockCache() {
    conf.set(BlockCacheFactory.BLOCKCACHE_POLICY_KEY, "IndexOnlyLRU");
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    assertTrue(blockCache instanceof IndexOnlyLruBlockCache);
    // reject data block
    long initialBlockCount = blockCache.getBlockCount();
    BlockCacheKey bck = new BlockCacheKey("bck", 0);
    Cacheable c = new DataCacheEntry();
    blockCache.cacheBlock(bck, c, true);
    // accept index block
    Cacheable indexCacheEntry = new IndexCacheEntry();
    blockCache.cacheBlock(bck, indexCacheEntry, true);
    assertEquals(initialBlockCount + 1, blockCache.getBlockCount());
  }

  /** On-heap cache size resolution: default, numeric, human-readable, and fixed-size keys. */
  @Test
  public void testGetOnHeapCacheSize() {
    Configuration copyConf = new Configuration(conf);
    long fixedSize = 1024 * 1024L;
    long onHeapCacheSize = MemorySizeUtil.getOnHeapCacheSize(copyConf);
    assertNull(copyConf.get(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY));
    assertTrue(onHeapCacheSize > 0 && onHeapCacheSize != fixedSize);
    // when HBASE_BLOCK_CACHE_MEMORY_SIZE is set in number
    copyConf.setLong(HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY, 3 * 1024 * 1024);
    onHeapCacheSize = MemorySizeUtil.getOnHeapCacheSize(copyConf);
    assertEquals(3 * 1024 * 1024, onHeapCacheSize);
    // when HBASE_BLOCK_CACHE_MEMORY_SIZE is set in human-readable format
    copyConf.set(HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY, "2m");
    onHeapCacheSize = MemorySizeUtil.getOnHeapCacheSize(copyConf);
    assertEquals(2 * 1024 * 1024, onHeapCacheSize);
    // when HBASE_BLOCK_CACHE_FIXED_SIZE_KEY is set, it will be a fixed size
    copyConf.setLong(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, fixedSize);
    onHeapCacheSize = MemorySizeUtil.getOnHeapCacheSize(copyConf);
    assertEquals(fixedSize, onHeapCacheSize);
  }
}