/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;
import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests that {@link CacheConfig} does as expected.
 */
// This test is marked as a medium test though it runs in a short amount of time
// (seconds). It is not a small test because it depends on being able to reset the global
// blockcache instance which is in a global variable. Experience has it that
// tests clash on the global variable if this test is run as a small sized test.
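// In broad strokes, the deploy under test is driven purely by Configuration: with no
// HConstants.BUCKET_CACHE_IOENGINE_KEY set, BlockCacheFactory hands back a plain on-heap
// LruBlockCache; setting the ioengine ("offheap" or "file:<path>") together with
// HConstants.BUCKET_CACHE_SIZE_KEY (in MB) yields a CombinedBlockCache with a BucketCache L2.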
@Category({IOTests.class, MediumTests.class})
public class TestCacheConfig {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestCacheConfig.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestCacheConfig.class);
  private Configuration conf;

  static class Deserializer implements CacheableDeserializer<Cacheable> {
    private final Cacheable cacheable;
    private int deserializedIdentifier = 0;

    Deserializer(final Cacheable c) {
      deserializedIdentifier = CacheableDeserializerIdManager.registerDeserializer(this);
      this.cacheable = c;
    }

    @Override
    public int getDeserializerIdentifier() {
      return deserializedIdentifier;
    }

    @Override
    public Cacheable deserialize(ByteBuff b, ByteBuffAllocator alloc) throws IOException {
      LOG.info("Deserialized " + b);
      return cacheable;
    }
  }

  static class IndexCacheEntry extends DataCacheEntry {
    private static IndexCacheEntry SINGLETON = new IndexCacheEntry();

    public IndexCacheEntry() {
      super(SINGLETON);
    }

    @Override
    public BlockType getBlockType() {
      return BlockType.ROOT_INDEX;
    }
  }

  static class DataCacheEntry implements Cacheable {
    private static final int SIZE = 1;
    private static DataCacheEntry SINGLETON = new DataCacheEntry();
    final CacheableDeserializer<Cacheable> deserializer;

    DataCacheEntry() {
      this(SINGLETON);
    }

    DataCacheEntry(final Cacheable c) {
      this.deserializer = new Deserializer(c);
    }

    @Override
    public String toString() {
      return "size=" + SIZE + ", type=" + getBlockType();
    }

    @Override
    public long heapSize() {
      return SIZE;
    }

    @Override
    public int getSerializedLength() {
      return SIZE;
    }

    @Override
    public void serialize(ByteBuffer destination, boolean includeNextBlockMetadata) {
      LOG.info("Serialized " + this + " to " + destination);
    }

    @Override
    public CacheableDeserializer<Cacheable> getDeserializer() {
      return this.deserializer;
    }

    @Override
    public BlockType getBlockType() {
      return BlockType.DATA;
    }
  }

  static class MetaCacheEntry extends DataCacheEntry {
    @Override
    public BlockType getBlockType() {
      return BlockType.INTERMEDIATE_INDEX;
    }
  }

  @Before
  public void setUp() throws Exception {
    this.conf = HBaseConfiguration.create();
  }

  /**
   * @param bc The block cache instance.
   * @param cc Cache config.
   * @param doubling If true, adding an element ups the block count by 2, not 1, because the
   *          element is added to both the onheap and offheap caches.
   * @param sizing True if we should run the sizing test (doesn't always apply).
   */
  void basicBlockCacheOps(final BlockCache bc, final CacheConfig cc, final boolean doubling,
      final boolean sizing) {
    assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
    BlockCacheKey bck = new BlockCacheKey("f", 0);
    Cacheable c = new DataCacheEntry();
    // Do asserts on block counting.
    long initialBlockCount = bc.getBlockCount();
    bc.cacheBlock(bck, c, cc.isInMemory());
    assertEquals(doubling ? 2 : 1, bc.getBlockCount() - initialBlockCount);
    bc.evictBlock(bck);
    assertEquals(initialBlockCount, bc.getBlockCount());
    // Do size accounting. Do it after the above 'warm-up' because it looks like some buffers
    // do lazy allocation, so sizes are off on the first go around.
    if (sizing) {
      long originalSize = bc.getCurrentSize();
      bc.cacheBlock(bck, c, cc.isInMemory());
      assertTrue(bc.getCurrentSize() > originalSize);
      bc.evictBlock(bck);
      long size = bc.getCurrentSize();
      assertEquals(originalSize, size);
    }
  }
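
  /**
   * Assert that the cache-on-read/cache-on-write flags track their configuration keys, and that
   * disabling the block cache on the column family switches off data block caching while index
   * and bloom blocks are still cached on read.
   */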
  @Test
  public void testDisableCacheDataBlock() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    CacheConfig cacheConfig = new CacheConfig(conf);
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheDataCompressed());
    assertFalse(cacheConfig.shouldCacheDataOnWrite());
    assertTrue(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertFalse(cacheConfig.shouldCacheBloomsOnWrite());
    assertFalse(cacheConfig.shouldCacheIndexesOnWrite());

    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true);

    cacheConfig = new CacheConfig(conf);
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertTrue(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertTrue(cacheConfig.shouldCacheDataCompressed());
    assertTrue(cacheConfig.shouldCacheDataOnWrite());
    assertTrue(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());

    conf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);

    cacheConfig = new CacheConfig(conf);
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheDataCompressed());
    assertFalse(cacheConfig.shouldCacheDataOnWrite());
    assertFalse(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());

    conf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);

    HColumnDescriptor family = new HColumnDescriptor("testDisableCacheDataBlock");
    family.setBlockCacheEnabled(false);

    cacheConfig = new CacheConfig(conf, family, null, ByteBuffAllocator.HEAP);
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheDataCompressed());
    assertFalse(cacheConfig.shouldCacheDataOnWrite());
    assertFalse(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());
  }
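
  /**
   * Assert that with no bucket cache configured, the default deploy is a plain on-heap
   * LruBlockCache.
   */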
  @Test
  public void testCacheConfigDefaultLRUBlockCache() {
    CacheConfig cc = new CacheConfig(this.conf);
    assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    basicBlockCacheOps(blockCache, cc, false, true);
    assertTrue(blockCache instanceof LruBlockCache);
  }

  /**
   * Assert that the caches are deployed with CombinedBlockCache and of the appropriate sizes.
   */
  @Test
  public void testOffHeapBucketCacheConfig() {
    this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    doBucketCacheConfigTest();
  }

  @Test
  public void testFileBucketCacheConfig() throws IOException {
    HBaseTestingUtility htu = new HBaseTestingUtility(this.conf);
    try {
      Path p = new Path(htu.getDataTestDir(), "bc.txt");
      FileSystem fs = FileSystem.get(this.conf);
      fs.create(p).close();
      this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "file:" + p);
      doBucketCacheConfigTest();
    } finally {
      htu.cleanupTestDir();
    }
  }

  private void doBucketCacheConfigTest() {
    final int bcSize = 100;
    this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
    CacheConfig cc = new CacheConfig(this.conf);
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    basicBlockCacheOps(blockCache, cc, false, false);
    assertTrue(blockCache instanceof CombinedBlockCache);
    // TODO: Assert that the sizes allocated are right and in proportion.
    CombinedBlockCache cbc = (CombinedBlockCache) blockCache;
    BlockCache[] bcs = cbc.getBlockCaches();
    assertTrue(bcs[0] instanceof LruBlockCache);
    LruBlockCache lbc = (LruBlockCache) bcs[0];
    assertEquals(MemorySizeUtil.getOnHeapCacheSize(this.conf), lbc.getMaxSize());
    assertTrue(bcs[1] instanceof BucketCache);
    BucketCache bc = (BucketCache) bcs[1];
    // getMaxSize comes back in bytes but we specified the size in MB.
    assertEquals(bcSize, bc.getMaxSize() / (1024 * 1024));
  }

  /**
   * Assert that when BUCKET_CACHE_COMBINED_KEY is false (the non-default), we deploy
   * LruBlockCache as L1 with a BucketCache for L2.
   */
  @Test
  public void testBucketCacheConfigL1L2Setup() {
    this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    // Make sure the lru size is smaller than bcSize. Need this to be true so that when eviction
    // from L1 happens, it does not fail because L2 can't take the evicted block because it is
    // too big.
    this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.001f);
    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    long lruExpectedSize = MemorySizeUtil.getOnHeapCacheSize(this.conf);
    final int bcSize = 100;
    long bcExpectedSize = 100 * 1024 * 1024; // 100 MB expressed in bytes.
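    // Sanity check on the setup: with HFILE_BLOCK_CACHE_SIZE_KEY dialed way down above, the
    // expected on-heap L1 size should land well below the 100 MB granted to the L2 bucket cache.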
    assertTrue(lruExpectedSize < bcExpectedSize);
    this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
    CacheConfig cc = new CacheConfig(this.conf);
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    basicBlockCacheOps(blockCache, cc, false, false);
    assertTrue(blockCache instanceof CombinedBlockCache);
    // TODO: Assert that the sizes allocated are right and in proportion.
    CombinedBlockCache cbc = (CombinedBlockCache) blockCache;
    FirstLevelBlockCache lbc = cbc.l1Cache;
    assertEquals(lruExpectedSize, lbc.getMaxSize());
    BlockCache bc = cbc.l2Cache;
    // getMaxSize comes back in bytes but we specified the size in MB.
    assertEquals(bcExpectedSize, ((BucketCache) bc).getMaxSize());
    // Test that the L1+L2 deploy works as we'd expect, with blocks evicted from L1 going to L2.
    long initialL1BlockCount = lbc.getBlockCount();
    long initialL2BlockCount = bc.getBlockCount();
    Cacheable c = new DataCacheEntry();
    BlockCacheKey bck = new BlockCacheKey("bck", 0);
    lbc.cacheBlock(bck, c, false);
    assertEquals(initialL1BlockCount + 1, lbc.getBlockCount());
    assertEquals(initialL2BlockCount, bc.getBlockCount());
    // Force evictions by putting in a block too big.
    final long justTooBigSize = ((LruBlockCache) lbc).acceptableSize() + 1;
    lbc.cacheBlock(new BlockCacheKey("bck2", 0), new DataCacheEntry() {
      @Override
      public long heapSize() {
        return justTooBigSize;
      }

      @Override
      public int getSerializedLength() {
        return (int) heapSize();
      }
    });
    // The eviction thread in LruBlockCache needs to run.
    while (initialL1BlockCount != lbc.getBlockCount()) {
      Threads.sleep(10);
    }
    assertEquals(initialL1BlockCount, lbc.getBlockCount());
  }

  @Test
  public void testL2CacheWithInvalidBucketSize() {
    Configuration c = new Configuration(this.conf);
    c.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    c.set(BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY, "256,512,1024,2048,4000,4096");
    c.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 1024);
    try {
      BlockCacheFactory.createBlockCache(c);
      fail("Should throw IllegalArgumentException when passing illegal value for bucket size");
    } catch (IllegalArgumentException e) {
      // Expected.
    }
  }
}