/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests that {@link CacheConfig} does as expected.
 */
// This test is marked as a medium test even though it runs in a short amount of time
// (seconds). It cannot be a small test because it depends on being able to reset the global
// blockcache instance, which lives in a global variable; experience shows that tests clash on
// that global variable when this is run as a small-sized test.
@Category({IOTests.class, MediumTests.class})
public class TestCacheConfig {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestCacheConfig.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestCacheConfig.class);
  private Configuration conf;

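  /**
   * Test deserializer: registers itself with {@link CacheableDeserializerIdManager} and always
   * hands back the {@link Cacheable} it was constructed with.
   */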
  static class Deserializer implements CacheableDeserializer<Cacheable> {
    private final Cacheable cacheable;
    private int deserializedIdentifier = 0;

    Deserializer(final Cacheable c) {
      deserializedIdentifier = CacheableDeserializerIdManager.registerDeserializer(this);
      this.cacheable = c;
    }

    @Override
    public int getDeserializerIdentifier() {
      return deserializedIdentifier;
    }

    @Override
    public Cacheable deserialize(ByteBuff b, ByteBuffAllocator alloc)
        throws IOException {
      LOG.info("Deserialized " + b);
      return cacheable;
    }
  }

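  /**
   * Variant of {@link DataCacheEntry} that reports itself as a {@link BlockType#ROOT_INDEX} block.
   */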
  static class IndexCacheEntry extends DataCacheEntry {
    private static IndexCacheEntry SINGLETON = new IndexCacheEntry();

    public IndexCacheEntry() {
      super(SINGLETON);
    }

    @Override
    public BlockType getBlockType() {
      return BlockType.ROOT_INDEX;
    }
  }

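  /**
   * Minimal {@link Cacheable} test fixture: a one-byte DATA block whose serialize method only
   * logs and whose deserializer hands back a fixed instance.
   */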
  static class DataCacheEntry implements Cacheable {
    private static final int SIZE = 1;
    private static DataCacheEntry SINGLETON = new DataCacheEntry();
    final CacheableDeserializer<Cacheable> deserializer;

    DataCacheEntry() {
      this(SINGLETON);
    }

    DataCacheEntry(final Cacheable c) {
      this.deserializer = new Deserializer(c);
    }

    @Override
    public String toString() {
      return "size=" + SIZE + ", type=" + getBlockType();
    }

    @Override
    public long heapSize() {
      return SIZE;
    }

    @Override
    public int getSerializedLength() {
      return SIZE;
    }

    @Override
    public void serialize(ByteBuffer destination, boolean includeNextBlockMetadata) {
      LOG.info("Serialized " + this + " to " + destination);
    }

    @Override
    public CacheableDeserializer<Cacheable> getDeserializer() {
      return this.deserializer;
    }

    @Override
    public BlockType getBlockType() {
      return BlockType.DATA;
    }
  }

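  /**
   * Variant of {@link DataCacheEntry} that reports itself as an
   * {@link BlockType#INTERMEDIATE_INDEX} block.
   */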
  static class MetaCacheEntry extends DataCacheEntry {
    @Override
    public BlockType getBlockType() {
      return BlockType.INTERMEDIATE_INDEX;
    }
  }

  @Before
  public void setUp() throws Exception {
    this.conf = HBaseConfiguration.create();
  }

  /**
   * @param bc The block cache instance.
   * @param cc Cache config.
   * @param doubling If true, adding an element ups the counter by 2, not 1, because the element
   * is added to both the onheap and offheap caches.
   * @param sizing True if we should run the sizing test (doesn't always apply).
   */
  void basicBlockCacheOps(final BlockCache bc, final CacheConfig cc, final boolean doubling,
      final boolean sizing) {
    assertEquals(CacheConfig.DEFAULT_IN_MEMORY, cc.isInMemory());
    BlockCacheKey bck = new BlockCacheKey("f", 0);
    Cacheable c = new DataCacheEntry();
    // Do asserts on block counting.
    long initialBlockCount = bc.getBlockCount();
    bc.cacheBlock(bck, c, cc.isInMemory());
    assertEquals(doubling ? 2 : 1, bc.getBlockCount() - initialBlockCount);
    bc.evictBlock(bck);
    assertEquals(initialBlockCount, bc.getBlockCount());
    // Do size accounting. Do it after the above 'warm-up' because it looks like some buffers
    // allocate lazily, so sizes are off on the first pass.
    if (sizing) {
      long originalSize = bc.getCurrentSize();
      bc.cacheBlock(bck, c, cc.isInMemory());
      assertTrue(bc.getCurrentSize() > originalSize);
      bc.evictBlock(bck);
      long size = bc.getCurrentSize();
      assertEquals(originalSize, size);
    }
  }

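  /**
   * Verify the data-block caching switches: the defaults, the cache-on-write and cache-compressed
   * flags, turning off cache-on-read, and disabling the block cache per column family.
   */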
  @Test
  public void testDisableCacheDataBlock() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    CacheConfig cacheConfig = new CacheConfig(conf);
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheDataCompressed());
    assertFalse(cacheConfig.shouldCacheDataOnWrite());
    assertTrue(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertFalse(cacheConfig.shouldCacheBloomsOnWrite());
    assertFalse(cacheConfig.shouldCacheIndexesOnWrite());

    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true);

    cacheConfig = new CacheConfig(conf);
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertTrue(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertTrue(cacheConfig.shouldCacheDataCompressed());
    assertTrue(cacheConfig.shouldCacheDataOnWrite());
    assertTrue(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());

    conf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);

    cacheConfig = new CacheConfig(conf);
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheDataCompressed());
    assertFalse(cacheConfig.shouldCacheDataOnWrite());
    assertFalse(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());

    conf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);

    ColumnFamilyDescriptor columnFamilyDescriptor =
      ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("testDisableCacheDataBlock"))
        .setBlockCacheEnabled(false)
        .build();

    cacheConfig = new CacheConfig(conf, columnFamilyDescriptor, null, ByteBuffAllocator.HEAP);
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheDataCompressed());
    assertFalse(cacheConfig.shouldCacheDataOnWrite());
    assertFalse(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());
  }

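  /**
   * Assert that, with no bucket cache configured, the factory deploys an {@link LruBlockCache}.
   */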
  @Test
  public void testCacheConfigDefaultLRUBlockCache() {
    CacheConfig cc = new CacheConfig(this.conf);
    assertEquals(CacheConfig.DEFAULT_IN_MEMORY, cc.isInMemory());
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    basicBlockCacheOps(blockCache, cc, false, true);
    assertTrue(blockCache instanceof LruBlockCache);
  }

  /**
   * Assert that the caches are deployed with CombinedBlockCache and of the appropriate sizes.
   */
  @Test
  public void testOffHeapBucketCacheConfig() {
    this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    doBucketCacheConfigTest();
  }

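  /**
   * Same as {@link #testOffHeapBucketCacheConfig()} but with a file-backed bucket cache.
   */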
  @Test
  public void testFileBucketCacheConfig() throws IOException {
    HBaseTestingUtility htu = new HBaseTestingUtility(this.conf);
    try {
      Path p = new Path(htu.getDataTestDir(), "bc.txt");
      FileSystem fs = FileSystem.get(this.conf);
      fs.create(p).close();
      this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "file:" + p);
      doBucketCacheConfigTest();
    } finally {
      htu.cleanupTestDir();
    }
  }

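  /**
   * Common assertions for the bucket cache tests: a {@link CombinedBlockCache} is deployed with
   * an on-heap {@link LruBlockCache} and a {@link BucketCache} sized as configured.
   */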
  private void doBucketCacheConfigTest() {
    final int bcSize = 100;
    this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
    CacheConfig cc = new CacheConfig(this.conf);
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    basicBlockCacheOps(blockCache, cc, false, false);
    assertTrue(blockCache instanceof CombinedBlockCache);
    // TODO: Assert that the allocated sizes and proportions are right.
    CombinedBlockCache cbc = (CombinedBlockCache) blockCache;
    BlockCache[] bcs = cbc.getBlockCaches();
    assertTrue(bcs[0] instanceof LruBlockCache);
    LruBlockCache lbc = (LruBlockCache) bcs[0];
    assertEquals(MemorySizeUtil.getOnHeapCacheSize(this.conf), lbc.getMaxSize());
    assertTrue(bcs[1] instanceof BucketCache);
    BucketCache bc = (BucketCache) bcs[1];
    // getMaxSize comes back in bytes but we specified the size in MB.
    assertEquals(bcSize, bc.getMaxSize() / (1024 * 1024));
  }

  /**
   * Assert that the combined deploy uses an LruBlockCache as L1 and a BucketCache as L2, that
   * each is sized as configured, and that eviction from L1 works.
   */
  @Test
  public void testBucketCacheConfigL1L2Setup() {
    this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    // Make sure the lru size is smaller than bcSize. This needs to be true so that when an
    // eviction from L1 happens, it does not fail because L2 cannot take the evicted block
    // (block too big).
    this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.001f);
    long lruExpectedSize = MemorySizeUtil.getOnHeapCacheSize(this.conf);
    final int bcSize = 100;
    long bcExpectedSize = bcSize * 1024L * 1024; // bcSize is in MB; getMaxSize returns bytes.
    assertTrue(lruExpectedSize < bcExpectedSize);
    this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
    CacheConfig cc = new CacheConfig(this.conf);
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    basicBlockCacheOps(blockCache, cc, false, false);
    assertTrue(blockCache instanceof CombinedBlockCache);
    // TODO: Assert that the allocated sizes and proportions are right.
    CombinedBlockCache cbc = (CombinedBlockCache) blockCache;
    FirstLevelBlockCache lbc = cbc.l1Cache;
    assertEquals(lruExpectedSize, lbc.getMaxSize());
    BlockCache bc = cbc.l2Cache;
    // getMaxSize comes back in bytes but we specified the size in MB.
    assertEquals(bcExpectedSize, ((BucketCache) bc).getMaxSize());
    // Test the L1+L2 deploy works as we'd expect with blocks evicted from L1 going to L2.
    long initialL1BlockCount = lbc.getBlockCount();
    long initialL2BlockCount = bc.getBlockCount();
    Cacheable c = new DataCacheEntry();
    BlockCacheKey bck = new BlockCacheKey("bck", 0);
    lbc.cacheBlock(bck, c, false);
    assertEquals(initialL1BlockCount + 1, lbc.getBlockCount());
    assertEquals(initialL2BlockCount, bc.getBlockCount());
    // Force evictions by putting in a block too big.
    final long justTooBigSize = ((LruBlockCache) lbc).acceptableSize() + 1;
    lbc.cacheBlock(new BlockCacheKey("bck2", 0), new DataCacheEntry() {
      @Override
      public long heapSize() {
        return justTooBigSize;
      }

      @Override
      public int getSerializedLength() {
        return (int) heapSize();
      }
    });
    // The eviction thread in LruBlockCache needs to run.
    while (initialL1BlockCount != lbc.getBlockCount()) {
      Threads.sleep(10);
    }
    assertEquals(initialL1BlockCount, lbc.getBlockCount());
  }

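  /**
   * Creating the L2 cache with an invalid bucket size (4000 is not a multiple of 256) should
   * fail with an {@link IllegalArgumentException}.
   */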
  @Test
  public void testL2CacheWithInvalidBucketSize() {
    Configuration c = new Configuration(this.conf);
    c.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    c.set(BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY, "256,512,1024,2048,4000,4096");
    c.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 1024);
    try {
      BlockCacheFactory.createBlockCache(c);
      fail("Should throw IllegalArgumentException when passing illegal value for bucket size");
    } catch (IllegalArgumentException e) {
      // Expected.
    }
  }
}