/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;
import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests that {@link CacheConfig} does as expected.
 */
// This test is marked as a large test though it runs in a short amount of time
// (seconds).  It is large because it depends on being able to reset the global
// blockcache instance, which lives in a global variable.  Experience has shown that
// tests clash on the global variable if this test is run as a small-sized test.
@Category({IOTests.class, LargeTests.class})
public class TestCacheConfig {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestCacheConfig.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestCacheConfig.class);
  private Configuration conf;

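  // Minimal Cacheable/CacheableDeserializer stand-ins used by the tests below to drive the
  // block cache implementations without needing real HFile blocks.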
  static class Deserializer implements CacheableDeserializer<Cacheable> {
    private final Cacheable cacheable;
    private int deserializedIdentifier = 0;

    Deserializer(final Cacheable c) {
      deserializedIdentifier = CacheableDeserializerIdManager.registerDeserializer(this);
      this.cacheable = c;
    }

    @Override
    public int getDeserialiserIdentifier() {
      return deserializedIdentifier;
    }

    @Override
    public Cacheable deserialize(ByteBuff b, boolean reuse, MemoryType memType) throws IOException {
      LOG.info("Deserialized " + b + ", reuse=" + reuse);
      return cacheable;
    }

    @Override
    public Cacheable deserialize(ByteBuff b) throws IOException {
      LOG.info("Deserialized " + b);
      return cacheable;
    }
  }

  static class IndexCacheEntry extends DataCacheEntry {
    private static IndexCacheEntry SINGLETON = new IndexCacheEntry();

    public IndexCacheEntry() {
      super(SINGLETON);
    }

    @Override
    public BlockType getBlockType() {
      return BlockType.ROOT_INDEX;
    }
  }

  static class DataCacheEntry implements Cacheable {
    private static final int SIZE = 1;
    private static DataCacheEntry SINGLETON = new DataCacheEntry();
    final CacheableDeserializer<Cacheable> deserializer;

    DataCacheEntry() {
      this(SINGLETON);
    }

    DataCacheEntry(final Cacheable c) {
      this.deserializer = new Deserializer(c);
    }

    @Override
    public String toString() {
      return "size=" + SIZE + ", type=" + getBlockType();
    }

    @Override
    public long heapSize() {
      return SIZE;
    }

    @Override
    public int getSerializedLength() {
      return SIZE;
    }

    @Override
    public void serialize(ByteBuffer destination, boolean includeNextBlockMetadata) {
      LOG.info("Serialized " + this + " to " + destination);
    }

    @Override
    public CacheableDeserializer<Cacheable> getDeserializer() {
      return this.deserializer;
    }

    @Override
    public BlockType getBlockType() {
      return BlockType.DATA;
    }

    @Override
    public MemoryType getMemoryType() {
      return MemoryType.EXCLUSIVE;
    }
  }

  static class MetaCacheEntry extends DataCacheEntry {
    @Override
    public BlockType getBlockType() {
      return BlockType.INTERMEDIATE_INDEX;
    }
  }

  @Before
  public void setUp() throws Exception {
    this.conf = HBaseConfiguration.create();
  }

  /**
   * @param bc The block cache instance.
   * @param cc Cache config.
   * @param doubling If true, adding an element bumps the block count by 2, not 1, because the
   * element is added to both the onheap and offheap caches.
   * @param sizing True if we should run the sizing test (doesn't always apply).
   */
  void basicBlockCacheOps(final BlockCache bc, final CacheConfig cc, final boolean doubling,
      final boolean sizing) {
    assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
    BlockCacheKey bck = new BlockCacheKey("f", 0);
    Cacheable c = new DataCacheEntry();
    // Do asserts on block counting.
    long initialBlockCount = bc.getBlockCount();
    bc.cacheBlock(bck, c, cc.isInMemory());
    assertEquals(doubling ? 2 : 1, bc.getBlockCount() - initialBlockCount);
    bc.evictBlock(bck);
    assertEquals(initialBlockCount, bc.getBlockCount());
    // Do size accounting.  Do it after the above 'warm-up' because it looks like some
    // buffers are allocated lazily, so sizes are off on the first go-around.
    if (sizing) {
      long originalSize = bc.getCurrentSize();
      bc.cacheBlock(bck, c, cc.isInMemory());
      assertTrue(bc.getCurrentSize() > originalSize);
      bc.evictBlock(bck);
      long size = bc.getCurrentSize();
      assertEquals(originalSize, size);
    }
  }

  @Test
  public void testDisableCacheDataBlock() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    CacheConfig cacheConfig = new CacheConfig(conf);
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheDataCompressed());
    assertFalse(cacheConfig.shouldCacheDataOnWrite());
    assertTrue(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertFalse(cacheConfig.shouldCacheBloomsOnWrite());
    assertFalse(cacheConfig.shouldCacheIndexesOnWrite());

    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true);

    cacheConfig = new CacheConfig(conf);
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertTrue(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertTrue(cacheConfig.shouldCacheDataCompressed());
    assertTrue(cacheConfig.shouldCacheDataOnWrite());
    assertTrue(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());

    conf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);

    cacheConfig = new CacheConfig(conf);
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheDataCompressed());
    assertFalse(cacheConfig.shouldCacheDataOnWrite());
    assertFalse(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());

    conf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, true);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);

    HColumnDescriptor family = new HColumnDescriptor("testDisableCacheDataBlock");
    family.setBlockCacheEnabled(false);

    cacheConfig = new CacheConfig(conf, family, null);
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
    assertFalse(cacheConfig.shouldCacheDataCompressed());
    assertFalse(cacheConfig.shouldCacheDataOnWrite());
    assertFalse(cacheConfig.shouldCacheDataOnRead());
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());
  }

  @Test
  public void testCacheConfigDefaultLRUBlockCache() {
    CacheConfig cc = new CacheConfig(this.conf);
    assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    basicBlockCacheOps(blockCache, cc, false, true);
    assertTrue(blockCache instanceof LruBlockCache);
  }

  /**
   * Assert that the caches are deployed with CombinedBlockCache and are of the appropriate sizes.
   */
  @Test
  public void testOffHeapBucketCacheConfig() {
    this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    doBucketCacheConfigTest();
  }

  @Test
  public void testFileBucketCacheConfig() throws IOException {
    HBaseTestingUtility htu = new HBaseTestingUtility(this.conf);
    try {
      Path p = new Path(htu.getDataTestDir(), "bc.txt");
      FileSystem fs = FileSystem.get(this.conf);
      fs.create(p).close();
      this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "file:" + p);
      doBucketCacheConfigTest();
    } finally {
      htu.cleanupTestDir();
    }
  }

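  // Shared by the offheap and file bucket cache tests above: builds the cache from this.conf and
  // verifies the resulting CombinedBlockCache deploy and its component sizes.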
  private void doBucketCacheConfigTest() {
    final int bcSize = 100;
    this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
    CacheConfig cc = new CacheConfig(this.conf);
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    basicBlockCacheOps(blockCache, cc, false, false);
    assertTrue(blockCache instanceof CombinedBlockCache);
    // TODO: Assert sizes allocated are right and proportions.
    CombinedBlockCache cbc = (CombinedBlockCache) blockCache;
    BlockCache[] bcs = cbc.getBlockCaches();
    assertTrue(bcs[0] instanceof LruBlockCache);
    LruBlockCache lbc = (LruBlockCache) bcs[0];
    assertEquals(MemorySizeUtil.getOnHeapCacheSize(this.conf), lbc.getMaxSize());
    assertTrue(bcs[1] instanceof BucketCache);
    BucketCache bc = (BucketCache) bcs[1];
    // getMaxSize comes back in bytes but we specified size in MB
    assertEquals(bcSize, bc.getMaxSize() / (1024 * 1024));
  }

  /**
   * Assert that when BUCKET_CACHE_COMBINED_KEY is false (the non-default), we deploy
   * LruBlockCache as L1 with a BucketCache for L2.
   */
  @Test
  public void testBucketCacheConfigL1L2Setup() {
    this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    // Make sure the lru size is smaller than bcSize.  Need this to be true so that when eviction
    // from L1 happens, it does not fail because L2 cannot accept the evicted block (block too big).
    this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.001f);
    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    long lruExpectedSize = MemorySizeUtil.getOnHeapCacheSize(this.conf);
    final int bcSize = 100;
    long bcExpectedSize = 100 * 1024 * 1024; // 100 MB in bytes.
    assertTrue(lruExpectedSize < bcExpectedSize);
    this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
    CacheConfig cc = new CacheConfig(this.conf);
    BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
    basicBlockCacheOps(blockCache, cc, false, false);
    assertTrue(blockCache instanceof CombinedBlockCache);
    // TODO: Assert sizes allocated are right and proportions.
    CombinedBlockCache cbc = (CombinedBlockCache) blockCache;
    LruBlockCache lbc = cbc.onHeapCache;
    assertEquals(lruExpectedSize, lbc.getMaxSize());
    BlockCache bc = cbc.l2Cache;
    // getMaxSize comes back in bytes but we specified size in MB
    assertEquals(bcExpectedSize, ((BucketCache) bc).getMaxSize());
    // Test the L1+L2 deploy works as we'd expect with blocks evicted from L1 going to L2.
    long initialL1BlockCount = lbc.getBlockCount();
    long initialL2BlockCount = bc.getBlockCount();
    Cacheable c = new DataCacheEntry();
    BlockCacheKey bck = new BlockCacheKey("bck", 0);
    lbc.cacheBlock(bck, c, false);
    assertEquals(initialL1BlockCount + 1, lbc.getBlockCount());
    assertEquals(initialL2BlockCount, bc.getBlockCount());
    // Force evictions by putting in a block too big.
    final long justTooBigSize = lbc.acceptableSize() + 1;
    lbc.cacheBlock(new BlockCacheKey("bck2", 0), new DataCacheEntry() {
      @Override
      public long heapSize() {
        return justTooBigSize;
      }

      @Override
      public int getSerializedLength() {
        return (int) heapSize();
      }
    });
    // The eviction thread in LruBlockCache needs to run.
    while (initialL1BlockCount != lbc.getBlockCount()) {
      Threads.sleep(10);
    }
    assertEquals(initialL1BlockCount, lbc.getBlockCount());
  }

  @Test
  public void testL2CacheWithInvalidBucketSize() {
    Configuration c = new Configuration(this.conf);
    c.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    c.set(BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY, "256,512,1024,2048,4000,4096");
    c.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 1024);
    try {
      BlockCacheFactory.createBlockCache(c);
      fail("Should throw IllegalArgumentException when passing illegal value for bucket size");
    } catch (IllegalArgumentException e) {
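      // Expected: the configured bucket sizes include 4000, which is not a multiple of 256 and
      // should therefore be rejected when the BucketCache is constructed.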
    }
  }
}