001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.io.hfile.bucket;
019
020import static org.junit.Assert.assertEquals;
021import static org.junit.Assert.assertFalse;
022import static org.junit.Assert.assertNotEquals;
023import static org.junit.Assert.assertNull;
024import static org.junit.Assert.assertTrue;
025
026import java.io.File;
027import java.io.FileNotFoundException;
028import java.io.IOException;
029import java.nio.ByteBuffer;
030import java.util.ArrayList;
031import java.util.Arrays;
032import java.util.List;
033import java.util.Map;
034import java.util.Random;
035import java.util.Set;
036import java.util.concurrent.locks.ReentrantReadWriteLock;
037import org.apache.hadoop.conf.Configuration;
038import org.apache.hadoop.fs.Path;
039import org.apache.hadoop.hbase.HBaseClassTestRule;
040import org.apache.hadoop.hbase.HBaseConfiguration;
041import org.apache.hadoop.hbase.HBaseTestingUtility;
042import org.apache.hadoop.hbase.HConstants;
043import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
044import org.apache.hadoop.hbase.io.hfile.BlockType;
045import org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
046import org.apache.hadoop.hbase.io.hfile.CacheTestUtils.HFileBlockPair;
047import org.apache.hadoop.hbase.io.hfile.Cacheable;
048import org.apache.hadoop.hbase.io.hfile.HFileBlock;
049import org.apache.hadoop.hbase.io.hfile.HFileContext;
050import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
051import org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.BucketSizeInfo;
052import org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.IndexStatistics;
053import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.RAMQueueEntry;
054import org.apache.hadoop.hbase.nio.ByteBuff;
055import org.apache.hadoop.hbase.testclassification.IOTests;
056import org.apache.hadoop.hbase.testclassification.MediumTests;
057import org.junit.After;
058import org.junit.Assert;
059import org.junit.Before;
060import org.junit.ClassRule;
061import org.junit.Test;
062import org.junit.experimental.categories.Category;
063import org.junit.runner.RunWith;
064import org.junit.runners.Parameterized;
065import org.mockito.Mockito;
066
067import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
068
069/**
070 * Basic test of BucketCache.Puts and gets.
071 * <p>
072 * Tests will ensure that blocks' data correctness under several threads concurrency
073 */
074@RunWith(Parameterized.class)
075@Category({ IOTests.class, MediumTests.class })
076public class TestBucketCache {
077
078  @ClassRule
079  public static final HBaseClassTestRule CLASS_RULE =
080      HBaseClassTestRule.forClass(TestBucketCache.class);
081
082  private static final Random RAND = new Random();
083
  /**
   * Parameter sets: every test runs once with an 8k block size and default bucket sizes, and
   * once with 16k blocks plus an explicit list of bucket sizes (each base size + 1k overhead).
   */
  @Parameterized.Parameters(name = "{index}: blockSize={0}, bucketSizes={1}")
  public static Iterable<Object[]> data() {
    return Arrays.asList(new Object[][] {
        { 8192, null }, // TODO: why is 8k the default blocksize for these tests?
        {
            16 * 1024,
            new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024,
                28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024,
                128 * 1024 + 1024 } } });
  }
094
095  @Parameterized.Parameter(0)
096  public int constructedBlockSize;
097
098  @Parameterized.Parameter(1)
099  public int[] constructedBlockSizes;
100
101  BucketCache cache;
102  final int CACHE_SIZE = 1000000;
103  final int NUM_BLOCKS = 100;
104  final int BLOCK_SIZE = CACHE_SIZE / NUM_BLOCKS;
105  final int NUM_THREADS = 100;
106  final int NUM_QUERIES = 10000;
107
108  final long capacitySize = 32 * 1024 * 1024;
109  final int writeThreads = BucketCache.DEFAULT_WRITER_THREADS;
110  final int writerQLen = BucketCache.DEFAULT_WRITER_QUEUE_ITEMS;
111  String ioEngineName = "offheap";
112  String persistencePath = null;
113
  /**
   * BucketCache subclass used by these tests. Sets {@code wait_when_cache}, which presumably
   * makes {@code cacheBlock} wait rather than drop blocks when the write queue is full, so
   * tests observe every block they cache — confirm against BucketCache.
   */
  private static class MockedBucketCache extends BucketCache {

    public MockedBucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes,
        int writerThreads, int writerQLen, String persistencePath) throws FileNotFoundException,
        IOException {
      super(ioEngineName, capacity, blockSize, bucketSizes, writerThreads, writerQLen,
          persistencePath);
      super.wait_when_cache = true;
    }

    // Delegating overrides; kept so both cacheBlock signatures are explicitly exercised.
    @Override
    public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
      super.cacheBlock(cacheKey, buf, inMemory);
    }

    @Override
    public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
      super.cacheBlock(cacheKey, buf);
    }
  }
134
135  @Before
136  public void setup() throws FileNotFoundException, IOException {
137    cache =
138        new MockedBucketCache(ioEngineName, capacitySize, constructedBlockSize,
139            constructedBlockSizes, writeThreads, writerQLen, persistencePath);
140  }
141
  @After
  public void tearDown() {
    // Stop the cache's writer threads and release its resources after every test.
    cache.shutdown();
  }
146
147  /**
148   * Return a random element from {@code a}.
149   */
150  private static <T> T randFrom(List<T> a) {
151    return a.get(RAND.nextInt(a.size()));
152  }
153
  /**
   * Exercises the BucketAllocator directly: fills it to capacity with randomly sized
   * allocations, checks that every tested size class is exhausted, then frees everything and
   * verifies the used size drops back to zero.
   */
  @Test
  public void testBucketAllocator() throws BucketAllocatorException {
    BucketAllocator mAllocator = cache.getAllocator();
    /*
     * Test the allocator first
     */
    final List<Integer> BLOCKSIZES = Arrays.asList(4 * 1024, 8 * 1024, 64 * 1024, 96 * 1024);

    boolean full = false;
    ArrayList<Long> allocations = new ArrayList<>();
    // Fill the allocated extents by choosing a random blocksize. Continues selecting blocks until
    // the cache is completely filled.
    List<Integer> tmp = new ArrayList<>(BLOCKSIZES);
    while (!full) {
      Integer blockSize = null;
      try {
        blockSize = randFrom(tmp);
        allocations.add(mAllocator.allocateBlock(blockSize));
      } catch (CacheFullException cfe) {
        // This size class is exhausted; stop selecting it. Once every size is exhausted the
        // allocator is considered full.
        tmp.remove(blockSize);
        if (tmp.isEmpty()) full = true;
      }
    }

    // Every tested size class must now report zero free slots.
    for (Integer blockSize : BLOCKSIZES) {
      BucketSizeInfo bucketSizeInfo = mAllocator.roundUpToBucketSizeInfo(blockSize);
      IndexStatistics indexStatistics = bucketSizeInfo.statistics();
      assertEquals("unexpected freeCount for " + bucketSizeInfo, 0, indexStatistics.freeCount());
    }

    // Freeing each allocation must return exactly as many bytes as were allocated for it.
    for (long offset : allocations) {
      assertEquals(mAllocator.sizeOfAllocation(offset), mAllocator.freeBlock(offset));
    }
    assertEquals(0, mAllocator.getUsedSize());
  }
189
  /** Delegates basic put/get coverage to the shared CacheTestUtils harness. */
  @Test
  public void testCacheSimple() throws Exception {
    CacheTestUtils.testCacheSimple(cache, BLOCK_SIZE, NUM_QUERIES);
  }
194
  /** Hammers a single key from many threads via the shared CacheTestUtils harness. */
  @Test
  public void testCacheMultiThreadedSingleKey() throws Exception {
    CacheTestUtils.hammerSingleKey(cache, BLOCK_SIZE, 2 * NUM_THREADS, 2 * NUM_QUERIES);
  }
199
  /**
   * Stops the writer threads first (so blocks are not drained asynchronously), then delegates
   * heap-size accounting checks to CacheTestUtils.
   */
  @Test
  public void testHeapSizeChanges() throws Exception {
    cache.stopWriterThreads();
    CacheTestUtils.testHeapSizeChanges(cache, BLOCK_SIZE);
  }
205
206  private void waitUntilFlushedToBucket(BucketCache cache, BlockCacheKey cacheKey)
207      throws InterruptedException {
208    while (!cache.backingMap.containsKey(cacheKey) || cache.ramCache.containsKey(cacheKey)) {
209      Thread.sleep(100);
210    }
211  }
212
  /**
   * Caches {@code block} and blocks until it has been flushed to the bucket.
   * BucketCache.cacheBlock is async: it first adds the block to ramCache and the writeQueue,
   * then writer threads flush it to the bucket and put a reference entry in backingMap.
   */
  private void cacheAndWaitUntilFlushedToBucket(BucketCache cache, BlockCacheKey cacheKey,
      Cacheable block) throws InterruptedException {
    cache.cacheBlock(cacheKey, block);
    waitUntilFlushedToBucket(cache, cacheKey);
  }
220
  /**
   * Regression test: a block re-cached while a concurrent eviction is blocked on its offset
   * lock must not be lost. The race is orchestrated deterministically by holding the offset's
   * write lock while the evicting thread waits on it.
   */
  @Test
  public void testMemoryLeak() throws Exception {
    final BlockCacheKey cacheKey = new BlockCacheKey("dummy", 1L);
    cacheAndWaitUntilFlushedToBucket(cache, cacheKey, new CacheTestUtils.ByteArrayCacheable(
        new byte[10]));
    long lockId = cache.backingMap.get(cacheKey).offset();
    ReentrantReadWriteLock lock = cache.offsetLock.getLock(lockId);
    // Hold the write lock so the eviction thread blocks inside evictBlock.
    lock.writeLock().lock();
    Thread evictThread = new Thread("evict-block") {

      @Override
      public void run() {
        cache.evictBlock(cacheKey);
      }

    };
    evictThread.start();
    // Wait until the evicting thread is parked on the lock we hold.
    cache.offsetLock.waitForWaiters(lockId, 1);
    // While the evictor is blocked: evict the entry ourselves, then re-cache the same key.
    cache.blockEvicted(cacheKey, cache.backingMap.remove(cacheKey), true);
    cacheAndWaitUntilFlushedToBucket(cache, cacheKey, new CacheTestUtils.ByteArrayCacheable(
        new byte[10]));
    // Release the blocked evictor; its stale eviction must not remove the re-cached block.
    lock.writeLock().unlock();
    evictThread.join();
    assertEquals(1L, cache.getBlockCount());
    assertTrue(cache.getCurrentSize() > 0L);
    assertTrue("We should have a block!", cache.iterator().hasNext());
  }
248
249  @Test
250  public void testRetrieveFromFile() throws Exception {
251    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
252    Path testDir = TEST_UTIL.getDataTestDir();
253    TEST_UTIL.getTestFileSystem().mkdirs(testDir);
254
255    String ioEngineName = "file:" + testDir + "/bucket.cache";
256    String persistencePath = testDir + "/bucket.persistence";
257
258    BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize,
259        constructedBlockSizes, writeThreads, writerQLen, persistencePath);
260    long usedSize = bucketCache.getAllocator().getUsedSize();
261    assertEquals(0, usedSize);
262
263    HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(constructedBlockSize, 1);
264    // Add blocks
265    for (HFileBlockPair block : blocks) {
266      bucketCache.cacheBlock(block.getBlockName(), block.getBlock());
267    }
268    for (HFileBlockPair block : blocks) {
269      cacheAndWaitUntilFlushedToBucket(bucketCache, block.getBlockName(), block.getBlock());
270    }
271    usedSize = bucketCache.getAllocator().getUsedSize();
272    assertNotEquals(0, usedSize);
273    // persist cache to file
274    bucketCache.shutdown();
275    assertTrue(new File(persistencePath).exists());
276
277    // restore cache from file
278    bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize,
279        constructedBlockSizes, writeThreads, writerQLen, persistencePath);
280    assertFalse(new File(persistencePath).exists());
281    assertEquals(usedSize, bucketCache.getAllocator().getUsedSize());
282    // persist cache to file
283    bucketCache.shutdown();
284    assertTrue(new File(persistencePath).exists());
285
286    // reconfig buckets sizes, the biggest bucket is small than constructedBlockSize (8k or 16k)
287    // so it can't restore cache from file
288    int[] smallBucketSizes = new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024 };
289    bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize,
290        smallBucketSizes, writeThreads, writerQLen, persistencePath);
291    assertFalse(new File(persistencePath).exists());
292    assertEquals(0, bucketCache.getAllocator().getUsedSize());
293    assertEquals(0, bucketCache.backingMap.size());
294
295    TEST_UTIL.cleanupTestDir();
296  }
297
298  @Test
299  public void testBucketAllocatorLargeBuckets() throws BucketAllocatorException {
300    long availableSpace = 20 * 1024L * 1024 * 1024;
301    int[] bucketSizes = new int[]{1024, 1024 * 1024, 1024 * 1024 * 1024};
302    BucketAllocator allocator = new BucketAllocator(availableSpace, bucketSizes);
303    assertTrue(allocator.getBuckets().length > 0);
304  }
305
306  @Test
307  public void testGetPartitionSize() throws IOException {
308    //Test default values
309    validateGetPartitionSize(cache, BucketCache.DEFAULT_SINGLE_FACTOR, BucketCache.DEFAULT_MIN_FACTOR);
310
311    Configuration conf = HBaseConfiguration.create();
312    conf.setFloat(BucketCache.MIN_FACTOR_CONFIG_NAME, 0.5f);
313    conf.setFloat(BucketCache.SINGLE_FACTOR_CONFIG_NAME, 0.1f);
314    conf.setFloat(BucketCache.MULTI_FACTOR_CONFIG_NAME, 0.7f);
315    conf.setFloat(BucketCache.MEMORY_FACTOR_CONFIG_NAME, 0.2f);
316
317    BucketCache cache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize,
318        constructedBlockSizes, writeThreads, writerQLen, persistencePath, 100, conf);
319
320    validateGetPartitionSize(cache, 0.1f, 0.5f);
321    validateGetPartitionSize(cache, 0.7f, 0.5f);
322    validateGetPartitionSize(cache, 0.2f, 0.5f);
323  }
324
325  @Test
326  public void testValidBucketCacheConfigs() throws IOException {
327    Configuration conf = HBaseConfiguration.create();
328    conf.setFloat(BucketCache.ACCEPT_FACTOR_CONFIG_NAME, 0.9f);
329    conf.setFloat(BucketCache.MIN_FACTOR_CONFIG_NAME, 0.5f);
330    conf.setFloat(BucketCache.EXTRA_FREE_FACTOR_CONFIG_NAME, 0.5f);
331    conf.setFloat(BucketCache.SINGLE_FACTOR_CONFIG_NAME, 0.1f);
332    conf.setFloat(BucketCache.MULTI_FACTOR_CONFIG_NAME, 0.7f);
333    conf.setFloat(BucketCache.MEMORY_FACTOR_CONFIG_NAME, 0.2f);
334
335    BucketCache cache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize,
336        constructedBlockSizes, writeThreads, writerQLen, persistencePath, 100, conf);
337
338    assertEquals(BucketCache.ACCEPT_FACTOR_CONFIG_NAME + " failed to propagate.", 0.9f,
339        cache.getAcceptableFactor(), 0);
340    assertEquals(BucketCache.MIN_FACTOR_CONFIG_NAME + " failed to propagate.", 0.5f,
341        cache.getMinFactor(), 0);
342    assertEquals(BucketCache.EXTRA_FREE_FACTOR_CONFIG_NAME + " failed to propagate.", 0.5f,
343        cache.getExtraFreeFactor(), 0);
344    assertEquals(BucketCache.SINGLE_FACTOR_CONFIG_NAME + " failed to propagate.", 0.1f,
345        cache.getSingleFactor(), 0);
346    assertEquals(BucketCache.MULTI_FACTOR_CONFIG_NAME + " failed to propagate.", 0.7f,
347        cache.getMultiFactor(), 0);
348    assertEquals(BucketCache.MEMORY_FACTOR_CONFIG_NAME + " failed to propagate.", 0.2f,
349        cache.getMemoryFactor(), 0);
350  }
351
352  @Test
353  public void testInvalidAcceptFactorConfig() throws IOException {
354    float[] configValues = {-1f, 0.2f, 0.86f, 1.05f};
355    boolean[] expectedOutcomes = {false, false, true, false};
356    Map<String, float[]> configMappings = ImmutableMap.of(BucketCache.ACCEPT_FACTOR_CONFIG_NAME, configValues);
357    Configuration conf = HBaseConfiguration.create();
358    checkConfigValues(conf, configMappings, expectedOutcomes);
359  }
360
361  @Test
362  public void testInvalidMinFactorConfig() throws IOException {
363    float[] configValues = {-1f, 0f, 0.96f, 1.05f};
364    //throws due to <0, in expected range, minFactor > acceptableFactor, > 1.0
365    boolean[] expectedOutcomes = {false, true, false, false};
366    Map<String, float[]> configMappings = ImmutableMap
367      .of(BucketCache.MIN_FACTOR_CONFIG_NAME, configValues);
368    Configuration conf = HBaseConfiguration.create();
369    checkConfigValues(conf, configMappings, expectedOutcomes);
370  }
371
372  @Test
373  public void testInvalidExtraFreeFactorConfig() throws IOException {
374    float[] configValues = {-1f, 0f, 0.2f, 1.05f};
375    //throws due to <0, in expected range, in expected range, config can be > 1.0
376    boolean[] expectedOutcomes = {false, true, true, true};
377    Map<String, float[]> configMappings = ImmutableMap.of(BucketCache.EXTRA_FREE_FACTOR_CONFIG_NAME, configValues);
378    Configuration conf = HBaseConfiguration.create();
379    checkConfigValues(conf, configMappings, expectedOutcomes);
380  }
381
382  @Test
383  public void testInvalidCacheSplitFactorConfig() throws IOException {
384    float[] singleFactorConfigValues = {0.2f, 0f, -0.2f, 1f};
385    float[] multiFactorConfigValues = {0.4f, 0f, 1f, .05f};
386    float[] memoryFactorConfigValues = {0.4f, 0f, 0.2f, .5f};
387    // All configs add up to 1.0 and are between 0 and 1.0, configs don't add to 1.0, configs can't be negative, configs don't add to 1.0
388    boolean[] expectedOutcomes = {true, false, false, false};
389    Map<String, float[]> configMappings = ImmutableMap.of(BucketCache.SINGLE_FACTOR_CONFIG_NAME,
390        singleFactorConfigValues, BucketCache.MULTI_FACTOR_CONFIG_NAME, multiFactorConfigValues,
391        BucketCache.MEMORY_FACTOR_CONFIG_NAME, memoryFactorConfigValues);
392    Configuration conf = HBaseConfiguration.create();
393    checkConfigValues(conf, configMappings, expectedOutcomes);
394  }
395
396  private void checkConfigValues(Configuration conf, Map<String, float[]> configMap, boolean[] expectSuccess) throws IOException {
397    Set<String> configNames = configMap.keySet();
398    for (int i = 0; i < expectSuccess.length; i++) {
399      try {
400        for (String configName : configNames) {
401          conf.setFloat(configName, configMap.get(configName)[i]);
402        }
403        BucketCache cache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize,
404            constructedBlockSizes, writeThreads, writerQLen, persistencePath, 100, conf);
405        assertTrue("Created BucketCache and expected it to succeed: " + expectSuccess[i] + ", but it actually was: " + !expectSuccess[i], expectSuccess[i]);
406      } catch (IllegalArgumentException e) {
407        assertFalse("Created BucketCache and expected it to succeed: " + expectSuccess[i] + ", but it actually was: " + !expectSuccess[i], expectSuccess[i]);
408      }
409    }
410  }
411
412  private void validateGetPartitionSize(BucketCache bucketCache, float partitionFactor, float minFactor) {
413    long expectedOutput = (long) Math.floor(bucketCache.getAllocator().getTotalSize() * partitionFactor * minFactor);
414    assertEquals(expectedOutput, bucketCache.getPartitionSize(partitionFactor));
415  }
416
417  @Test
418  public void testOffsetProducesPositiveOutput() {
419    //This number is picked because it produces negative output if the values isn't ensured to be positive.
420    //See HBASE-18757 for more information.
421    long testValue = 549888460800L;
422    BucketCache.BucketEntry bucketEntry = new BucketCache.BucketEntry(testValue, 10, 10L, true);
423    assertEquals(testValue, bucketEntry.offset());
424  }
425
  /**
   * Caching the same key twice: a block carrying next-block metadata must win over an
   * otherwise-identical block without it, regardless of insertion order.
   */
  @Test
  public void testCacheBlockNextBlockMetadataMissing() throws Exception {
    int size = 100;
    int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
    byte[] byteArr = new byte[length];
    ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
    HFileContext meta = new HFileContextBuilder().build();
    // Two blocks identical except for one constructor argument (52 vs -1) — presumably the
    // next-block-on-disk size; -1 marks it as missing. TODO confirm against HFileBlock.
    HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, buf,
        HFileBlock.FILL_HEADER, -1, 52, -1, meta);
    HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, buf,
        HFileBlock.FILL_HEADER, -1, -1, -1, meta);

    BlockCacheKey key = new BlockCacheKey("key1", 0);
    ByteBuffer actualBuffer = ByteBuffer.allocate(length);
    ByteBuffer block1Buffer = ByteBuffer.allocate(length);
    ByteBuffer block2Buffer = ByteBuffer.allocate(length);
    blockWithNextBlockMetadata.serialize(block1Buffer, true);
    blockWithoutNextBlockMetadata.serialize(block2Buffer, true);

    // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back.
    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer,
      block1Buffer);

    waitUntilFlushedToBucket(cache, key);

    // Add blockWithoutNextBlockMetada, expect blockWithNextBlockMetadata back.
    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer,
      block1Buffer);

    // Clear and add blockWithoutNextBlockMetadata
    cache.evictBlock(key);
    assertNull(cache.getBlock(key, false, false, false));
    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer,
      block2Buffer);

    waitUntilFlushedToBucket(cache, key);

    // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace.
    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer,
      block1Buffer);
  }
467
468  @Test
469  public void testFreeBlockWhenIOEngineWriteFailure() throws IOException {
470    // initialize an block.
471    int size = 100, offset = 20;
472    int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
473    ByteBuffer buf = ByteBuffer.allocate(length);
474    HFileContext meta = new HFileContextBuilder().build();
475    HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf, HFileBlock.FILL_HEADER,
476        offset, 52, -1, meta);
477
478    // initialize an mocked ioengine.
479    IOEngine ioEngine = Mockito.mock(IOEngine.class);
480    Mockito.when(ioEngine.usesSharedMemory()).thenReturn(false);
481    // Mockito.doNothing().when(ioEngine).write(Mockito.any(ByteBuffer.class), Mockito.anyLong());
482    Mockito.doThrow(RuntimeException.class).when(ioEngine).write(Mockito.any(ByteBuffer.class),
483      Mockito.anyLong());
484    Mockito.doThrow(RuntimeException.class).when(ioEngine).write(Mockito.any(ByteBuff.class),
485      Mockito.anyLong());
486
487    // create an bucket allocator.
488    long availableSpace = 1024 * 1024 * 1024L;
489    BucketAllocator allocator = new BucketAllocator(availableSpace, null);
490
491    BlockCacheKey key = new BlockCacheKey("dummy", 1L);
492    RAMQueueEntry re = new RAMQueueEntry(key, block, 1, true);
493
494    Assert.assertEquals(0, allocator.getUsedSize());
495    try {
496      re.writeToCache(ioEngine, allocator, null);
497      Assert.fail();
498    } catch (Exception e) {
499    }
500    Assert.assertEquals(0, allocator.getUsedSize());
501  }
502}