001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.io.hfile;
019
020import static org.apache.hadoop.hbase.io.ByteBuffAllocator.HEAP;
021import static org.junit.Assert.assertEquals;
022import static org.junit.Assert.assertNotNull;
023import static org.junit.Assert.assertNull;
024import static org.junit.Assert.assertTrue;
025
026import java.nio.ByteBuffer;
027import java.util.Random;
028import java.util.concurrent.ExecutorService;
029import java.util.concurrent.Executors;
030import java.util.concurrent.TimeUnit;
031import java.util.concurrent.atomic.AtomicBoolean;
032import java.util.concurrent.atomic.AtomicInteger;
033import org.apache.hadoop.conf.Configuration;
034import org.apache.hadoop.hbase.HBaseClassTestRule;
035import org.apache.hadoop.hbase.HBaseConfiguration;
036import org.apache.hadoop.hbase.HConstants;
037import org.apache.hadoop.hbase.Waiter;
038import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
039import org.apache.hadoop.hbase.io.HeapSize;
040import org.apache.hadoop.hbase.io.hfile.LruAdaptiveBlockCache.EvictionThread;
041import org.apache.hadoop.hbase.nio.ByteBuff;
042import org.apache.hadoop.hbase.testclassification.IOTests;
043import org.apache.hadoop.hbase.testclassification.SmallTests;
044import org.apache.hadoop.hbase.util.ClassSize;
045import org.junit.Assert;
046import org.junit.ClassRule;
047import org.junit.Test;
048import org.junit.experimental.categories.Category;
049import org.slf4j.Logger;
050import org.slf4j.LoggerFactory;
051
052/**
053 * Tests the concurrent LruAdaptiveBlockCache.<p>
054 *
055 * Tests will ensure it grows and shrinks in size properly,
056 * evictions run when they're supposed to and do what they should,
057 * and that cached blocks are accessible when expected to be.
058 */
059@Category({IOTests.class, SmallTests.class})
060public class TestLruAdaptiveBlockCache {
061
  // Standard HBase class-level test rule (required for all HBase test classes).
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestLruAdaptiveBlockCache.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestLruAdaptiveBlockCache.class);
067
  /**
   * Hammers the cache from multiple threads while the eviction thread runs,
   * then verifies the cache always drains back to empty. Each run spawns 10
   * writer threads that keep caching blocks (randomly in-memory or not) at
   * least until an eviction is observed in progress, then evict by hfile name.
   */
  @Test
  public void testCacheEvictionThreadSafe() throws Exception {
    long maxSize = 100000;
    int numBlocks = 9;
    int testRuns = 10;
    final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
    assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);

    final Configuration conf = HBaseConfiguration.create();
    final LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize);
    EvictionThread evictionThread = cache.getEvictionThread();
    assertNotNull(evictionThread);
    // Don't start caching until the eviction thread is actually inside run(),
    // otherwise the race below is meaningless.
    while (!evictionThread.isEnteringRun()) {
      Thread.sleep(1000);
    }
    final String hfileName = "hfile";
    int threads = 10;
    final int blocksPerThread = 5 * numBlocks;
    for (int run = 0; run != testRuns; ++run) {
      final AtomicInteger blockCount = new AtomicInteger(0);
      ExecutorService service = Executors.newFixedThreadPool(threads);
      for (int i = 0; i != threads; ++i) {
        service.execute(() -> {
            // Keep inserting past blocksPerThread until an eviction is seen
            // in progress, so the eviction path is exercised concurrently.
            for (int blockIndex = 0; blockIndex < blocksPerThread
              || (!cache.isEvictionInProgress()); ++blockIndex) {
              CachedItem block = new CachedItem(hfileName, (int) blockSize,
                blockCount.getAndIncrement());
              boolean inMemory = Math.random() > 0.5;
              cache.cacheBlock(block.cacheKey, block, inMemory);
            }
            cache.evictBlocksByHfileName(hfileName);
          });
      }
      service.shutdown();
      // The test may fail here if the evict thread frees the blocks too fast
      service.awaitTermination(10, TimeUnit.MINUTES);
      // Background eviction may still be draining; poll until block count is 0.
      Waiter.waitFor(conf, 10000, 100, new ExplainingPredicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return cache.getBlockCount() == 0;
        }

        @Override
        public String explainFailure() throws Exception {
          return "Cache block count failed to return to 0";
        }
      });
      assertEquals(0, cache.getBlockCount());
      assertEquals(cache.getOverhead(), cache.getCurrentSize());
    }
  }
119
120  @Test
121  public void testBackgroundEvictionThread() throws Exception {
122    long maxSize = 100000;
123    int numBlocks = 9;
124    long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
125    assertTrue("calculateBlockSize appears broken.",
126      blockSize * numBlocks <= maxSize);
127
128    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize,blockSize);
129    EvictionThread evictionThread = cache.getEvictionThread();
130    assertNotNull(evictionThread);
131
132    CachedItem[] blocks = generateFixedBlocks(numBlocks + 1, blockSize, "block");
133
134    // Make sure eviction thread has entered run method
135    while (!evictionThread.isEnteringRun()) {
136      Thread.sleep(1);
137    }
138
139    // Add all the blocks
140    for (CachedItem block : blocks) {
141      cache.cacheBlock(block.cacheKey, block);
142    }
143
144    // wait until at least one eviction has run
145    int n = 0;
146    while(cache.getStats().getEvictionCount() == 0) {
147      Thread.sleep(200);
148      assertTrue("Eviction never happened.", n++ < 20);
149    }
150
151    // let cache stabilize
152    // On some systems, the cache will run multiple evictions before it attains
153    // steady-state. For instance, after populating the cache with 10 blocks,
154    // the first eviction evicts a single block and then a second eviction
155    // evicts another. I think this is due to the delta between minSize and
156    // acceptableSize, combined with variance between object overhead on
157    // different environments.
158    n = 0;
159    for (long prevCnt = 0 /* < number of blocks added */,
160         curCnt = cache.getBlockCount();
161         prevCnt != curCnt; prevCnt = curCnt, curCnt = cache.getBlockCount()) {
162      Thread.sleep(200);
163      assertTrue("Cache never stabilized.", n++ < 20);
164    }
165
166    long evictionCount = cache.getStats().getEvictionCount();
167    assertTrue(evictionCount >= 1);
168    System.out.println("Background Evictions run: " + evictionCount);
169  }
170
171  @Test
172  public void testCacheSimple() throws Exception {
173
174    long maxSize = 1000000;
175    long blockSize = calculateBlockSizeDefault(maxSize, 101);
176
177    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize);
178
179    CachedItem [] blocks = generateRandomBlocks(100, blockSize);
180
181    long expectedCacheSize = cache.heapSize();
182
183    // Confirm empty
184    for (CachedItem block : blocks) {
185      assertTrue(cache.getBlock(block.cacheKey, true, false,
186        true) == null);
187    }
188
189    // Add blocks
190    for (CachedItem block : blocks) {
191      cache.cacheBlock(block.cacheKey, block);
192      expectedCacheSize += block.cacheBlockHeapSize();
193    }
194
195    // Verify correctly calculated cache heap size
196    assertEquals(expectedCacheSize, cache.heapSize());
197
198    // Check if all blocks are properly cached and retrieved
199    for (CachedItem block : blocks) {
200      HeapSize buf = cache.getBlock(block.cacheKey, true, false,
201        true);
202      assertTrue(buf != null);
203      assertEquals(buf.heapSize(), block.heapSize());
204    }
205
206    // Re-add same blocks and ensure nothing has changed
207    long expectedBlockCount = cache.getBlockCount();
208    for (CachedItem block : blocks) {
209      cache.cacheBlock(block.cacheKey, block);
210    }
211    assertEquals(
212      "Cache should ignore cache requests for blocks already in cache",
213      expectedBlockCount, cache.getBlockCount());
214
215    // Verify correctly calculated cache heap size
216    assertEquals(expectedCacheSize, cache.heapSize());
217
218    // Check if all blocks are properly cached and retrieved
219    for (CachedItem block : blocks) {
220      HeapSize buf = cache.getBlock(block.cacheKey, true, false,
221        true);
222      assertTrue(buf != null);
223      assertEquals(buf.heapSize(), block.heapSize());
224    }
225
226    // Expect no evictions
227    assertEquals(0, cache.getStats().getEvictionCount());
228    Thread t = new LruAdaptiveBlockCache.StatisticsThread(cache);
229    t.start();
230    t.join();
231  }
232
233  @Test
234  public void testCacheEvictionSimple() throws Exception {
235
236    long maxSize = 100000;
237    long blockSize = calculateBlockSizeDefault(maxSize, 10);
238
239    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize,blockSize,false);
240
241    CachedItem [] blocks = generateFixedBlocks(10, blockSize, "block");
242
243    long expectedCacheSize = cache.heapSize();
244
245    // Add all the blocks
246    for (CachedItem block : blocks) {
247      cache.cacheBlock(block.cacheKey, block);
248      expectedCacheSize += block.cacheBlockHeapSize();
249    }
250
251    // A single eviction run should have occurred
252    assertEquals(1, cache.getStats().getEvictionCount());
253
254    // Our expected size overruns acceptable limit
255    assertTrue(expectedCacheSize >
256      (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
257
258    // But the cache did not grow beyond max
259    assertTrue(cache.heapSize() < maxSize);
260
261    // And is still below the acceptable limit
262    assertTrue(cache.heapSize() <
263      (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
264
265    // All blocks except block 0  should be in the cache
266    assertTrue(cache.getBlock(blocks[0].cacheKey, true, false,
267      true) == null);
268    for(int i=1;i<blocks.length;i++) {
269      assertEquals(cache.getBlock(blocks[i].cacheKey, true, false,
270        true),
271        blocks[i]);
272    }
273  }
274
275  @Test
276  public void testCacheEvictionTwoPriorities() throws Exception {
277
278    long maxSize = 100000;
279    long blockSize = calculateBlockSizeDefault(maxSize, 10);
280
281    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize,blockSize,false);
282
283    CachedItem [] singleBlocks = generateFixedBlocks(5, 10000, "single");
284    CachedItem [] multiBlocks = generateFixedBlocks(5, 10000, "multi");
285
286    long expectedCacheSize = cache.heapSize();
287
288    // Add and get the multi blocks
289    for (CachedItem block : multiBlocks) {
290      cache.cacheBlock(block.cacheKey, block);
291      expectedCacheSize += block.cacheBlockHeapSize();
292      assertEquals(cache.getBlock(block.cacheKey, true, false, true),
293        block);
294    }
295
296    // Add the single blocks (no get)
297    for (CachedItem block : singleBlocks) {
298      cache.cacheBlock(block.cacheKey, block);
299      expectedCacheSize += block.heapSize();
300    }
301
302    // A single eviction run should have occurred
303    assertEquals(1, cache.getStats().getEvictionCount());
304
305    // We expect two entries evicted
306    assertEquals(2, cache.getStats().getEvictedCount());
307
308    // Our expected size overruns acceptable limit
309    assertTrue(expectedCacheSize >
310      (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
311
312    // But the cache did not grow beyond max
313    assertTrue(cache.heapSize() <= maxSize);
314
315    // And is now below the acceptable limit
316    assertTrue(cache.heapSize() <=
317      (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
318
319    // We expect fairness across the two priorities.
320    // This test makes multi go barely over its limit, in-memory
321    // empty, and the rest in single.  Two single evictions and
322    // one multi eviction expected.
323    assertTrue(cache.getBlock(singleBlocks[0].cacheKey, true, false,
324      true) == null);
325    assertTrue(cache.getBlock(multiBlocks[0].cacheKey, true, false,
326      true) == null);
327
328    // And all others to be cached
329    for(int i=1;i<4;i++) {
330      assertEquals(cache.getBlock(singleBlocks[i].cacheKey, true, false,
331        true),
332        singleBlocks[i]);
333      assertEquals(cache.getBlock(multiBlocks[i].cacheKey, true, false,
334        true),
335        multiBlocks[i]);
336    }
337  }
338
339  @Test
340  public void testCacheEvictionThreePriorities() throws Exception {
341
342    long maxSize = 100000;
343    long blockSize = calculateBlockSize(maxSize, 10);
344
345    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false,
346      (int)Math.ceil(1.2*maxSize/blockSize),
347      LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR,
348      LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL,
349      0.98f, // min
350      0.99f, // acceptable
351      0.33f, // single
352      0.33f, // multi
353      0.34f, // memory
354      1.2f,  // limit
355      false,
356      16 * 1024 * 1024,
357      10,
358      500,
359      0.01f);
360
361    CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single");
362    CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi");
363    CachedItem [] memoryBlocks = generateFixedBlocks(5, blockSize, "memory");
364
365    long expectedCacheSize = cache.heapSize();
366
367    // Add 3 blocks from each priority
368    for(int i=0;i<3;i++) {
369
370      // Just add single blocks
371      cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
372      expectedCacheSize += singleBlocks[i].cacheBlockHeapSize();
373
374      // Add and get multi blocks
375      cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]);
376      expectedCacheSize += multiBlocks[i].cacheBlockHeapSize();
377      cache.getBlock(multiBlocks[i].cacheKey, true, false, true);
378
379      // Add memory blocks as such
380      cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true);
381      expectedCacheSize += memoryBlocks[i].cacheBlockHeapSize();
382
383    }
384
385    // Do not expect any evictions yet
386    assertEquals(0, cache.getStats().getEvictionCount());
387
388    // Verify cache size
389    assertEquals(expectedCacheSize, cache.heapSize());
390
391    // Insert a single block, oldest single should be evicted
392    cache.cacheBlock(singleBlocks[3].cacheKey, singleBlocks[3]);
393
394    // Single eviction, one thing evicted
395    assertEquals(1, cache.getStats().getEvictionCount());
396    assertEquals(1, cache.getStats().getEvictedCount());
397
398    // Verify oldest single block is the one evicted
399    assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false,
400      true));
401
402    // Change the oldest remaining single block to a multi
403    cache.getBlock(singleBlocks[1].cacheKey, true, false, true);
404
405    // Insert another single block
406    cache.cacheBlock(singleBlocks[4].cacheKey, singleBlocks[4]);
407
408    // Two evictions, two evicted.
409    assertEquals(2, cache.getStats().getEvictionCount());
410    assertEquals(2, cache.getStats().getEvictedCount());
411
412    // Oldest multi block should be evicted now
413    assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false,
414      true));
415
416    // Insert another memory block
417    cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true);
418
419    // Three evictions, three evicted.
420    assertEquals(3, cache.getStats().getEvictionCount());
421    assertEquals(3, cache.getStats().getEvictedCount());
422
423    // Oldest memory block should be evicted now
424    assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false,
425      true));
426
427    // Add a block that is twice as big (should force two evictions)
428    CachedItem [] bigBlocks = generateFixedBlocks(3, blockSize*3, "big");
429    cache.cacheBlock(bigBlocks[0].cacheKey, bigBlocks[0]);
430
431    // Four evictions, six evicted (inserted block 3X size, expect +3 evicted)
432    assertEquals(4, cache.getStats().getEvictionCount());
433    assertEquals(6, cache.getStats().getEvictedCount());
434
435    // Expect three remaining singles to be evicted
436    assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false,
437      true));
438    assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true, false,
439      true));
440    assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true, false,
441      true));
442
443    // Make the big block a multi block
444    cache.getBlock(bigBlocks[0].cacheKey, true, false, true);
445
446    // Cache another single big block
447    cache.cacheBlock(bigBlocks[1].cacheKey, bigBlocks[1]);
448
449    // Five evictions, nine evicted (3 new)
450    assertEquals(5, cache.getStats().getEvictionCount());
451    assertEquals(9, cache.getStats().getEvictedCount());
452
453    // Expect three remaining multis to be evicted
454    assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false,
455      true));
456    assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false,
457      true));
458    assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false,
459      true));
460
461    // Cache a big memory block
462    cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true);
463
464    // Six evictions, twelve evicted (3 new)
465    assertEquals(6, cache.getStats().getEvictionCount());
466    assertEquals(12, cache.getStats().getEvictedCount());
467
468    // Expect three remaining in-memory to be evicted
469    assertEquals(null, cache.getBlock(memoryBlocks[1].cacheKey, true, false,
470      true));
471    assertEquals(null, cache.getBlock(memoryBlocks[2].cacheKey, true, false,
472      true));
473    assertEquals(null, cache.getBlock(memoryBlocks[3].cacheKey, true, false,
474      true));
475  }
476
  /**
   * Verifies eviction behavior with forceInMemory=true: in-memory blocks are
   * protected, so capacity pressure evicts single- and multi-priority blocks
   * first, and a newly cached single block is evicted immediately once the
   * cache holds only in-memory blocks.
   */
  @Test
  public void testCacheEvictionInMemoryForceMode() throws Exception {
    long maxSize = 100000;
    long blockSize = calculateBlockSize(maxSize, 10);

    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false,
      (int)Math.ceil(1.2*maxSize/blockSize),
      LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR,
      LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL,
      0.98f, // min
      0.99f, // acceptable
      0.2f, // single
      0.3f, // multi
      0.5f, // memory
      1.2f, // limit
      true,
      16 * 1024 * 1024,
      10,
      500,
      0.01f);

    CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single");
    CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi");
    CachedItem [] memoryBlocks = generateFixedBlocks(10, blockSize, "memory");

    long expectedCacheSize = cache.heapSize();

    // 0. Add 5 single blocks and 4 multi blocks to make cache full, si:mu:me = 5:4:0
    for(int i = 0; i < 4; i++) {
      // Just add single blocks
      cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
      expectedCacheSize += singleBlocks[i].cacheBlockHeapSize();
      // Add and get multi blocks
      cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]);
      expectedCacheSize += multiBlocks[i].cacheBlockHeapSize();
      cache.getBlock(multiBlocks[i].cacheKey, true, false, true);
    }
    // 5th single block
    cache.cacheBlock(singleBlocks[4].cacheKey, singleBlocks[4]);
    expectedCacheSize += singleBlocks[4].cacheBlockHeapSize();
    // Do not expect any evictions yet
    assertEquals(0, cache.getStats().getEvictionCount());
    // Verify cache size
    assertEquals(expectedCacheSize, cache.heapSize());

    // 1. Insert a memory block, oldest single should be evicted, si:mu:me = 4:4:1
    cache.cacheBlock(memoryBlocks[0].cacheKey, memoryBlocks[0], true);
    // Single eviction, one block evicted
    assertEquals(1, cache.getStats().getEvictionCount());
    assertEquals(1, cache.getStats().getEvictedCount());
    // Verify oldest single block (index = 0) is the one evicted
    assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false,
      true));

    // 2. Insert another memory block, another single evicted, si:mu:me = 3:4:2
    cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true);
    // Two evictions, two evicted.
    assertEquals(2, cache.getStats().getEvictionCount());
    assertEquals(2, cache.getStats().getEvictedCount());
    // Current oldest single block (index = 1) should be evicted now
    assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false,
      true));

    // 3. Insert 4 memory blocks, 2 single and 2 multi evicted, si:mu:me = 1:2:6
    cache.cacheBlock(memoryBlocks[2].cacheKey, memoryBlocks[2], true);
    cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true);
    cache.cacheBlock(memoryBlocks[4].cacheKey, memoryBlocks[4], true);
    cache.cacheBlock(memoryBlocks[5].cacheKey, memoryBlocks[5], true);
    // Four more evictions, four more evicted (6 total so far).
    assertEquals(6, cache.getStats().getEvictionCount());
    assertEquals(6, cache.getStats().getEvictedCount());
    // two oldest single blocks and two oldest multi blocks evicted
    assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false,
      true));
    assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true, false,
      true));
    assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false,
      true));
    assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false,
      true));

    // 4. Insert 3 memory blocks, the remaining 1 single and 2 multi evicted
    // si:mu:me = 0:0:9
    cache.cacheBlock(memoryBlocks[6].cacheKey, memoryBlocks[6], true);
    cache.cacheBlock(memoryBlocks[7].cacheKey, memoryBlocks[7], true);
    cache.cacheBlock(memoryBlocks[8].cacheKey, memoryBlocks[8], true);
    // Three more evictions, three more evicted (9 total so far).
    assertEquals(9, cache.getStats().getEvictionCount());
    assertEquals(9, cache.getStats().getEvictedCount());
    // one oldest single block and two oldest multi blocks evicted
    assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true, false,
      true));
    assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false,
      true));
    assertEquals(null, cache.getBlock(multiBlocks[3].cacheKey, true, false,
      true));

    // 5. Insert one memory block, the oldest memory evicted
    // si:mu:me = 0:0:9
    cache.cacheBlock(memoryBlocks[9].cacheKey, memoryBlocks[9], true);
    // one eviction, one evicted.
    assertEquals(10, cache.getStats().getEvictionCount());
    assertEquals(10, cache.getStats().getEvictedCount());
    // oldest memory block evicted
    assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false,
      true));

    // 6. Insert one new single block, itself evicted immediately since
    //    all blocks in cache are memory-type which have higher priority
    // si:mu:me = 0:0:9 (no change)
    cache.cacheBlock(singleBlocks[9].cacheKey, singleBlocks[9]);
    // one eviction, one evicted.
    assertEquals(11, cache.getStats().getEvictionCount());
    assertEquals(11, cache.getStats().getEvictedCount());
    // the single block just cached now evicted (can't evict memory)
    assertEquals(null, cache.getBlock(singleBlocks[9].cacheKey, true, false,
      true));
  }
595
596  // test scan resistance
597  @Test
598  public void testScanResistance() throws Exception {
599
600    long maxSize = 100000;
601    long blockSize = calculateBlockSize(maxSize, 10);
602
603    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false,
604      (int)Math.ceil(1.2*maxSize/blockSize),
605      LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR,
606      LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL,
607      0.66f, // min
608      0.99f, // acceptable
609      0.33f, // single
610      0.33f, // multi
611      0.34f, // memory
612      1.2f,  // limit
613      false,
614      16 * 1024 * 1024,
615      10,
616      500,
617      0.01f);
618
619    CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single");
620    CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi");
621
622    // Add 5 multi blocks
623    for (CachedItem block : multiBlocks) {
624      cache.cacheBlock(block.cacheKey, block);
625      cache.getBlock(block.cacheKey, true, false, true);
626    }
627
628    // Add 5 single blocks
629    for(int i=0;i<5;i++) {
630      cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
631    }
632
633    // An eviction ran
634    assertEquals(1, cache.getStats().getEvictionCount());
635
636    // To drop down to 2/3 capacity, we'll need to evict 4 blocks
637    assertEquals(4, cache.getStats().getEvictedCount());
638
639    // Should have been taken off equally from single and multi
640    assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false,
641      true));
642    assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false,
643      true));
644    assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false,
645      true));
646    assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false,
647      true));
648
649    // Let's keep "scanning" by adding single blocks.  From here on we only
650    // expect evictions from the single bucket.
651
652    // Every time we reach 10 total blocks (every 4 inserts) we get 4 single
653    // blocks evicted.  Inserting 13 blocks should yield 3 more evictions and
654    // 12 more evicted.
655
656    for(int i=5;i<18;i++) {
657      cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
658    }
659
660    // 4 total evictions, 16 total evicted
661    assertEquals(4, cache.getStats().getEvictionCount());
662    assertEquals(16, cache.getStats().getEvictedCount());
663
664    // Should now have 7 total blocks
665    assertEquals(7, cache.getBlockCount());
666
667  }
668
669  @Test
670  public void testMaxBlockSize() throws Exception {
671    long maxSize = 100000;
672    long blockSize = calculateBlockSize(maxSize, 10);
673
674    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false,
675      (int)Math.ceil(1.2*maxSize/blockSize),
676      LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR,
677      LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL,
678      0.66f, // min
679      0.99f, // acceptable
680      0.33f, // single
681      0.33f, // multi
682      0.34f, // memory
683      1.2f,  // limit
684      false,
685      1024,
686      10,
687      500,
688      0.01f);
689
690    CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long");
691    CachedItem [] small = generateFixedBlocks(15, 600, "small");
692
693
694    for (CachedItem i:tooLong) {
695      cache.cacheBlock(i.cacheKey, i);
696    }
697    for (CachedItem i:small) {
698      cache.cacheBlock(i.cacheKey, i);
699    }
700    assertEquals(15,cache.getBlockCount());
701    for (CachedItem i:small) {
702      assertNotNull(cache.getBlock(i.cacheKey, true, false, false));
703    }
704    for (CachedItem i:tooLong) {
705      assertNull(cache.getBlock(i.cacheKey, true, false, false));
706    }
707
708    assertEquals(10, cache.getStats().getFailedInserts());
709  }
710
711  // test setMaxSize
712  @Test
713  public void testResizeBlockCache() throws Exception {
714
715    long maxSize = 300000;
716    long blockSize = calculateBlockSize(maxSize, 31);
717
718    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false,
719      (int)Math.ceil(1.2*maxSize/blockSize),
720      LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR,
721      LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL,
722      0.98f, // min
723      0.99f, // acceptable
724      0.33f, // single
725      0.33f, // multi
726      0.34f, // memory
727      1.2f,  // limit
728      false,
729      16 * 1024 * 1024,
730      10,
731      500,
732      0.01f);
733
734    CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single");
735    CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi");
736    CachedItem [] memoryBlocks = generateFixedBlocks(10, blockSize, "memory");
737
738    // Add all blocks from all priorities
739    for(int i=0;i<10;i++) {
740
741      // Just add single blocks
742      cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
743
744      // Add and get multi blocks
745      cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]);
746      cache.getBlock(multiBlocks[i].cacheKey, true, false, true);
747
748      // Add memory blocks as such
749      cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true);
750    }
751
752    // Do not expect any evictions yet
753    assertEquals(0, cache.getStats().getEvictionCount());
754
755    // Resize to half capacity plus an extra block (otherwise we evict an extra)
756    cache.setMaxSize((long)(maxSize * 0.5f));
757
758    // Should have run a single eviction
759    assertEquals(1, cache.getStats().getEvictionCount());
760
761    // And we expect 1/2 of the blocks to be evicted
762    assertEquals(15, cache.getStats().getEvictedCount());
763
764    // And the oldest 5 blocks from each category should be gone
765    for(int i=0;i<5;i++) {
766      assertEquals(null, cache.getBlock(singleBlocks[i].cacheKey, true,
767        false, true));
768      assertEquals(null, cache.getBlock(multiBlocks[i].cacheKey, true,
769        false, true));
770      assertEquals(null, cache.getBlock(memoryBlocks[i].cacheKey, true,
771        false, true));
772    }
773
774    // And the newest 5 blocks should still be accessible
775    for(int i=5;i<10;i++) {
776      assertEquals(singleBlocks[i], cache.getBlock(singleBlocks[i].cacheKey, true,
777        false, true));
778      assertEquals(multiBlocks[i], cache.getBlock(multiBlocks[i].cacheKey, true,
779        false, true));
780      assertEquals(memoryBlocks[i], cache.getBlock(memoryBlocks[i].cacheKey, true,
781        false, true));
782    }
783  }
784
  /**
   * Verifies the CacheStats "past N periods" rolling metrics with a 3-period
   * window: each rollMetricsPeriod() call closes the current period, and the
   * reported hit / caching-hit ratios cover only the most recent 3 periods.
   * NOTE(review): from the scenario comments below, the first argument of
   * hit()/miss() appears to be the "caching" flag — confirm against CacheStats.
   */
  @Test
  public void testPastNPeriodsMetrics() throws Exception {
    // Tolerance for floating-point ratio comparisons.
    double delta = 0.01;

    // 3 total periods in the rolling window
    CacheStats stats = new CacheStats("test", 3);

    // No accesses yet: both ratios must report 0, not NaN
    stats.rollMetricsPeriod();
    assertEquals(0.0, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 1, 1 hit caching, 1 hit non-caching, 2 miss non-caching
    // should be (2/4)=0.5 and (1/1)=1
    stats.hit(false, true, BlockType.DATA);
    stats.hit(true, true, BlockType.DATA);
    stats.miss(false, false, BlockType.DATA);
    stats.miss(false, false, BlockType.DATA);
    stats.rollMetricsPeriod();
    assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(1.0, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 2, 1 miss caching, 3 miss non-caching
    // cumulative over periods 1-2: (2/8)=0.25 and (1/2)=0.5
    stats.miss(true, false, BlockType.DATA);
    stats.miss(false, false, BlockType.DATA);
    stats.miss(false, false, BlockType.DATA);
    stats.miss(false, false, BlockType.DATA);
    stats.rollMetricsPeriod();
    assertEquals(0.25, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.5, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 3, 2 hits of each type
    // cumulative over periods 1-3: (6/12)=0.5 and (3/4)=0.75
    stats.hit(false, true, BlockType.DATA);
    stats.hit(true, true, BlockType.DATA);
    stats.hit(false, true, BlockType.DATA);
    stats.hit(true, true, BlockType.DATA);
    stats.rollMetricsPeriod();
    assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.75, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 4, two caching misses; period 1 falls out of the 3-period window
    // periods 2-4: (4/10)=0.4 and (2/5)=0.4
    stats.miss(true, false, BlockType.DATA);
    stats.miss(true, false, BlockType.DATA);
    stats.rollMetricsPeriod();
    assertEquals(0.4, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.4, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 5, 2 caching misses, 2 non-caching hits; period 2 evicted
    // periods 3-5: (6/10)=0.6 and (2/6)=1/3
    stats.miss(true, false, BlockType.DATA);
    stats.miss(true, false, BlockType.DATA);
    stats.hit(false, true, BlockType.DATA);
    stats.hit(false, true, BlockType.DATA);
    stats.rollMetricsPeriod();
    assertEquals(0.6, stats.getHitRatioPastNPeriods(), delta);
    assertEquals((double)1/3, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 6 is empty; period 3 evicted
    // periods 4-6: (2/6)=1/3 and (0/4)=0
    stats.rollMetricsPeriod();
    assertEquals((double)1/3, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 7 is empty; period 4 evicted
    // periods 5-7: (2/4)=0.5 and (0/2)=0
    stats.rollMetricsPeriod();
    assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 8 is empty; period 5 evicted, so the window is now all-empty
    // should be 0 and 0 (again, no NaN from 0/0)
    stats.rollMetricsPeriod();
    assertEquals(0.0, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta);

    // period 9, one of each kind of access
    // should be (2/4)=0.5 and (1/2)=0.5
    stats.miss(true, false, BlockType.DATA);
    stats.miss(false, false, BlockType.DATA);
    stats.hit(true, true, BlockType.DATA);
    stats.hit(false, true, BlockType.DATA);
    stats.rollMetricsPeriod();
    assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta);
    assertEquals(0.5, stats.getHitCachingRatioPastNPeriods(), delta);
  }
874
875  @Test
876  public void testCacheBlockNextBlockMetadataMissing() {
877    long maxSize = 100000;
878    long blockSize = calculateBlockSize(maxSize, 10);
879    int size = 100;
880    int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
881    byte[] byteArr = new byte[length];
882    ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
883    HFileContext meta = new HFileContextBuilder().build();
884    HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size,
885      -1, ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, 52,
886      -1, meta, HEAP);
887    HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size,
888      -1, ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, -1,
889      -1, meta, HEAP);
890
891    LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false,
892      (int)Math.ceil(1.2*maxSize/blockSize),
893      LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR,
894      LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL,
895      0.66f, // min
896      0.99f, // acceptable
897      0.33f, // single
898      0.33f, // multi
899      0.34f, // memory
900      1.2f,  // limit
901      false,
902      1024,
903      10,
904      500,
905      0.01f);
906
907    BlockCacheKey key = new BlockCacheKey("key1", 0);
908    ByteBuffer actualBuffer = ByteBuffer.allocate(length);
909    ByteBuffer block1Buffer = ByteBuffer.allocate(length);
910    ByteBuffer block2Buffer = ByteBuffer.allocate(length);
911    blockWithNextBlockMetadata.serialize(block1Buffer, true);
912    blockWithoutNextBlockMetadata.serialize(block2Buffer, true);
913
914    //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back.
915    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer,
916      block1Buffer);
917
918    //Add blockWithoutNextBlockMetada, expect blockWithNextBlockMetadata back.
919    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer,
920      block1Buffer);
921
922    //Clear and add blockWithoutNextBlockMetadata
923    cache.clearCache();
924    assertNull(cache.getBlock(key, false, false, false));
925    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer,
926      block2Buffer);
927
928    //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace.
929    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer,
930      block1Buffer);
931  }
932
933  private CachedItem [] generateFixedBlocks(int numBlocks, int size, String pfx) {
934    CachedItem [] blocks = new CachedItem[numBlocks];
935    for(int i=0;i<numBlocks;i++) {
936      blocks[i] = new CachedItem(pfx + i, size);
937    }
938    return blocks;
939  }
940
941  private CachedItem [] generateFixedBlocks(int numBlocks, long size, String pfx) {
942    return generateFixedBlocks(numBlocks, (int)size, pfx);
943  }
944
945  private CachedItem [] generateRandomBlocks(int numBlocks, long maxSize) {
946    CachedItem [] blocks = new CachedItem[numBlocks];
947    Random r = new Random();
948    for(int i=0;i<numBlocks;i++) {
949      blocks[i] = new CachedItem("block" + i, r.nextInt((int)maxSize)+1);
950    }
951    return blocks;
952  }
953
954  private long calculateBlockSize(long maxSize, int numBlocks) {
955    long roughBlockSize = maxSize / numBlocks;
956    int numEntries = (int)Math.ceil((1.2)*maxSize/roughBlockSize);
957    long totalOverhead = LruAdaptiveBlockCache.CACHE_FIXED_OVERHEAD +
958      ClassSize.CONCURRENT_HASHMAP +
959      (numEntries * ClassSize.CONCURRENT_HASHMAP_ENTRY) +
960      (LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
961    long negateBlockSize = (long)(totalOverhead/numEntries);
962    negateBlockSize += LruCachedBlock.PER_BLOCK_OVERHEAD;
963    return ClassSize.align((long)Math.floor((roughBlockSize - negateBlockSize)*0.99f));
964  }
965
966  private long calculateBlockSizeDefault(long maxSize, int numBlocks) {
967    long roughBlockSize = maxSize / numBlocks;
968    int numEntries = (int)Math.ceil((1.2)*maxSize/roughBlockSize);
969    long totalOverhead = LruAdaptiveBlockCache.CACHE_FIXED_OVERHEAD +
970      ClassSize.CONCURRENT_HASHMAP +
971      (numEntries * ClassSize.CONCURRENT_HASHMAP_ENTRY) +
972      (LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
973    long negateBlockSize = totalOverhead / numEntries;
974    negateBlockSize += LruCachedBlock.PER_BLOCK_OVERHEAD;
975    return ClassSize.align((long)Math.floor((roughBlockSize - negateBlockSize)*
976      LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
977  }
978
979  private static class CachedItem implements Cacheable {
980    BlockCacheKey cacheKey;
981    int size;
982
983    CachedItem(String blockName, int size, int offset) {
984      this.cacheKey = new BlockCacheKey(blockName, offset);
985      this.size = size;
986    }
987
988    CachedItem(String blockName, int size) {
989      this.cacheKey = new BlockCacheKey(blockName, 0);
990      this.size = size;
991    }
992
993    /** The size of this item reported to the block cache layer */
994    @Override
995    public long heapSize() {
996      return ClassSize.align(size);
997    }
998
999    /** Size of the cache block holding this item. Used for verification. */
1000    public long cacheBlockHeapSize() {
1001      return LruCachedBlock.PER_BLOCK_OVERHEAD
1002        + ClassSize.align(cacheKey.heapSize())
1003        + ClassSize.align(size);
1004    }
1005
1006    @Override
1007    public int getSerializedLength() {
1008      return 0;
1009    }
1010
1011    @Override
1012    public CacheableDeserializer<Cacheable> getDeserializer() {
1013      return null;
1014    }
1015
1016    @Override
1017    public void serialize(ByteBuffer destination, boolean includeNextBlockMetadata) {
1018    }
1019
1020    @Override
1021    public BlockType getBlockType() {
1022      return BlockType.DATA;
1023    }
1024  }
1025
1026  static void testMultiThreadGetAndEvictBlockInternal(BlockCache cache) throws Exception {
1027    int size = 100;
1028    int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
1029    byte[] byteArr = new byte[length];
1030    HFileContext meta = new HFileContextBuilder().build();
1031    BlockCacheKey key = new BlockCacheKey("key1", 0);
1032    HFileBlock blk = new HFileBlock(BlockType.DATA, size, size, -1,
1033      ByteBuff.wrap(ByteBuffer.wrap(byteArr, 0, size)), HFileBlock.FILL_HEADER, -1,
1034      52, -1, meta,
1035      HEAP);
1036    AtomicBoolean err1 = new AtomicBoolean(false);
1037    Thread t1 = new Thread(() -> {
1038      for (int i = 0; i < 10000 && !err1.get(); i++) {
1039        try {
1040          cache.getBlock(key, false, false, true);
1041        } catch (Exception e) {
1042          err1.set(true);
1043          LOG.info("Cache block or get block failure: ", e);
1044        }
1045      }
1046    });
1047
1048    AtomicBoolean err2 = new AtomicBoolean(false);
1049    Thread t2 = new Thread(() -> {
1050      for (int i = 0; i < 10000 && !err2.get(); i++) {
1051        try {
1052          cache.evictBlock(key);
1053        } catch (Exception e) {
1054          err2.set(true);
1055          LOG.info("Evict block failure: ", e);
1056        }
1057      }
1058    });
1059
1060    AtomicBoolean err3 = new AtomicBoolean(false);
1061    Thread t3 = new Thread(() -> {
1062      for (int i = 0; i < 10000 && !err3.get(); i++) {
1063        try {
1064          cache.cacheBlock(key, blk);
1065        } catch (Exception e) {
1066          err3.set(true);
1067          LOG.info("Cache block failure: ", e);
1068        }
1069      }
1070    });
1071    t1.start();
1072    t2.start();
1073    t3.start();
1074    t1.join();
1075    t2.join();
1076    t3.join();
1077    Assert.assertFalse(err1.get());
1078    Assert.assertFalse(err2.get());
1079    Assert.assertFalse(err3.get());
1080  }
1081
1082  @Test
1083  public void testMultiThreadGetAndEvictBlock() throws Exception {
1084    long maxSize = 100000;
1085    long blockSize = calculateBlockSize(maxSize, 10);
1086    LruAdaptiveBlockCache cache =
1087      new LruAdaptiveBlockCache(maxSize, blockSize, false,
1088        (int) Math.ceil(1.2 * maxSize / blockSize),
1089        LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL,
1090        0.66f, // min
1091        0.99f, // acceptable
1092        0.33f, // single
1093        0.33f, // multi
1094        0.34f, // memory
1095        1.2f, // limit
1096        false, 1024,
1097        10,
1098        500,
1099        0.01f);
1100    testMultiThreadGetAndEvictBlockInternal(cache);
1101  }
1102
1103  public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws Exception {
1104    long maxSize = 100000000;
1105    int numBlocks = 100000;
1106    final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
1107    assertTrue("calculateBlockSize appears broken.",
1108      blockSize * numBlocks <= maxSize);
1109
1110    final LruAdaptiveBlockCache cache =
1111      new LruAdaptiveBlockCache(maxSize, blockSize, true,
1112        (int) Math.ceil(1.2 * maxSize / blockSize),
1113        LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL,
1114        0.5f, // min
1115        0.99f, // acceptable
1116        0.33f, // single
1117        0.33f, // multi
1118        0.34f, // memory
1119        1.2f, // limit
1120        false,
1121        maxSize,
1122        heavyEvictionCountLimit,
1123        200,
1124        0.01f);
1125
1126    EvictionThread evictionThread = cache.getEvictionThread();
1127    assertNotNull(evictionThread);
1128    while (!evictionThread.isEnteringRun()) {
1129      Thread.sleep(1);
1130    }
1131
1132    final String hfileName = "hfile";
1133    for (int blockIndex = 0; blockIndex <= numBlocks * 3000; ++blockIndex) {
1134      CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex);
1135      cache.cacheBlock(block.cacheKey, block, false);
1136      if (cache.getCacheDataBlockPercent() < 70) {
1137        // enough for test
1138        break;
1139      }
1140    }
1141
1142    evictionThread.evict();
1143    Thread.sleep(100);
1144
1145    if (heavyEvictionCountLimit == 0) {
1146      // Check if all offset (last two digits) of cached blocks less than the percent.
1147      // It means some of blocks haven't put into BlockCache
1148      assertTrue(cache.getCacheDataBlockPercent() < 90);
1149      for (BlockCacheKey key : cache.getMapForTests().keySet()) {
1150        assertTrue(!(key.getOffset() % 100 > 90));
1151      }
1152    } else {
1153      // Check that auto-scaling is not working (all blocks in BlockCache)
1154      assertTrue(cache.getCacheDataBlockPercent() == 100);
1155      int counter = 0;
1156      for (BlockCacheKey key : cache.getMapForTests().keySet()) {
1157        if (key.getOffset() % 100 > 90) {
1158          counter++;
1159        }
1160      }
1161      assertTrue(counter > 1000);
1162    }
1163    evictionThread.shutdown();
1164  }
1165
1166  @Test
1167  public void testSkipCacheDataBlocks() throws Exception {
1168    // Check that auto-scaling will work right after start
1169    testSkipCacheDataBlocksInteral(0);
1170    // Check that auto-scaling will not work right after start
1171    // (have to finished before auto-scaling)
1172    testSkipCacheDataBlocksInteral(100);
1173  }
1174}