View Javadoc

1   /**
2    * Copyright 2009 The Apache Software Foundation
3    *
4    * Licensed to the Apache Software Foundation (ASF) under one
5    * or more contributor license agreements.  See the NOTICE file
6    * distributed with this work for additional information
7    * regarding copyright ownership.  The ASF licenses this file
8    * to you under the Apache License, Version 2.0 (the
9    * "License"); you may not use this file except in compliance
10   * with the License.  You may obtain a copy of the License at
11   *
12   *     http://www.apache.org/licenses/LICENSE-2.0
13   *
14   * Unless required by applicable law or agreed to in writing, software
15   * distributed under the License is distributed on an "AS IS" BASIS,
16   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17   * See the License for the specific language governing permissions and
18   * limitations under the License.
19   */
20  package org.apache.hadoop.hbase.io.hfile;
21  
22  import java.io.IOException;
23  import java.lang.ref.WeakReference;
24  import java.nio.ByteBuffer;
25  import java.util.ArrayList;
26  import java.util.Collections;
27  import java.util.EnumMap;
28  import java.util.HashMap;
29  import java.util.List;
30  import java.util.Map;
31  import java.util.PriorityQueue;
32  import java.util.SortedSet;
33  import java.util.TreeSet;
34  import java.util.concurrent.ConcurrentHashMap;
35  import java.util.concurrent.Executors;
36  import java.util.concurrent.ScheduledExecutorService;
37  import java.util.concurrent.TimeUnit;
38  import java.util.concurrent.atomic.AtomicLong;
39  import java.util.concurrent.locks.ReentrantLock;
40  
41  import org.apache.commons.logging.Log;
42  import org.apache.commons.logging.LogFactory;
43  import org.apache.hadoop.conf.Configuration;
44  import org.apache.hadoop.fs.FileSystem;
45  import org.apache.hadoop.fs.Path;
46  import org.apache.hadoop.hbase.io.HeapSize;
47  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
48  import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
49  import org.apache.hadoop.hbase.util.Bytes;
50  import org.apache.hadoop.hbase.util.ClassSize;
51  import org.apache.hadoop.hbase.util.FSUtils;
52  import org.apache.hadoop.hbase.util.HasThread;
53  import org.apache.hadoop.hbase.util.Threads;
54  import org.apache.hadoop.util.StringUtils;
55  
56  import com.google.common.util.concurrent.ThreadFactoryBuilder;
57  
58  /**
59   * A block cache implementation that is memory-aware using {@link HeapSize},
60   * memory-bound using an LRU eviction algorithm, and concurrent: backed by a
61   * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving
62   * constant-time {@link #cacheBlock} and {@link #getBlock} operations.<p>
63   *
64   * Contains three levels of block priority to allow for
65   * scan-resistance and in-memory families.  A block is added with an inMemory
66   * flag if necessary, otherwise a block becomes a single access priority.  Once
67   * a blocked is accessed again, it changes to multiple access.  This is used
68   * to prevent scans from thrashing the cache, adding a least-frequently-used
69   * element to the eviction algorithm.<p>
70   *
71   * Each priority is given its own chunk of the total cache to ensure
72   * fairness during eviction.  Each priority will retain close to its maximum
73   * size, however, if any priority is not using its entire chunk the others
74   * are able to grow beyond their chunk size.<p>
75   *
76   * Instantiated at a minimum with the total size and average block size.
77   * All sizes are in bytes.  The block size is not especially important as this
78   * cache is fully dynamic in its sizing of blocks.  It is only used for
79   * pre-allocating data structures and in initial heap estimation of the map.<p>
80   *
81   * The detailed constructor defines the sizes for the three priorities (they
82   * should total to the maximum size defined).  It also sets the levels that
83   * trigger and control the eviction thread.<p>
84   *
85   * The acceptable size is the cache size level which triggers the eviction
86   * process to start.  It evicts enough blocks to get the size below the
87   * minimum size specified.<p>
88   *
89   * Eviction happens in a separate thread and involves a single full-scan
90   * of the map.  It determines how many bytes must be freed to reach the minimum
91   * size, and then while scanning determines the fewest least-recently-used
92   * blocks necessary from each of the three priorities (would be 3 times bytes
93   * to free).  It then uses the priority chunk sizes to evict fairly according
94   * to the relative sizes and usage.
95   */
public class LruBlockCache implements BlockCache, HeapSize {

  static final Log LOG = LogFactory.getLog(LruBlockCache.class);

  /** Config keys overriding the eviction watermarks below */
  static final String LRU_MIN_FACTOR_CONFIG_NAME = "hbase.lru.blockcache.min.factor";
  static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = "hbase.lru.blockcache.acceptable.factor";

  /** Default Configuration Parameters*/

  /** Backing Concurrent Map Configuration */
  static final float DEFAULT_LOAD_FACTOR = 0.75f;
  static final int DEFAULT_CONCURRENCY_LEVEL = 16;

  /** Eviction thresholds: evict down to min once size crosses acceptable */
  static final float DEFAULT_MIN_FACTOR = 0.75f;
  static final float DEFAULT_ACCEPTABLE_FACTOR = 0.85f;

  /** Priority buckets (must sum to 1.0) */
  static final float DEFAULT_SINGLE_FACTOR = 0.25f;
  static final float DEFAULT_MULTI_FACTOR = 0.50f;
  static final float DEFAULT_MEMORY_FACTOR = 0.25f;

  /** Statistics thread period, in seconds (logs stats every 5 minutes) */
  static final int statThreadPeriod = 60 * 5;

  /** Concurrent map (the cache) */
  private final ConcurrentHashMap<BlockCacheKey,CachedBlock> map;

  /** Eviction lock (locked when eviction in process); fair to avoid starvation */
  private final ReentrantLock evictionLock = new ReentrantLock(true);

  /** Volatile boolean to track if we are in an eviction process or not */
  private volatile boolean evictionInProgress = false;

  /** Eviction thread; null when evictions run inline (test mode) */
  private final EvictionThread evictionThread;

  /** Statistics thread schedule pool (for heavy debugging, could remove) */
  private final ScheduledExecutorService scheduleThreadPool =
    Executors.newScheduledThreadPool(1,
      new ThreadFactoryBuilder()
        .setNameFormat("LRU Statistics #%d")
        .setDaemon(true)
        .build());

  /** Current size of cache, in bytes (includes fixed overhead) */
  private final AtomicLong size;

  /** Current number of cached elements */
  private final AtomicLong elements;

  /** Cache access count (sequential ID); establishes LRU ordering */
  private final AtomicLong count;

  /** Cache statistics */
  private final CacheStats stats;

  /** Maximum allowable size of cache (block put if size > max, evict) */
  private long maxSize;

  /** Approximate block size */
  private long blockSize;

  /** Acceptable size of cache (no evictions if size < acceptable) */
  private float acceptableFactor;

  /** Minimum threshold of cache (when evicting, evict until size < min) */
  private float minFactor;

  /** Single access bucket size */
  private float singleFactor;

  /** Multiple access bucket size */
  private float multiFactor;

  /** In-memory bucket size */
  private float memoryFactor;

  /** Overhead of the structure itself */
  private long overhead;
  /**
   * Default constructor.  Specify maximum size and expected average block
   * size (approximation is fine).
   *
   * <p>All other factors will be calculated based on defaults specified in
   * this class.
   * @param maxSize maximum size of cache, in bytes
   * @param blockSize approximate size of each block, in bytes
   * @param conf configuration
   */
  public LruBlockCache(long maxSize, long blockSize, Configuration conf) {
    this(maxSize, blockSize, true, conf);
  }

  /**
   * Constructor used for testing.  Allows disabling of the eviction thread.
   * The backing map is pre-sized for ~1.2x the expected block count so it
   * rarely needs to resize; min/acceptable factors may be overridden via
   * configuration.
   */
  public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) {
    this(maxSize, blockSize, evictionThread,
        (int)Math.ceil(1.2*maxSize/blockSize),
        DEFAULT_LOAD_FACTOR,
        DEFAULT_CONCURRENCY_LEVEL,
        conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR),
        conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR),
        DEFAULT_SINGLE_FACTOR,
        DEFAULT_MULTI_FACTOR,
        DEFAULT_MEMORY_FACTOR);
  }
205 
206 
207   /**
208    * Configurable constructor.  Use this constructor if not using defaults.
209    * @param maxSize maximum size of this cache, in bytes
210    * @param blockSize expected average size of blocks, in bytes
211    * @param evictionThread whether to run evictions in a bg thread or not
212    * @param mapInitialSize initial size of backing ConcurrentHashMap
213    * @param mapLoadFactor initial load factor of backing ConcurrentHashMap
214    * @param mapConcurrencyLevel initial concurrency factor for backing CHM
215    * @param minFactor percentage of total size that eviction will evict until
216    * @param acceptableFactor percentage of total size that triggers eviction
217    * @param singleFactor percentage of total size for single-access blocks
218    * @param multiFactor percentage of total size for multiple-access blocks
219    * @param memoryFactor percentage of total size for in-memory blocks
220    */
221   public LruBlockCache(long maxSize, long blockSize, boolean evictionThread,
222       int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel,
223       float minFactor, float acceptableFactor,
224       float singleFactor, float multiFactor, float memoryFactor) {
225     if(singleFactor + multiFactor + memoryFactor != 1) {
226       throw new IllegalArgumentException("Single, multi, and memory factors " +
227           " should total 1.0");
228     }
229     if(minFactor >= acceptableFactor) {
230       throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor");
231     }
232     if(minFactor >= 1.0f || acceptableFactor >= 1.0f) {
233       throw new IllegalArgumentException("all factors must be < 1");
234     }
235     this.maxSize = maxSize;
236     this.blockSize = blockSize;
237     map = new ConcurrentHashMap<BlockCacheKey,CachedBlock>(mapInitialSize,
238         mapLoadFactor, mapConcurrencyLevel);
239     this.minFactor = minFactor;
240     this.acceptableFactor = acceptableFactor;
241     this.singleFactor = singleFactor;
242     this.multiFactor = multiFactor;
243     this.memoryFactor = memoryFactor;
244     this.stats = new CacheStats();
245     this.count = new AtomicLong(0);
246     this.elements = new AtomicLong(0);
247     this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel);
248     this.size = new AtomicLong(this.overhead);
249     if(evictionThread) {
250       this.evictionThread = new EvictionThread(this);
251       this.evictionThread.start(); // FindBugs SC_START_IN_CTOR
252     } else {
253       this.evictionThread = null;
254     }
255     this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this),
256         statThreadPeriod, statThreadPeriod, TimeUnit.SECONDS);
257   }
258 
  /**
   * Resets the maximum cache size and, if the current contents now exceed
   * the acceptable watermark, triggers an eviction pass (unless one is
   * already in progress).
   * @param maxSize new maximum cache size, in bytes
   */
  public void setMaxSize(long maxSize) {
    this.maxSize = maxSize;
    if(this.size.get() > acceptableSize() && !evictionInProgress) {
      runEviction();
    }
  }
265 
  // BlockCache implementation

  /**
   * Cache the block with the specified name and buffer.
   * <p>
   * It is assumed this will NOT be called on an already cached block. In rare cases (HBASE-8547)
   * this can happen, for which we compare the buffer contents.
   * @param cacheKey block's cache key
   * @param buf block buffer
   * @param inMemory if block is in-memory
   * @throws RuntimeException if a block is re-cached with different contents
   */
  @Override
  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
    CachedBlock cb = map.get(cacheKey);
    if(cb != null) {
      // compare the contents, if they are not equal, we are in big trouble
      if (compare(buf, cb.getBuffer()) != 0) {
        throw new RuntimeException("Cached block contents differ, which should not have happened."
          + "cacheKey:" + cacheKey);
      }
      // Same contents: keep the existing entry and just warn.
      String msg = "Cached an already cached block: " + cacheKey + " cb:" + cb.getCacheKey();
      msg += ". This is harmless and can happen in rare cases (see HBASE-8547)";
      LOG.warn(msg);
      return;
    }
    // Stamp the block with the next access sequence number; this sets its
    // LRU ordering and initial priority (in-memory vs single-access).
    cb = new CachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory);
    // Size metrics are updated before the put so newSize reflects this block.
    long newSize = updateSizeMetrics(cb, false);
    map.put(cacheKey, cb);
    elements.incrementAndGet();
    if(newSize > acceptableSize() && !evictionInProgress) {
      runEviction();
    }
  }
299 
  /**
   * Compares the serialized contents of two cacheable blocks byte-by-byte.
   * Used to verify that a re-cached block really is the same block.
   * @return 0 if the serialized forms are identical, non-zero otherwise
   */
  private int compare(Cacheable left, Cacheable right) {
    ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength());
    left.serialize(l);
    ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength());
    right.serialize(r);
    // limit() equals the allocated capacity here, i.e. the serialized length.
    return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(),
      r.array(), r.arrayOffset(), r.limit());
  }
308 
  /**
   * Cache the block with the specified name and buffer, as a
   * non-in-memory block.
   * <p>
   * Delegates to {@link #cacheBlock(BlockCacheKey, Cacheable, boolean)};
   * see that method for how a block that is already cached is handled
   * (contents are compared and a warning is logged).
   * @param cacheKey block's cache key
   * @param buf block buffer
   */
  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
    cacheBlock(cacheKey, buf, false);
  }
322 
  /**
   * Helper function that updates the local size counter and also updates any
   * per-cf or per-blocktype metrics it can discern from given
   * {@link CachedBlock}
   *
   * @param cb the block being cached or evicted
   * @param evict true if the block is being evicted (its size is subtracted),
   *          false if it is being cached (its size is added)
   * @return the new total cache size, in bytes
   */
  protected long updateSizeMetrics(CachedBlock cb, boolean evict) {
    long heapsize = cb.heapSize();
    if (evict) {
      // Evictions subtract the block's heap size from the running total.
      heapsize *= -1;
    }
    Cacheable cachedBlock = cb.getBuffer();
    SchemaMetrics schemaMetrics = cachedBlock.getSchemaMetrics();
    if (schemaMetrics != null) {
      schemaMetrics.updateOnCachePutOrEvict(
          cachedBlock.getBlockType().getCategory(), heapsize, evict);
    }
    return size.addAndGet(heapsize);
  }
344 
  /**
   * Get the buffer of the block with the specified name.
   * @param cacheKey block's cache key
   * @param caching true if the caller caches blocks on cache misses
   * @param repeat Whether this is a repeat lookup for the same block
   *        (used to avoid double counting cache misses when doing double-check locking)
   *        {@see HFileReaderV2#readBlock(long, long, boolean, boolean, boolean, BlockType)}
   * @return buffer of specified cache key, or null if not in cache
   */
  @Override
  public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat) {
    CachedBlock cb = map.get(cacheKey);
    if(cb == null) {
      if (!repeat) stats.miss(caching);
      return null;
    }
    stats.hit(caching);
    // Touch the block: bump its access sequence number, which also promotes
    // a single-access block to multiple-access priority (per class javadoc).
    cb.access(count.incrementAndGet());
    return cb.getBuffer();
  }
365 
366 
  /**
   * Evicts the block for the given key, if present.
   * <p>
   * NOTE(review): the get-then-evict is not atomic; two concurrent calls for
   * the same key could both reach evictBlock(cb) and double-update the size
   * metrics — confirm callers tolerate this.
   * @param cacheKey key of the block to evict
   * @return true if a block was found and evicted, false otherwise
   */
  @Override
  public boolean evictBlock(BlockCacheKey cacheKey) {
    CachedBlock cb = map.get(cacheKey);
    if (cb == null) return false;
    evictBlock(cb);
    return true;
  }
374 
375   /**
376    * Evicts all blocks for a specific HFile. This is an
377    * expensive operation implemented as a linear-time search through all blocks
378    * in the cache. Ideally this should be a search in a log-access-time map.
379    *
380    * <p>
381    * This is used for evict-on-close to remove all blocks of a specific HFile.
382    *
383    * @return the number of blocks evicted
384    */
385   @Override
386   public int evictBlocksByHfileName(String hfileName) {
387     int numEvicted = 0;
388     for (BlockCacheKey key : map.keySet()) {
389       if (key.getHfileName().equals(hfileName)) {
390         if (evictBlock(key))
391           ++numEvicted;
392       }
393     }
394     return numEvicted;
395   }
396 
  /**
   * Removes the given block from the map and updates the size and element
   * counters and the eviction statistics.
   * <p>
   * NOTE(review): metrics are updated unconditionally, even if the map no
   * longer contained the block at remove time.
   * @param block the block to evict
   * @return the heap size of the evicted block, in bytes
   */
  protected long evictBlock(CachedBlock block) {
    map.remove(block.getCacheKey());
    updateSizeMetrics(block, true);
    elements.decrementAndGet();
    stats.evicted();
    return block.heapSize();
  }
404 
405   /**
406    * Multi-threaded call to run the eviction process.
407    */
408   private void runEviction() {
409     if(evictionThread == null) {
410       evict();
411     } else {
412       evictionThread.evict();
413     }
414   }
415 
  /**
   * Eviction method.  Runs at most one pass at a time: a caller that fails
   * the tryLock returns immediately, since another pass is already underway.
   * Frees enough bytes to bring the cache down to minSize(), splitting the
   * work across the three priority buckets in proportion to how far each
   * overflows its configured share.
   */
  void evict() {

    // Ensure only one eviction at a time
    if(!evictionLock.tryLock()) return;

    try {
      evictionInProgress = true;
      long currentSize = this.size.get();
      // Evict down to the min watermark (not just below acceptable) so a
      // fresh eviction is not re-triggered immediately after this one ends.
      long bytesToFree = currentSize - minSize();

      if (LOG.isDebugEnabled()) {
        LOG.debug("Block cache LRU eviction started; Attempting to free " +
          StringUtils.byteDesc(bytesToFree) + " of total=" +
          StringUtils.byteDesc(currentSize));
      }

      if(bytesToFree <= 0) return;

      // Instantiate priority buckets
      BlockBucket bucketSingle = new BlockBucket(bytesToFree, blockSize,
          singleSize());
      BlockBucket bucketMulti = new BlockBucket(bytesToFree, blockSize,
          multiSize());
      BlockBucket bucketMemory = new BlockBucket(bytesToFree, blockSize,
          memorySize());

      // Scan entire map putting into appropriate buckets.  The CHM view is
      // weakly consistent, so blocks cached/evicted concurrently may or may
      // not be seen; acceptable for an approximate LRU.
      for(CachedBlock cachedBlock : map.values()) {
        switch(cachedBlock.getPriority()) {
          case SINGLE: {
            bucketSingle.add(cachedBlock);
            break;
          }
          case MULTI: {
            bucketMulti.add(cachedBlock);
            break;
          }
          case MEMORY: {
            bucketMemory.add(cachedBlock);
            break;
          }
        }
      }

      // Poll buckets least-overflowed first (BlockBucket orders by overflow)
      // so under-quota buckets give up little and the remainder is spread
      // evenly over the buckets still to be processed.
      PriorityQueue<BlockBucket> bucketQueue =
        new PriorityQueue<BlockBucket>(3);

      bucketQueue.add(bucketSingle);
      bucketQueue.add(bucketMulti);
      bucketQueue.add(bucketMemory);

      int remainingBuckets = 3;
      long bytesFreed = 0;

      BlockBucket bucket;
      while((bucket = bucketQueue.poll()) != null) {
        long overflow = bucket.overflow();
        if(overflow > 0) {
          // Free the smaller of this bucket's overflow and an equal share of
          // what still remains to be freed.
          long bucketBytesToFree = Math.min(overflow,
            (bytesToFree - bytesFreed) / remainingBuckets);
          bytesFreed += bucket.free(bucketBytesToFree);
        }
        remainingBuckets--;
      }

      if (LOG.isDebugEnabled()) {
        long single = bucketSingle.totalSize();
        long multi = bucketMulti.totalSize();
        long memory = bucketMemory.totalSize();
        LOG.debug("Block cache LRU eviction completed; " +
          "freed=" + StringUtils.byteDesc(bytesFreed) + ", " +
          "total=" + StringUtils.byteDesc(this.size.get()) + ", " +
          "single=" + StringUtils.byteDesc(single) + ", " +
          "multi=" + StringUtils.byteDesc(multi) + ", " +
          "memory=" + StringUtils.byteDesc(memory));
      }
    } finally {
      stats.evict();
      evictionInProgress = false;
      evictionLock.unlock();
    }
  }
501 
502   /**
503    * Used to group blocks into priority buckets.  There will be a BlockBucket
504    * for each priority (single, multi, memory).  Once bucketed, the eviction
505    * algorithm takes the appropriate number of elements out of each according
506    * to configuration parameters and their relatives sizes.
507    */
508   private class BlockBucket implements Comparable<BlockBucket> {
509 
510     private CachedBlockQueue queue;
511     private long totalSize = 0;
512     private long bucketSize;
513 
514     public BlockBucket(long bytesToFree, long blockSize, long bucketSize) {
515       this.bucketSize = bucketSize;
516       queue = new CachedBlockQueue(bytesToFree, blockSize);
517       totalSize = 0;
518     }
519 
520     public void add(CachedBlock block) {
521       totalSize += block.heapSize();
522       queue.add(block);
523     }
524 
525     public long free(long toFree) {
526       CachedBlock cb;
527       long freedBytes = 0;
528       while ((cb = queue.pollLast()) != null) {
529         freedBytes += evictBlock(cb);
530         if (freedBytes >= toFree) {
531           return freedBytes;
532         }
533       }
534       return freedBytes;
535     }
536 
537     public long overflow() {
538       return totalSize - bucketSize;
539     }
540 
541     public long totalSize() {
542       return totalSize;
543     }
544 
545     public int compareTo(BlockBucket that) {
546       if(this.overflow() == that.overflow()) return 0;
547       return this.overflow() > that.overflow() ? 1 : -1;
548     }
549   }
550 
  /**
   * Get the maximum size of this cache.
   * @return max size in bytes
   */
  public long getMaxSize() {
    return this.maxSize;
  }

  /**
   * Get the current size of this cache.
   * @return current size in bytes
   */
  public long getCurrentSize() {
    return this.size.get();
  }

  /**
   * Get the remaining headroom of this cache (max size minus current size).
   * @return free size in bytes
   */
  public long getFreeSize() {
    return getMaxSize() - getCurrentSize();
  }

  /**
   * Get the size of this cache (number of cached blocks)
   * @return number of cached blocks
   */
  public long size() {
    return this.elements.get();
  }

  /** @return number of blocks currently in the cache */
  @Override
  public long getBlockCount() {
    return this.elements.get();
  }

  /**
   * Get the number of eviction runs that have occurred
   */
  public long getEvictionCount() {
    return this.stats.getEvictionCount();
  }

  /**
   * Get the number of blocks that have been evicted during the lifetime
   * of this cache.
   */
  public long getEvictedCount() {
    return this.stats.getEvictedCount();
  }

  /** @return the background eviction thread, or null if evictions run inline */
  EvictionThread getEvictionThread() {
    return this.evictionThread;
  }
606 
607   /*
608    * Eviction thread.  Sits in waiting state until an eviction is triggered
609    * when the cache size grows above the acceptable level.<p>
610    *
611    * Thread is triggered into action by {@link LruBlockCache#runEviction()}
612    */
613   static class EvictionThread extends HasThread {
614     private WeakReference<LruBlockCache> cache;
615     private boolean go = true;
616     // flag set after enter the run method, used for test
617     private boolean enteringRun = false;
618 
619     public EvictionThread(LruBlockCache cache) {
620       super(Thread.currentThread().getName() + ".LruBlockCache.EvictionThread");
621       setDaemon(true);
622       this.cache = new WeakReference<LruBlockCache>(cache);
623     }
624 
625     @Override
626     public void run() {
627       enteringRun = true;
628       while (this.go) {
629         synchronized(this) {
630           try {
631             this.wait();
632           } catch(InterruptedException e) {}
633         }
634         LruBlockCache cache = this.cache.get();
635         if(cache == null) break;
636         cache.evict();
637       }
638     }
639 
640     public void evict() {
641       synchronized(this) {
642         this.notify(); // FindBugs NN_NAKED_NOTIFY
643       }
644     }
645 
646     void shutdown() {
647       this.go = false;
648       interrupt();
649     }
650 
651     /**
652      * Used for the test.
653      */
654     boolean isEnteringRun() {
655       return this.enteringRun;
656     }
657   }
658 
  /*
   * Statistics thread.  Periodically prints the cache statistics to the log.
   * Although it extends Thread, it is submitted as a Runnable to the
   * scheduleThreadPool (see the constructor's scheduleAtFixedRate call);
   * its own thread is never started directly here.
   */
  static class StatisticsThread extends Thread {
    LruBlockCache lru;

    public StatisticsThread(LruBlockCache lru) {
      super("LruBlockCache.StatisticsThread");
      setDaemon(true);
      this.lru = lru;
    }
    @Override
    public void run() {
      lru.logStats();
    }
  }
675 
676   public void logStats() {
677     if (!LOG.isDebugEnabled()) return;
678     // Log size
679     long totalSize = heapSize();
680     long freeSize = maxSize - totalSize;
681     LruBlockCache.LOG.debug("Stats: " +
682         "total=" + StringUtils.byteDesc(totalSize) + ", " +
683         "free=" + StringUtils.byteDesc(freeSize) + ", " +
684         "max=" + StringUtils.byteDesc(this.maxSize) + ", " +
685         "blocks=" + size() +", " +
686         "accesses=" + stats.getRequestCount() + ", " +
687         "hits=" + stats.getHitCount() + ", " +
688         "hitRatio=" +
689           (stats.getHitCount() == 0 ? "0" : (StringUtils.formatPercent(stats.getHitRatio(), 2)+ ", ")) + ", " +
690         "cachingAccesses=" + stats.getRequestCachingCount() + ", " +
691         "cachingHits=" + stats.getHitCachingCount() + ", " +
692         "cachingHitsRatio=" +
693           (stats.getHitCachingCount() == 0 ? "0" : (StringUtils.formatPercent(stats.getHitCachingRatio(), 2)+ ", ")) + ", " +
694         "evictions=" + stats.getEvictionCount() + ", " +
695         "evicted=" + stats.getEvictedCount() + ", " +
696         "evictedPerRun=" + stats.evictedPerEviction());
697   }
698 
  /**
   * Get counter statistics for this cache.
   *
   * <p>Includes: total accesses, hits, misses, evicted blocks, and runs
   * of the eviction processes.
   */
  public CacheStats getStats() {
    return this.stats;
  }

  /**
   * Fixed heap overhead of this class itself: 3 longs (maxSize, blockSize,
   * overhead), 8 references, 5 floats (the factor fields) and 1 boolean,
   * plus object header, aligned.
   */
  public final static long CACHE_FIXED_OVERHEAD = ClassSize.align(
      (3 * Bytes.SIZEOF_LONG) + (8 * ClassSize.REFERENCE) +
      (5 * Bytes.SIZEOF_FLOAT) + Bytes.SIZEOF_BOOLEAN
      + ClassSize.OBJECT);

  // HeapSize implementation
  public long heapSize() {
    return getCurrentSize();
  }

  /**
   * Estimates the heap overhead of the cache structure itself: the fixed
   * fields plus the ConcurrentHashMap with its pre-sized entries (~1.2x the
   * expected block count) and segments.
   * @param maxSize maximum cache size, in bytes
   * @param blockSize expected average block size, in bytes
   * @param concurrency concurrency level of the backing map
   * @return estimated overhead in bytes
   */
  public static long calculateOverhead(long maxSize, long blockSize, int concurrency){
    // FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG
    return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP +
        ((long)Math.ceil(maxSize*1.2/blockSize)
            * ClassSize.CONCURRENT_HASHMAP_ENTRY) +
        (concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
  }
726 
  /**
   * Builds a per-column-family summary (block count and heap size) of the
   * current cache contents by mapping each cached block's HFile name back
   * to its store file path.  Blocks whose file no longer appears in the
   * store-file map are skipped.
   * @param conf used to locate the filesystem and the HBase root directory
   * @return sorted list of per-CF summaries
   * @throws IOException if the store file paths cannot be listed
   */
  @Override
  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(Configuration conf) throws IOException {

    Map<String, Path> sfMap = FSUtils.getTableStoreFilePathMap(
        FileSystem.get(conf),
        FSUtils.getRootDir(conf));

    // quirky, but it's a compound key and this is a shortcut taken instead of
    // creating a class that would represent only a key.
    Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs =
      new HashMap<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary>();

    for (CachedBlock cb : map.values()) {
      String sf = cb.getCacheKey().getHfileName();
      Path path = sfMap.get(sf);
      if ( path != null) {
        BlockCacheColumnFamilySummary lookup =
          BlockCacheColumnFamilySummary.createFromStoreFilePath(path);
        BlockCacheColumnFamilySummary bcse = bcs.get(lookup);
        if (bcse == null) {
          // First block seen for this CF: create the accumulator entry.
          bcse = BlockCacheColumnFamilySummary.create(lookup);
          bcs.put(lookup,bcse);
        }
        bcse.incrementBlocks();
        bcse.incrementHeapSize(cb.heapSize());
      }
    }
    List<BlockCacheColumnFamilySummary> list =
        new ArrayList<BlockCacheColumnFamilySummary>(bcs.values());
    Collections.sort( list );
    return list;
  }
759 
  // Simple calculators of sizes given factors and maxSize

  /** Cache size above which an eviction run is triggered. */
  private long acceptableSize() {
    return (long)Math.floor(this.maxSize * this.acceptableFactor);
  }
  /** Target size an eviction run frees down to. */
  private long minSize() {
    return (long)Math.floor(this.maxSize * this.minFactor);
  }
  /** Share of the post-eviction size reserved for single-access blocks. */
  private long singleSize() {
    return (long)Math.floor(this.maxSize * this.singleFactor * this.minFactor);
  }
  /** Share of the post-eviction size reserved for multi-access blocks. */
  private long multiSize() {
    return (long)Math.floor(this.maxSize * this.multiFactor * this.minFactor);
  }
  /** Share of the post-eviction size reserved for in-memory blocks. */
  private long memorySize() {
    return (long)Math.floor(this.maxSize * this.memoryFactor * this.minFactor);
  }
777 
778   public void shutdown() {
779     this.scheduleThreadPool.shutdown();
780     for (int i = 0; i < 10; i++) {
781       if (!this.scheduleThreadPool.isShutdown()) Threads.sleep(10);
782     }
783     if (!this.scheduleThreadPool.isShutdown()) {
784       List<Runnable> runnables = this.scheduleThreadPool.shutdownNow();
785       LOG.debug("Still running " + runnables);
786     }
787     this.evictionThread.shutdown();
788   }
789 
  /** Clears the cache. Used in tests.  Note: only empties the map; the size
   * and element counters are not reset here. */
  public void clearCache() {
    map.clear();
  }

  /**
   * Used in testing. May be very inefficient.
   * @return the set of cached file names
   */
  SortedSet<String> getCachedFileNamesForTest() {
    SortedSet<String> fileNames = new TreeSet<String>();
    for (BlockCacheKey cacheKey : map.keySet()) {
      fileNames.add(cacheKey.getHfileName());
    }
    return fileNames;
  }

  /**
   * Used in testing.  Counts cached blocks per block type.  The cast assumes
   * every cached buffer is an {@link HFileBlock}.
   */
  Map<BlockType, Integer> getBlockTypeCountsForTest() {
    Map<BlockType, Integer> counts =
        new EnumMap<BlockType, Integer>(BlockType.class);
    for (CachedBlock cb : map.values()) {
      BlockType blockType = ((HFileBlock) cb.getBuffer()).getBlockType();
      Integer count = counts.get(blockType);
      counts.put(blockType, (count == null ? 0 : count) + 1);
    }
    return counts;
  }

  /**
   * Used in testing.  Counts cached blocks per data block encoding.
   */
  public Map<DataBlockEncoding, Integer> getEncodingCountsForTest() {
    Map<DataBlockEncoding, Integer> counts =
        new EnumMap<DataBlockEncoding, Integer>(DataBlockEncoding.class);
    for (BlockCacheKey cacheKey : map.keySet()) {
      DataBlockEncoding encoding = cacheKey.getDataBlockEncoding();
      Integer count = counts.get(encoding);
      counts.put(encoding, (count == null ? 0 : count) + 1);
    }
    return counts;
  }
828 
829 }