View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.io.hfile.slab;
20  
21  import java.nio.ByteBuffer;
22  import java.util.List;
23  import java.util.concurrent.ConcurrentMap;
24  import java.util.concurrent.atomic.AtomicLong;
25  
26  import org.apache.commons.logging.Log;
27  import org.apache.commons.logging.LogFactory;
28  import org.apache.hadoop.classification.InterfaceAudience;
29  import org.apache.hadoop.conf.Configuration;
30  import org.apache.hadoop.hbase.io.HeapSize;
31  import org.apache.hadoop.hbase.io.hfile.BlockCache;
32  import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
33  import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
34  import org.apache.hadoop.hbase.io.hfile.CacheStats;
35  import org.apache.hadoop.hbase.io.hfile.Cacheable;
36  import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
37  import org.apache.hadoop.hbase.util.Bytes;
38  import org.apache.hadoop.hbase.util.ClassSize;
39  import org.apache.hadoop.util.StringUtils;
40  
41  import com.google.common.cache.CacheBuilder;
42  import com.google.common.cache.RemovalListener;
43  import com.google.common.cache.RemovalNotification;
44  
/**
 * SingleSizeCache is a slab allocated cache that caches elements up to a single
 * size. It uses a slab allocator (Slab.java) to divide a direct bytebuffer,
 * into evenly sized blocks. Any cached data will take up exactly 1 block. An
 * exception will be thrown if the cached data cannot fit into the blockSize of
 * this SingleSizeCache.
 *
 * Eviction and LRUness is taken care of by Guava's CacheBuilder, which backs
 * this cache with a size-bounded concurrent map that evicts the least
 * recently used entry when full.
 *
 **/
56  @InterfaceAudience.Private
57  public class SingleSizeCache implements BlockCache, HeapSize {
58    private final Slab backingStore;
59    private final ConcurrentMap<BlockCacheKey, CacheablePair> backingMap;
60    private final int numBlocks;
61    private final int blockSize;
62    private final CacheStats stats;
63    private final SlabItemActionWatcher actionWatcher;
64    private final AtomicLong size;
65    private final AtomicLong timeSinceLastAccess;
66    public final static long CACHE_FIXED_OVERHEAD = ClassSize
67        .align((2 * Bytes.SIZEOF_INT) + (5 * ClassSize.REFERENCE)
68            + +ClassSize.OBJECT);
69  
70    static final Log LOG = LogFactory.getLog(SingleSizeCache.class);
71  
72    /**
73     * Default constructor. Specify the size of the blocks, number of blocks, and
74     * the SlabCache this cache will be assigned to.
75     *
76     *
77     * @param blockSize the size of each block, in bytes
78     *
79     * @param numBlocks the number of blocks of blockSize this cache will hold.
80     *
81     * @param master the SlabCache this SingleSlabCache is assigned to.
82     */
83    public SingleSizeCache(int blockSize, int numBlocks,
84        SlabItemActionWatcher master) {
85      this.blockSize = blockSize;
86      this.numBlocks = numBlocks;
87      backingStore = new Slab(blockSize, numBlocks);
88      this.stats = new CacheStats();
89      this.actionWatcher = master;
90      this.size = new AtomicLong(CACHE_FIXED_OVERHEAD + backingStore.heapSize());
91      this.timeSinceLastAccess = new AtomicLong();
92  
93      // This evictionListener is called whenever the cache automatically
94      // evicts something.
95      RemovalListener<BlockCacheKey, CacheablePair> listener =
96        new RemovalListener<BlockCacheKey, CacheablePair>() {
97          @Override
98          public void onRemoval(
99              RemovalNotification<BlockCacheKey, CacheablePair> notification) {
100           if (!notification.wasEvicted()) {
101             // Only process removals by eviction, not by replacement or
102             // explicit removal
103             return;
104           }
105           CacheablePair value = notification.getValue();
106           timeSinceLastAccess.set(System.nanoTime()
107               - value.recentlyAccessed.get());
108           stats.evict();
109           doEviction(notification.getKey(), value);
110         }
111       };
112 
113     backingMap = CacheBuilder.newBuilder()
114         .maximumSize(numBlocks - 1)
115         .removalListener(listener)
116         .<BlockCacheKey, CacheablePair>build()
117         .asMap();
118   }
119 
120   @Override
121   public void cacheBlock(BlockCacheKey blockName, Cacheable toBeCached) {
122     ByteBuffer storedBlock;
123 
124     try {
125       storedBlock = backingStore.alloc(toBeCached.getSerializedLength());
126     } catch (InterruptedException e) {
127       LOG.warn("SlabAllocator was interrupted while waiting for block to become available");
128       LOG.warn(e);
129       return;
130     }
131 
132     CacheablePair newEntry = new CacheablePair(toBeCached.getDeserializer(),
133         storedBlock);
134     toBeCached.serialize(storedBlock);
135 
136     synchronized (this) {
137       CacheablePair alreadyCached = backingMap.putIfAbsent(blockName, newEntry);
138 
139       if (alreadyCached != null) {
140         backingStore.free(storedBlock);
141         throw new RuntimeException("already cached " + blockName);
142       }
143       if (actionWatcher != null) {
144         actionWatcher.onInsertion(blockName, this);
145       }
146     }
147     newEntry.recentlyAccessed.set(System.nanoTime());
148     this.size.addAndGet(newEntry.heapSize());
149   }
150 
151   @Override
152   public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat) {
153     CacheablePair contentBlock = backingMap.get(key);
154     if (contentBlock == null) {
155       if (!repeat) stats.miss(caching);
156       return null;
157     }
158 
159     stats.hit(caching);
160     // If lock cannot be obtained, that means we're undergoing eviction.
161     try {
162       contentBlock.recentlyAccessed.set(System.nanoTime());
163       synchronized (contentBlock) {
164         if (contentBlock.serializedData == null) {
165           // concurrently evicted
166           LOG.warn("Concurrent eviction of " + key);
167           return null;
168         }
169         return contentBlock.deserializer
170             .deserialize(contentBlock.serializedData.asReadOnlyBuffer());
171       }
172     } catch (Throwable t) {
173       LOG.error("Deserializer threw an exception. This may indicate a bug.", t);
174       return null;
175     }
176   }
177 
178   /**
179    * Evicts the block
180    *
181    * @param key the key of the entry we are going to evict
182    * @return the evicted ByteBuffer
183    */
184   public boolean evictBlock(BlockCacheKey key) {
185     stats.evict();
186     CacheablePair evictedBlock = backingMap.remove(key);
187 
188     if (evictedBlock != null) {
189       doEviction(key, evictedBlock);
190     }
191     return evictedBlock != null;
192   }
193 
194   private void doEviction(BlockCacheKey key, CacheablePair evictedBlock) {
195     long evictedHeap = 0;
196     synchronized (evictedBlock) {
197       if (evictedBlock.serializedData == null) {
198         // someone else already freed
199         return;
200       }
201       evictedHeap = evictedBlock.heapSize();
202       ByteBuffer bb = evictedBlock.serializedData;
203       evictedBlock.serializedData = null;
204       backingStore.free(bb);
205 
206       // We have to do this callback inside the synchronization here.
207       // Otherwise we can have the following interleaving:
208       // Thread A calls getBlock():
209       // SlabCache directs call to this SingleSizeCache
210       // It gets the CacheablePair object
211       // Thread B runs eviction
212       // doEviction() is called and sets serializedData = null, here.
213       // Thread A sees the null serializedData, and returns null
214       // Thread A calls cacheBlock on the same block, and gets
215       // "already cached" since the block is still in backingStore
216 
217       if (actionWatcher != null) {
218         actionWatcher.onEviction(key, this);
219       }
220     }
221     stats.evicted();
222     size.addAndGet(-1 * evictedHeap);
223   }
224 
225   public void logStats() {
226 
227     long milliseconds = this.timeSinceLastAccess.get() / 1000000;
228 
229     LOG.info("For Slab of size " + this.blockSize + ": "
230         + this.getOccupiedSize() / this.blockSize
231         + " occupied, out of a capacity of " + this.numBlocks
232         + " blocks. HeapSize is "
233         + StringUtils.humanReadableInt(this.heapSize()) + " bytes." + ", "
234         + "churnTime=" + StringUtils.formatTime(milliseconds));
235 
236     LOG.info("Slab Stats: " + "accesses="
237         + stats.getRequestCount()
238         + ", "
239         + "hits="
240         + stats.getHitCount()
241         + ", "
242         + "hitRatio="
243         + (stats.getHitCount() == 0 ? "0" : (StringUtils.formatPercent(
244             stats.getHitRatio(), 2) + "%, "))
245         + "cachingAccesses="
246         + stats.getRequestCachingCount()
247         + ", "
248         + "cachingHits="
249         + stats.getHitCachingCount()
250         + ", "
251         + "cachingHitsRatio="
252         + (stats.getHitCachingCount() == 0 ? "0" : (StringUtils.formatPercent(
253             stats.getHitCachingRatio(), 2) + "%, ")) + "evictions="
254         + stats.getEvictionCount() + ", " + "evicted="
255         + stats.getEvictedCount() + ", " + "evictedPerRun="
256         + stats.evictedPerEviction());
257 
258   }
259 
260   public void shutdown() {
261     backingStore.shutdown();
262   }
263 
264   public long heapSize() {
265     return this.size.get() + backingStore.heapSize();
266   }
267 
268   public long size() {
269     return (long) this.blockSize * (long) this.numBlocks;
270   }
271 
272   public long getFreeSize() {
273     return (long) backingStore.getBlocksRemaining() * (long) blockSize;
274   }
275 
276   public long getOccupiedSize() {
277     return (long) (numBlocks - backingStore.getBlocksRemaining()) * (long) blockSize;
278   }
279 
280   public long getEvictedCount() {
281     return stats.getEvictedCount();
282   }
283 
284   public CacheStats getStats() {
285     return this.stats;
286   }
287 
288   @Override
289   public long getBlockCount() {
290     return numBlocks - backingStore.getBlocksRemaining();
291   }
292 
293   /* Since its offheap, it doesn't matter if its in memory or not */
294   @Override
295   public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
296     this.cacheBlock(cacheKey, buf);
297   }
298 
299   /*
300    * This is never called, as evictions are handled in the SlabCache layer,
301    * implemented in the event we want to use this as a standalone cache.
302    */
303   @Override
304   public int evictBlocksByHfileName(String hfileName) {
305     int evictedCount = 0;
306     for (BlockCacheKey e : backingMap.keySet()) {
307       if (e.getHfileName().equals(hfileName)) {
308         this.evictBlock(e);
309       }
310     }
311     return evictedCount;
312   }
313 
314   @Override
315   public long getCurrentSize() {
316     return 0;
317   }
318 
319   /*
320    * Not implemented. Extremely costly to do this from the off heap cache, you'd
321    * need to copy every object on heap once
322    */
323   @Override
324   public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(
325       Configuration conf) {
326     throw new UnsupportedOperationException();
327   }
328 
329   /* Just a pair class, holds a reference to the parent cacheable */
330   private static class CacheablePair implements HeapSize {
331     final CacheableDeserializer<Cacheable> deserializer;
332     ByteBuffer serializedData;
333     AtomicLong recentlyAccessed;
334 
335     private CacheablePair(CacheableDeserializer<Cacheable> deserializer,
336         ByteBuffer serializedData) {
337       this.recentlyAccessed = new AtomicLong();
338       this.deserializer = deserializer;
339       this.serializedData = serializedData;
340     }
341 
342     /*
343      * Heapsize overhead of this is the default object overhead, the heapsize of
344      * the serialized object, and the cost of a reference to the bytebuffer,
345      * which is already accounted for in SingleSizeCache
346      */
347     @Override
348     public long heapSize() {
349       return ClassSize.align(ClassSize.OBJECT + ClassSize.REFERENCE * 3
350           + ClassSize.ATOMIC_LONG);
351     }
352   }
353 }