/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_PERSISTENT_PATH_KEY;
import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ForkJoinPool;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

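/**
 * Factory that creates the {@link BlockCache} instances (first level, external, and bucket cache)
 * used by HBase, based on configuration.
 */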
@InterfaceAudience.Private
public final class BlockCacheFactory {

  private static final Logger LOG = LoggerFactory.getLogger(BlockCacheFactory.class.getName());

  /**
   * Configuration keys for Bucket cache
   */

  /**
   * Configuration key to cache block policy (Lru, TinyLfu, AdaptiveLRU, IndexOnlyLRU).
   */
  public static final String BLOCKCACHE_POLICY_KEY = "hfile.block.cache.policy";
  public static final String BLOCKCACHE_POLICY_DEFAULT = "LRU";

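  /**
   * The number of writer threads used by the bucket cache.
   */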
  public static final String BUCKET_CACHE_WRITER_THREADS_KEY = "hbase.bucketcache.writer.threads";

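  /**
   * The length of the bucket cache writer queues.
   */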
  public static final String BUCKET_CACHE_WRITER_QUEUE_KEY = "hbase.bucketcache.writer.queuelength";

  /**
   * A comma-delimited array of values for use as bucket sizes.
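   * Each value must be a multiple of 256. For example, "5120,9216,17408" configures three bucket
   * sizes.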
   */
  public static final String BUCKET_CACHE_BUCKETS_KEY = "hbase.bucketcache.bucket.sizes";

  /**
   * Defaults for Bucket cache
   */
  public static final int DEFAULT_BUCKET_CACHE_WRITER_THREADS = 3;
  public static final int DEFAULT_BUCKET_CACHE_WRITER_QUEUE = 64;

  /**
   * The target block size used by blockcache instances. Defaults to
   * {@link HConstants#DEFAULT_BLOCKSIZE}.
   */
  public static final String BLOCKCACHE_BLOCKSIZE_KEY = "hbase.blockcache.minblocksize";

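  /**
   * Whether to use an external block cache (e.g. memcached) as the L2 cache.
   */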
  private static final String EXTERNAL_BLOCKCACHE_KEY = "hbase.blockcache.use.external";
  private static final boolean EXTERNAL_BLOCKCACHE_DEFAULT = false;

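  /**
   * The implementation class to use for the external block cache.
   */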
  private static final String EXTERNAL_BLOCKCACHE_CLASS_KEY = "hbase.blockcache.external.class";

  /**
   * @deprecated use {@link BlockCacheFactory#BLOCKCACHE_BLOCKSIZE_KEY} instead.
   */
  @Deprecated
  static final String DEPRECATED_BLOCKCACHE_BLOCKSIZE_KEY = "hbase.offheapcache.minblocksize";

  /**
   * The window period length in minutes for CacheStats rolling metrics.
   */
  public static final String BLOCKCACHE_STATS_PERIOD_MINUTES_KEY =
    "hbase.blockcache.stats.period.minutes";

  /**
   * Default window period length in minutes.
   */
  public static final int DEFAULT_BLOCKCACHE_STATS_PERIOD_MINUTES = 5;

  /**
   * The total number of periods in the window.
   */
  public static final String BLOCKCACHE_STATS_PERIODS = "hbase.blockcache.stats.periods";

  /**
   * Default number of periods in the window. Twelve 5-minute periods give an hourly rolling
   * window.
   */
  public static final int DEFAULT_BLOCKCACHE_STATS_PERIODS = 12;

  private BlockCacheFactory() {
  }

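  /**
   * Creates a block cache from the given configuration. Returns {@code null} if the first level
   * (on-heap) cache is disabled; otherwise returns the first level cache alone, or a combined
   * cache backed by an external L2 cache or a {@link BucketCache}.
   */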
  public static BlockCache createBlockCache(Configuration conf,
    Map<String, HRegion> onlineRegions) {
    FirstLevelBlockCache l1Cache = createFirstLevelCache(conf);
    if (l1Cache == null) {
      return null;
    }
    boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT);
    if (useExternal) {
      BlockCache l2CacheInstance = createExternalBlockcache(conf);
      return l2CacheInstance == null
        ? l1Cache
        : new InclusiveCombinedBlockCache(l1Cache, l2CacheInstance);
    } else {
      // otherwise use the bucket cache.
      BucketCache bucketCache = createBucketCache(conf, onlineRegions);
      if (!conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true)) {
        // Non-combined mode has been removed since 2.0
        LOG.warn(
          "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available");
      }
      return bucketCache == null ? l1Cache : new CombinedBlockCache(l1Cache, bucketCache);
    }
  }

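  /**
   * Same as {@link #createBlockCache(Configuration, Map)} but without passing any online regions.
   */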
  public static BlockCache createBlockCache(Configuration conf) {
    return createBlockCache(conf, null);
  }

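  /**
   * Creates the first level (on-heap) cache for the configured policy, or {@code null} if the
   * on-heap block cache is disabled.
   */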
  private static FirstLevelBlockCache createFirstLevelCache(final Configuration c) {
    final long cacheSize = MemorySizeUtil.getOnHeapCacheSize(c);
    if (cacheSize < 0) {
      return null;
    }
    String policy = c.get(BLOCKCACHE_POLICY_KEY, BLOCKCACHE_POLICY_DEFAULT);
    int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
    LOG.info("Allocating BlockCache size=" + StringUtils.byteDesc(cacheSize) + ", blockSize="
      + StringUtils.byteDesc(blockSize));
    if (policy.equalsIgnoreCase("LRU")) {
      return new LruBlockCache(cacheSize, blockSize, true, c);
    } else if (policy.equalsIgnoreCase("IndexOnlyLRU")) {
      return new IndexOnlyLruBlockCache(cacheSize, blockSize, true, c);
    } else if (policy.equalsIgnoreCase("TinyLFU")) {
      return new TinyLfuBlockCache(cacheSize, blockSize, ForkJoinPool.commonPool(), c);
    } else if (policy.equalsIgnoreCase("AdaptiveLRU")) {
      return new LruAdaptiveBlockCache(cacheSize, blockSize, true, c);
    } else {
      throw new IllegalArgumentException("Unknown policy: " + policy);
    }
  }

  /**
   * Enum of all built-in external block caches. This is used for config.
   */
  private enum ExternalBlockCaches {
    memcached("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache");

    // TODO(eclark): Consider more. Redis, etc.
    Class<? extends BlockCache> clazz;

    @SuppressWarnings("unchecked")
    ExternalBlockCaches(String clazzName) {
      try {
        clazz = (Class<? extends BlockCache>) Class.forName(clazzName);
      } catch (ClassNotFoundException cnef) {
        clazz = null;
      }
    }

    ExternalBlockCaches(Class<? extends BlockCache> clazz) {
      this.clazz = clazz;
    }
  }

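  /**
   * Instantiates the configured external (L2) block cache, or returns {@code null} if it cannot
   * be created.
   */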
  private static BlockCache createExternalBlockcache(Configuration c) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Trying to use External l2 cache");
    }
    Class klass = null;

    // Get the class from the config.
    try {
      klass = ExternalBlockCaches.valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcache")).clazz;
    } catch (IllegalArgumentException exception) {
      try {
        klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY,
          Class.forName("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache"));
      } catch (ClassNotFoundException e) {
        return null;
      }
    }

    // Now try and create an instance of the block cache.
    try {
      LOG.info("Creating external block cache of type: " + klass);
      return (BlockCache) ReflectionUtils.newInstance(klass, c);
    } catch (Exception e) {
      LOG.warn("Error creating external block cache", e);
    }
    return null;
  }

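  /**
   * Creates the {@link BucketCache} (L2) if an IOEngine is configured via
   * {@link HConstants#BUCKET_CACHE_IOENGINE_KEY}, otherwise returns {@code null}.
   */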
  private static BucketCache createBucketCache(Configuration c,
    Map<String, HRegion> onlineRegions) {
    // Check for L2. The ioengine name must be non-empty.
    String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null);
    if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.isEmpty()) {
      return null;
    }

    int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
    final long bucketCacheSize = MemorySizeUtil.getBucketCacheSize(c);
    if (bucketCacheSize <= 0) {
      throw new IllegalStateException("bucketCacheSize <= 0; Check " + BUCKET_CACHE_SIZE_KEY
        + " setting and/or server java heap size");
    }
    if (c.get("hbase.bucketcache.percentage.in.combinedcache") != null) {
      LOG.warn("Configuration 'hbase.bucketcache.percentage.in.combinedcache' is no longer "
        + "respected. See comments in http://hbase.apache.org/book.html#_changes_of_note");
    }
    int writerThreads =
      c.getInt(BUCKET_CACHE_WRITER_THREADS_KEY, DEFAULT_BUCKET_CACHE_WRITER_THREADS);
    int writerQueueLen = c.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY, DEFAULT_BUCKET_CACHE_WRITER_QUEUE);
    String persistentPath = c.get(BUCKET_CACHE_PERSISTENT_PATH_KEY);
    String[] configuredBucketSizes = c.getStrings(BUCKET_CACHE_BUCKETS_KEY);
    int[] bucketSizes = null;
    if (configuredBucketSizes != null) {
      bucketSizes = new int[configuredBucketSizes.length];
      for (int i = 0; i < configuredBucketSizes.length; i++) {
        int bucketSize = Integer.parseInt(configuredBucketSizes[i].trim());
        if (bucketSize % 256 != 0) {
          // All bucket sizes must be multiples of 256. This ensures that the block offsets
          // calculated within buckets are also multiples of 256. See BucketEntry, where the
          // offset to each block is represented using 5 bytes instead of an 8-byte long; we want
          // to keep the heap overhead as low as possible.
          throw new IllegalArgumentException("Illegal value: " + bucketSize + " configured for '"
            + BUCKET_CACHE_BUCKETS_KEY + "'. All bucket sizes must be multiples of 256");
        }
        bucketSizes[i] = bucketSize;
      }
    }
    BucketCache bucketCache = null;
    try {
      int ioErrorsTolerationDuration =
        c.getInt("hbase.bucketcache.ioengine.errors.tolerated.duration",
          BucketCache.DEFAULT_ERROR_TOLERATION_DURATION);
      // The bucket cache logs its stats on creation, inside the constructor.
      bucketCache = new BucketCache(bucketCacheIOEngineName, bucketCacheSize, blockSize,
        bucketSizes, writerThreads, writerQueueLen, persistentPath, ioErrorsTolerationDuration, c,
        onlineRegions);
    } catch (IOException ioex) {
      LOG.error("Can't instantiate bucket cache", ioex);
      throw new RuntimeException(ioex);
    }
    return bucketCache;
  }
}