/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_PERSISTENT_PATH_KEY;
import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ForkJoinPool;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Factory that assembles the {@link BlockCache} hierarchy from configuration: an on-heap
 * first-level cache (LRU, TinyLFU, AdaptiveLRU or IndexOnlyLRU policy) optionally combined with a
 * second-level cache, which is either an external cache (e.g. memcached) or a {@link BucketCache}.
 */
@InterfaceAudience.Private
public final class BlockCacheFactory {

  private static final Logger LOG = LoggerFactory.getLogger(BlockCacheFactory.class);

  /**
   * Configuration keys for Bucket cache
   */

  /**
   * Configuration key to cache block policy (Lru, TinyLfu, AdaptiveLRU, IndexOnlyLRU).
   */
  public static final String BLOCKCACHE_POLICY_KEY = "hfile.block.cache.policy";
  public static final String BLOCKCACHE_POLICY_DEFAULT = "LRU";

  public static final String BUCKET_CACHE_WRITER_THREADS_KEY = "hbase.bucketcache.writer.threads";

  public static final String BUCKET_CACHE_WRITER_QUEUE_KEY = "hbase.bucketcache.writer.queuelength";

  /**
   * A comma-delimited array of values for use as bucket sizes.
   */
  public static final String BUCKET_CACHE_BUCKETS_KEY = "hbase.bucketcache.bucket.sizes";

  /**
   * Defaults for Bucket cache
   */
  public static final int DEFAULT_BUCKET_CACHE_WRITER_THREADS = 3;
  public static final int DEFAULT_BUCKET_CACHE_WRITER_QUEUE = 64;

  /**
   * The target block size used by blockcache instances. Defaults to
   * {@link HConstants#DEFAULT_BLOCKSIZE}.
   */
  public static final String BLOCKCACHE_BLOCKSIZE_KEY = "hbase.blockcache.minblocksize";

  private static final String EXTERNAL_BLOCKCACHE_KEY = "hbase.blockcache.use.external";
  private static final boolean EXTERNAL_BLOCKCACHE_DEFAULT = false;

  private static final String EXTERNAL_BLOCKCACHE_CLASS_KEY = "hbase.blockcache.external.class";

  /**
   * @deprecated use {@link BlockCacheFactory#BLOCKCACHE_BLOCKSIZE_KEY} instead.
   */
  @Deprecated
  static final String DEPRECATED_BLOCKCACHE_BLOCKSIZE_KEY = "hbase.offheapcache.minblocksize";

  /** Utility class; not instantiable. */
  private BlockCacheFactory() {
  }

  /**
   * Builds the block cache described by {@code conf}.
   * @param conf          configuration to read cache policy, sizes and L2 settings from
   * @param onlineRegions regions handed to the bucket cache (may be null); unused by the L1-only
   *                      and external-cache configurations
   * @return the assembled cache, or {@code null} when the on-heap cache size is negative (block
   *         caching disabled)
   */
  public static BlockCache createBlockCache(Configuration conf,
    Map<String, HRegion> onlineRegions) {
    FirstLevelBlockCache l1Cache = createFirstLevelCache(conf);
    if (l1Cache == null) {
      // Caching is disabled entirely when no L1 can be created.
      return null;
    }
    boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT);
    if (useExternal) {
      BlockCache l2CacheInstance = createExternalBlockcache(conf);
      return l2CacheInstance == null
        ? l1Cache
        : new InclusiveCombinedBlockCache(l1Cache, l2CacheInstance);
    } else {
      // otherwise use the bucket cache.
      BucketCache bucketCache = createBucketCache(conf, onlineRegions);
      if (!conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true)) {
        // Non combined mode is off from 2.0
        LOG.warn(
          "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available");
      }
      return bucketCache == null ? l1Cache : new CombinedBlockCache(l1Cache, bucketCache);
    }
  }

  /**
   * Convenience overload of {@link #createBlockCache(Configuration, Map)} with no online regions.
   */
  public static BlockCache createBlockCache(Configuration conf) {
    return createBlockCache(conf, null);
  }

  /**
   * Creates the on-heap first-level cache according to the configured policy.
   * @return the L1 cache, or {@code null} if the configured on-heap cache size is negative
   * @throws IllegalArgumentException if {@link #BLOCKCACHE_POLICY_KEY} names an unknown policy
   */
  private static FirstLevelBlockCache createFirstLevelCache(final Configuration c) {
    final long cacheSize = MemorySizeUtil.getOnHeapCacheSize(c);
    if (cacheSize < 0) {
      return null;
    }
    String policy = c.get(BLOCKCACHE_POLICY_KEY, BLOCKCACHE_POLICY_DEFAULT);
    int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
    LOG.info("Allocating BlockCache size=" + StringUtils.byteDesc(cacheSize) + ", blockSize="
      + StringUtils.byteDesc(blockSize));
    if (policy.equalsIgnoreCase("LRU")) {
      return new LruBlockCache(cacheSize, blockSize, true, c);
    } else if (policy.equalsIgnoreCase("IndexOnlyLRU")) {
      return new IndexOnlyLruBlockCache(cacheSize, blockSize, true, c);
    } else if (policy.equalsIgnoreCase("TinyLFU")) {
      return new TinyLfuBlockCache(cacheSize, blockSize, ForkJoinPool.commonPool(), c);
    } else if (policy.equalsIgnoreCase("AdaptiveLRU")) {
      return new LruAdaptiveBlockCache(cacheSize, blockSize, true, c);
    } else {
      throw new IllegalArgumentException("Unknown policy: " + policy);
    }
  }

  /**
   * Enum of all built in external block caches. This is used for config.
   */
  private enum ExternalBlockCaches {
    memcached("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache");

    // TODO(eclark): Consider more. Redis, etc.
    Class<? extends BlockCache> clazz;

    @SuppressWarnings("unchecked")
    ExternalBlockCaches(String clazzName) {
      try {
        clazz = (Class<? extends BlockCache>) Class.forName(clazzName);
      } catch (ClassNotFoundException cnef) {
        // Implementation is not on the classpath; callers must tolerate a null clazz.
        clazz = null;
      }
    }

    ExternalBlockCaches(Class<? extends BlockCache> clazz) {
      this.clazz = clazz;
    }
  }

  /**
   * Creates the external (non-bucket) L2 cache, if one can be resolved and instantiated.
   * @return the external cache instance, or {@code null} when the implementation class cannot be
   *         found or constructed
   */
  private static BlockCache createExternalBlockcache(Configuration c) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Trying to use External l2 cache");
    }
    Class<?> klass = null;

    // Get the class from the config: first treat the value as a built-in ExternalBlockCaches
    // constant, then fall back to interpreting it as a fully-qualified class name.
    try {
      // The default must match an enum constant ("memcached", not "memcache"), otherwise the
      // default configuration always resolved through the exception fallback below.
      klass = ExternalBlockCaches.valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcached")).clazz;
    } catch (IllegalArgumentException exception) {
      try {
        klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY,
          Class.forName("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache"));
      } catch (ClassNotFoundException e) {
        return null;
      }
    }
    if (klass == null) {
      // Built-in constant resolved but its implementation class is not on the classpath.
      return null;
    }

    // Now try and create an instance of the block cache.
    try {
      LOG.info("Creating external block cache of type: " + klass);
      return (BlockCache) ReflectionUtils.newInstance(klass, c);
    } catch (Exception e) {
      LOG.warn("Error creating external block cache", e);
    }
    return null;
  }

  /**
   * Creates the {@link BucketCache} L2 cache from configuration.
   * @param c             configuration to read ioengine, sizes, writer and persistence settings
   * @param onlineRegions regions passed through to the bucket cache constructor (may be null)
   * @return the bucket cache, or {@code null} when no ioengine is configured
   * @throws IllegalStateException    if an ioengine is configured but the cache size is not
   * @throws IllegalArgumentException if a configured bucket size is not a multiple of 256
   * @throws RuntimeException         wrapping any {@link IOException} from cache construction
   */
  private static BucketCache createBucketCache(Configuration c,
    Map<String, HRegion> onlineRegions) {
    // Check for L2. ioengine name must be non-null.
    String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null);
    if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.isEmpty()) {
      return null;
    }

    int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
    final long bucketCacheSize = MemorySizeUtil.getBucketCacheSize(c);
    if (bucketCacheSize <= 0) {
      throw new IllegalStateException("bucketCacheSize <= 0; Check " + BUCKET_CACHE_SIZE_KEY
        + " setting and/or server java heap size");
    }
    if (c.get("hbase.bucketcache.percentage.in.combinedcache") != null) {
      LOG.warn("Configuration 'hbase.bucketcache.percentage.in.combinedcache' is no longer "
        + "respected. See comments in http://hbase.apache.org/book.html#_changes_of_note");
    }
    int writerThreads =
      c.getInt(BUCKET_CACHE_WRITER_THREADS_KEY, DEFAULT_BUCKET_CACHE_WRITER_THREADS);
    int writerQueueLen = c.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY, DEFAULT_BUCKET_CACHE_WRITER_QUEUE);
    String persistentPath = c.get(BUCKET_CACHE_PERSISTENT_PATH_KEY);
    String[] configuredBucketSizes = c.getStrings(BUCKET_CACHE_BUCKETS_KEY);
    int[] bucketSizes = null;
    if (configuredBucketSizes != null) {
      bucketSizes = new int[configuredBucketSizes.length];
      for (int i = 0; i < configuredBucketSizes.length; i++) {
        int bucketSize = Integer.parseInt(configuredBucketSizes[i].trim());
        if (bucketSize % 256 != 0) {
          // We need all the bucket sizes to be multiples of 256. Having all the configured bucket
          // sizes to be multiples of 256 will ensure that the block offsets within buckets,
          // that are calculated, will also be multiples of 256.
          // See BucketEntry where offset to each block is represented using 5 bytes (instead of 8
          // bytes long). We would like to save heap overhead as less as possible.
          throw new IllegalArgumentException("Illegal value: " + bucketSize + " configured for '"
            + BUCKET_CACHE_BUCKETS_KEY + "'. All bucket sizes to be multiples of 256");
        }
        bucketSizes[i] = bucketSize;
      }
    }
    BucketCache bucketCache = null;
    try {
      int ioErrorsTolerationDuration =
        c.getInt("hbase.bucketcache.ioengine.errors.tolerated.duration",
          BucketCache.DEFAULT_ERROR_TOLERATION_DURATION);
      // Bucket cache logs its stats on creation internal to the constructor.
      bucketCache = new BucketCache(bucketCacheIOEngineName, bucketCacheSize, blockSize,
        bucketSizes, writerThreads, writerQueueLen, persistentPath, ioErrorsTolerationDuration, c,
        onlineRegions);
    } catch (IOException ioex) {
      LOG.error("Can't instantiate bucket cache", ioex);
      throw new RuntimeException(ioex);
    }
    return bucketCache;
  }
}