/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;

import java.io.IOException;
import java.util.concurrent.ForkJoinPool;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@InterfaceAudience.Private
public final class BlockCacheFactory {

  private static final Logger LOG = LoggerFactory.getLogger(BlockCacheFactory.class.getName());

  /**
   * Configuration keys for Bucket cache
   */

  /**
   * Configuration key for the block cache policy (LRU, TinyLFU).
   */
  public static final String BLOCKCACHE_POLICY_KEY = "hfile.block.cache.policy";
  public static final String BLOCKCACHE_POLICY_DEFAULT = "LRU";

  /**
   * If the chosen ioengine can persist its state across restarts, the path to the file into which
   * that state is persisted. This is NOT the data file; it holds a serialized copy of the
   * in-memory map describing what is in the data file. For example, if
   * BUCKET_CACHE_IOENGINE_KEY ("hbase.bucketcache.ioengine") is set to
   * <code>file:/tmp/bucketcache.data</code>, the block data is written to
   * <code>/tmp/bucketcache.data</code>, while the map of where each block lives in that file is
   * kept in memory and must be persisted across restarts. The path supplied here, e.g.
   * <code>/tmp/bucketcache.map</code>, is where that map is written.
   */
  public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = "hbase.bucketcache.persistent.path";

  public static final String BUCKET_CACHE_WRITER_THREADS_KEY = "hbase.bucketcache.writer.threads";

  public static final String BUCKET_CACHE_WRITER_QUEUE_KEY = "hbase.bucketcache.writer.queuelength";

  /**
   * A comma-delimited array of values for use as bucket sizes.
   */
  public static final String BUCKET_CACHE_BUCKETS_KEY = "hbase.bucketcache.bucket.sizes";

  /**
   * Defaults for Bucket cache
   */
  public static final int DEFAULT_BUCKET_CACHE_WRITER_THREADS = 3;
  public static final int DEFAULT_BUCKET_CACHE_WRITER_QUEUE = 64;
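  // Example (illustrative sketch; the paths and sizes below are hypothetical, not defaults): a
  // file-backed bucket cache that persists its map across restarts might be configured like so.
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "file:/mnt/ssd/bucketcache.data");
  //   conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 8192); // capacity in MB
  //   conf.set(BUCKET_CACHE_PERSISTENT_PATH_KEY, "/mnt/ssd/bucketcache.map");
  //   conf.setInt(BUCKET_CACHE_WRITER_THREADS_KEY, DEFAULT_BUCKET_CACHE_WRITER_THREADS);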
  /**
   * The target block size used by blockcache instances. Defaults to
   * {@link HConstants#DEFAULT_BLOCKSIZE}.
   */
  public static final String BLOCKCACHE_BLOCKSIZE_KEY = "hbase.blockcache.minblocksize";

  private static final String EXTERNAL_BLOCKCACHE_KEY = "hbase.blockcache.use.external";
  private static final boolean EXTERNAL_BLOCKCACHE_DEFAULT = false;

  private static final String EXTERNAL_BLOCKCACHE_CLASS_KEY = "hbase.blockcache.external.class";

  /**
   * @deprecated use {@link BlockCacheFactory#BLOCKCACHE_BLOCKSIZE_KEY} instead.
   */
  @Deprecated
  static final String DEPRECATED_BLOCKCACHE_BLOCKSIZE_KEY = "hbase.offheapcache.minblocksize";

  /**
   * The config key hbase.offheapcache.minblocksize is misleadingly named and has been replaced by
   * {@link BlockCacheFactory#BLOCKCACHE_BLOCKSIZE_KEY}. The old key is kept here for backward
   * compatibility.
   */
  static {
    Configuration.addDeprecation(DEPRECATED_BLOCKCACHE_BLOCKSIZE_KEY, BLOCKCACHE_BLOCKSIZE_KEY);
  }

  private BlockCacheFactory() {
  }

  /**
   * Create a block cache from the given configuration: an on-heap first-level cache, optionally
   * combined with either an external L2 cache or a {@link BucketCache}.
   * @return the block cache, or {@code null} if block caching is disabled
   */
  public static BlockCache createBlockCache(Configuration conf) {
    if (conf.get(DEPRECATED_BLOCKCACHE_BLOCKSIZE_KEY) != null) {
      LOG.warn("The config key {} is deprecated; please use {} instead. The deprecated key will "
          + "be removed in a future release.", DEPRECATED_BLOCKCACHE_BLOCKSIZE_KEY,
        BLOCKCACHE_BLOCKSIZE_KEY);
    }
    FirstLevelBlockCache l1Cache = createFirstLevelCache(conf);
    if (l1Cache == null) {
      return null;
    }
    boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT);
    if (useExternal) {
      BlockCache l2CacheInstance = createExternalBlockcache(conf);
      return l2CacheInstance == null ?
          l1Cache :
          new InclusiveCombinedBlockCache(l1Cache, l2CacheInstance);
    } else {
      // Otherwise use the bucket cache.
      BucketCache bucketCache = createBucketCache(conf);
      if (!conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true)) {
        // Non-combined mode is no longer supported as of 2.0.
        LOG.warn(
          "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available");
      }
      return bucketCache == null ? l1Cache : new CombinedBlockCache(l1Cache, bucketCache);
    }
  }

  /**
   * Create the on-heap first-level cache (LRU or TinyLFU, per {@link #BLOCKCACHE_POLICY_KEY}).
   * @return the cache, or {@code null} if the on-heap block cache is disabled
   */
  private static FirstLevelBlockCache createFirstLevelCache(final Configuration c) {
    final long cacheSize = MemorySizeUtil.getOnHeapCacheSize(c);
    if (cacheSize < 0) {
      return null;
    }
    String policy = c.get(BLOCKCACHE_POLICY_KEY, BLOCKCACHE_POLICY_DEFAULT);
    int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
    LOG.info("Allocating BlockCache size=" +
        StringUtils.byteDesc(cacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
    if (policy.equalsIgnoreCase("LRU")) {
      return new LruBlockCache(cacheSize, blockSize, true, c);
    } else if (policy.equalsIgnoreCase("TinyLFU")) {
      return new TinyLfuBlockCache(cacheSize, blockSize, ForkJoinPool.commonPool(), c);
    } else {
      throw new IllegalArgumentException("Unknown policy: " + policy);
    }
  }
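  // Example (illustrative sketch; the values are hypothetical): selecting the TinyLFU policy and
  // a 64 KB target block size for the first-level cache could look like this.
  //
  //   conf.set(BLOCKCACHE_POLICY_KEY, "TinyLFU");
  //   conf.setInt(BLOCKCACHE_BLOCKSIZE_KEY, 64 * 1024);
  //   FirstLevelBlockCache l1 = createFirstLevelCache(conf); // TinyLfuBlockCache, or null if disabled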
  /**
   * Enum of all built-in external block caches.
   * This is used for config.
   */
  private enum ExternalBlockCaches {
    memcached("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache");
    // TODO(eclark): Consider more. Redis, etc.
    Class<? extends BlockCache> clazz;

    @SuppressWarnings("unchecked")
    ExternalBlockCaches(String clazzName) {
      try {
        clazz = (Class<? extends BlockCache>) Class.forName(clazzName);
      } catch (ClassNotFoundException cnfe) {
        clazz = null;
      }
    }

    ExternalBlockCaches(Class<? extends BlockCache> clazz) {
      this.clazz = clazz;
    }
  }

  private static BlockCache createExternalBlockcache(Configuration c) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Trying to use external L2 cache");
    }
    Class<?> klass = null;

    // Get the class from the config.
    try {
      klass = ExternalBlockCaches
          .valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcached")).clazz;
    } catch (IllegalArgumentException exception) {
      try {
        klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, Class.forName(
            "org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache"));
      } catch (ClassNotFoundException e) {
        return null;
      }
    }

    // Now try and create an instance of the block cache.
    try {
      LOG.info("Creating external block cache of type: " + klass);
      return (BlockCache) ReflectionUtils.newInstance(klass, c);
    } catch (Exception e) {
      LOG.warn("Error creating external block cache", e);
    }
    return null;
  }

  private static BucketCache createBucketCache(Configuration c) {
    // Check for L2. The ioengine name must be non-null.
    String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null);
    if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) {
      return null;
    }

    int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
    final long bucketCacheSize = MemorySizeUtil.getBucketCacheSize(c);
    if (bucketCacheSize <= 0) {
      throw new IllegalStateException("bucketCacheSize <= 0; Check " +
          BUCKET_CACHE_SIZE_KEY + " setting and/or server java heap size");
    }
    if (c.get("hbase.bucketcache.percentage.in.combinedcache") != null) {
      LOG.warn("Configuration 'hbase.bucketcache.percentage.in.combinedcache' is no longer "
          + "respected. See comments in http://hbase.apache.org/book.html#_changes_of_note");
    }
    int writerThreads = c.getInt(BUCKET_CACHE_WRITER_THREADS_KEY,
        DEFAULT_BUCKET_CACHE_WRITER_THREADS);
    int writerQueueLen = c.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY,
        DEFAULT_BUCKET_CACHE_WRITER_QUEUE);
    String persistentPath = c.get(BUCKET_CACHE_PERSISTENT_PATH_KEY);
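    // Example (illustrative; the sizes are hypothetical, not defaults): a possible value for
    // hbase.bucketcache.bucket.sizes. Every entry must be a multiple of 256, or the parsing
    // below rejects the configuration.
    //   hbase.bucketcache.bucket.sizes = 5120,9216,17408,33792,66560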
    String[] configuredBucketSizes = c.getStrings(BUCKET_CACHE_BUCKETS_KEY);
    int[] bucketSizes = null;
    if (configuredBucketSizes != null) {
      bucketSizes = new int[configuredBucketSizes.length];
      for (int i = 0; i < configuredBucketSizes.length; i++) {
        int bucketSize = Integer.parseInt(configuredBucketSizes[i].trim());
        if (bucketSize % 256 != 0) {
          // All bucket sizes must be multiples of 256, so that the block offsets calculated
          // within buckets are also multiples of 256. See BucketEntry, where the offset to each
          // block is represented using 5 bytes (instead of an 8-byte long); we want to keep that
          // heap overhead as low as possible.
          throw new IllegalArgumentException("Illegal value: " + bucketSize + " configured for '"
              + BUCKET_CACHE_BUCKETS_KEY + "'. All bucket sizes must be multiples of 256");
        }
        bucketSizes[i] = bucketSize;
      }
    }
    BucketCache bucketCache = null;
    try {
      int ioErrorsTolerationDuration = c.getInt(
          "hbase.bucketcache.ioengine.errors.tolerated.duration",
          BucketCache.DEFAULT_ERROR_TOLERATION_DURATION);
      // Bucket cache logs its stats on creation internal to the constructor.
      bucketCache = new BucketCache(bucketCacheIOEngineName,
          bucketCacheSize, blockSize, bucketSizes, writerThreads, writerQueueLen, persistentPath,
          ioErrorsTolerationDuration, c);
    } catch (IOException ioex) {
      LOG.error("Can't instantiate bucket cache", ioex);
      throw new RuntimeException(ioex);
    }
    return bucketCache;
  }
}
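// Usage sketch (illustrative; the ioengine and size values are hypothetical): a caller typically
// hands a Configuration to the factory and uses whatever cache comes back.
//
//   Configuration conf = HBaseConfiguration.create();
//   conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
//   conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 4096); // MB
//   BlockCache cache = BlockCacheFactory.createBlockCache(conf); // a CombinedBlockCache here
//   // createBlockCache returns null when the on-heap block cache is disabled.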