/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.NavigableMap;
import java.util.NavigableSet;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ConcurrentSkipListSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.GsonUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.gson.Gson;
import org.apache.hbase.thirdparty.com.google.gson.TypeAdapter;
import org.apache.hbase.thirdparty.com.google.gson.stream.JsonReader;
import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter;

/**
 * Utility for aggregating counts in CachedBlocks and toString/toJSON CachedBlocks and BlockCaches.
 * No attempt has been made at making this thread safe.
 */
@InterfaceAudience.Private
public class BlockCacheUtil {

  private static final Logger LOG = LoggerFactory.getLogger(BlockCacheUtil.class);

  public static final long NANOS_PER_SECOND = 1000000000;

  /**
   * Needed for generating JSON.
   */
  private static final Gson GSON = GsonUtil.createGson()
    .registerTypeAdapter(FastLongHistogram.class, new TypeAdapter<FastLongHistogram>() {

      @Override
      public void write(JsonWriter out, FastLongHistogram value) throws IOException {
        AgeSnapshot snapshot = new AgeSnapshot(value);
        out.beginObject();
        out.name("mean").value(snapshot.getMean());
        out.name("min").value(snapshot.getMin());
        out.name("max").value(snapshot.getMax());
        out.name("75thPercentile").value(snapshot.get75thPercentile());
        out.name("95thPercentile").value(snapshot.get95thPercentile());
        out.name("98thPercentile").value(snapshot.get98thPercentile());
        out.name("99thPercentile").value(snapshot.get99thPercentile());
        out.name("999thPercentile").value(snapshot.get999thPercentile());
        out.endObject();
      }

      @Override
      public FastLongHistogram read(JsonReader in) throws IOException {
        throw new UnsupportedOperationException();
      }
    }).setPrettyPrinting().create();

  /**
   * @param cb  the cached block
   * @param now the current time in nanoseconds
   * @return The block content as String.
   */
  public static String toString(final CachedBlock cb, final long now) {
    return "filename=" + cb.getFilename() + ", " + toStringMinusFileName(cb, now);
  }
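  /*
   * Usage sketch (illustrative only, not part of the original class): dumping every cached
   * block. Assumes the caller already has some BlockCache to iterate, which yields
   * CachedBlock instances; ages are computed against System.nanoTime() because cached
   * times are recorded in nanoseconds (see NANOS_PER_SECOND above).
   *
   *   long now = System.nanoTime();
   *   for (CachedBlock cb : blockCache) {
   *     System.out.println(BlockCacheUtil.toString(cb, now));
   *   }
   */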
  /**
   * Little data structure to hold counts for a file. Used when doing a toJSON.
   */
  static class CachedBlockCountsPerFile {
    private int count = 0;
    private long size = 0;
    private int countData = 0;
    private long sizeData = 0;
    private final String filename;

    CachedBlockCountsPerFile(final String filename) {
      this.filename = filename;
    }

    public int getCount() {
      return count;
    }

    public long getSize() {
      return size;
    }

    public int getCountData() {
      return countData;
    }

    public long getSizeData() {
      return sizeData;
    }

    public String getFilename() {
      return filename;
    }
  }

  /** Returns a JSON String of <code>filename</code> and counts of <code>blocks</code> */
  public static String toJSON(String filename, NavigableSet<CachedBlock> blocks)
    throws IOException {
    CachedBlockCountsPerFile counts = new CachedBlockCountsPerFile(filename);
    for (CachedBlock cb : blocks) {
      counts.count++;
      counts.size += cb.getSize();
      BlockType bt = cb.getBlockType();
      if (bt != null && bt.isData()) {
        counts.countData++;
        counts.sizeData += cb.getSize();
      }
    }
    return GSON.toJson(counts);
  }

  /** Returns JSON string of <code>cbsbf</code> aggregated */
  public static String toJSON(CachedBlocksByFile cbsbf) throws IOException {
    return GSON.toJson(cbsbf);
  }

  /** Returns JSON string of <code>bc</code> content. */
  public static String toJSON(BlockCache bc) throws IOException {
    return GSON.toJson(bc);
  }

  /**
   * @param cb  the cached block
   * @param now the current time in nanoseconds
   * @return The block content of <code>cb</code> as a String minus the filename.
   */
  public static String toStringMinusFileName(final CachedBlock cb, final long now) {
    return "offset=" + cb.getOffset() + ", size=" + cb.getSize() + ", age="
      + (now - cb.getCachedTime()) + ", type=" + cb.getBlockType() + ", priority="
      + cb.getBlockPriority();
  }

  /**
   * Get a {@link CachedBlocksByFile} instance and load it up by iterating content in
   * {@link BlockCache}.
   * @param conf Used to read configurations
   * @param bc   Block Cache to iterate.
   * @return Loaded up instance of CachedBlocksByFile
   */
  public static CachedBlocksByFile getLoadedCachedBlocksByFile(final Configuration conf,
    final BlockCache bc) {
    CachedBlocksByFile cbsbf = new CachedBlocksByFile(conf);
    for (CachedBlock cb : bc) {
      if (cbsbf.update(cb)) break;
    }
    return cbsbf;
  }
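  /*
   * Usage sketch (illustrative only, not part of the original class): snapshotting a cache's
   * per-file stats as JSON. "conf" and "bc" stand in for whatever Configuration and
   * BlockCache implementation the caller has at hand.
   *
   *   CachedBlocksByFile cbsbf = BlockCacheUtil.getLoadedCachedBlocksByFile(conf, bc);
   *   String json = BlockCacheUtil.toJSON(cbsbf);
   *
   * Note that the snapshot may be truncated: iteration stops once the configured maximum
   * block count is reached (see getMaxCachedBlocksByFile below), in which case
   * cbsbf.isFull() returns true.
   */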
  private static int compareCacheBlock(Cacheable left, Cacheable right,
    boolean includeNextBlockMetadata) {
    ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength());
    left.serialize(l, includeNextBlockMetadata);
    ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength());
    right.serialize(r, includeNextBlockMetadata);
    return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(), r.array(), r.arrayOffset(),
      r.limit());
  }

  /**
   * Validate that the existing and newBlock are the same without including the nextBlockMetadata;
   * if not, throw an exception. If they are the same without the nextBlockMetadata, return the
   * comparison.
   * @param existing block that is existing in the cache.
   * @param newBlock block that is trying to be cached.
   * @param cacheKey the cache key of the blocks.
   * @return comparison of the existing block to the newBlock.
   */
  public static int validateBlockAddition(Cacheable existing, Cacheable newBlock,
    BlockCacheKey cacheKey) {
    int comparison = compareCacheBlock(existing, newBlock, false);
    if (comparison != 0) {
      throw new RuntimeException(
        "Cached block contents differ, which should not have happened. cacheKey:" + cacheKey);
    }
    if ((existing instanceof HFileBlock) && (newBlock instanceof HFileBlock)) {
      comparison = ((HFileBlock) existing).getNextBlockOnDiskSize()
        - ((HFileBlock) newBlock).getNextBlockOnDiskSize();
    }
    return comparison;
  }

  /**
   * Because of region splitting, it's possible that the split key falls in the middle of a block.
   * So it's possible that both daughter regions load the same block from their parent HFile. With
   * pread, we don't force the read to read all of the next block header, so when two threads try
   * to cache the same block, it's possible that one thread read all of the next block header but
   * the other one didn't. If the already cached block doesn't have the next block header but the
   * new block to cache does, then we can replace the existing block with the new block for better
   * performance. (HBASE-20447)
   * @param blockCache BlockCache to check
   * @param cacheKey   the block cache key
   * @param newBlock   the new block trying to be put into the block cache.
   * @return true if the existing block should be replaced with the new block for the same block
   *         cache key; false if the existing block should be kept.
   */
  public static boolean shouldReplaceExistingCacheBlock(BlockCache blockCache,
    BlockCacheKey cacheKey, Cacheable newBlock) {
    // NOTICE: The getBlock has retained the existingBlock inside.
    Cacheable existingBlock = blockCache.getBlock(cacheKey, false, false, false);
    if (existingBlock == null) {
      return true;
    }
    try {
      int comparison = BlockCacheUtil.validateBlockAddition(existingBlock, newBlock, cacheKey);
      if (comparison < 0) {
        LOG.warn("Cached block contents differ by nextBlockOnDiskSize, the new block has "
          + "nextBlockOnDiskSize set. Caching new block.");
        return true;
      } else if (comparison > 0) {
        LOG.warn("Cached block contents differ by nextBlockOnDiskSize, the existing block has "
          + "nextBlockOnDiskSize set. Keeping cached block.");
        return false;
      } else {
        LOG.debug("Caching an already cached block: {}. This is harmless and can happen in rare "
          + "cases (see HBASE-8547)", cacheKey);
        return false;
      }
    } finally {
      // Release this block to decrement the reference count.
      existingBlock.release();
    }
  }

  private static final int DEFAULT_MAX = 1000000;

  public static int getMaxCachedBlocksByFile(Configuration conf) {
    return conf == null ? DEFAULT_MAX : conf.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX);
  }
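  /*
   * Usage sketch (illustrative only, not part of the original class): the per-file scan cap
   * can be tuned via configuration. HBaseConfiguration.create() is the usual way callers
   * obtain a Configuration; any Hadoop Configuration works here.
   *
   *   Configuration conf = HBaseConfiguration.create();
   *   conf.setInt("hbase.ui.blockcache.by.file.max", 500000);
   *   int max = BlockCacheUtil.getMaxCachedBlocksByFile(conf); // 500000
   */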
  /**
   * Use one of these to keep a running account of cached blocks by file. Throw it away when done.
   * This is different from metrics in that it is stats on the current state of a cache. See
   * getLoadedCachedBlocksByFile
   */
  public static class CachedBlocksByFile {
    private int count;
    private int dataBlockCount;
    private long size;
    private long dataSize;
    private final long now = System.nanoTime();
    /**
     * How many blocks to look at before we give up. There could be many millions of blocks. We
     * don't want the UI to freeze while we run through 1B blocks... users will think hbase is
     * dead. The UI displays a warning in red when stats are incomplete.
     */
    private final int max;

    CachedBlocksByFile() {
      this(null);
    }

    CachedBlocksByFile(final Configuration c) {
      this.max = getMaxCachedBlocksByFile(c);
    }

    /**
     * Map by filename. Use concurrent utils because we want our Map and contained blocks sorted.
     */
    private transient NavigableMap<String, NavigableSet<CachedBlock>> cachedBlockByFile =
      new ConcurrentSkipListMap<>();
    FastLongHistogram hist = new FastLongHistogram();

    /**
     * @param cb the block to count
     * @return True if full... if we won't be adding any more.
     */
    public boolean update(final CachedBlock cb) {
      if (isFull()) return true;
      NavigableSet<CachedBlock> set = this.cachedBlockByFile.get(cb.getFilename());
      if (set == null) {
        set = new ConcurrentSkipListSet<>();
        this.cachedBlockByFile.put(cb.getFilename(), set);
      }
      set.add(cb);
      this.size += cb.getSize();
      this.count++;
      BlockType bt = cb.getBlockType();
      if (bt != null && bt.isData()) {
        this.dataBlockCount++;
        this.dataSize += cb.getSize();
      }
      long age = (this.now - cb.getCachedTime()) / NANOS_PER_SECOND;
      this.hist.add(age, 1);
      return false;
    }

    /**
     * @return True if full; i.e. there are more items in the cache but we only loaded up the
     *         maximum set in configuration <code>hbase.ui.blockcache.by.file.max</code> (Default:
     *         DEFAULT_MAX).
     */
    public boolean isFull() {
      return this.count >= this.max;
    }

    public NavigableMap<String, NavigableSet<CachedBlock>> getCachedBlockStatsByFile() {
      return this.cachedBlockByFile;
    }

    /** Returns count of blocks in the cache */
    public int getCount() {
      return count;
    }

    public int getDataCount() {
      return dataBlockCount;
    }

    /** Returns size of blocks in the cache */
    public long getSize() {
      return size;
    }

    /** Returns Size of data. */
    public long getDataSize() {
      return dataSize;
    }

    public AgeSnapshot getAgeInCacheSnapshot() {
      return new AgeSnapshot(this.hist);
    }

    @Override
    public String toString() {
      AgeSnapshot snapshot = getAgeInCacheSnapshot();
      return "count=" + count + ", dataBlockCount=" + dataBlockCount + ", size=" + size
        + ", dataSize=" + getDataSize() + ", mean age=" + snapshot.getMean() + ", min age="
        + snapshot.getMin() + ", max age=" + snapshot.getMax() + ", 75th percentile age="
        + snapshot.get75thPercentile() + ", 95th percentile age=" + snapshot.get95thPercentile()
        + ", 98th percentile age=" + snapshot.get98thPercentile() + ", 99th percentile age="
        + snapshot.get99thPercentile() + ", 99.9th percentile age="
        + snapshot.get999thPercentile();
    }
  }
}