/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.apache.hadoop.hbase.io.hfile.HFileBlock.FILL_HEADER;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.HashSet;
import java.util.Map;
import java.util.NavigableMap;
import java.util.NavigableSet;
import java.util.Set;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ConcurrentSkipListSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.GsonUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.gson.Gson;
import org.apache.hbase.thirdparty.com.google.gson.TypeAdapter;
import org.apache.hbase.thirdparty.com.google.gson.stream.JsonReader;
import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter;

/**
 * Utility for aggregating counts in CachedBlocks and toString/toJSON CachedBlocks and BlockCaches.
 * No attempt has been made at making this thread safe.
 */
@InterfaceAudience.Private
public class BlockCacheUtil {

  private static final Logger LOG = LoggerFactory.getLogger(BlockCacheUtil.class);

  public static final long NANOS_PER_SECOND = 1000000000;

  /**
   * Needed for generating JSON.
   */
  private static final Gson GSON = GsonUtil.createGson()
    .registerTypeAdapter(FastLongHistogram.class, new TypeAdapter<FastLongHistogram>() {

      @Override
      public void write(JsonWriter out, FastLongHistogram value) throws IOException {
        AgeSnapshot snapshot = new AgeSnapshot(value);
        out.beginObject();
        out.name("mean").value(snapshot.getMean());
        out.name("min").value(snapshot.getMin());
        out.name("max").value(snapshot.getMax());
        out.name("75thPercentile").value(snapshot.get75thPercentile());
        out.name("95thPercentile").value(snapshot.get95thPercentile());
        out.name("98thPercentile").value(snapshot.get98thPercentile());
        out.name("99thPercentile").value(snapshot.get99thPercentile());
        out.name("999thPercentile").value(snapshot.get999thPercentile());
        out.endObject();
      }

      @Override
      public FastLongHistogram read(JsonReader in) throws IOException {
        throw new UnsupportedOperationException();
      }
    }).setPrettyPrinting().create();

  /** Returns the block content as a String. */
  public static String toString(final CachedBlock cb, final long now) {
    return "filename=" + cb.getFilename() + ", " + toStringMinusFileName(cb, now);
  }

  /**
   * Little data structure to hold counts for a file. Used when doing a toJSON.
   */
  static class CachedBlockCountsPerFile {
    private int count = 0;
    private long size = 0;
    private int countData = 0;
    private long sizeData = 0;
    private final String filename;

    CachedBlockCountsPerFile(final String filename) {
      this.filename = filename;
    }

    public int getCount() {
      return count;
    }

    public long getSize() {
      return size;
    }

    public int getCountData() {
      return countData;
    }

    public long getSizeData() {
      return sizeData;
    }

    public String getFilename() {
      return filename;
    }
  }

  /** Returns a JSON String of <code>filename</code> and counts of <code>blocks</code> */
  public static String toJSON(String filename, NavigableSet<CachedBlock> blocks)
    throws IOException {
    CachedBlockCountsPerFile counts = new CachedBlockCountsPerFile(filename);
    for (CachedBlock cb : blocks) {
      counts.count++;
      counts.size += cb.getSize();
      BlockType bt = cb.getBlockType();
      if (bt != null && bt.isData()) {
        counts.countData++;
        counts.sizeData += cb.getSize();
      }
    }
    return GSON.toJson(counts);
  }

  /** Returns a JSON string of <code>cbsbf</code> aggregated */
  public static String toJSON(CachedBlocksByFile cbsbf) throws IOException {
    return GSON.toJson(cbsbf);
  }

  /** Returns a JSON string of <code>bc</code> content. */
  public static String toJSON(BlockCache bc) throws IOException {
    return GSON.toJson(bc);
  }

  /** Returns the block content of <code>cb</code> as a String, minus the filename. */
  public static String toStringMinusFileName(final CachedBlock cb, final long now) {
    return "offset=" + cb.getOffset() + ", size=" + cb.getSize() + ", age="
      + (now - cb.getCachedTime()) + ", type=" + cb.getBlockType() + ", priority="
      + cb.getBlockPriority();
  }

  /**
   * Get a {@link CachedBlocksByFile} instance and load it up by iterating content in
   * {@link BlockCache}.
   * @param conf Used to read configurations
   * @param bc   Block Cache to iterate.
   * @return Loaded up instance of CachedBlocksByFile
   */
  public static CachedBlocksByFile getLoadedCachedBlocksByFile(final Configuration conf,
    final BlockCache bc) {
    CachedBlocksByFile cbsbf = new CachedBlocksByFile(conf);
    for (CachedBlock cb : bc) {
      if (cbsbf.update(cb)) break;
    }
    return cbsbf;
  }

  private static int compareCacheBlock(Cacheable left, Cacheable right,
    boolean includeNextBlockMetadata) {
    ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength());
    left.serialize(l, includeNextBlockMetadata);
    ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength());
    right.serialize(r, includeNextBlockMetadata);
    return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(), r.array(), r.arrayOffset(),
      r.limit());
  }

  /**
   * Validate that the existing and newBlock are the same without including the nextBlockMetadata;
   * if not, throw an exception. If they are the same without the nextBlockMetadata, return the
   * comparison.
   * @param existing block that already exists in the cache.
   * @param newBlock block that is trying to be cached.
   * @param cacheKey the cache key of the blocks.
   * @return comparison of the existing block to the newBlock.
   */
  public static int validateBlockAddition(Cacheable existing, Cacheable newBlock,
    BlockCacheKey cacheKey) {
    int comparison = compareCacheBlock(existing, newBlock, false);
    if (comparison != 0) {
      throw new RuntimeException(
        "Cached block contents differ, which should not have happened. cacheKey:" + cacheKey);
    }
    if ((existing instanceof HFileBlock) && (newBlock instanceof HFileBlock)) {
      comparison = ((HFileBlock) existing).getNextBlockOnDiskSize()
        - ((HFileBlock) newBlock).getNextBlockOnDiskSize();
    }
    return comparison;
  }
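
  /*
   * Illustrative sketch of how callers are expected to interpret validateBlockAddition; the
   * variable names below are hypothetical. A negative result means the new block has
   * nextBlockOnDiskSize set while the existing block does not (the usual case), a positive result
   * means the reverse, and zero means the two blocks are fully equivalent.
   *
   *   int cmp = BlockCacheUtil.validateBlockAddition(existing, newBlock, cacheKey);
   *   if (cmp < 0) {
   *     // prefer newBlock; it carries the next block's on-disk size metadata
   *   }
   */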

  /**
   * Because of region splitting, it's possible that the split key falls in the middle of a block,
   * so it's possible that both daughter regions load the same block from their parent HFile. When
   * doing a pread, we don't force the read to read all of the next block header, so when two
   * threads try to cache the same block, it's possible that one thread read all of the next block
   * header but the other one didn't. If the already cached block doesn't have the next block
   * header but the new block to cache does, then we can replace the existing block with the new
   * block for better performance. (HBASE-20447)
   * @param blockCache BlockCache to check
   * @param cacheKey   the block cache key
   * @param newBlock   the new block which we are trying to put into the block cache.
   * @return true if the existing block should be replaced with the new block for the same block
   *         cache key; false if the existing block should be kept.
   */
  public static boolean shouldReplaceExistingCacheBlock(BlockCache blockCache,
    BlockCacheKey cacheKey, Cacheable newBlock) {
    // NOTICE: The getBlock call has retained the existingBlock inside.
    Cacheable existingBlock = blockCache.getBlock(cacheKey, false, false, false);
    if (existingBlock == null) {
      return true;
    }
    try {
      int comparison = BlockCacheUtil.validateBlockAddition(existingBlock, newBlock, cacheKey);
      if (comparison < 0) {
        LOG.warn("Cached block contents differ by nextBlockOnDiskSize, the new block has "
          + "nextBlockOnDiskSize set. Caching new block.");
        return true;
      } else if (comparison > 0) {
        LOG.warn("Cached block contents differ by nextBlockOnDiskSize, the existing block has "
          + "nextBlockOnDiskSize set. Keeping cached block.");
        return false;
      } else {
        LOG.debug("Caching an already cached block: {}. This is harmless and can happen in rare "
          + "cases (see HBASE-8547)", cacheKey);
        return false;
      }
    } finally {
      // Release this block to decrement the reference count.
      existingBlock.release();
    }
  }
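
  /*
   * Usage sketch for shouldReplaceExistingCacheBlock above; the variable names are hypothetical.
   * A caller typically only caches the freshly read block when this returns true, and otherwise
   * keeps the copy that is already in the cache.
   *
   *   if (BlockCacheUtil.shouldReplaceExistingCacheBlock(blockCache, cacheKey, newBlock)) {
   *     blockCache.cacheBlock(cacheKey, newBlock);
   *   }
   */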

  public static Set<String> listAllFilesNames(Map<String, HRegion> onlineRegions) {
    Set<String> files = new HashSet<>();
    onlineRegions.values().forEach(r -> {
      r.getStores().forEach(s -> {
        s.getStorefiles().forEach(f -> files.add(f.getPath().getName()));
      });
    });
    return files;
  }

  private static final int DEFAULT_MAX = 1000000;

  public static int getMaxCachedBlocksByFile(Configuration conf) {
    return conf == null ? DEFAULT_MAX : conf.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX);
  }

  /**
   * Similar to HFileBlock.Writer.getBlockForCaching(), creates an HFileBlock instance without
   * checksum for caching. This is needed when we cache blocks via readers (either prefetch or
   * client read), otherwise we may fail equality comparison when checking against the same block
   * that may already have been cached at write time.
   * @param cacheConf the related CacheConfig object.
   * @param block     the HFileBlock instance to be converted.
   * @return the resulting HFileBlock instance without checksum.
   */
  public static HFileBlock getBlockForCaching(CacheConfig cacheConf, HFileBlock block) {
    // Calculate how many bytes we need for checksum on the tail of the block.
    int numBytes = cacheConf.shouldCacheCompressed(block.getBlockType().getCategory())
      ? 0
      : (int) ChecksumUtil.numBytes(block.getOnDiskDataSizeWithHeader(),
        block.getHFileContext().getBytesPerChecksum());
    ByteBuff buff = block.getBufferReadOnly();
    HFileBlockBuilder builder = new HFileBlockBuilder();
    return builder.withBlockType(block.getBlockType())
      .withOnDiskSizeWithoutHeader(block.getOnDiskSizeWithoutHeader())
      .withUncompressedSizeWithoutHeader(block.getUncompressedSizeWithoutHeader())
      .withPrevBlockOffset(block.getPrevBlockOffset()).withByteBuff(buff)
      .withFillHeader(FILL_HEADER).withOffset(block.getOffset()).withNextBlockOnDiskSize(-1)
      .withOnDiskDataSizeWithHeader(block.getOnDiskDataSizeWithHeader() + numBytes)
      .withNextBlockOnDiskSize(block.getNextBlockOnDiskSize())
      .withHFileContext(cloneContext(block.getHFileContext()))
      .withByteBuffAllocator(cacheConf.getByteBuffAllocator()).withShared(!buff.hasArray()).build();
  }

  public static HFileContext cloneContext(HFileContext context) {
    HFileContext newContext = new HFileContextBuilder().withBlockSize(context.getBlocksize())
      .withBytesPerCheckSum(0).withChecksumType(ChecksumType.NULL) // no checksums in cached data
      .withCompression(context.getCompression())
      .withDataBlockEncoding(context.getDataBlockEncoding())
      .withHBaseCheckSum(context.isUseHBaseChecksum()).withCompressTags(context.isCompressTags())
      .withIncludesMvcc(context.isIncludesMvcc()).withIncludesTags(context.isIncludesTags())
      .withColumnFamily(context.getColumnFamily()).withTableName(context.getTableName()).build();
    return newContext;
  }

  /**
   * Use one of these to keep a running account of cached blocks by file. Throw it away when done.
   * This is different from metrics in that it reports stats on the current state of a cache; see
   * getLoadedCachedBlocksByFile.
   */
  public static class CachedBlocksByFile {
    private int count;
    private int dataBlockCount;
    private long size;
    private long dataSize;
    private final long now = System.nanoTime();
    /**
     * How many blocks to look at before we give up. There could be many millions of blocks; we
     * don't want the UI to freeze while we run through a billion blocks... users will think HBase
     * is dead. The UI displays a warning in red when stats are incomplete.
     */
    private final int max;

    CachedBlocksByFile() {
      this(null);
    }

    CachedBlocksByFile(final Configuration c) {
      this.max = getMaxCachedBlocksByFile(c);
    }

    /**
     * Map by filename. Use concurrent utils because we want our Map and contained blocks sorted.
     */
    private transient NavigableMap<String, NavigableSet<CachedBlock>> cachedBlockByFile =
      new ConcurrentSkipListMap<>();
    FastLongHistogram hist = new FastLongHistogram();

    /** Returns true if full, i.e. if we won't be adding any more. */
    public boolean update(final CachedBlock cb) {
      if (isFull()) return true;
      NavigableSet<CachedBlock> set = this.cachedBlockByFile.get(cb.getFilename());
      if (set == null) {
        set = new ConcurrentSkipListSet<>();
        this.cachedBlockByFile.put(cb.getFilename(), set);
      }
      set.add(cb);
      this.size += cb.getSize();
      this.count++;
      BlockType bt = cb.getBlockType();
      if (bt != null && bt.isData()) {
        this.dataBlockCount++;
        this.dataSize += cb.getSize();
      }
      long age = (this.now - cb.getCachedTime()) / NANOS_PER_SECOND;
      this.hist.add(age, 1);
      return false;
    }
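
    /*
     * Illustrative sketch of how this class is populated and read back, mirroring
     * getLoadedCachedBlocksByFile above; the "bc" variable name is hypothetical.
     *
     *   CachedBlocksByFile cbsbf = BlockCacheUtil.getLoadedCachedBlocksByFile(conf, bc);
     *   if (cbsbf.isFull()) {
     *     // stats are incomplete; the UI flags this case in red
     *   }
     *   String json = BlockCacheUtil.toJSON(cbsbf);
     */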

    /**
     * @return True if full; i.e. there are more items in the cache but we only loaded up the
     *         maximum set in configuration <code>hbase.ui.blockcache.by.file.max</code> (Default:
     *         DEFAULT_MAX).
     */
    public boolean isFull() {
      return this.count >= this.max;
    }

    public NavigableMap<String, NavigableSet<CachedBlock>> getCachedBlockStatsByFile() {
      return this.cachedBlockByFile;
    }

    /** Returns count of blocks in the cache */
    public int getCount() {
      return count;
    }

    public int getDataCount() {
      return dataBlockCount;
    }

    /** Returns size of blocks in the cache */
    public long getSize() {
      return size;
    }

    /** Returns size of data. */
    public long getDataSize() {
      return dataSize;
    }

    public AgeSnapshot getAgeInCacheSnapshot() {
      return new AgeSnapshot(this.hist);
    }

    @Override
    public String toString() {
      AgeSnapshot snapshot = getAgeInCacheSnapshot();
      return "count=" + count + ", dataBlockCount=" + dataBlockCount + ", size=" + size
        + ", dataSize=" + getDataSize() + ", mean age=" + snapshot.getMean() + ", min age="
        + snapshot.getMin() + ", max age=" + snapshot.getMax() + ", 75th percentile age="
        + snapshot.get75thPercentile() + ", 95th percentile age=" + snapshot.get95thPercentile()
        + ", 98th percentile age=" + snapshot.get98thPercentile() + ", 99th percentile age="
        + snapshot.get99thPercentile() + ", 99.9th percentile age="
        + snapshot.get999thPercentile();
    }
  }
}