/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.apache.hadoop.hbase.io.hfile.HFileBlock.FILL_HEADER;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.HashSet;
import java.util.Map;
import java.util.NavigableMap;
import java.util.NavigableSet;
import java.util.Set;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ConcurrentSkipListSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.GsonUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.gson.Gson;
import org.apache.hbase.thirdparty.com.google.gson.TypeAdapter;
import org.apache.hbase.thirdparty.com.google.gson.stream.JsonReader;
import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter;

/**
 * Utility for aggregating counts in CachedBlocks and toString/toJSON CachedBlocks and BlockCaches.
 * No attempt has been made at making this thread safe.
 */
@InterfaceAudience.Private
public class BlockCacheUtil {

  private static final Logger LOG = LoggerFactory.getLogger(BlockCacheUtil.class);

  public static final long NANOS_PER_SECOND = 1000000000;

  /**
   * Needed for generating JSON.
   */
  private static final Gson GSON = GsonUtil.createGson()
    .registerTypeAdapter(FastLongHistogram.class, new TypeAdapter<FastLongHistogram>() {

      @Override
      public void write(JsonWriter out, FastLongHistogram value) throws IOException {
        AgeSnapshot snapshot = new AgeSnapshot(value);
        out.beginObject();
        out.name("mean").value(snapshot.getMean());
        out.name("min").value(snapshot.getMin());
        out.name("max").value(snapshot.getMax());
        out.name("75thPercentile").value(snapshot.get75thPercentile());
        out.name("95thPercentile").value(snapshot.get95thPercentile());
        out.name("98thPercentile").value(snapshot.get98thPercentile());
        out.name("99thPercentile").value(snapshot.get99thPercentile());
        out.name("999thPercentile").value(snapshot.get999thPercentile());
        out.endObject();
      }

      @Override
      public FastLongHistogram read(JsonReader in) throws IOException {
        throw new UnsupportedOperationException();
      }
    }).setPrettyPrinting().create();

  /** Returns The block content as String. */
  public static String toString(final CachedBlock cb, final long now) {
    return "filename=" + cb.getFilename() + ", " + toStringMinusFileName(cb, now);
  }

  /**
   * Little data structure to hold counts for a file. Used when doing a toJSON.
   */
  static class CachedBlockCountsPerFile {
    private int count = 0;
    private long size = 0;
    private int countData = 0;
    private long sizeData = 0;
    private final String filename;

    CachedBlockCountsPerFile(final String filename) {
      this.filename = filename;
    }

    public int getCount() {
      return count;
    }

    public long getSize() {
      return size;
    }

    public int getCountData() {
      return countData;
    }

    public long getSizeData() {
      return sizeData;
    }

    public String getFilename() {
      return filename;
    }
  }

  /** Returns A JSON String of <code>filename</code> and counts of <code>blocks</code> */
  public static String toJSON(String filename, NavigableSet<CachedBlock> blocks)
    throws IOException {
    CachedBlockCountsPerFile counts = new CachedBlockCountsPerFile(filename);
    for (CachedBlock cb : blocks) {
      counts.count++;
      counts.size += cb.getSize();
      BlockType bt = cb.getBlockType();
      if (bt != null && bt.isData()) {
        counts.countData++;
        counts.sizeData += cb.getSize();
      }
    }
    return GSON.toJson(counts);
  }
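
  // A minimal usage sketch (names are hypothetical): given a CachedBlocksByFile `cbsbf` already
  // loaded via getLoadedCachedBlocksByFile below, per-file JSON can be produced like this:
  //
  //   for (Map.Entry<String, NavigableSet<CachedBlock>> e : cbsbf.getCachedBlockStatsByFile()
  //       .entrySet()) {
  //     String json = BlockCacheUtil.toJSON(e.getKey(), e.getValue());
  //   }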

  /** Returns JSON string of <code>cbsbf</code> aggregated */
  public static String toJSON(CachedBlocksByFile cbsbf) throws IOException {
    return GSON.toJson(cbsbf);
  }

  /** Returns JSON string of <code>bc</code> content. */
  public static String toJSON(BlockCache bc) throws IOException {
    return GSON.toJson(bc);
  }

  /** Returns The block content of <code>cb</code> as a String minus the filename. */
  public static String toStringMinusFileName(final CachedBlock cb, final long now) {
    return "offset=" + cb.getOffset() + ", size=" + cb.getSize() + ", age="
      + (now - cb.getCachedTime()) + ", type=" + cb.getBlockType() + ", priority="
      + cb.getBlockPriority();
  }

  /**
   * Get a {@link CachedBlocksByFile} instance and load it up by iterating content in
   * {@link BlockCache}.
   * @param conf Used to read configurations
   * @param bc   Block Cache to iterate.
   * @return Loaded up instance of CachedBlocksByFile
   */
  public static CachedBlocksByFile getLoadedCachedBlocksByFile(final Configuration conf,
    final BlockCache bc) {
    CachedBlocksByFile cbsbf = new CachedBlocksByFile(conf);
    for (CachedBlock cb : bc) {
      if (cbsbf.update(cb)) break;
    }
    return cbsbf;
  }
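
  // A minimal usage sketch, assuming an already initialized BlockCache `cache` and Configuration
  // `conf` (both hypothetical): aggregate the cache content by file, then render it as JSON.
  //
  //   CachedBlocksByFile cbsbf = BlockCacheUtil.getLoadedCachedBlocksByFile(conf, cache);
  //   String json = BlockCacheUtil.toJSON(cbsbf); // pretty-printed via the GSON instance above
  //   LOG.info("Block cache stats by file: {}", json);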

  private static int compareCacheBlock(Cacheable left, Cacheable right,
    boolean includeNextBlockMetadata) {
    ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength());
    left.serialize(l, includeNextBlockMetadata);
    ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength());
    right.serialize(r, includeNextBlockMetadata);
    return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(), r.array(), r.arrayOffset(),
      r.limit());
  }

  /**
   * Validate that the existing block and newBlock are the same without including the
   * nextBlockMetadata; if not, throw an exception. If they are the same without the
   * nextBlockMetadata, return the comparison.
   * @param existing block that is existing in the cache.
   * @param newBlock block that is trying to be cached.
   * @param cacheKey the cache key of the blocks.
   * @return comparison of the existing block to the newBlock.
   */
  public static int validateBlockAddition(Cacheable existing, Cacheable newBlock,
    BlockCacheKey cacheKey) {
    int comparison = compareCacheBlock(existing, newBlock, false);
    if (comparison != 0) {
      throw new RuntimeException(
        "Cached block contents differ, which should not have happened. cacheKey:" + cacheKey);
    }
    if ((existing instanceof HFileBlock) && (newBlock instanceof HFileBlock)) {
      comparison = ((HFileBlock) existing).getNextBlockOnDiskSize()
        - ((HFileBlock) newBlock).getNextBlockOnDiskSize();
    }
    return comparison;
  }

  /**
   * Because of region splitting, it's possible that the split key is located in the middle of a
   * block. So it's possible that both daughter regions load the same block from their parent
   * HFile. When doing a pread, we don't force the read to read all of the next block header. So
   * when two threads try to cache the same block, it's possible that one thread read all of the
   * next block header but the other one didn't. If the already cached block doesn't have the next
   * block header but the new block to cache does, then we can replace the existing block with the
   * new block for better performance (HBASE-20447).
   * @param blockCache BlockCache to check
   * @param cacheKey   the block cache key
   * @param newBlock   the new block which we are trying to put into the block cache.
   * @return true if we need to replace the existing block with the new block for the same block
   *         cache key; false if we should just keep the existing block.
   */
  public static boolean shouldReplaceExistingCacheBlock(BlockCache blockCache,
    BlockCacheKey cacheKey, Cacheable newBlock) {
    // NOTICE: The getBlock has retained the existingBlock inside.
    Cacheable existingBlock = blockCache.getBlock(cacheKey, false, false, false);
    if (existingBlock == null) {
      return true;
    }
    try {
      int comparison = BlockCacheUtil.validateBlockAddition(existingBlock, newBlock, cacheKey);
      if (comparison < 0) {
        LOG.warn("Cached block contents differ by nextBlockOnDiskSize, the new block has "
          + "nextBlockOnDiskSize set. Caching new block.");
        return true;
      } else if (comparison > 0) {
        LOG.warn("Cached block contents differ by nextBlockOnDiskSize, the existing block has "
          + "nextBlockOnDiskSize set. Keeping cached block.");
        return false;
      } else {
        LOG.debug("Caching an already cached block: {}. This is harmless and can happen in rare "
          + "cases (see HBASE-8547)", cacheKey);
        return false;
      }
    } finally {
      // Release this block to decrement the reference count.
      existingBlock.release();
    }
  }
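
  // A minimal sketch of how a caller might use the check above before caching a block (the
  // names `cache`, `key` and `block` are hypothetical):
  //
  //   if (BlockCacheUtil.shouldReplaceExistingCacheBlock(cache, key, block)) {
  //     cache.cacheBlock(key, block); // replace the stale entry or do a first-time insert
  //   }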

  public static Set<String> listAllFilesNames(Map<String, HRegion> onlineRegions) {
    Set<String> files = new HashSet<>();
    onlineRegions.values().forEach(r -> {
      r.getStores().forEach(s -> {
        s.getStorefiles().forEach(f -> files.add(f.getPath().getName()));
      });
    });
    return files;
  }

  private static final int DEFAULT_MAX = 1000000;

  public static int getMaxCachedBlocksByFile(Configuration conf) {
    return conf == null ? DEFAULT_MAX : conf.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX);
  }
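
  // The cap above is read from `hbase.ui.blockcache.by.file.max`. A sketch of overriding it in
  // a Configuration (the value is for illustration only):
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   conf.setInt("hbase.ui.blockcache.by.file.max", 500000);
  //   int max = BlockCacheUtil.getMaxCachedBlocksByFile(conf); // -> 500000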

  /**
   * Similarly to HFileBlock.Writer.getBlockForCaching(), creates an HFileBlock instance without
   * checksum for caching. This is needed when we cache blocks via readers (either prefetch or
   * client read), otherwise we may fail equality comparison when checking against the same block
   * that may already have been cached at write time.
   * @param cacheConf the related CacheConfig object.
   * @param block     the HFileBlock instance to be converted.
   * @return the resulting HFileBlock instance without checksum.
   */
  public static HFileBlock getBlockForCaching(CacheConfig cacheConf, HFileBlock block) {
    // Calculate how many bytes we need for checksum on the tail of the block.
    int numBytes = cacheConf.shouldCacheCompressed(block.getBlockType().getCategory())
      ? 0
      : (int) ChecksumUtil.numBytes(block.getOnDiskDataSizeWithHeader(),
        block.getHFileContext().getBytesPerChecksum());
    ByteBuff buff = block.getBufferReadOnly();
    HFileBlockBuilder builder = new HFileBlockBuilder();
    return builder.withBlockType(block.getBlockType())
      .withOnDiskSizeWithoutHeader(block.getOnDiskSizeWithoutHeader())
      .withUncompressedSizeWithoutHeader(block.getUncompressedSizeWithoutHeader())
      .withPrevBlockOffset(block.getPrevBlockOffset()).withByteBuff(buff)
      .withFillHeader(FILL_HEADER).withOffset(block.getOffset()).withNextBlockOnDiskSize(-1)
      .withOnDiskDataSizeWithHeader(block.getOnDiskDataSizeWithHeader() + numBytes)
      .withHFileContext(cloneContext(block.getHFileContext()))
      .withByteBuffAllocator(cacheConf.getByteBuffAllocator()).withShared(!buff.hasArray()).build();
  }

  public static HFileContext cloneContext(HFileContext context) {
    HFileContext newContext = new HFileContextBuilder().withBlockSize(context.getBlocksize())
      .withBytesPerCheckSum(0).withChecksumType(ChecksumType.NULL) // no checksums in cached data
      .withCompression(context.getCompression())
      .withDataBlockEncoding(context.getDataBlockEncoding())
      .withHBaseCheckSum(context.isUseHBaseChecksum()).withCompressTags(context.isCompressTags())
      .withIncludesMvcc(context.isIncludesMvcc()).withIncludesTags(context.isIncludesTags())
      .withColumnFamily(context.getColumnFamily()).withTableName(context.getTableName()).build();
    return newContext;
  }
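
  // A minimal sketch of the read-side caching path described above (assumes an HFileBlock
  // `blockRead` just read from disk and a CacheConfig `cacheConf`; both names hypothetical):
  //
  //   HFileBlock toCache = BlockCacheUtil.getBlockForCaching(cacheConf, blockRead);
  //   // toCache carries a cloned HFileContext with ChecksumType.NULL, so it compares equal to
  //   // a block that was cached at write time via HFileBlock.Writer.getBlockForCaching().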

  /**
   * Use one of these to keep a running account of cached blocks by file. Throw it away when done.
   * This is different from metrics in that it is stats on the current state of a cache. See
   * getLoadedCachedBlocksByFile
   */
  public static class CachedBlocksByFile {
    private int count;
    private int dataBlockCount;
    private long size;
    private long dataSize;
    private final long now = System.nanoTime();
    /**
     * How many blocks to look at before we give up. There could be many millions of blocks. We
     * don't want the UI to freeze while we run through 1B blocks... users will think hbase is
     * dead. The UI displays a warning in red when stats are incomplete.
     */
    private final int max;

    CachedBlocksByFile() {
      this(null);
    }

    CachedBlocksByFile(final Configuration c) {
      this.max = getMaxCachedBlocksByFile(c);
    }

    /**
     * Map by filename. Use concurrent utils because we want our Map and contained blocks sorted.
     */
    private transient NavigableMap<String, NavigableSet<CachedBlock>> cachedBlockByFile =
      new ConcurrentSkipListMap<>();
    FastLongHistogram hist = new FastLongHistogram();

    /** Returns true if full, i.e. we won't be adding any more. */
    public boolean update(final CachedBlock cb) {
      if (isFull()) return true;
      NavigableSet<CachedBlock> set = this.cachedBlockByFile.get(cb.getFilename());
      if (set == null) {
        set = new ConcurrentSkipListSet<>();
        this.cachedBlockByFile.put(cb.getFilename(), set);
      }
      set.add(cb);
      this.size += cb.getSize();
      this.count++;
      BlockType bt = cb.getBlockType();
      if (bt != null && bt.isData()) {
        this.dataBlockCount++;
        this.dataSize += cb.getSize();
      }
      long age = (this.now - cb.getCachedTime()) / NANOS_PER_SECOND;
      this.hist.add(age, 1);
      return false;
    }

    /**
     * @return True if full; i.e. there are more items in the cache but we only loaded up the
     *         maximum set in configuration <code>hbase.ui.blockcache.by.file.max</code> (Default:
     *         DEFAULT_MAX).
     */
    public boolean isFull() {
      return this.count >= this.max;
    }

    public NavigableMap<String, NavigableSet<CachedBlock>> getCachedBlockStatsByFile() {
      return this.cachedBlockByFile;
    }

    /** Returns count of blocks in the cache */
    public int getCount() {
      return count;
    }

    public int getDataCount() {
      return dataBlockCount;
    }

    /** Returns size of blocks in the cache */
    public long getSize() {
      return size;
    }

    /** Returns Size of data. */
    public long getDataSize() {
      return dataSize;
    }

    public AgeSnapshot getAgeInCacheSnapshot() {
      return new AgeSnapshot(this.hist);
    }

    @Override
    public String toString() {
      AgeSnapshot snapshot = getAgeInCacheSnapshot();
      return "count=" + count + ", dataBlockCount=" + dataBlockCount + ", size=" + size
        + ", dataSize=" + getDataSize() + ", mean age=" + snapshot.getMean() + ", min age="
        + snapshot.getMin() + ", max age=" + snapshot.getMax() + ", 75th percentile age="
        + snapshot.get75thPercentile() + ", 95th percentile age=" + snapshot.get95thPercentile()
        + ", 98th percentile age=" + snapshot.get98thPercentile() + ", 99th percentile age="
        + snapshot.get99thPercentile() + ", 99.9th percentile age="
        + snapshot.get999thPercentile();
    }
  }
}