/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile.bucket;

import java.util.Arrays;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.atomic.LongAdder;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.common.collect.MinMaxPriorityQueue;
import org.apache.hbase.thirdparty.com.google.common.primitives.Ints;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.map.LinkedMap;

/**
 * This class is used to allocate a block with a specified size and free the block when evicting.
 * It manages an array of buckets; each bucket is associated with a size and caches elements up to
 * this size. For a completely empty bucket, this size could be re-specified dynamically.
 * <p/>
 * This class is not thread safe.
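 * <p/>
 * A minimal usage sketch follows. The sizes and capacity below are illustrative only, the
 * constructor is package-private so this only applies inside the bucket cache package, and
 * exception handling is omitted:
 *
 * <pre>{@code
 * // Hypothetical bucket sizes; real deployments should match their block size distribution.
 * int[] bucketSizes = { 5 * 1024, 9 * 1024, 17 * 1024 };
 * BucketAllocator allocator = new BucketAllocator(16 * 1024 * 1024, bucketSizes);
 * long offset = allocator.allocateBlock(8 * 1024);   // rounded up to the 9 KB bucket size
 * int freed = allocator.freeBlock(offset, 8 * 1024); // returns 9 * 1024, the slot size reclaimed
 * }</pre>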
 */
@InterfaceAudience.Private
public final class BucketAllocator {
  private static final Logger LOG = LoggerFactory.getLogger(BucketAllocator.class);

  public static final class Bucket {
    private long baseOffset;
    private int itemAllocationSize, sizeIndex;
    private int itemCount;
    private int[] freeList;
    private int freeCount, usedCount;

    public Bucket(long offset) {
      baseOffset = offset;
      sizeIndex = -1;
    }

    void reconfigure(int sizeIndex, int[] bucketSizes, long bucketCapacity) {
      Preconditions.checkElementIndex(sizeIndex, bucketSizes.length);
      this.sizeIndex = sizeIndex;
      itemAllocationSize = bucketSizes[sizeIndex];
      itemCount = (int) (bucketCapacity / (long) itemAllocationSize);
      freeCount = itemCount;
      usedCount = 0;
      freeList = new int[itemCount];
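      // Every slot starts out free, so seed the free list with all item indices.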
      for (int i = 0; i < freeCount; ++i)
        freeList[i] = i;
    }

    public boolean isUninstantiated() {
      return sizeIndex == -1;
    }

    public int sizeIndex() {
      return sizeIndex;
    }

    public int getItemAllocationSize() {
      return itemAllocationSize;
    }

    public boolean hasFreeSpace() {
      return freeCount > 0;
    }

    public boolean isCompletelyFree() {
      return usedCount == 0;
    }

    public int freeCount() {
      return freeCount;
    }

    public int usedCount() {
      return usedCount;
    }

    public int getFreeBytes() {
      return freeCount * itemAllocationSize;
    }

    public int getUsedBytes() {
      return usedCount * itemAllocationSize;
    }

    public long getBaseOffset() {
      return baseOffset;
    }

    /**
     * Allocate a block in this bucket; return the offset representing its position in physical
     * space.
     * @return the offset in the IOEngine
     */
    public long allocate() {
      assert freeCount > 0; // Else should not have been called
      assert sizeIndex != -1;
      ++usedCount;
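      // Pop the next free slot index off the end of the free list (used as a stack).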
      long offset = baseOffset + (freeList[--freeCount] * itemAllocationSize);
      assert offset >= 0;
      return offset;
    }

    public void addAllocation(long offset) throws BucketAllocatorException {
      offset -= baseOffset;
      if (offset < 0 || offset % itemAllocationSize != 0)
        throw new BucketAllocatorException("Attempt to add allocation for bad offset: " + offset
          + " base=" + baseOffset + ", bucket size=" + itemAllocationSize);
      int idx = (int) (offset / itemAllocationSize);
      boolean matchFound = false;
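      // Remove idx from the free list, shifting the remaining entries down to keep it compact.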
      for (int i = 0; i < freeCount; ++i) {
        if (matchFound) freeList[i - 1] = freeList[i];
        else if (freeList[i] == idx) matchFound = true;
      }
      if (!matchFound) throw new BucketAllocatorException(
        "Couldn't find match for index " + idx + " in free list");
      ++usedCount;
      --freeCount;
    }

    private void free(long offset) {
      offset -= baseOffset;
      assert offset >= 0;
      assert offset < itemCount * itemAllocationSize;
      assert offset % itemAllocationSize == 0;
      assert usedCount > 0;
      assert freeCount < itemCount; // Else duplicate free
      int item = (int) (offset / (long) itemAllocationSize);
      assert !freeListContains(item);
      --usedCount;
      freeList[freeCount++] = item;
    }

    private boolean freeListContains(int blockNo) {
      for (int i = 0; i < freeCount; ++i) {
        if (freeList[i] == blockNo) return true;
      }
      return false;
    }
  }

  final class BucketSizeInfo {
    // Free bucket means it has space to allocate a block;
    // Completely free bucket means it has no block.
    private LinkedMap bucketList, freeBuckets, completelyFreeBuckets;
    // only modified under synchronization, but also read outside it.
    private volatile long fragmentationBytes;
    private int sizeIndex;

    BucketSizeInfo(int sizeIndex) {
      bucketList = new LinkedMap();
      freeBuckets = new LinkedMap();
      completelyFreeBuckets = new LinkedMap();
      fragmentationBytes = 0;
      this.sizeIndex = sizeIndex;
    }

    public synchronized void instantiateBucket(Bucket b) {
      assert b.isUninstantiated() || b.isCompletelyFree();
      b.reconfigure(sizeIndex, bucketSizes, bucketCapacity);
      bucketList.put(b, b);
      freeBuckets.put(b, b);
      completelyFreeBuckets.put(b, b);
    }

    public int sizeIndex() {
      return sizeIndex;
    }

    /**
     * Find a bucket to allocate a block
     * @return the offset in the IOEngine
     */
    public long allocateBlock(int blockSize) {
      Bucket b = null;
      if (freeBuckets.size() > 0) {
        // Use up an existing one first...
        b = (Bucket) freeBuckets.lastKey();
      }
      if (b == null) {
        b = grabGlobalCompletelyFreeBucket();
        if (b != null) instantiateBucket(b);
      }
      if (b == null) return -1;
      long result = b.allocate();
      blockAllocated(b);
      if (blockSize < b.getItemAllocationSize()) {
        fragmentationBytes += b.getItemAllocationSize() - blockSize;
      }
      return result;
    }

    void blockAllocated(Bucket b) {
      if (!b.isCompletelyFree()) completelyFreeBuckets.remove(b);
      if (!b.hasFreeSpace()) freeBuckets.remove(b);
    }

    public Bucket findAndRemoveCompletelyFreeBucket() {
      Bucket b = null;
      assert bucketList.size() > 0;
      if (bucketList.size() == 1) {
        // So we never get complete starvation of a bucket for a size
        return null;
      }

      if (completelyFreeBuckets.size() > 0) {
        b = (Bucket) completelyFreeBuckets.firstKey();
        removeBucket(b);
      }
      return b;
    }

    private synchronized void removeBucket(Bucket b) {
      assert b.isCompletelyFree();
      bucketList.remove(b);
      freeBuckets.remove(b);
      completelyFreeBuckets.remove(b);
    }

    public void freeBlock(Bucket b, long offset, int length) {
      assert bucketList.containsKey(b);
      // else we shouldn't have anything to free...
      assert (!completelyFreeBuckets.containsKey(b));
      b.free(offset);
      if (length < b.getItemAllocationSize()) {
        fragmentationBytes -= b.getItemAllocationSize() - length;
      }
      if (!freeBuckets.containsKey(b)) freeBuckets.put(b, b);
      if (b.isCompletelyFree()) completelyFreeBuckets.put(b, b);
    }

    public synchronized IndexStatistics statistics() {
      long free = 0, used = 0;
      int full = 0;
      for (Object obj : bucketList.keySet()) {
        Bucket b = (Bucket) obj;
        free += b.freeCount();
        used += b.usedCount();
        if (!b.hasFreeSpace()) {
          full++;
        }
      }
      int bucketObjectSize = bucketSizes[sizeIndex];
      // this is most likely to always be 1 or 0
      int fillingBuckets = Math.max(0, freeBuckets.size() - completelyFreeBuckets.size());
      // if bucket capacity is not perfectly divisible by a bucket's object size, there will
      // be some left over per bucket. for some object sizes this may be large enough to be
      // non-trivial and worth tuning by choosing a more divisible object size.
      long wastedBytes = (bucketCapacity % bucketObjectSize) * (full + fillingBuckets);
      return new IndexStatistics(free, used, bucketObjectSize, full, completelyFreeBuckets.size(),
        wastedBytes, fragmentationBytes);
    }

    @Override
    public String toString() {
      return MoreObjects.toStringHelper(this.getClass()).add("sizeIndex", sizeIndex)
        .add("bucketSize", bucketSizes[sizeIndex]).toString();
    }
  }

  // The default block size in HBase is 64 KB, so we provide more sizes near 64 KB; you should
  // tune these to match your cluster's block size distribution.
  // The actual block size in an HFile may be a little larger than the configured size, so we
  // add an extra 1024 bytes to each bucket size to fit such blocks.
  // TODO Support the view of block size distribution statistics
  private static final int[] DEFAULT_BUCKET_SIZES =
    { 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024, 32 * 1024 + 1024, 40 * 1024 + 1024,
      48 * 1024 + 1024, 56 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024,
      192 * 1024 + 1024, 256 * 1024 + 1024, 384 * 1024 + 1024, 512 * 1024 + 1024 };

  /**
   * Round up the given block size to a bucket size and get the corresponding BucketSizeInfo.
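   * <p/>
   * For example, with the default bucket sizes a 64 KB block maps to the (64 + 1) KB bucket. A
   * sketch, where {@code allocator} is a hypothetical instance using the defaults:
   *
   * <pre>{@code
   * BucketSizeInfo info = allocator.roundUpToBucketSizeInfo(64 * 1024);
   * // info.statistics().itemSize() == 64 * 1024 + 1024
   * }</pre>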
   */
  public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) {
    for (int i = 0; i < bucketSizes.length; ++i)
      if (blockSize <= bucketSizes[i]) return bucketSizeInfos[i];
    return null;
  }

  /**
   * The minimum number of items we'll tolerate in a single bucket.
   */
  public static final int FEWEST_ITEMS_IN_BUCKET = 4;

  private final int[] bucketSizes;
  private final int bigItemSize;
  // The capacity size for each bucket
  private final long bucketCapacity;
  private Bucket[] buckets;
  private BucketSizeInfo[] bucketSizeInfos;
  private final long totalSize;
  private transient long usedSize = 0;

  BucketAllocator(long availableSpace, int[] bucketSizes) throws BucketAllocatorException {
    this.bucketSizes = bucketSizes == null ? DEFAULT_BUCKET_SIZES : bucketSizes;
    Arrays.sort(this.bucketSizes);
    this.bigItemSize = Ints.max(this.bucketSizes);
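    // With the default bucket sizes this works out to 4 * (512 KB + 1 KB) = 2,101,248 bytes.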
    this.bucketCapacity = FEWEST_ITEMS_IN_BUCKET * (long) bigItemSize;
    buckets = new Bucket[(int) (availableSpace / bucketCapacity)];
    if (buckets.length < this.bucketSizes.length)
      throw new BucketAllocatorException("Bucket allocator size too small (" + buckets.length
        + "); must have room for at least " + this.bucketSizes.length + " buckets");
    bucketSizeInfos = new BucketSizeInfo[this.bucketSizes.length];
    for (int i = 0; i < this.bucketSizes.length; ++i) {
      bucketSizeInfos[i] = new BucketSizeInfo(i);
    }
    for (int i = 0; i < buckets.length; ++i) {
      buckets[i] = new Bucket(bucketCapacity * i);
      bucketSizeInfos[i < this.bucketSizes.length ? i : this.bucketSizes.length - 1]
        .instantiateBucket(buckets[i]);
    }
    this.totalSize = ((long) buckets.length) * bucketCapacity;
    if (LOG.isInfoEnabled()) {
      LOG.info("Cache totalSize=" + this.totalSize + ", buckets=" + this.buckets.length
        + ", bucket capacity=" + this.bucketCapacity + "=(" + FEWEST_ITEMS_IN_BUCKET + "*"
        + this.bigItemSize + ")="
        + "(FEWEST_ITEMS_IN_BUCKET*(largest configured bucketcache size))");
    }
  }

  /**
   * Rebuild the allocator's data structures from a persisted map.
   * @param availableSpace capacity of cache
   * @param bucketSizes    the bucket sizes to use, or null for the defaults
   * @param map            a map of block keys to BucketEntry (block metadata such as offset and
   *                       length)
   * @param realCacheSize  cached data size statistics for the bucket cache
   */
  BucketAllocator(long availableSpace, int[] bucketSizes, Map<BlockCacheKey, BucketEntry> map,
    LongAdder realCacheSize) throws BucketAllocatorException {
    this(availableSpace, bucketSizes);

    // Each bucket has an offset and a size index. The buckets are probably too big in our
    // default state, so we reconfigure them according to what we've found in the map. We can
    // only reconfigure each bucket once; if we see a bucket again with a different size, we
    // know there's a bug, so we just log the info, throw, and start again...
    boolean[] reconfigured = new boolean[buckets.length];
    int sizeNotMatchedCount = 0;
    int insufficientCapacityCount = 0;
    Iterator<Map.Entry<BlockCacheKey, BucketEntry>> iterator = map.entrySet().iterator();
    while (iterator.hasNext()) {
      Map.Entry<BlockCacheKey, BucketEntry> entry = iterator.next();
      long foundOffset = entry.getValue().offset();
      int foundLen = entry.getValue().getLength();
      int bucketSizeIndex = -1;
      for (int i = 0; i < this.bucketSizes.length; ++i) {
        if (foundLen <= this.bucketSizes[i]) {
          bucketSizeIndex = i;
          break;
        }
      }
      if (bucketSizeIndex == -1) {
        sizeNotMatchedCount++;
        iterator.remove();
        continue;
      }
      int bucketNo = (int) (foundOffset / bucketCapacity);
      if (bucketNo < 0 || bucketNo >= buckets.length) {
        insufficientCapacityCount++;
        iterator.remove();
        continue;
      }
      Bucket b = buckets[bucketNo];
      if (reconfigured[bucketNo]) {
        if (b.sizeIndex() != bucketSizeIndex) {
          throw new BucketAllocatorException("Inconsistent allocation in bucket map;");
        }
      } else {
        if (!b.isCompletelyFree()) {
          throw new BucketAllocatorException(
            "Reconfiguring bucket " + bucketNo + " but it's already allocated; corrupt data");
        }
        // Need to remove the bucket from whichever list it's currently in at
        // the moment...
        BucketSizeInfo bsi = bucketSizeInfos[bucketSizeIndex];
        BucketSizeInfo oldbsi = bucketSizeInfos[b.sizeIndex()];
        oldbsi.removeBucket(b);
        bsi.instantiateBucket(b);
        reconfigured[bucketNo] = true;
      }
      realCacheSize.add(foundLen);
      buckets[bucketNo].addAllocation(foundOffset);
      usedSize += buckets[bucketNo].getItemAllocationSize();
      bucketSizeInfos[bucketSizeIndex].blockAllocated(b);
    }

    if (sizeNotMatchedCount > 0) {
      LOG.warn("There are " + sizeNotMatchedCount + " blocks which can't be rebuilt because "
        + "there is no matching bucket size for these blocks");
    }
    if (insufficientCapacityCount > 0) {
      LOG.warn("There are " + insufficientCapacityCount + " blocks which can't be rebuilt - "
        + "did you shrink the cache?");
    }
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder(1024);
    for (int i = 0; i < buckets.length; ++i) {
      Bucket b = buckets[i];
      if (i > 0) sb.append(", ");
      sb.append("bucket.").append(i).append(": size=").append(b.getItemAllocationSize());
      sb.append(", freeCount=").append(b.freeCount()).append(", used=").append(b.usedCount());
    }
    return sb.toString();
  }

  public long getUsedSize() {
    return this.usedSize;
  }

  public long getFreeSize() {
    return this.totalSize - getUsedSize();
  }

  public long getTotalSize() {
    return this.totalSize;
  }

  /**
   * Allocate a block of the specified size. Return the offset.
   * @param blockSize size of the block to allocate
   * @return the offset in the IOEngine
   */
  public synchronized long allocateBlock(int blockSize)
    throws CacheFullException, BucketAllocatorException {
    assert blockSize > 0;
    BucketSizeInfo bsi = roundUpToBucketSizeInfo(blockSize);
    if (bsi == null) {
      throw new BucketAllocatorException("Allocation too big size=" + blockSize
        + "; adjust BucketCache sizes " + BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY
        + " to accommodate if size seems reasonable and you want it cached.");
    }
    long offset = bsi.allocateBlock(blockSize);

    // Ask caller to free up space and try again!
    if (offset < 0) throw new CacheFullException(blockSize, bsi.sizeIndex());
    usedSize += bucketSizes[bsi.sizeIndex()];
    return offset;
  }

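  // Steal a completely free bucket from any size class; each size keeps its last bucket in
  // reserve, so no size is ever starved of buckets entirely.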
  private Bucket grabGlobalCompletelyFreeBucket() {
    for (BucketSizeInfo bsi : bucketSizeInfos) {
      Bucket b = bsi.findAndRemoveCompletelyFreeBucket();
      if (b != null) return b;
    }
    return null;
  }

  /**
   * Free a block at the given offset.
   * @param offset the block's offset
   * @param length the length of the freed block, used for fragmentation accounting
   * @return size freed
   */
  public synchronized int freeBlock(long offset, int length) {
    int bucketNo = (int) (offset / bucketCapacity);
    assert bucketNo >= 0 && bucketNo < buckets.length;
    Bucket targetBucket = buckets[bucketNo];
    bucketSizeInfos[targetBucket.sizeIndex()].freeBlock(targetBucket, offset, length);
    usedSize -= targetBucket.getItemAllocationSize();
    return targetBucket.getItemAllocationSize();
  }

  public int sizeIndexOfAllocation(long offset) {
    int bucketNo = (int) (offset / bucketCapacity);
    assert bucketNo >= 0 && bucketNo < buckets.length;
    Bucket targetBucket = buckets[bucketNo];
    return targetBucket.sizeIndex();
  }

  public int sizeOfAllocation(long offset) {
    int bucketNo = (int) (offset / bucketCapacity);
    assert bucketNo >= 0 && bucketNo < buckets.length;
    Bucket targetBucket = buckets[bucketNo];
    return targetBucket.getItemAllocationSize();
  }

  /**
   * Statistics to give a glimpse into the distribution of BucketCache objects. Each configured
   * bucket size, denoted by {@link BucketSizeInfo}, gets an IndexStatistics. A BucketSizeInfo
   * allocates blocks of a configured size from claimed buckets. If you have a bucket size of 512k,
   * the corresponding BucketSizeInfo will always allocate chunks of 512k at a time regardless of
   * the actual requested size.
   * <p>
   * Over time, as a BucketSizeInfo gets more allocations, it will claim more buckets from the total
   * pool of completelyFreeBuckets. As blocks are freed from a BucketSizeInfo, those buckets may be
   * returned to the completelyFreeBuckets pool.
   * <p>
   * The IndexStatistics help visualize how these buckets are currently distributed, through counts
   * of items, bytes, and fullBuckets. Additionally, mismatches between block sizes and bucket sizes
   * can result in inefficient cache usage, which typically manifests in three ways:
   * <p>
   * 1. Allocation failures, because the block size is larger than the max bucket size. These show
   * up in logs and can be alleviated by adding larger bucket sizes if appropriate.<br>
   * 2. Memory fragmentation, because blocks are typically smaller than the bucket size. See
   * {@link #fragmentationBytes()} for details.<br>
   * 3. Memory waste, because a bucket's itemSize is not a perfect divisor of bucketCapacity. See
   * {@link #wastedBytes()} for details.<br>
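   * <p>
   * As a rough worked example using the defaults in this class: the bucket capacity is
   * 4 * (512 KB + 1 KB) = 2,101,248 bytes, so a bucket claimed by the (64 + 1) KB size holds
   * 2,101,248 / 66,560 = 31 items, leaving 37,888 bytes of {@link #wastedBytes()} per in-use
   * bucket, and each 64 KB block cached in one of its slots adds 1,024 bytes to
   * {@link #fragmentationBytes()}.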
   */
  static class IndexStatistics {
    private long freeCount, usedCount, itemSize, totalCount, wastedBytes, fragmentationBytes;
    private int fullBuckets, completelyFreeBuckets;

    /**
     * How many more items can be allocated from the currently claimed blocks of this bucket size
     */
    public long freeCount() {
      return freeCount;
    }

    /**
     * How many items are currently taking up space in this bucket size's buckets
     */
    public long usedCount() {
      return usedCount;
    }

    /**
     * Combined {@link #freeCount()} + {@link #usedCount()}
     */
    public long totalCount() {
      return totalCount;
    }

    /**
     * How many more bytes can be allocated from the currently claimed blocks of this bucket size
     */
    public long freeBytes() {
      return freeCount * itemSize;
    }

    /**
     * How many bytes are currently taking up space in this bucket size's buckets. Note: if your
     * items are smaller than the bucket size of this bucket, the actual bytes used by items will
     * be lower than this value. But since a bucket size can only allocate items of a single size,
     * this value is the true number of used bytes. The difference will be counted in
     * {@link #fragmentationBytes()}.
     */
    public long usedBytes() {
      return usedCount * itemSize;
    }

    /**
     * Combined {@link #totalCount()} * {@link #itemSize()}
     */
    public long totalBytes() {
      return totalCount * itemSize;
    }

    /**
     * This bucket size can only allocate items of this size, even if the requested allocation size
     * is smaller. The rest goes towards {@link #fragmentationBytes()}.
     */
    public long itemSize() {
      return itemSize;
    }

    /**
     * How many buckets have been completely filled by blocks for this bucket size. These buckets
     * can't accept any more blocks unless some existing are freed.
     */
    public int fullBuckets() {
      return fullBuckets;
    }

    /**
     * How many buckets are currently claimed by this bucket size but as yet totally unused. These
     * buckets are available for reallocation to other bucket sizes if those fill up.
     */
    public int completelyFreeBuckets() {
      return completelyFreeBuckets;
    }

    /**
     * If {@link #bucketCapacity} is not perfectly divisible by this {@link #itemSize()}, the
     * remainder will be unusable in buckets of this size. A high value here may be optimized by
     * trying to choose bucket sizes which can better divide {@link #bucketCapacity}.
     */
    public long wastedBytes() {
      return wastedBytes;
    }

    /**
     * Every time you allocate blocks in these buckets where the block size is less than the bucket
     * size, fragmentation increases by that difference. You can reduce fragmentation by lowering
     * the bucket size so that it is closer to the typical block size. This may have the consequence
     * of bumping some blocks to the next larger bucket size, so experimentation may be needed.
     */
    public long fragmentationBytes() {
      return fragmentationBytes;
    }

    public IndexStatistics(long free, long used, long itemSize, int fullBuckets,
      int completelyFreeBuckets, long wastedBytes, long fragmentationBytes) {
      setTo(free, used, itemSize, fullBuckets, completelyFreeBuckets, wastedBytes,
        fragmentationBytes);
    }

    public IndexStatistics() {
      setTo(-1, -1, 0, 0, 0, 0, 0);
    }

    public void setTo(long free, long used, long itemSize, int fullBuckets,
      int completelyFreeBuckets, long wastedBytes, long fragmentationBytes) {
      this.itemSize = itemSize;
      this.freeCount = free;
      this.usedCount = used;
      this.totalCount = free + used;
      this.fullBuckets = fullBuckets;
      this.completelyFreeBuckets = completelyFreeBuckets;
      this.wastedBytes = wastedBytes;
      this.fragmentationBytes = fragmentationBytes;
    }
  }

  public Bucket[] getBuckets() {
    return this.buckets;
  }

  void logDebugStatistics() {
    if (!LOG.isDebugEnabled()) {
      return;
    }

    IndexStatistics total = new IndexStatistics();
    IndexStatistics[] stats = getIndexStatistics(total);
    LOG.debug("Bucket allocator statistics follow:");
    LOG.debug(
      "  Free bytes={}; used bytes={}; total bytes={}; wasted bytes={}; fragmentation bytes={}; "
        + "completelyFreeBuckets={}",
      total.freeBytes(), total.usedBytes(), total.totalBytes(), total.wastedBytes(),
      total.fragmentationBytes(), total.completelyFreeBuckets());
    for (IndexStatistics s : stats) {
      LOG.debug(
        "  Object size {}; used={}; free={}; total={}; wasted bytes={}; fragmentation bytes={}, "
          + "full buckets={}",
        s.itemSize(), s.usedCount(), s.freeCount(), s.totalCount(), s.wastedBytes(),
        s.fragmentationBytes(), s.fullBuckets());
    }
  }

  IndexStatistics[] getIndexStatistics(IndexStatistics grandTotal) {
    IndexStatistics[] stats = getIndexStatistics();
    long totalfree = 0, totalused = 0, totalWasted = 0, totalFragmented = 0;
    int fullBuckets = 0, completelyFreeBuckets = 0;

    for (IndexStatistics stat : stats) {
      totalfree += stat.freeBytes();
      totalused += stat.usedBytes();
      totalWasted += stat.wastedBytes();
      totalFragmented += stat.fragmentationBytes();
      fullBuckets += stat.fullBuckets();
      completelyFreeBuckets += stat.completelyFreeBuckets();
    }
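    // itemSize is set to 1 so the byte-oriented getters on the grand total return the summed
    // byte values unchanged.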
    grandTotal.setTo(totalfree, totalused, 1, fullBuckets, completelyFreeBuckets, totalWasted,
      totalFragmented);
    return stats;
  }

  IndexStatistics[] getIndexStatistics() {
    IndexStatistics[] stats = new IndexStatistics[bucketSizes.length];
    for (int i = 0; i < stats.length; ++i)
      stats[i] = bucketSizeInfos[i].statistics();
    return stats;
  }

  public int getBucketIndex(long offset) {
    return (int) (offset / bucketCapacity);
  }

  /**
   * Returns a set of indices of the buckets that are least filled, excluding the given buckets. We
   * also skip any bucket that is the only bucket for its BucketSizeInfo; when everything for that
   * size is empty, that single completely free bucket is kept as a reserve.
   * @param excludedBuckets the buckets that need to be excluded because they are currently in use
   * @param bucketCount     max number of buckets to return
   * @return set of bucket indices which could be used for eviction
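   * <p/>
   * A usage sketch; the exclusion set and count below are illustrative only:
   *
   * <pre>{@code
   * Set<Integer> inUse = new HashSet<>(); // bucket indices that must not be considered
   * Set<Integer> candidates = allocator.getLeastFilledBuckets(inUse, 5);
   * }</pre>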
   */
  public Set<Integer> getLeastFilledBuckets(Set<Integer> excludedBuckets, int bucketCount) {
    Queue<Integer> queue = MinMaxPriorityQueue.<Integer> orderedBy(new Comparator<Integer>() {
      @Override
      public int compare(Integer left, Integer right) {
        // We will always get instantiated buckets
        return Float.compare(((float) buckets[left].usedCount) / buckets[left].itemCount,
          ((float) buckets[right].usedCount) / buckets[right].itemCount);
      }
    }).maximumSize(bucketCount).create();

    for (int i = 0; i < buckets.length; i++) {
      if (!excludedBuckets.contains(i) && !buckets[i].isUninstantiated() &&
      // Avoid the buckets that are the only buckets for a sizeIndex
        bucketSizeInfos[buckets[i].sizeIndex()].bucketList.size() != 1
      ) {
        queue.add(i);
      }
    }

    Set<Integer> result = new HashSet<>(bucketCount);
    result.addAll(queue);

    return result;
  }
}