/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile.bucket;

import java.util.Arrays;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.atomic.LongAdder;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.common.collect.MinMaxPriorityQueue;
import org.apache.hbase.thirdparty.com.google.common.primitives.Ints;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.map.LinkedMap;

/**
 * This class is used to allocate a block of a specified size and to free the block when it is
 * evicted. It manages an array of buckets; each bucket is associated with a size and caches
 * elements up to that size. For a completely empty bucket, this size can be re-specified
 * dynamically.
 * <p/>
 * This class is not thread safe.
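 * <p/>
 * A minimal usage sketch (the cache and block sizes below are illustrative only and exception
 * handling is omitted; in practice this allocator is driven by the BucketCache internals):
 *
 * <pre>{@code
 * // 64 MB of IOEngine space, default bucket sizes
 * BucketAllocator allocator = new BucketAllocator(64 * 1024 * 1024, null);
 * // an 8 KB block rounds up to the 9216-byte bucket size
 * long offset = allocator.allocateBlock(8 * 1024);
 * // freeing returns the bucket's item allocation size, not the block length
 * int freed = allocator.freeBlock(offset, 8 * 1024);
 * }</pre>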
 */
@InterfaceAudience.Private
public final class BucketAllocator {
  private static final Logger LOG = LoggerFactory.getLogger(BucketAllocator.class);

  public final static class Bucket {
    private long baseOffset;
    private int itemAllocationSize, sizeIndex;
    private int itemCount;
    private int freeList[];
    private int freeCount, usedCount;

    public Bucket(long offset) {
      baseOffset = offset;
      sizeIndex = -1;
    }

    void reconfigure(int sizeIndex, int[] bucketSizes, long bucketCapacity) {
      Preconditions.checkElementIndex(sizeIndex, bucketSizes.length);
      this.sizeIndex = sizeIndex;
      itemAllocationSize = bucketSizes[sizeIndex];
      itemCount = (int) (bucketCapacity / (long) itemAllocationSize);
      freeCount = itemCount;
      usedCount = 0;
      freeList = new int[itemCount];
      for (int i = 0; i < freeCount; ++i)
        freeList[i] = i;
    }

    public boolean isUninstantiated() {
      return sizeIndex == -1;
    }

    public int sizeIndex() {
      return sizeIndex;
    }

    public int getItemAllocationSize() {
      return itemAllocationSize;
    }

    public boolean hasFreeSpace() {
      return freeCount > 0;
    }

    public boolean isCompletelyFree() {
      return usedCount == 0;
    }

    public int freeCount() {
      return freeCount;
    }

    public int usedCount() {
      return usedCount;
    }

    public int getFreeBytes() {
      return freeCount * itemAllocationSize;
    }

    public int getUsedBytes() {
      return usedCount * itemAllocationSize;
    }

    public long getBaseOffset() {
      return baseOffset;
    }

    /**
     * Allocate a block in this bucket and return the offset representing its position in physical
     * space.
     * @return the offset in the IOEngine
     */
    public long allocate() {
      assert freeCount > 0; // Else should not have been called
      assert sizeIndex != -1;
      ++usedCount;
      long offset = baseOffset + (freeList[--freeCount] * itemAllocationSize);
      assert offset >= 0;
      return offset;
    }

    public void addAllocation(long offset) throws BucketAllocatorException {
      offset -= baseOffset;
      if (offset < 0 || offset % itemAllocationSize != 0)
        throw new BucketAllocatorException("Attempt to add allocation for bad offset: " + offset
          + " base=" + baseOffset + ", bucket size=" + itemAllocationSize);
      int idx = (int) (offset / itemAllocationSize);
      boolean matchFound = false;
      for (int i = 0; i < freeCount; ++i) {
        if (matchFound) freeList[i - 1] = freeList[i];
        else if (freeList[i] == idx) matchFound = true;
      }
      if (!matchFound) throw new BucketAllocatorException(
        "Couldn't find match for index " + idx + " in free list");
      ++usedCount;
      --freeCount;
    }

    private void free(long offset) {
      offset -= baseOffset;
      assert offset >= 0;
      assert offset < itemCount * itemAllocationSize;
      assert offset % itemAllocationSize == 0;
      assert usedCount > 0;
      assert freeCount < itemCount; // Else duplicate free
      int item = (int) (offset / (long) itemAllocationSize);
      assert !freeListContains(item);
      --usedCount;
      freeList[freeCount++] = item;
    }

    private boolean freeListContains(int blockNo) {
      for (int i = 0; i < freeCount; ++i) {
        if (freeList[i] == blockNo) return true;
      }
      return false;
    }
  }

  final class BucketSizeInfo {
    // Free bucket means it has space to allocate a block;
    // Completely free bucket means it has no block.
    private LinkedMap bucketList, freeBuckets, completelyFreeBuckets;
    // only modified under synchronization, but also read outside it.
    private volatile long fragmentationBytes;
    private int sizeIndex;

    BucketSizeInfo(int sizeIndex) {
      bucketList = new LinkedMap();
      freeBuckets = new LinkedMap();
      completelyFreeBuckets = new LinkedMap();
      fragmentationBytes = 0;
      this.sizeIndex = sizeIndex;
    }

    public synchronized void instantiateBucket(Bucket b) {
      assert b.isUninstantiated() || b.isCompletelyFree();
      b.reconfigure(sizeIndex, bucketSizes, bucketCapacity);
      bucketList.put(b, b);
      freeBuckets.put(b, b);
      completelyFreeBuckets.put(b, b);
    }

    public int sizeIndex() {
      return sizeIndex;
    }

    /**
     * Find a bucket to allocate a block
     * @return the offset in the IOEngine
     */
    public long allocateBlock(int blockSize) {
      Bucket b = null;
      if (freeBuckets.size() > 0) {
        // Use up an existing one first...
        b = (Bucket) freeBuckets.lastKey();
      }
      if (b == null) {
        b = grabGlobalCompletelyFreeBucket();
        if (b != null) instantiateBucket(b);
      }
      if (b == null) return -1;
      long result = b.allocate();
      blockAllocated(b);
      if (blockSize < b.getItemAllocationSize()) {
        fragmentationBytes += b.getItemAllocationSize() - blockSize;
      }
      return result;
    }

    void blockAllocated(Bucket b) {
      if (!b.isCompletelyFree()) completelyFreeBuckets.remove(b);
      if (!b.hasFreeSpace()) freeBuckets.remove(b);
    }

    public Bucket findAndRemoveCompletelyFreeBucket() {
      Bucket b = null;
      assert bucketList.size() > 0;
      if (bucketList.size() == 1) {
        // So we never get complete starvation of a bucket for a size
        return null;
      }

      if (completelyFreeBuckets.size() > 0) {
        b = (Bucket) completelyFreeBuckets.firstKey();
        removeBucket(b);
      }
      return b;
    }

    private synchronized void removeBucket(Bucket b) {
      assert b.isCompletelyFree();
      bucketList.remove(b);
      freeBuckets.remove(b);
      completelyFreeBuckets.remove(b);
    }

    public void freeBlock(Bucket b, long offset, int length) {
      assert bucketList.containsKey(b);
      // else we shouldn't have anything to free...
      assert (!completelyFreeBuckets.containsKey(b));
      b.free(offset);
      if (length < b.getItemAllocationSize()) {
        fragmentationBytes -= b.getItemAllocationSize() - length;
      }
      if (!freeBuckets.containsKey(b)) freeBuckets.put(b, b);
      if (b.isCompletelyFree()) completelyFreeBuckets.put(b, b);
    }

    public synchronized IndexStatistics statistics() {
      long free = 0, used = 0;
      int full = 0;
      for (Object obj : bucketList.keySet()) {
        Bucket b = (Bucket) obj;
        free += b.freeCount();
        used += b.usedCount();
        if (!b.hasFreeSpace()) {
          full++;
        }
      }
      int bucketObjectSize = bucketSizes[sizeIndex];
      // this is most likely to always be 1 or 0
      int fillingBuckets = Math.max(0, freeBuckets.size() - completelyFreeBuckets.size());
      // if bucket capacity is not perfectly divisible by a bucket's object size, there will
      // be some left over per bucket. for some object sizes this may be large enough to be
      // non-trivial and worth tuning by choosing a more divisible object size.
      long wastedBytes = (bucketCapacity % bucketObjectSize) * (full + fillingBuckets);
      return new IndexStatistics(free, used, bucketObjectSize, full, completelyFreeBuckets.size(),
        wastedBytes, fragmentationBytes);
    }

    @Override
    public String toString() {
      return MoreObjects.toStringHelper(this.getClass()).add("sizeIndex", sizeIndex)
        .add("bucketSize", bucketSizes[sizeIndex]).toString();
    }
  }

  // The default block size in HBase is 64K, so we choose more sizes near 64K; you should tune
  // these sizes according to your cluster's block size distribution.
  // The real block size in an HFile may be a little larger than the configured size, so we add
  // an extra 1024 bytes to each size so that such blocks still fit.
  // TODO Support the view of block size distribution statistics
  private static final int DEFAULT_BUCKET_SIZES[] =
    { 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024, 32 * 1024 + 1024, 40 * 1024 + 1024,
      48 * 1024 + 1024, 56 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024,
      192 * 1024 + 1024, 256 * 1024 + 1024, 384 * 1024 + 1024, 512 * 1024 + 1024 };
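  // For example, these sizes can typically be overridden with the comma-separated property
  // referenced by BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY ("hbase.bucketcache.bucket.sizes");
  // a hypothetical hbase-site.xml entry tuned for smaller blocks might look like:
  //   <property>
  //     <name>hbase.bucketcache.bucket.sizes</name>
  //     <value>5120,9216,17408,33792,66560</value>
  //   </property>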

  /**
   * Round up the given block size to the nearest bucket size and return the corresponding
   * BucketSizeInfo, or null if the size exceeds the largest configured bucket size.
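   * <p/>
   * For example, with the default sizes in this file a 6 KB block rounds up to the 9216-byte
   * (8K + 1K) bucket size and is served by that size's BucketSizeInfo.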
   */
  public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) {
    for (int i = 0; i < bucketSizes.length; ++i)
      if (blockSize <= bucketSizes[i]) return bucketSizeInfos[i];
    return null;
  }

  /**
   * So, what is the minimum number of items we'll tolerate in a single bucket?
   */
  static public final int FEWEST_ITEMS_IN_BUCKET = 4;

  private final int[] bucketSizes;
  private final int bigItemSize;
  // The capacity size for each bucket
  private final long bucketCapacity;
  private Bucket[] buckets;
  private BucketSizeInfo[] bucketSizeInfos;
  private final long totalSize;
  private transient long usedSize = 0;

  BucketAllocator(long availableSpace, int[] bucketSizes) throws BucketAllocatorException {
    this.bucketSizes = bucketSizes == null ? DEFAULT_BUCKET_SIZES : bucketSizes;
    Arrays.sort(this.bucketSizes);
    this.bigItemSize = Ints.max(this.bucketSizes);
    this.bucketCapacity = FEWEST_ITEMS_IN_BUCKET * (long) bigItemSize;
    buckets = new Bucket[(int) (availableSpace / bucketCapacity)];
    if (buckets.length < this.bucketSizes.length)
      throw new BucketAllocatorException("Bucket allocator size too small (" + buckets.length
        + "); must have room for at least " + this.bucketSizes.length + " buckets");
    bucketSizeInfos = new BucketSizeInfo[this.bucketSizes.length];
    for (int i = 0; i < this.bucketSizes.length; ++i) {
      bucketSizeInfos[i] = new BucketSizeInfo(i);
    }
    for (int i = 0; i < buckets.length; ++i) {
      buckets[i] = new Bucket(bucketCapacity * i);
      bucketSizeInfos[i < this.bucketSizes.length ? i : this.bucketSizes.length - 1]
        .instantiateBucket(buckets[i]);
    }
    this.totalSize = ((long) buckets.length) * bucketCapacity;
    if (LOG.isInfoEnabled()) {
      LOG.info("Cache totalSize=" + this.totalSize + ", buckets=" + this.buckets.length
        + ", bucket capacity=" + this.bucketCapacity + "=(" + FEWEST_ITEMS_IN_BUCKET + "*"
        + this.bigItemSize + ")="
        + "(FEWEST_ITEMS_IN_BUCKET*(largest configured bucketcache size))");
    }
  }

  /**
   * Rebuild the allocator's data structures from a persisted map.
   * @param availableSpace capacity of the cache
   * @param map            a map of block keys to BucketEntry values (a block's metadata, such as
   *                       offset and length)
   * @param realCacheSize  cached data size statistics for the bucket cache
   */
  BucketAllocator(long availableSpace, int[] bucketSizes, Map<BlockCacheKey, BucketEntry> map,
    LongAdder realCacheSize) throws BucketAllocatorException {
    this(availableSpace, bucketSizes);

    // Each bucket has an offset and a sizeIndex. The buckets are probably too big in our default
    // state, so we reconfigure them according to what we've found in the map. We can only
    // reconfigure each bucket once; if we try more than once, we know there's a bug, so we just
    // log the info, throw, and start again...
    boolean[] reconfigured = new boolean[buckets.length];
    int sizeNotMatchedCount = 0;
    int insufficientCapacityCount = 0;
    Iterator<Map.Entry<BlockCacheKey, BucketEntry>> iterator = map.entrySet().iterator();
    while (iterator.hasNext()) {
      Map.Entry<BlockCacheKey, BucketEntry> entry = iterator.next();
      long foundOffset = entry.getValue().offset();
      int foundLen = entry.getValue().getLength();
      int bucketSizeIndex = -1;
      for (int i = 0; i < this.bucketSizes.length; ++i) {
        if (foundLen <= this.bucketSizes[i]) {
          bucketSizeIndex = i;
          break;
        }
      }
      if (bucketSizeIndex == -1) {
        sizeNotMatchedCount++;
        iterator.remove();
        continue;
      }
      int bucketNo = (int) (foundOffset / bucketCapacity);
      if (bucketNo < 0 || bucketNo >= buckets.length) {
        insufficientCapacityCount++;
        iterator.remove();
        continue;
      }
      Bucket b = buckets[bucketNo];
      if (reconfigured[bucketNo]) {
        if (b.sizeIndex() != bucketSizeIndex) {
          throw new BucketAllocatorException("Inconsistent allocation in bucket map;");
        }
      } else {
        if (!b.isCompletelyFree()) {
          throw new BucketAllocatorException(
            "Reconfiguring bucket " + bucketNo + " but it's already allocated; corrupt data");
        }
        // Need to remove the bucket from whichever list it's currently in at
        // the moment...
        BucketSizeInfo bsi = bucketSizeInfos[bucketSizeIndex];
        BucketSizeInfo oldbsi = bucketSizeInfos[b.sizeIndex()];
        oldbsi.removeBucket(b);
        bsi.instantiateBucket(b);
        reconfigured[bucketNo] = true;
      }
      realCacheSize.add(foundLen);
      buckets[bucketNo].addAllocation(foundOffset);
      usedSize += buckets[bucketNo].getItemAllocationSize();
      bucketSizeInfos[bucketSizeIndex].blockAllocated(b);
    }

    if (sizeNotMatchedCount > 0) {
      LOG.warn("There are " + sizeNotMatchedCount + " blocks which can't be rebuilt because "
        + "there is no matching bucket size for these blocks");
    }
    if (insufficientCapacityCount > 0) {
      LOG.warn("There are " + insufficientCapacityCount + " blocks which can't be rebuilt - "
        + "did you shrink the cache?");
    }
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder(1024);
    for (int i = 0; i < buckets.length; ++i) {
      Bucket b = buckets[i];
      if (i > 0) sb.append(", ");
      sb.append("bucket.").append(i).append(": size=").append(b.getItemAllocationSize());
      sb.append(", freeCount=").append(b.freeCount()).append(", used=").append(b.usedCount());
    }
    return sb.toString();
  }

  public long getUsedSize() {
    return this.usedSize;
  }

  public long getFreeSize() {
    return this.totalSize - getUsedSize();
  }

  public long getTotalSize() {
    return this.totalSize;
  }

  /**
   * Allocate a block of the specified size and return its offset.
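   * <p>
   * A sketch of the typical call pattern (exception handling simplified; {@code allocator} here
   * stands for an instance of this class):
   *
   * <pre>{@code
   * try {
   *   long offset = allocator.allocateBlock(blockLen);
   *   // write the block to the IOEngine at this offset
   * } catch (CacheFullException cfe) {
   *   // ask the cache to evict some blocks, then retry the allocation
   * }
   * }</pre>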
   * @param blockSize size of block
   * @return the offset in the IOEngine
   */
  public synchronized long allocateBlock(int blockSize)
    throws CacheFullException, BucketAllocatorException {
    assert blockSize > 0;
    BucketSizeInfo bsi = roundUpToBucketSizeInfo(blockSize);
    if (bsi == null) {
      throw new BucketAllocatorException("Allocation too big size=" + blockSize
        + "; adjust BucketCache sizes " + BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY
        + " to accommodate if size seems reasonable and you want it cached.");
    }
    long offset = bsi.allocateBlock(blockSize);

    // Ask caller to free up space and try again!
    if (offset < 0) throw new CacheFullException(blockSize, bsi.sizeIndex());
    usedSize += bucketSizes[bsi.sizeIndex()];
    return offset;
  }

  private Bucket grabGlobalCompletelyFreeBucket() {
    for (BucketSizeInfo bsi : bucketSizeInfos) {
      Bucket b = bsi.findAndRemoveCompletelyFreeBucket();
      if (b != null) return b;
    }
    return null;
  }

  /**
   * Free the block at the given offset.
   * @param offset the block's offset in the IOEngine
   * @param length the length of the block being freed
   * @return the number of bytes freed (the bucket's item allocation size)
   */
  public synchronized int freeBlock(long offset, int length) {
    int bucketNo = (int) (offset / bucketCapacity);
    assert bucketNo >= 0 && bucketNo < buckets.length;
    Bucket targetBucket = buckets[bucketNo];
    bucketSizeInfos[targetBucket.sizeIndex()].freeBlock(targetBucket, offset, length);
    usedSize -= targetBucket.getItemAllocationSize();
    return targetBucket.getItemAllocationSize();
  }

  public int sizeIndexOfAllocation(long offset) {
    int bucketNo = (int) (offset / bucketCapacity);
    assert bucketNo >= 0 && bucketNo < buckets.length;
    Bucket targetBucket = buckets[bucketNo];
    return targetBucket.sizeIndex();
  }

  public int sizeOfAllocation(long offset) {
    int bucketNo = (int) (offset / bucketCapacity);
    assert bucketNo >= 0 && bucketNo < buckets.length;
    Bucket targetBucket = buckets[bucketNo];
    return targetBucket.getItemAllocationSize();
  }

  /**
   * Statistics to give a glimpse into the distribution of BucketCache objects. Each configured
   * bucket size, denoted by {@link BucketSizeInfo}, gets an IndexStatistics. A BucketSizeInfo
   * allocates blocks of a configured size from claimed buckets. If you have a bucket size of 512k,
   * the corresponding BucketSizeInfo will always allocate chunks of 512k at a time, regardless of
   * the actual requested size.
   * <p>
   * Over time, as a BucketSizeInfo gets more allocations, it will claim more buckets from the total
   * pool of completelyFreeBuckets. As blocks are freed from a BucketSizeInfo, those buckets may be
   * returned to the completelyFreeBuckets pool.
   * <p>
   * The IndexStatistics help visualize how these buckets are currently distributed, through counts
   * of items, bytes, and fullBuckets. Additionally, mismatches between block sizes and bucket sizes
   * can result in inefficient cache usage, which typically shows up in three ways:
   * <p>
   * 1. Allocation failures, because the block size is larger than the maximum bucket size. These
   * show up in logs and can be alleviated by adding larger bucket sizes if appropriate.<br>
   * 2. Memory fragmentation, because blocks are typically smaller than the bucket size. See
   * {@link #fragmentationBytes()} for details.<br>
   * 3. Memory waste, because a bucket's itemSize is not a perfect divisor of bucketCapacity. See
   * {@link #wastedBytes()} for details.<br>
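   * <p>
   * For example, with the defaults in this file the bucket capacity is 4 * 525,312 = 2,101,248
   * bytes (FEWEST_ITEMS_IN_BUCKET times the largest default size); the 66,560-byte (64K + 1K)
   * bucket size then holds 31 items per bucket and wastes 2,101,248 - 31 * 66,560 = 37,888 bytes
   * per full bucket, while caching 60 KB (61,440-byte) blocks in it additionally fragments
   * 66,560 - 61,440 = 5,120 bytes per block.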
   */
  static class IndexStatistics {
    private long freeCount, usedCount, itemSize, totalCount, wastedBytes, fragmentationBytes;
    private int fullBuckets, completelyFreeBuckets;

    /**
     * How many more items can be allocated from the currently claimed buckets of this bucket size
     */
    public long freeCount() {
      return freeCount;
    }

    /**
     * How many items are currently taking up space in this bucket size's buckets
     */
    public long usedCount() {
      return usedCount;
    }

    /**
     * Combined {@link #freeCount()} + {@link #usedCount()}
     */
    public long totalCount() {
      return totalCount;
    }

    /**
     * How many more bytes can be allocated from the currently claimed buckets of this bucket size
     */
    public long freeBytes() {
      return freeCount * itemSize;
    }

    /**
     * How many bytes are currently taking up space in this bucket size's buckets. Note: if your
     * items are smaller than the bucket size, the bytes actually occupied by the items will be
     * lower than this value. But since a bucket size can only allocate items of a single size,
     * this value is the true number of bytes used by the cache. The difference will be counted in
     * {@link #fragmentationBytes()}.
     */
    public long usedBytes() {
      return usedCount * itemSize;
    }

    /**
     * Combined {@link #totalCount()} * {@link #itemSize()}
     */
    public long totalBytes() {
      return totalCount * itemSize;
    }

    /**
     * This bucket size can only allocate items of this size, even if the requested allocation size
     * is smaller. The rest goes towards {@link #fragmentationBytes()}.
     */
    public long itemSize() {
      return itemSize;
    }

    /**
     * How many buckets have been completely filled by blocks for this bucket size. These buckets
     * can't accept any more blocks unless some existing are freed.
     */
    public int fullBuckets() {
      return fullBuckets;
    }

    /**
     * How many buckets are currently claimed by this bucket size but as yet totally unused. These
     * buckets are available for reallocation to other bucket sizes if those fill up.
     */
    public int completelyFreeBuckets() {
      return completelyFreeBuckets;
    }

    /**
     * If {@link #bucketCapacity} is not perfectly divisible by this {@link #itemSize()}, the
     * remainder will be unusable in buckets of this size. A high value here may be optimized by
     * trying to choose bucket sizes which can better divide {@link #bucketCapacity}.
     */
    public long wastedBytes() {
      return wastedBytes;
    }

    /**
     * Every time you allocate blocks in these buckets where the block size is less than the bucket
     * size, fragmentation increases by that difference. You can reduce fragmentation by lowering
     * the bucket size so that it is closer to the typical block size. This may have the consequence
     * of bumping some blocks to the next larger bucket size, so experimentation may be needed.
     */
    public long fragmentationBytes() {
      return fragmentationBytes;
    }

    public IndexStatistics(long free, long used, long itemSize, int fullBuckets,
      int completelyFreeBuckets, long wastedBytes, long fragmentationBytes) {
      setTo(free, used, itemSize, fullBuckets, completelyFreeBuckets, wastedBytes,
        fragmentationBytes);
    }

    public IndexStatistics() {
      setTo(-1, -1, 0, 0, 0, 0, 0);
    }

    public void setTo(long free, long used, long itemSize, int fullBuckets,
      int completelyFreeBuckets, long wastedBytes, long fragmentationBytes) {
      this.itemSize = itemSize;
      this.freeCount = free;
      this.usedCount = used;
      this.totalCount = free + used;
      this.fullBuckets = fullBuckets;
      this.completelyFreeBuckets = completelyFreeBuckets;
      this.wastedBytes = wastedBytes;
      this.fragmentationBytes = fragmentationBytes;
    }
  }

  public Bucket[] getBuckets() {
    return this.buckets;
  }

  void logDebugStatistics() {
    if (!LOG.isDebugEnabled()) {
      return;
    }

    IndexStatistics total = new IndexStatistics();
    IndexStatistics[] stats = getIndexStatistics(total);
    LOG.debug("Bucket allocator statistics follow:");
    LOG.debug(
      "  Free bytes={}; used bytes={}; total bytes={}; wasted bytes={}; fragmentation bytes={}; "
        + "completelyFreeBuckets={}",
      total.freeBytes(), total.usedBytes(), total.totalBytes(), total.wastedBytes(),
      total.fragmentationBytes(), total.completelyFreeBuckets());
    for (IndexStatistics s : stats) {
      LOG.debug(
        "  Object size {}; used={}; free={}; total={}; wasted bytes={}; fragmentation bytes={}, "
          + "full buckets={}",
        s.itemSize(), s.usedCount(), s.freeCount(), s.totalCount(), s.wastedBytes(),
        s.fragmentationBytes(), s.fullBuckets());
    }
  }

  IndexStatistics[] getIndexStatistics(IndexStatistics grandTotal) {
    IndexStatistics[] stats = getIndexStatistics();
    long totalfree = 0, totalused = 0, totalWasted = 0, totalFragmented = 0;
    int fullBuckets = 0, completelyFreeBuckets = 0;

    for (IndexStatistics stat : stats) {
      totalfree += stat.freeBytes();
      totalused += stat.usedBytes();
      totalWasted += stat.wastedBytes();
      totalFragmented += stat.fragmentationBytes();
      fullBuckets += stat.fullBuckets();
      completelyFreeBuckets += stat.completelyFreeBuckets();
    }
    grandTotal.setTo(totalfree, totalused, 1, fullBuckets, completelyFreeBuckets, totalWasted,
      totalFragmented);
    return stats;
  }

  IndexStatistics[] getIndexStatistics() {
    IndexStatistics[] stats = new IndexStatistics[bucketSizes.length];
    for (int i = 0; i < stats.length; ++i)
      stats[i] = bucketSizeInfos[i].statistics();
    return stats;
  }

  public int getBucketIndex(long offset) {
    return (int) (offset / bucketCapacity);
  }

  /**
   * Returns the indices of the buckets that are least filled, excluding the given buckets. Buckets
   * that are the only bucket for their size index are also kept out of the result, so every bucket
   * size always retains at least one bucket in reserve.
   * @param excludedBuckets the buckets to exclude because they are currently in use
   * @param bucketCount     maximum number of bucket indices to return
   * @return set of bucket indices which could be used for eviction
   */
  public Set<Integer> getLeastFilledBuckets(Set<Integer> excludedBuckets, int bucketCount) {
    Queue<Integer> queue = MinMaxPriorityQueue.<Integer> orderedBy(new Comparator<Integer>() {
      @Override
      public int compare(Integer left, Integer right) {
        // We will always get instantiated buckets
        return Float.compare(((float) buckets[left].usedCount) / buckets[left].itemCount,
          ((float) buckets[right].usedCount) / buckets[right].itemCount);
      }
    }).maximumSize(bucketCount).create();

    for (int i = 0; i < buckets.length; i++) {
      if (!excludedBuckets.contains(i) && !buckets[i].isUninstantiated() &&
      // Avoid the buckets that are the only buckets for a sizeIndex
        bucketSizeInfos[buckets[i].sizeIndex()].bucketList.size() != 1
      ) {
        queue.add(i);
      }
    }

    Set<Integer> result = new HashSet<>(bucketCount);
    result.addAll(queue);

    return result;
  }
}