/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client.backoff;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;

/**
 * Simple exponential backoff policy for the client that uses percent^4 times the max backoff to
 * generate the backoff time.
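 * <p>
 * As a hand-worked illustration (assuming the default 5 minute max backoff, heap occupancy below
 * the low watermark, and no compaction pressure): a region at 50% memstore load backs off roughly
 * 0.5^4 * 300000 ms = 18750 ms, while a region at 90% load backs off roughly
 * 0.9^4 * 300000 ms = 196830 ms.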
 */
@InterfaceAudience.Public
public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy {

  private static final long ONE_MINUTE = 60 * 1000;
  public static final long DEFAULT_MAX_BACKOFF = 5 * ONE_MINUTE;
  public static final String MAX_BACKOFF_KEY = "hbase.client.exponential-backoff.max";
  private long maxBackoff;
  private float heapOccupancyLowWatermark;
  private float heapOccupancyHighWatermark;

  public ExponentialClientBackoffPolicy(Configuration conf) {
    this.maxBackoff = conf.getLong(MAX_BACKOFF_KEY, DEFAULT_MAX_BACKOFF);
    this.heapOccupancyLowWatermark = conf.getFloat(HConstants.HEAP_OCCUPANCY_LOW_WATERMARK_KEY,
      HConstants.DEFAULT_HEAP_OCCUPANCY_LOW_WATERMARK);
    this.heapOccupancyHighWatermark = conf.getFloat(HConstants.HEAP_OCCUPANCY_HIGH_WATERMARK_KEY,
      HConstants.DEFAULT_HEAP_OCCUPANCY_HIGH_WATERMARK);
  }
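
  /*
   * A minimal tuning sketch (illustrative only, not part of this class's contract): everything the
   * policy needs comes from the Configuration handed to the constructor, so the backoff cap can be
   * lowered like this. The 30 second value is an arbitrary example, not a recommended setting.
   *
   *   Configuration conf = HBaseConfiguration.create();
   *   conf.setLong(ExponentialClientBackoffPolicy.MAX_BACKOFF_KEY, 30 * 1000L); // cap at 30s
   *   ClientBackoffPolicy policy = new ExponentialClientBackoffPolicy(conf);
   *
   * In normal operation the client constructs the policy from configuration itself; applications
   * rarely instantiate it directly.
   */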

  @Override
  public long getBackoffTime(ServerName serverName, byte[] region, ServerStatistics stats) {
    // no stats for the server yet, so don't backoff
    if (stats == null) {
      return 0;
    }

    ServerStatistics.RegionStatistics regionStats = stats.getStatsForRegion(region);
    // no stats for the region yet - don't backoff
    if (regionStats == null) {
      return 0;
    }

    // Factor in memstore load
    double percent = regionStats.getMemStoreLoadPercent() / 100.0;

    // Factor in heap occupancy
    float heapOccupancy = regionStats.getHeapOccupancyPercent() / 100.0f;

    // Factor in compaction pressure, 1.0 means heavy compaction pressure
    float compactionPressure = regionStats.getCompactionPressure() / 100.0f;
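    // Heap occupancy only contributes between the low and high watermarks; within that band it is
    // mapped onto [0.1, 1.0] by scale() below. As a worked example (assuming the default
    // watermarks of 0.95 and 0.98 from HConstants), an occupancy of 0.965 scales to 0.55.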
    if (heapOccupancy >= heapOccupancyLowWatermark) {
      // If we are higher than the high watermark, we are already applying max
      // backoff and cannot scale more (see scale() below)
      if (heapOccupancy > heapOccupancyHighWatermark) {
        heapOccupancy = heapOccupancyHighWatermark;
      }
      percent = Math.max(percent,
        scale(heapOccupancy, heapOccupancyLowWatermark, heapOccupancyHighWatermark, 0.1, 1.0));
    }
    percent = Math.max(percent, compactionPressure);
    // Raise the percent (a value in [0, 1]) to the fourth power: as the load approaches 100
    // percent the multiplier approaches 1, and the power gives the curve its exponential shape.
    double multiplier = Math.pow(percent, 4.0);
    if (multiplier > 1) {
      multiplier = 1;
    }
    return (long) (multiplier * maxBackoff);
  }

  /** Scale valueIn in the range [baseMin,baseMax] to the range [limitMin,limitMax] */
  private static double scale(double valueIn, double baseMin, double baseMax, double limitMin,
    double limitMax) {
    Preconditions.checkArgument(baseMin <= baseMax, "Illegal source range [%s,%s]", baseMin,
      baseMax);
    Preconditions.checkArgument(limitMin <= limitMax, "Illegal target range [%s,%s]", limitMin,
      limitMax);
    Preconditions.checkArgument(valueIn >= baseMin && valueIn <= baseMax,
      "Value %s must be within the range [%s,%s]", valueIn, baseMin, baseMax);
    return ((limitMax - limitMin) * (valueIn - baseMin) / (baseMax - baseMin)) + limitMin;
  }
}