/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client.backoff;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;

/**
 * Simple exponential backoff policy for the client that uses percent^4 times the max backoff to
 * generate the backoff time.
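 * <p>
 * For illustration: with the default maximum backoff of five minutes (300000 ms), an effective
 * load of 50% yields {@code 0.5^4 * 300000 ms = 18750 ms}, while a load of 90% yields roughly
 * 197 seconds, so the backoff ramps up sharply only as a region approaches its limits.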
 */
@InterfaceAudience.Public
public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy {

  private static final Logger LOG = LoggerFactory.getLogger(ExponentialClientBackoffPolicy.class);

  private static final long ONE_MINUTE = 60 * 1000;
  public static final long DEFAULT_MAX_BACKOFF = 5 * ONE_MINUTE;
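  /** Configuration key for the maximum backoff time in milliseconds. */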
  public static final String MAX_BACKOFF_KEY = "hbase.client.exponential-backoff.max";
  private long maxBackoff;
  private float heapOccupancyLowWatermark;
  private float heapOccupancyHighWatermark;

  public ExponentialClientBackoffPolicy(Configuration conf) {
    this.maxBackoff = conf.getLong(MAX_BACKOFF_KEY, DEFAULT_MAX_BACKOFF);
    this.heapOccupancyLowWatermark = conf.getFloat(HConstants.HEAP_OCCUPANCY_LOW_WATERMARK_KEY,
      HConstants.DEFAULT_HEAP_OCCUPANCY_LOW_WATERMARK);
    this.heapOccupancyHighWatermark = conf.getFloat(HConstants.HEAP_OCCUPANCY_HIGH_WATERMARK_KEY,
      HConstants.DEFAULT_HEAP_OCCUPANCY_HIGH_WATERMARK);
  }

  @Override
  public long getBackoffTime(ServerName serverName, byte[] region, ServerStatistics stats) {
    // no stats for the server yet, so don't back off
    if (stats == null) {
      return 0;
    }

    ServerStatistics.RegionStatistics regionStats = stats.getStatsForRegion(region);
    // no stats for the region yet - don't back off
    if (regionStats == null) {
      return 0;
    }

    // Factor in memstore load
    double percent = regionStats.getMemStoreLoadPercent() / 100.0;

    // Factor in heap occupancy
    float heapOccupancy = regionStats.getHeapOccupancyPercent() / 100.0f;

    // Factor in compaction pressure, 1.0 means heavy compaction pressure
    float compactionPressure = regionStats.getCompactionPressure() / 100.0f;
    if (heapOccupancy >= heapOccupancyLowWatermark) {
      // If we are higher than the high watermark, we are already applying max
      // backoff and cannot scale more (see scale() below)
      if (heapOccupancy > heapOccupancyHighWatermark) {
        heapOccupancy = heapOccupancyHighWatermark;
      }
      percent = Math.max(percent,
        scale(heapOccupancy, heapOccupancyLowWatermark, heapOccupancyHighWatermark, 0.1, 1.0));
    }
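    // For example, with watermarks of 0.95 and 0.98, a heap occupancy of 0.965 scales to
    // 0.1 + 0.9 * (0.965 - 0.95) / (0.98 - 0.95) = 0.55, so occupancy between the watermarks
    // maps linearly onto the [0.1, 1.0] contribution range.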
    percent = Math.max(percent, compactionPressure);
    // Raise the percent (a value less than 1) to the 4th power. As load approaches 100 percent,
    // percent approaches 1, and raising it to a power produces the exponential backoff curve
    double multiplier = Math.pow(percent, 4.0);
    if (multiplier > 1) {
      multiplier = 1;
    }
    return (long) (multiplier * maxBackoff);
  }

  /** Scale valueIn in the range [baseMin,baseMax] to the range [limitMin,limitMax] */
  private static double scale(double valueIn, double baseMin, double baseMax, double limitMin,
    double limitMax) {
    Preconditions.checkArgument(baseMin <= baseMax, "Illegal source range [%s,%s]", baseMin,
      baseMax);
    Preconditions.checkArgument(limitMin <= limitMax, "Illegal target range [%s,%s]", limitMin,
      limitMax);
    Preconditions.checkArgument(valueIn >= baseMin && valueIn <= baseMax,
      "Value %s must be within the range [%s,%s]", valueIn, baseMin, baseMax);
    return ((limitMax - limitMin) * (valueIn - baseMin) / (baseMax - baseMin)) + limitMin;
  }
}