/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client.backoff;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;

/**
 * Simple exponential backoff policy for the client that uses percent^4 times the max backoff
 * to generate the backoff time.
 */
@InterfaceAudience.Public
public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy {

  private static final Logger LOG = LoggerFactory.getLogger(ExponentialClientBackoffPolicy.class);

  private static final long ONE_MINUTE = 60 * 1000;
  public static final long DEFAULT_MAX_BACKOFF = 5 * ONE_MINUTE;
  public static final String MAX_BACKOFF_KEY = "hbase.client.exponential-backoff.max";
  private long maxBackoff;
  private float heapOccupancyLowWatermark;
  private float heapOccupancyHighWatermark;

  public ExponentialClientBackoffPolicy(Configuration conf) {
    this.maxBackoff = conf.getLong(MAX_BACKOFF_KEY, DEFAULT_MAX_BACKOFF);
    this.heapOccupancyLowWatermark = conf.getFloat(HConstants.HEAP_OCCUPANCY_LOW_WATERMARK_KEY,
      HConstants.DEFAULT_HEAP_OCCUPANCY_LOW_WATERMARK);
    this.heapOccupancyHighWatermark = conf.getFloat(HConstants.HEAP_OCCUPANCY_HIGH_WATERMARK_KEY,
      HConstants.DEFAULT_HEAP_OCCUPANCY_HIGH_WATERMARK);
  }

  @Override
  public long getBackoffTime(ServerName serverName, byte[] region, ServerStatistics stats) {
    // no stats for the server yet, so don't back off
    if (stats == null) {
      return 0;
    }

    ServerStatistics.RegionStatistics regionStats = stats.getStatsForRegion(region);
    // no stats for the region yet - don't back off
    if (regionStats == null) {
      return 0;
    }

    // Factor in memstore load
    double percent = regionStats.getMemStoreLoadPercent() / 100.0;

    // Factor in heap occupancy
    float heapOccupancy = regionStats.getHeapOccupancyPercent() / 100.0f;

    // Factor in compaction pressure, 1.0 means heavy compaction pressure
    float compactionPressure = regionStats.getCompactionPressure() / 100.0f;
    if (heapOccupancy >= heapOccupancyLowWatermark) {
      // If we are higher than the high watermark, we are already applying max
      // backoff and cannot scale more (see scale() below)
      if (heapOccupancy > heapOccupancyHighWatermark) {
        heapOccupancy = heapOccupancyHighWatermark;
      }
      percent = Math.max(percent,
        scale(heapOccupancy, heapOccupancyLowWatermark, heapOccupancyHighWatermark, 0.1, 1.0));
    }
    percent = Math.max(percent, compactionPressure);
    // Raise the percent (a value in [0, 1]) to the 4th power. As the load approaches 100
    // percent the multiplier approaches 1; the power keeps backoff small under light load
    // and ramps it up sharply as the server gets busier.
    double multiplier = Math.pow(percent, 4.0);
    if (multiplier > 1) {
      multiplier = 1;
    }
    return (long) (multiplier * maxBackoff);
  }

  /** Scale valueIn in the range [baseMin,baseMax] to the range [limitMin,limitMax] */
  private static double scale(double valueIn, double baseMin, double baseMax, double limitMin,
      double limitMax) {
    Preconditions.checkArgument(baseMin <= baseMax, "Illegal source range [%s,%s]",
      baseMin, baseMax);
    Preconditions.checkArgument(limitMin <= limitMax, "Illegal target range [%s,%s]",
      limitMin, limitMax);
    Preconditions.checkArgument(valueIn >= baseMin && valueIn <= baseMax,
      "Value %s must be within the range [%s,%s]", valueIn, baseMin, baseMax);
    return ((limitMax - limitMin) * (valueIn - baseMin) / (baseMax - baseMin)) + limitMin;
  }
}
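
// A minimal usage sketch, not part of the class above: it only illustrates how a caller could
// turn this policy into a pause before retrying a busy region. The config key and constructor
// come from this file; serverName, regionName, and stats are assumed placeholders here (real
// clients obtain ServerStatistics from the connection's statistics tracker, not by hand).
//
//   Configuration conf = HBaseConfiguration.create();
//   // cap backoff at one minute instead of the five-minute default
//   conf.setLong(ExponentialClientBackoffPolicy.MAX_BACKOFF_KEY, 60 * 1000L);
//   ClientBackoffPolicy policy = new ExponentialClientBackoffPolicy(conf);
//   long pauseMs = policy.getBackoffTime(serverName, regionName, stats); // 0 when no stats yet
//
// Worked numbers for the percent^4 curve with the default 5-minute max backoff: at 50% load the
// multiplier is 0.5^4 = 0.0625, giving roughly an 18.75 s pause; at 90% load it is 0.9^4 = 0.6561,
// roughly 197 s. The backoff therefore stays gentle until the server is genuinely under pressure.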