/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.chaos.actions;

import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Chaos action that shrinks a table's MAX_FILESIZE by roughly 10% on each run,
 * bottoming out around 1 GB (with a few hundred bytes of random jitter), so that
 * regions split more often while the chaos monkey is running.
 */
public class DecreaseMaxHFileSizeAction extends Action {
  private static final Logger LOG = LoggerFactory.getLogger(DecreaseMaxHFileSizeAction.class);

  private static final long minFileSize = 1024 * 1024 * 1024L;

  private final long sleepTime;
  private final TableName tableName;
  private Admin admin;

  public DecreaseMaxHFileSizeAction(long sleepTime, TableName tableName) {
    this.sleepTime = sleepTime;
    this.tableName = tableName;
  }

  @Override
  protected Logger getLogger() {
    return LOG;
  }

  @Override
  public void init(ActionContext context) throws IOException {
    super.init(context);
    this.admin = context.getHBaseIntegrationTestingUtility().getAdmin();
  }

  @Override
  public void perform() throws Exception {
    TableDescriptor td = admin.getDescriptor(tableName);

    // Try and get the current value.
    long currentValue = td.getMaxFileSize();

    // If the current value is not set, use the default for the cluster.
    // If configs are really weird this might not work.
    // That's ok. We're trying to cause chaos.
    if (currentValue <= 0) {
      currentValue = context.getHBaseCluster().getConf().getLong(HConstants.HREGION_MAX_FILESIZE,
        HConstants.DEFAULT_MAX_FILE_SIZE);
    }

    // Decrease by 10% at a time.
    long newValue = (long) (currentValue * 0.9);

    // We don't want to go too far below 1gb.
    // So go to about 1gb +/- 512 bytes on either side.
    newValue = Math.max(minFileSize, newValue) - (512 - ThreadLocalRandom.current().nextInt(1024));

    // Change the table descriptor.
    TableDescriptor modifiedTable =
      TableDescriptorBuilder.newBuilder(td).setMaxFileSize(newValue).build();

    // Don't try the modify if we're stopping.
    if (context.isStopping()) {
      return;
    }

    // Modify the table.
    admin.modifyTable(modifiedTable);

    // Sleep some time.
    if (sleepTime > 0) {
      Thread.sleep(sleepTime);
    }
  }
}