/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.chaos.actions;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Action that restarts a random datanode.
 */
public class RestartRandomDataNodeAction extends RestartActionBaseAction {
  private static final Logger LOG = LoggerFactory.getLogger(RestartRandomDataNodeAction.class);

  /**
   * @param sleepTime time (in ms) to wait between stopping and restarting the datanode;
   *                  forwarded to the base restart action.
   */
  public RestartRandomDataNodeAction(long sleepTime) {
    super(sleepTime);
  }

  @Override
  protected Logger getLogger() {
    return LOG;
  }

  @Override
  public void perform() throws Exception {
    getLogger().info("Performing action: Restart random data node");
    // Pick one live datanode at random and bounce it via the base action.
    ServerName server = PolicyBasedChaosMonkey.selectRandomItem(getDataNodes());
    restartDataNode(server, sleepTime);
  }

  /**
   * Lists the live datanodes of the cluster backing the HBase root directory.
   * @return one {@link ServerName} per live datanode, built from its hostname; port and start
   *         code are set to -1 since they are not known from the datanode report.
   * @throws IOException if the filesystem or the datanode report cannot be obtained
   */
  public ServerName[] getDataNodes() throws IOException {
    DistributedFileSystem fs =
      (DistributedFileSystem) CommonFSUtils.getRootDir(getConf()).getFileSystem(getConf());
    DFSClient dfsClient = fs.getClient();
    // ArrayList: append-only collection immediately converted to an array.
    List<ServerName> hosts = new ArrayList<>();
    for (DatanodeInfo dataNode : dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE)) {
      hosts.add(ServerName.valueOf(dataNode.getHostName(), -1, -1));
    }
    return hosts.toArray(new ServerName[0]);
  }
}