/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.chaos.actions;

import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Action that restarts a random datanode.
037 */ 038public class RestartRandomDataNodeAction extends RestartActionBaseAction { 039 private static final Logger LOG = LoggerFactory.getLogger(RestartRandomDataNodeAction.class); 040 041 public RestartRandomDataNodeAction(long sleepTime) { 042 super(sleepTime); 043 } 044 045 @Override 046 public void perform() throws Exception { 047 LOG.info("Performing action: Restart random data node"); 048 ServerName server = PolicyBasedChaosMonkey.selectRandomItem(getDataNodes()); 049 restartDataNode(server, sleepTime); 050 } 051 052 public ServerName[] getDataNodes() throws IOException { 053 DistributedFileSystem fs = (DistributedFileSystem) FSUtils.getRootDir(getConf()) 054 .getFileSystem(getConf()); 055 DFSClient dfsClient = fs.getClient(); 056 List<ServerName> hosts = new LinkedList<>(); 057 for (DatanodeInfo dataNode: dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE)) { 058 hosts.add(ServerName.valueOf(dataNode.getHostName(), -1, -1)); 059 } 060 return hosts.toArray(new ServerName[hosts.size()]); 061 } 062}