/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.chaos.actions;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Action that restarts a random datanode.
 */
public class RestartRandomDataNodeAction extends RestartActionBaseAction {
  private static final Logger LOG = LoggerFactory.getLogger(RestartRandomDataNodeAction.class);

  public RestartRandomDataNodeAction(long sleepTime) {
    super(sleepTime);
  }

  @Override
  protected Logger getLogger() {
    return LOG;
  }

  @Override
  public void perform() throws Exception {
    getLogger().info("Performing action: Restart random data node");
    // Pick one live datanode at random and bounce it, sleeping sleepTime between stop and start.
    ServerName server = PolicyBasedChaosMonkey.selectRandomItem(getDataNodes());
    restartDataNode(server, sleepTime);
  }

  /**
   * Returns the hostnames of all live datanodes in the cluster, wrapped in {@link ServerName}
   * instances with placeholder port and start code.
   */
  public ServerName[] getDataNodes() throws IOException {
    DistributedFileSystem fs =
      (DistributedFileSystem) CommonFSUtils.getRootDir(getConf()).getFileSystem(getConf());
    DFSClient dfsClient = fs.getClient();
    List<ServerName> hosts = new ArrayList<>();
    for (DatanodeInfo dataNode : dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE)) {
      hosts.add(ServerName.valueOf(dataNode.getHostName(), -1, -1));
    }
    return hosts.toArray(new ServerName[0]);
  }
}