/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.chaos.actions;

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Action that restarts a random datanode.
 */
public class RestartRandomDataNodeAction extends RestartActionBaseAction {
  private static final Logger LOG = LoggerFactory.getLogger(RestartRandomDataNodeAction.class);

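  /**
   * @param sleepTime sleep time, in milliseconds, passed to {@link RestartActionBaseAction} and
   *                  used when restarting the selected datanode.
   */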
  public RestartRandomDataNodeAction(long sleepTime) {
    super(sleepTime);
  }

  @Override
  protected Logger getLogger() {
    return LOG;
  }

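  /**
   * Picks one live datanode at random and restarts it via
   * {@link RestartActionBaseAction#restartDataNode(ServerName, long)}.
   */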
  @Override
  public void perform() throws Exception {
    getLogger().info("Performing action: Restart random data node");
    final ServerName server = PolicyBasedChaosMonkey.selectRandomItem(getDataNodes());
    restartDataNode(server, sleepTime);
  }

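  /**
   * Returns the live datanodes reported by the NameNode, each mapped to a {@link ServerName}.
   * Only the hostname is used, so port and start code are set to -1.
   */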
  private ServerName[] getDataNodes() throws IOException {
    try (final DistributedFileSystem dfs = HdfsActionUtils.createDfs(getConf())) {
      final DFSClient dfsClient = dfs.getClient();
      return Arrays.stream(dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE))
        .map(dn -> ServerName.valueOf(dn.getHostName(), -1, -1)).toArray(ServerName[]::new);
    }
  }
}