/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.chaos.actions;

import java.net.InetSocketAddress;
import java.net.URI;
import java.util.List;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HAUtilClient;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

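/**
 * Chaos action that logs a point-in-time summary of the backing HDFS cluster: the active
 * NameNode (when HA is configured) plus the live and dead DataNodes.
 */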
public class DumpHdfsClusterStatusAction extends Action {
  private static final Logger LOG = LoggerFactory.getLogger(DumpHdfsClusterStatusAction.class);
  private static final String PREFIX = "\n  ";

  @Override
  protected Logger getLogger() {
    return LOG;
  }

  @Override
  public void perform() throws Exception {
    StringBuilder sb = new StringBuilder();
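    // Open a dedicated client against the cluster under test; try-with-resources closes it.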
    try (final DistributedFileSystem dfs = HdfsActionUtils.createDfs(getConf())) {
      final Configuration dfsConf = dfs.getConf();
      final URI dfsUri = dfs.getUri();
      final boolean isHaAndLogicalUri = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
      sb.append("Cluster status").append('\n');
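      // For an HA logical URI, the host portion is the nameservice ID; resolve every
      // NameNode in that nameservice and report which one is currently active.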
      if (isHaAndLogicalUri) {
        final String nsId = dfsUri.getHost();
        final List<ClientProtocol> namenodes =
          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
        final boolean atLeastOneActive = HAUtil.isAtLeastOneActive(namenodes);
        final InetSocketAddress activeAddress = HAUtil.getAddressOfActive(dfs);
        sb.append("Active NameNode=").append(activeAddress).append(", isAtLeastOneActive=")
          .append(atLeastOneActive).append('\n');
      }
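      // Live DataNodes, with usage percentage and total capacity for each.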
      DatanodeInfo[] dns = dfs.getClient().datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
      sb.append("Number of live DataNodes: ").append(dns.length);
      for (DatanodeInfo dni : dns) {
        sb.append(PREFIX).append("name=").append(dni.getName()).append(", used%=")
          .append(dni.getDfsUsedPercent()).append(", capacity=")
          .append(FileUtils.byteCountToDisplaySize(dni.getCapacity()));
      }
      sb.append('\n');
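      // Dead DataNodes, with their network locations to aid rack-level debugging.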
      dns = dfs.getClient().datanodeReport(HdfsConstants.DatanodeReportType.DEAD);
      sb.append("Number of dead DataNodes: ").append(dns.length);
      for (DatanodeInfo dni : dns) {
        sb.append(PREFIX).append(dni.getName()).append("/").append(dni.getNetworkLocation());
      }
    }
    // TODO: add more on NN, JNs, and ZK.
    // TODO: Print how long process has been up.
    getLogger().info(sb.toString());
  }
}