/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.chaos.actions;

import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.net.Address;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Action to dump the cluster status.
 */
public class DumpClusterStatusAction extends Action {
  private static final Logger LOG = LoggerFactory.getLogger(DumpClusterStatusAction.class);

  private Set<Address> initialRegionServers;

  @Override
  protected Logger getLogger() {
    return LOG;
  }

  @Override
  public void init(ActionContext context) throws IOException {
    super.init(context);
    initialRegionServers = collectKnownRegionServers(initialStatus);
  }

  @Override
  public void perform() throws Exception {
    getLogger().debug("Performing action: Dump cluster status");
    final ClusterMetrics currentMetrics = cluster.getClusterMetrics();
    getLogger().info("Cluster status\n{}", currentMetrics);
    reportMissingRegionServers(currentMetrics);
    reportNewRegionServers(currentMetrics);
  }

  /**
   * Build a set of all the host:port pairs of region servers known to this cluster.
   */
  private static Set<Address> collectKnownRegionServers(final ClusterMetrics clusterMetrics) {
    final Set<Address> regionServers = clusterMetrics.getLiveServerMetrics()
      .keySet()
      .stream()
      .map(ServerName::getAddress)
      .collect(Collectors.toSet());
    clusterMetrics.getDeadServerNames()
      .stream()
      .map(ServerName::getAddress)
      .forEach(regionServers::add);
    return Collections.unmodifiableSet(regionServers);
  }

  /**
   * Warn about any region servers that were present in the initial cluster report but are missing
   * from the current one.
   */
  private void reportMissingRegionServers(final ClusterMetrics clusterMetrics) {
    final Set<Address> regionServers = collectKnownRegionServers(clusterMetrics);
    final Set<Address> missingRegionServers = new HashSet<>(initialRegionServers);
    missingRegionServers.removeAll(regionServers);
    if (!missingRegionServers.isEmpty()) {
      final StringBuilder stringBuilder = new StringBuilder()
        .append("region server(s) are missing from this cluster report");
      missingRegionServers.stream()
        .sorted()
        .forEach(address -> stringBuilder.append("\n  ").append(address));
      getLogger().warn(stringBuilder.toString());
    }
  }

  /**
   * Warn about any region servers that appear in the current cluster report but were not present
   * in the initial one.
   */
  private void reportNewRegionServers(final ClusterMetrics clusterMetrics) {
    final Set<Address> regionServers = collectKnownRegionServers(clusterMetrics);
    final Set<Address> newRegionServers = new HashSet<>(regionServers);
    newRegionServers.removeAll(initialRegionServers);
    if (!newRegionServers.isEmpty()) {
      final StringBuilder stringBuilder = new StringBuilder()
        .append("region server(s) are new for this cluster report");
      newRegionServers.stream()
        .sorted()
        .forEach(address -> stringBuilder.append("\n  ").append(address));
      getLogger().warn(stringBuilder.toString());
    }
  }
}