/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;

import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.Option;
import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

/**
 * Builder for {@link ClusterMetrics}, plus helpers for converting between {@link ClusterMetrics}
 * and its protobuf representation, {@link ClusterStatusProtos.ClusterStatus}.
 */
@InterfaceAudience.Private
public final class ClusterMetricsBuilder {

  /**
   * Convert a {@link ClusterMetrics} instance into its protobuf representation. Nullable fields
   * (master name, balancer state, cluster id, HBase version) are only set when non-null.
   */
  public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics metrics) {
    ClusterStatusProtos.ClusterStatus.Builder builder =
        ClusterStatusProtos.ClusterStatus.newBuilder()
            .addAllBackupMasters(metrics.getBackupMasterNames().stream()
                .map(ProtobufUtil::toServerName).collect(Collectors.toList()))
            .addAllDeadServers(metrics.getDeadServerNames().stream()
                .map(ProtobufUtil::toServerName).collect(Collectors.toList()))
            .addAllLiveServers(metrics.getLiveServerMetrics().entrySet().stream()
                .map(s -> ClusterStatusProtos.LiveServerInfo.newBuilder()
                    .setServer(ProtobufUtil.toServerName(s.getKey()))
                    .setServerLoad(ServerMetricsBuilder.toServerLoad(s.getValue()))
                    .build())
                .collect(Collectors.toList()))
            .addAllMasterCoprocessors(metrics.getMasterCoprocessorNames().stream()
                .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build())
                .collect(Collectors.toList()))
            .addAllRegionsInTransition(metrics.getRegionStatesInTransition().stream()
                .map(r -> ClusterStatusProtos.RegionInTransition.newBuilder()
                    .setSpec(HBaseProtos.RegionSpecifier.newBuilder()
                        .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
                        .setValue(UnsafeByteOperations.unsafeWrap(r.getRegion().getRegionName()))
                        .build())
                    .setRegionState(r.convert())
                    .build())
                .collect(Collectors.toList()))
            .setMasterInfoPort(metrics.getMasterInfoPort())
            .addAllServersName(metrics.getServersName().stream().map(ProtobufUtil::toServerName)
                .collect(Collectors.toList()))
            .addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream()
                .map(status -> ClusterStatusProtos.TableRegionStatesCount.newBuilder()
                    .setTableName(ProtobufUtil.toProtoTableName(status.getKey()))
                    .setRegionStatesCount(
                        ProtobufUtil.toTableRegionStatesCount(status.getValue()))
                    .build())
                .collect(Collectors.toList()));
    if (metrics.getMasterName() != null) {
      builder.setMaster(ProtobufUtil.toServerName(metrics.getMasterName()));
    }
    if (metrics.getBalancerOn() != null) {
      builder.setBalancerOn(metrics.getBalancerOn());
    }
    if (metrics.getClusterId() != null) {
      builder.setClusterId(new ClusterId(metrics.getClusterId()).convert());
    }
    if (metrics.getHBaseVersion() != null) {
      builder.setHbaseVersion(
          FSProtos.HBaseVersionFileContent.newBuilder()
              .setVersion(metrics.getHBaseVersion()));
    }
    return builder.build();
  }
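
  /**
   * Convert a protobuf ClusterStatus into a {@link ClusterMetrics} instance. Optional protobuf
   * fields (cluster id, HBase version, master name, balancer state, master info port) are only
   * copied onto the builder when they are present in the message.
   */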
  public static ClusterMetrics toClusterMetrics(ClusterStatusProtos.ClusterStatus proto) {
    ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder();
    builder.setLiveServerMetrics(proto.getLiveServersList().stream()
        .collect(Collectors.toMap(e -> ProtobufUtil.toServerName(e.getServer()),
            ServerMetricsBuilder::toServerMetrics)))
        .setDeadServerNames(proto.getDeadServersList().stream()
            .map(ProtobufUtil::toServerName)
            .collect(Collectors.toList()))
        .setBackerMasterNames(proto.getBackupMastersList().stream()
            .map(ProtobufUtil::toServerName)
            .collect(Collectors.toList()))
        .setRegionsInTransition(proto.getRegionsInTransitionList().stream()
            .map(ClusterStatusProtos.RegionInTransition::getRegionState)
            .map(RegionState::convert)
            .collect(Collectors.toList()))
        .setMasterCoprocessorNames(proto.getMasterCoprocessorsList().stream()
            .map(HBaseProtos.Coprocessor::getName)
            .collect(Collectors.toList()))
        .setServerNames(proto.getServersNameList().stream().map(ProtobufUtil::toServerName)
            .collect(Collectors.toList()))
        .setTableRegionStatesCount(proto.getTableRegionStatesCountList().stream()
            .collect(Collectors.toMap(
                e -> ProtobufUtil.toTableName(e.getTableName()),
                e -> ProtobufUtil.toTableRegionStatesCount(e.getRegionStatesCount()))));
    if (proto.hasClusterId()) {
      builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString());
    }
    if (proto.hasHbaseVersion()) {
      builder.setHBaseVersion(proto.getHbaseVersion().getVersion());
    }
    if (proto.hasMaster()) {
      builder.setMasterName(ProtobufUtil.toServerName(proto.getMaster()));
    }
    if (proto.hasBalancerOn()) {
      builder.setBalancerOn(proto.getBalancerOn());
    }
    if (proto.hasMasterInfoPort()) {
      builder.setMasterInfoPort(proto.getMasterInfoPort());
    }
    return builder.build();
  }

  /**
   * Convert ClusterStatusProtos.Option to ClusterMetrics.Option
   * @param option a ClusterStatusProtos.Option
   * @return converted ClusterMetrics.Option
   */
  public static ClusterMetrics.Option toOption(ClusterStatusProtos.Option option) {
    switch (option) {
      case HBASE_VERSION: return ClusterMetrics.Option.HBASE_VERSION;
      case LIVE_SERVERS: return ClusterMetrics.Option.LIVE_SERVERS;
      case DEAD_SERVERS: return ClusterMetrics.Option.DEAD_SERVERS;
      case REGIONS_IN_TRANSITION: return ClusterMetrics.Option.REGIONS_IN_TRANSITION;
      case CLUSTER_ID: return ClusterMetrics.Option.CLUSTER_ID;
      case MASTER_COPROCESSORS: return ClusterMetrics.Option.MASTER_COPROCESSORS;
      case MASTER: return ClusterMetrics.Option.MASTER;
      case BACKUP_MASTERS: return ClusterMetrics.Option.BACKUP_MASTERS;
      case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON;
      case SERVERS_NAME: return ClusterMetrics.Option.SERVERS_NAME;
      case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT;
      case TABLE_TO_REGIONS_COUNT: return ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT;
      // should not reach here
      default: throw new IllegalArgumentException("Invalid option: " + option);
    }
  }
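
  // The toOption(ClusterStatusProtos.Option) converter above and the toOption(ClusterMetrics.Option)
  // converter below are mirror images; a value added to either Option enum needs a matching case
  // in both switches.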
  /**
   * Convert ClusterMetrics.Option to ClusterStatusProtos.Option
   * @param option a ClusterMetrics.Option
   * @return converted ClusterStatusProtos.Option
   */
  public static ClusterStatusProtos.Option toOption(ClusterMetrics.Option option) {
    switch (option) {
      case HBASE_VERSION: return ClusterStatusProtos.Option.HBASE_VERSION;
      case LIVE_SERVERS: return ClusterStatusProtos.Option.LIVE_SERVERS;
      case DEAD_SERVERS: return ClusterStatusProtos.Option.DEAD_SERVERS;
      case REGIONS_IN_TRANSITION: return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION;
      case CLUSTER_ID: return ClusterStatusProtos.Option.CLUSTER_ID;
      case MASTER_COPROCESSORS: return ClusterStatusProtos.Option.MASTER_COPROCESSORS;
      case MASTER: return ClusterStatusProtos.Option.MASTER;
      case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS;
      case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON;
      case SERVERS_NAME: return Option.SERVERS_NAME;
      case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT;
      case TABLE_TO_REGIONS_COUNT: return ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT;
      // should not reach here
      default: throw new IllegalArgumentException("Invalid option: " + option);
    }
  }

  /**
   * Convert a list of ClusterStatusProtos.Option to an enum set of ClusterMetrics.Option
   * @param options the pb options
   * @return an enum set of ClusterMetrics.Option
   */
  public static EnumSet<ClusterMetrics.Option> toOptions(List<ClusterStatusProtos.Option> options) {
    return options.stream().map(ClusterMetricsBuilder::toOption)
        .collect(Collectors.toCollection(() -> EnumSet.noneOf(ClusterMetrics.Option.class)));
  }

  /**
   * Convert an enum set of ClusterMetrics.Option to a list of ClusterStatusProtos.Option
   * @param options the ClusterMetrics options
   * @return a list of ClusterStatusProtos.Option
   */
  public static List<ClusterStatusProtos.Option> toOptions(EnumSet<ClusterMetrics.Option> options) {
    return options.stream().map(ClusterMetricsBuilder::toOption).collect(Collectors.toList());
  }
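
  /**
   * @return a new builder whose collections default to empty and whose nullable fields default to
   *         null. A minimal usage sketch (the field values below are illustrative only):
   * <pre>
   * ClusterMetrics metrics = ClusterMetricsBuilder.newBuilder()
   *     .setHBaseVersion("2.4.0")
   *     .setMasterInfoPort(16010)
   *     .build();
   * </pre>
   */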
  public static ClusterMetricsBuilder newBuilder() {
    return new ClusterMetricsBuilder();
  }

  @Nullable
  private String hbaseVersion;
  private List<ServerName> deadServerNames = Collections.emptyList();
  private Map<ServerName, ServerMetrics> liveServerMetrics = new TreeMap<>();
  @Nullable
  private ServerName masterName;
  private List<ServerName> backupMasterNames = Collections.emptyList();
  private List<RegionState> regionsInTransition = Collections.emptyList();
  @Nullable
  private String clusterId;
  private List<String> masterCoprocessorNames = Collections.emptyList();
  @Nullable
  private Boolean balancerOn;
  private int masterInfoPort;
  private List<ServerName> serversName = Collections.emptyList();
  private Map<TableName, RegionStatesCount> tableRegionStatesCount = Collections.emptyMap();

  private ClusterMetricsBuilder() {
  }

  public ClusterMetricsBuilder setHBaseVersion(String value) {
    this.hbaseVersion = value;
    return this;
  }

  public ClusterMetricsBuilder setDeadServerNames(List<ServerName> value) {
    this.deadServerNames = value;
    return this;
  }

  public ClusterMetricsBuilder setLiveServerMetrics(Map<ServerName, ServerMetrics> value) {
    liveServerMetrics.putAll(value);
    return this;
  }

  public ClusterMetricsBuilder setMasterName(ServerName value) {
    this.masterName = value;
    return this;
  }

  public ClusterMetricsBuilder setBackerMasterNames(List<ServerName> value) {
    this.backupMasterNames = value;
    return this;
  }

  public ClusterMetricsBuilder setRegionsInTransition(List<RegionState> value) {
    this.regionsInTransition = value;
    return this;
  }

  public ClusterMetricsBuilder setClusterId(String value) {
    this.clusterId = value;
    return this;
  }

  public ClusterMetricsBuilder setMasterCoprocessorNames(List<String> value) {
    this.masterCoprocessorNames = value;
    return this;
  }

  public ClusterMetricsBuilder setBalancerOn(@Nullable Boolean value) {
    this.balancerOn = value;
    return this;
  }

  public ClusterMetricsBuilder setMasterInfoPort(int value) {
    this.masterInfoPort = value;
    return this;
  }

  public ClusterMetricsBuilder setServerNames(List<ServerName> serversName) {
    this.serversName = serversName;
    return this;
  }

  public ClusterMetricsBuilder setTableRegionStatesCount(
      Map<TableName, RegionStatesCount> tableRegionStatesCount) {
    this.tableRegionStatesCount = tableRegionStatesCount;
    return this;
  }

  public ClusterMetrics build() {
    return new ClusterMetricsImpl(
        hbaseVersion,
        deadServerNames,
        liveServerMetrics,
        masterName,
        backupMasterNames,
        regionsInTransition,
        clusterId,
        masterCoprocessorNames,
        balancerOn,
        masterInfoPort,
        serversName,
        tableRegionStatesCount);
  }
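
  /**
   * The {@link ClusterMetrics} implementation returned by {@link ClusterMetricsBuilder#build()}.
   * List- and map-typed getters expose unmodifiable views of the underlying collections.
   */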
  private static class ClusterMetricsImpl implements ClusterMetrics {
    @Nullable
    private final String hbaseVersion;
    private final List<ServerName> deadServerNames;
    private final Map<ServerName, ServerMetrics> liveServerMetrics;
    @Nullable
    private final ServerName masterName;
    private final List<ServerName> backupMasterNames;
    private final List<RegionState> regionsInTransition;
    @Nullable
    private final String clusterId;
    private final List<String> masterCoprocessorNames;
    @Nullable
    private final Boolean balancerOn;
    private final int masterInfoPort;
    private final List<ServerName> serversName;
    private final Map<TableName, RegionStatesCount> tableRegionStatesCount;

    ClusterMetricsImpl(String hbaseVersion, List<ServerName> deadServerNames,
        Map<ServerName, ServerMetrics> liveServerMetrics,
        ServerName masterName,
        List<ServerName> backupMasterNames,
        List<RegionState> regionsInTransition,
        String clusterId,
        List<String> masterCoprocessorNames,
        Boolean balancerOn,
        int masterInfoPort,
        List<ServerName> serversName,
        Map<TableName, RegionStatesCount> tableRegionStatesCount) {
      this.hbaseVersion = hbaseVersion;
      this.deadServerNames = Preconditions.checkNotNull(deadServerNames);
      this.liveServerMetrics = Preconditions.checkNotNull(liveServerMetrics);
      this.masterName = masterName;
      this.backupMasterNames = Preconditions.checkNotNull(backupMasterNames);
      this.regionsInTransition = Preconditions.checkNotNull(regionsInTransition);
      this.clusterId = clusterId;
      this.masterCoprocessorNames = Preconditions.checkNotNull(masterCoprocessorNames);
      this.balancerOn = balancerOn;
      this.masterInfoPort = masterInfoPort;
      this.serversName = serversName;
      this.tableRegionStatesCount = Preconditions.checkNotNull(tableRegionStatesCount);
    }

    @Override
    public String getHBaseVersion() {
      return hbaseVersion;
    }

    @Override
    public List<ServerName> getDeadServerNames() {
      return Collections.unmodifiableList(deadServerNames);
    }

    @Override
    public Map<ServerName, ServerMetrics> getLiveServerMetrics() {
      return Collections.unmodifiableMap(liveServerMetrics);
    }

    @Override
    public ServerName getMasterName() {
      return masterName;
    }

    @Override
    public List<ServerName> getBackupMasterNames() {
      return Collections.unmodifiableList(backupMasterNames);
    }

    @Override
    public List<RegionState> getRegionStatesInTransition() {
      return Collections.unmodifiableList(regionsInTransition);
    }

    @Override
    public String getClusterId() {
      return clusterId;
    }

    @Override
    public List<String> getMasterCoprocessorNames() {
      return Collections.unmodifiableList(masterCoprocessorNames);
    }

    @Override
    public Boolean getBalancerOn() {
      return balancerOn;
    }

    @Override
    public int getMasterInfoPort() {
      return masterInfoPort;
    }

    @Override
    public List<ServerName> getServersName() {
      return Collections.unmodifiableList(serversName);
    }

    @Override
    public Map<TableName, RegionStatesCount> getTableRegionStatesCount() {
      return Collections.unmodifiableMap(tableRegionStatesCount);
    }
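
    // Renders a human-readable, multi-line summary: master and backup masters, live and dead
    // region servers, average load, request and region counts, and any regions in transition.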
    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder(1024);
      sb.append("Master: " + getMasterName());

      int backupMastersSize = getBackupMasterNames().size();
      sb.append("\nNumber of backup masters: " + backupMastersSize);
      if (backupMastersSize > 0) {
        for (ServerName serverName : getBackupMasterNames()) {
          sb.append("\n  " + serverName);
        }
      }

      int serversSize = getLiveServerMetrics().size();
      int serversNameSize = getServersName().size();
      sb.append("\nNumber of live region servers: "
          + (serversSize > 0 ? serversSize : serversNameSize));
      if (serversSize > 0) {
        for (ServerName serverName : getLiveServerMetrics().keySet()) {
          sb.append("\n  " + serverName.getServerName());
        }
      } else if (serversNameSize > 0) {
        for (ServerName serverName : getServersName()) {
          sb.append("\n  " + serverName.getServerName());
        }
      }

      int deadServerSize = getDeadServerNames().size();
      sb.append("\nNumber of dead region servers: " + deadServerSize);
      if (deadServerSize > 0) {
        for (ServerName serverName : getDeadServerNames()) {
          sb.append("\n  " + serverName);
        }
      }

      sb.append("\nAverage load: " + getAverageLoad());
      sb.append("\nNumber of requests: " + getRequestCount());
      sb.append("\nNumber of regions: " + getRegionCount());

      int ritSize = getRegionStatesInTransition().size();
      sb.append("\nNumber of regions in transition: " + ritSize);
      if (ritSize > 0) {
        for (RegionState state : getRegionStatesInTransition()) {
          sb.append("\n  " + state.toDescriptiveString());
        }
      }
      return sb.toString();
    }
  }
}