/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.balancer;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Queue;
import java.util.Random;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Base class for unit tests on load balancers. It provides helper methods to create maps of
 * {@link ServerName} to lists of {@link RegionInfo} and to check lists of region plans.
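 * <p>
 * A minimal sketch (hypothetical subclass code, not part of this class) of how the helpers are
 * typically combined: build a mock cluster, ask the balancer for plans, apply the plans and
 * assert on the resulting load.
 * <pre>{@code
 * // Two servers: one holding 10 regions, one empty.
 * Map<ServerName, List<RegionInfo>> servers = mockClusterServers(new int[] { 10, 0 });
 * List<RegionPlan> plans =
 *     loadBalancer.balanceCluster((Map) mockClusterServersWithTables(servers));
 * // Apply the plans to the mock cluster and check the resulting distribution.
 * assertClusterAsBalanced(reconcile(convertToList(servers), plans, servers));
 * }</pre>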
 *
 */
public class BalancerTestBase {
  private static final Logger LOG = LoggerFactory.getLogger(BalancerTestBase.class);
  static int regionId = 0;
  protected static Configuration conf;
  protected static StochasticLoadBalancer loadBalancer;

  @BeforeClass
  public static void beforeAllTests() throws Exception {
    conf = HBaseConfiguration.create();
    conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 0.75f);
    conf.setFloat("hbase.regions.slop", 0.0f);
    conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
    loadBalancer = new StochasticLoadBalancer();
    loadBalancer.setConf(conf);
  }

  protected int[] largeCluster = new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 };

  // int[testnum][servernumber] -> numregions
  protected int[][] clusterStateMocks = new int[][]{
      // 1 node
      new int[]{0},
      new int[]{1},
      new int[]{10},
      // 2 node
      new int[]{0, 0},
      new int[]{2, 0},
      new int[]{2, 1},
      new int[]{2, 2},
      new int[]{2, 3},
      new int[]{2, 4},
      new int[]{1, 1},
      new int[]{0, 1},
      new int[]{10, 1},
      new int[]{514, 1432},
      new int[]{48, 53},
      // 3 node
      new int[]{0, 1, 2},
      new int[]{1, 2, 3},
      new int[]{0, 2, 2},
      new int[]{0, 3, 0},
      new int[]{0, 4, 0},
      new int[]{20, 20, 0},
      // 4 node
      new int[]{0, 1, 2, 3},
      new int[]{4, 0, 0, 0},
      new int[]{5, 0, 0, 0},
      new int[]{6, 6, 0, 0},
      new int[]{6, 2, 0, 0},
      new int[]{6, 1, 0, 0},
      new int[]{6, 0, 0, 0},
      new int[]{4, 4, 4, 7},
      new int[]{4, 4, 4, 8},
      new int[]{0, 0, 0, 7},
      // 5 node
      new int[]{1, 1, 1, 1, 4},
      // 6 nodes
      new int[]{1500, 500, 500, 500, 10, 0},
      new int[]{1500, 500, 500, 500, 500, 0},
      // more nodes
      new int[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
      new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 10},
      new int[]{6, 6, 5, 6, 6, 6, 6, 6, 6, 1},
      new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 54},
      new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 55},
      new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 56},
      new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 16},
      new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 8},
      new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 9},
      new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 10},
      new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 123},
      new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 155},
      new int[]{10, 7, 12, 8, 11, 10, 9, 14},
      new int[]{13, 14, 6, 10, 10, 10, 8, 10},
      new int[]{130, 14, 60, 10, 100, 10, 80, 10},
      new int[]{130, 140, 60, 100, 100, 100, 80, 100},
      new int[]{0, 5, 5, 5, 5},
      largeCluster,
  };

  // This class is introduced because IP to rack resolution can be lengthy.
  public static class MockMapping implements DNSToSwitchMapping {
    public MockMapping(Configuration conf) {
    }

    @Override
    public List<String> resolve(List<String> names) {
      return Stream.generate(() -> "rack").limit(names.size()).collect(Collectors.toList());
    }

    // do not add @Override annotations here. It might break compilation with earlier Hadoops
    public void reloadCachedMappings() {
    }

    // do not add @Override annotations here. It might break compilation with earlier Hadoops
    public void reloadCachedMappings(List<String> arg0) {
    }
  }

  /**
   * Invariant is that all servers have between floor(avg) and ceiling(avg) number of regions.
   */
  public void assertClusterAsBalanced(List<ServerAndLoad> servers) {
    int numServers = servers.size();
    int numRegions = 0;
    int maxRegions = 0;
    int minRegions = Integer.MAX_VALUE;
    for (ServerAndLoad server : servers) {
      int nr = server.getLoad();
      if (nr > maxRegions) {
        maxRegions = nr;
      }
      if (nr < minRegions) {
        minRegions = nr;
      }
      numRegions += nr;
    }
    if (maxRegions - minRegions < 2) {
      // less than 2 between max and min, can't balance
      return;
    }
    int min = numRegions / numServers;
    int max = numRegions % numServers == 0 ? min : min + 1;

    for (ServerAndLoad server : servers) {
      assertTrue("All servers should have a positive load. " + server, server.getLoad() >= 0);
      assertTrue("All servers should have load no more than " + max + ". " + server,
        server.getLoad() <= max);
      assertTrue("All servers should have load no less than " + min + ". " + server,
        server.getLoad() >= min);
    }
  }

  /**
   * Invariant is that all servers have a number of regions within an acceptable range.
   */
  public boolean assertClusterOverallAsBalanced(List<ServerAndLoad> servers, int tablenum) {
    int numServers = servers.size();
    int numRegions = 0;
    int maxRegions = 0;
    int minRegions = Integer.MAX_VALUE;
    for (ServerAndLoad server : servers) {
      int nr = server.getLoad();
      if (nr > maxRegions) {
        maxRegions = nr;
      }
      if (nr < minRegions) {
        minRegions = nr;
      }
      numRegions += nr;
    }
    if (maxRegions - minRegions < 2) {
      // less than 2 between max and min, can't balance
      return true;
    }
    int min = numRegions / numServers;
    int max = numRegions % numServers == 0 ? min : min + 1;

    for (ServerAndLoad server : servers) {
      // The '5' below is arbitrary.
      if (server.getLoad() < 0 || server.getLoad() > max + (tablenum / 2 + 5)
        || server.getLoad() < (min - tablenum / 2 - 5)) {
        LOG.warn("server={}, load={}, max={}, tablenum={}, min={}",
          server.getServerName(), server.getLoad(), max, tablenum, min);
        return false;
      }
    }
    return true;
  }

  /**
   * Checks that no two replicas of the same region are hosted on the same host (and, when a
   * {@link RackManager} is supplied, not on the same rack either).
   */
  public void assertRegionReplicaPlacement(Map<ServerName, List<RegionInfo>> serverMap,
    RackManager rackManager) {
    TreeMap<String, Set<RegionInfo>> regionsPerHost = new TreeMap<>();
    TreeMap<String, Set<RegionInfo>> regionsPerRack = new TreeMap<>();

    for (Entry<ServerName, List<RegionInfo>> entry : serverMap.entrySet()) {
      String hostname = entry.getKey().getHostname();
      Set<RegionInfo> infos = regionsPerHost.get(hostname);
      if (infos == null) {
        infos = new HashSet<>();
        regionsPerHost.put(hostname, infos);
      }

      for (RegionInfo info : entry.getValue()) {
        RegionInfo primaryInfo = RegionReplicaUtil.getRegionInfoForDefaultReplica(info);
        if (!infos.add(primaryInfo)) {
          Assert.fail("Two or more region replicas are hosted on the same host after balance");
        }
      }
    }

    if (rackManager == null) {
      return;
    }

    for (Entry<ServerName, List<RegionInfo>> entry : serverMap.entrySet()) {
      String rack = rackManager.getRack(entry.getKey());
      Set<RegionInfo> infos = regionsPerRack.get(rack);
      if (infos == null) {
        infos = new HashSet<>();
        regionsPerRack.put(rack, infos);
      }

      for (RegionInfo info : entry.getValue()) {
        RegionInfo primaryInfo = RegionReplicaUtil.getRegionInfoForDefaultReplica(info);
        if (!infos.add(primaryInfo)) {
          Assert.fail("Two or more region replicas are hosted on the same rack after balance");
        }
      }
    }
  }

  protected String printStats(List<ServerAndLoad> servers) {
    int numServers = servers.size();
    int totalRegions = 0;
    for (ServerAndLoad server : servers) {
      totalRegions += server.getLoad();
    }
    float average = (float) totalRegions / numServers;
    int max = (int) Math.ceil(average);
    int min = (int) Math.floor(average);
    return "[srvr=" + numServers + " rgns=" + totalRegions + " avg=" + average + " max=" + max
      + " min=" + min + "]";
  }

  protected List<ServerAndLoad> convertToList(final Map<ServerName, List<RegionInfo>> servers) {
    List<ServerAndLoad> list = new ArrayList<>(servers.size());
    for (Map.Entry<ServerName, List<RegionInfo>> e : servers.entrySet()) {
      list.add(new ServerAndLoad(e.getKey(), e.getValue().size()));
    }
    return list;
  }

  protected String printMock(List<ServerAndLoad> balancedCluster) {
    SortedSet<ServerAndLoad> sorted = new TreeSet<>(balancedCluster);
    ServerAndLoad[] arr = sorted.toArray(new ServerAndLoad[sorted.size()]);
    StringBuilder sb = new StringBuilder(sorted.size() * 4 + 4);
    sb.append("{ ");
    for (int i = 0; i < arr.length; i++) {
      if (i != 0) {
        sb.append(" , ");
      }
      sb.append(arr[i].getServerName().getHostname());
      sb.append(":");
      sb.append(arr[i].getLoad());
    }
    sb.append(" }");
    return sb.toString();
  }

  /**
   * This assumes the RegionPlan HSI instances are the same ones in the map, so actually no need
   * to even pass in the map, but I think it's clearer.
   * @param list the original load of each server
   * @param plans the region plans to apply to the mock cluster
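   * @param servers map from each server to the regions it hosts; updated in place as the plans
   *          are applied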
   * @return a list of all added {@link ServerAndLoad} values.
   */
  protected List<ServerAndLoad> reconcile(List<ServerAndLoad> list, List<RegionPlan> plans,
    Map<ServerName, List<RegionInfo>> servers) {
    List<ServerAndLoad> result = new ArrayList<>(list.size());

    Map<ServerName, ServerAndLoad> map = new HashMap<>(list.size());
    for (ServerAndLoad sl : list) {
      map.put(sl.getServerName(), sl);
    }
    if (plans != null) {
      for (RegionPlan plan : plans) {
        ServerName source = plan.getSource();

        updateLoad(map, source, -1);
        ServerName destination = plan.getDestination();
        updateLoad(map, destination, +1);

        servers.get(source).remove(plan.getRegionInfo());
        servers.get(destination).add(plan.getRegionInfo());
      }
    }
    result.clear();
    result.addAll(map.values());
    return result;
  }

  protected void updateLoad(final Map<ServerName, ServerAndLoad> map, final ServerName sn,
    final int diff) {
    ServerAndLoad sal = map.get(sn);
    if (sal == null) {
      sal = new ServerAndLoad(sn, 0);
    }
    sal = new ServerAndLoad(sn, sal.getLoad() + diff);
    map.put(sn, sal);
  }

  protected TreeMap<ServerName, List<RegionInfo>> mockClusterServers(int[] mockCluster) {
    return mockClusterServers(mockCluster, -1);
  }

  protected BaseLoadBalancer.Cluster mockCluster(int[] mockCluster) {
    return new BaseLoadBalancer.Cluster(mockClusterServers(mockCluster, -1), null, null, null);
  }

  protected TreeMap<ServerName, List<RegionInfo>> mockClusterServers(int[] mockCluster,
    int numTables) {
    int numServers = mockCluster.length;
    TreeMap<ServerName, List<RegionInfo>> servers = new TreeMap<>();
    for (int i = 0; i < numServers; i++) {
      int numRegions = mockCluster[i];
      ServerAndLoad sal = randomServer(0);
      List<RegionInfo> regions = randomRegions(numRegions, numTables);
      servers.put(sal.getServerName(), regions);
    }
    return servers;
  }

  protected TreeMap<ServerName, List<RegionInfo>> mockUniformClusterServers(int[] mockCluster) {
    int numServers = mockCluster.length;
    TreeMap<ServerName, List<RegionInfo>> servers = new TreeMap<>();
    for (int i = 0; i < numServers; i++) {
      int numRegions = mockCluster[i];
      ServerAndLoad sal = randomServer(0);
      List<RegionInfo> regions = uniformRegions(numRegions);
      servers.put(sal.getServerName(), regions);
    }
    return servers;
  }

  protected HashMap<TableName, TreeMap<ServerName, List<RegionInfo>>> mockClusterServersWithTables(Map<ServerName, List<RegionInfo>> clusterServers) {
    HashMap<TableName, TreeMap<ServerName, List<RegionInfo>>> result = new HashMap<>();
    for (Map.Entry<ServerName, List<RegionInfo>> entry : clusterServers.entrySet()) {
      ServerName sal = entry.getKey();
      List<RegionInfo> regions = entry.getValue();
      for (RegionInfo hri : regions) {
        TreeMap<ServerName, List<RegionInfo>> servers = result.get(hri.getTable());
        if (servers == null) {
          servers = new TreeMap<>();
          result.put(hri.getTable(), servers);
        }
        List<RegionInfo> hrilist = servers.get(sal);
        if (hrilist == null) {
          hrilist = new ArrayList<>();
          servers.put(sal, hrilist);
        }
        hrilist.add(hri);
      }
    }
    for (Map.Entry<TableName, TreeMap<ServerName, List<RegionInfo>>> entry : result.entrySet()) {
      for (ServerName srn : clusterServers.keySet()) {
        if (!entry.getValue().containsKey(srn)) {
          entry.getValue().put(srn, new ArrayList<>());
        }
      }
    }
    return result;
  }

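  // Regions handed back through returnRegions(...) are parked in this queue; randomRegions(...)
  // drains it before creating new mock RegionInfo instances, so tests can recycle a region set.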
  private Queue<RegionInfo> regionQueue = new LinkedList<>();

  protected List<RegionInfo> randomRegions(int numRegions) {
    return randomRegions(numRegions, -1);
  }

  protected List<RegionInfo> createRegions(int numRegions, TableName tableName) {
    List<RegionInfo> regions = new ArrayList<>(numRegions);
    byte[] start = new byte[16];
    byte[] end = new byte[16];
    Random rand = ThreadLocalRandom.current();
    rand.nextBytes(start);
    rand.nextBytes(end);
    for (int i = 0; i < numRegions; i++) {
      Bytes.putInt(start, 0, numRegions << 1);
      Bytes.putInt(end, 0, (numRegions << 1) + 1);
      RegionInfo hri = RegionInfoBuilder.newBuilder(tableName)
        .setStartKey(start)
        .setEndKey(end)
        .setSplit(false)
        .build();
      regions.add(hri);
    }
    return regions;
  }

  protected List<RegionInfo> randomRegions(int numRegions, int numTables) {
    List<RegionInfo> regions = new ArrayList<>(numRegions);
    byte[] start = new byte[16];
    byte[] end = new byte[16];
    Random rand = ThreadLocalRandom.current();
    rand.nextBytes(start);
    rand.nextBytes(end);
    for (int i = 0; i < numRegions; i++) {
      if (!regionQueue.isEmpty()) {
        regions.add(regionQueue.poll());
        continue;
      }
      Bytes.putInt(start, 0, numRegions << 1);
      Bytes.putInt(end, 0, (numRegions << 1) + 1);
      TableName tableName =
        TableName.valueOf("table" + (numTables > 0 ? rand.nextInt(numTables) : i));
      RegionInfo hri = RegionInfoBuilder.newBuilder(tableName)
        .setStartKey(start)
        .setEndKey(end)
        .setSplit(false)
        .setRegionId(regionId++)
        .build();
      regions.add(hri);
    }
    return regions;
  }

  protected List<RegionInfo> uniformRegions(int numRegions) {
    List<RegionInfo> regions = new ArrayList<>(numRegions);
    byte[] start = new byte[16];
    byte[] end = new byte[16];
    Random rand = ThreadLocalRandom.current();
    rand.nextBytes(start);
    rand.nextBytes(end);
    for (int i = 0; i < numRegions; i++) {
      Bytes.putInt(start, 0, numRegions << 1);
      Bytes.putInt(end, 0, (numRegions << 1) + 1);
      TableName tableName = TableName.valueOf("table" + i);
      RegionInfo hri = RegionInfoBuilder.newBuilder(tableName)
        .setStartKey(start)
        .setEndKey(end)
        .setSplit(false)
        .build();
      regions.add(hri);
    }
    return regions;
  }

  protected void returnRegions(List<RegionInfo> regions) {
    regionQueue.addAll(regions);
  }

  private Queue<ServerName> serverQueue = new LinkedList<>();

  protected ServerAndLoad randomServer(final int numRegionsPerServer) {
    if (!this.serverQueue.isEmpty()) {
      ServerName sn = this.serverQueue.poll();
      return new ServerAndLoad(sn, numRegionsPerServer);
    }
    Random rand = ThreadLocalRandom.current();
    String host = "srv" + rand.nextInt(Integer.MAX_VALUE);
    int port = rand.nextInt(60000);
    long startCode = rand.nextLong();
    ServerName sn = ServerName.valueOf(host, port, startCode);
    return new ServerAndLoad(sn, numRegionsPerServer);
  }

  protected List<ServerAndLoad> randomServers(int numServers, int numRegionsPerServer) {
    List<ServerAndLoad> servers = new ArrayList<>(numServers);
    for (int i = 0; i < numServers; i++) {
      servers.add(randomServer(numRegionsPerServer));
    }
    return servers;
  }

  protected void returnServer(ServerName server) {
    serverQueue.add(server);
  }

  protected void returnServers(List<ServerName> servers) {
    this.serverQueue.addAll(servers);
  }

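  /**
   * Runs the balancer against a mock cluster and optionally asserts on the outcome. A sketch of
   * the parameters, derived from how they are used below: {@code numNodes} servers are created,
   * each holding {@code numRegionsPerServer} regions except the last one, which takes whatever
   * remains of {@code numRegions}; regions are spread over {@code numTables} tables and, when
   * {@code replication > 1}, replicas are added on the same servers. The two boolean flags decide
   * whether the balanced cluster and the replica placement are verified afterwards.
   */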
  protected void testWithCluster(int numNodes, int numRegions, int numRegionsPerServer,
    int replication, int numTables, boolean assertFullyBalanced,
    boolean assertFullyBalancedForReplicas) {
    Map<ServerName, List<RegionInfo>> serverMap =
      createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
    testWithCluster(serverMap, null, assertFullyBalanced, assertFullyBalancedForReplicas);
  }

  protected void testWithCluster(Map<ServerName, List<RegionInfo>> serverMap,
    RackManager rackManager, boolean assertFullyBalanced,
    boolean assertFullyBalancedForReplicas) {
    List<ServerAndLoad> list = convertToList(serverMap);
    LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));

    loadBalancer.setRackManager(rackManager);
    // Run the balancer.
    Map<TableName, Map<ServerName, List<RegionInfo>>> LoadOfAllTable =
      (Map) mockClusterServersWithTables(serverMap);
    List<RegionPlan> plans = loadBalancer.balanceCluster(LoadOfAllTable);
    assertNotNull("Initial cluster balance should produce plans.", plans);

    // Check to see that this actually got to a stable place.
    if (assertFullyBalanced || assertFullyBalancedForReplicas) {
      // Apply the plan to the mock cluster.
      List<ServerAndLoad> balancedCluster = reconcile(list, plans, serverMap);

      // Print out the cluster loads to make debugging easier.
      LOG.info("Mock Balance : " + printMock(balancedCluster));

      if (assertFullyBalanced) {
        assertClusterAsBalanced(balancedCluster);
        LoadOfAllTable = (Map) mockClusterServersWithTables(serverMap);
        List<RegionPlan> secondPlans = loadBalancer.balanceCluster(LoadOfAllTable);
        assertNull("Given a requirement to be fully balanced, second attempt at plans should "
          + "produce none.", secondPlans);
      }

      if (assertFullyBalancedForReplicas) {
        assertRegionReplicaPlacement(serverMap, rackManager);
      }
    }
  }

  protected Map<ServerName, List<RegionInfo>> createServerMap(int numNodes, int numRegions,
    int numRegionsPerServer, int replication, int numTables) {
    // construct a cluster of numNodes, having a total of numRegions. Each RS will hold
    // numRegionsPerServer many regions except for the last one, which will host all the
    // remaining regions
    int[] cluster = new int[numNodes];
    for (int i = 0; i < numNodes; i++) {
      cluster[i] = numRegionsPerServer;
    }
    cluster[cluster.length - 1] = numRegions - ((cluster.length - 1) * numRegionsPerServer);
    Map<ServerName, List<RegionInfo>> clusterState = mockClusterServers(cluster, numTables);
    if (replication > 0) {
      // replicate the regions to the same servers
      for (List<RegionInfo> regions : clusterState.values()) {
        int length = regions.size();
        for (int i = 0; i < length; i++) {
          for (int r = 1; r < replication; r++) {
            regions.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(i), r));
          }
        }
      }
    }

    return clusterState;
  }
}