/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.favored;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Triple;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import org.mockito.Mockito;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

/**
 * Tests for {@link FavoredNodeAssignmentHelper}: primary round-robin placement,
 * secondary/tertiary placement across racks, random rack/server selection, and
 * favored-node generation. Uses a mocked {@link RackManager} that maps hosts
 * "foo0".."foo39" onto racks "rack1".."rack3" (10 hosts each) plus an unknown rack.
 */
@Tag(MasterTests.TAG)
@Tag(MediumTests.TAG)
public class TestFavoredNodeAssignmentHelper {

  private static final List<ServerName> servers = new ArrayList<>();
  private static final Map<String, List<ServerName>> rackToServers = new HashMap<>();
  private static final RackManager rackManager = Mockito.mock(RackManager.class);

  // Some tests have randomness, so we run them multiple times
  private static final int MAX_ATTEMPTS = 100;

  // Name of the currently running test method; used as the table name in several tests.
  private static String methodName;

  /** Maps a host index onto its rack: 0-9 -> rack1, 10-19 -> rack2, 20-29 -> rack3. */
  private static String getRack(int index) {
    if (index < 10) {
      return "rack1";
    } else if (index < 20) {
      return "rack2";
    } else if (index < 30) {
      return "rack3";
    } else {
      return RackManager.UNKNOWN_RACK;
    }
  }

  @BeforeAll
  public static void setupBeforeAll() throws Exception {
    // Set up some server -> rack mappings
    // Have three racks in the cluster with 10 hosts each.
    when(rackManager.getRack(any(ServerName.class))).then(invocation -> {
      ServerName sn = invocation.getArgument(0, ServerName.class);
      try {
        int i = Integer.parseInt(sn.getHostname().substring("foo".length()));
        return getRack(i);
      } catch (NumberFormatException e) {
        // Host names that do not follow the "foo<N>" pattern land in the unknown rack.
        return RackManager.UNKNOWN_RACK;
      }
    });
    for (int i = 0; i < 40; i++) {
      ServerName server = ServerName.valueOf("foo" + i, 1234, EnvironmentEdgeManager.currentTime());
      String rack = getRack(i);
      if (!rack.equals(RackManager.UNKNOWN_RACK)) {
        rackToServers.computeIfAbsent(rack, k -> new ArrayList<>()).add(server);
      }
      servers.add(server);
    }
  }

  @BeforeEach
  public void setupEach(TestInfo testInfo) {
    // Fail loudly instead of an unchecked Optional.get() if JUnit ever cannot
    // supply the test method (e.g. when invoked from a non-method context).
    methodName = testInfo.getTestMethod()
      .orElseThrow(() -> new IllegalStateException("Test method not available")).getName();
  }

  // The tests decide which racks to work with, and how many machines to
  // work with from any given rack
  // Return a random 'count' number of servers from 'rack'
  private static List<ServerName> getServersFromRack(Map<String, Integer> rackToServerCount) {
    List<ServerName> chosenServers = new ArrayList<>();
    for (Map.Entry<String, Integer> entry : rackToServerCount.entrySet()) {
      // Renamed from 'servers' to avoid shadowing the static field of the same name.
      List<ServerName> serversInRack = rackToServers.get(entry.getKey());
      for (int i = 0; i < entry.getValue(); i++) {
        chosenServers.add(serversInRack.get(i));
      }
    }
    return chosenServers;
  }

  @Test
  public void testSmallCluster() {
    // Test the case where we cannot assign favored nodes (because the number
    // of nodes in the cluster is too less)
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 2);
    List<ServerName> servers = getServersFromRack(rackToServerCount);
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertFalse(helper.canPlaceFavoredNodes());
  }

  @Test
  public void testPlacePrimaryRSAsRoundRobin() {
    // Test the regular case where there are many servers in different racks
    // Test once for few regions and once for many regions
    primaryRSPlacement(6, null, 10, 10, 10);
    // now create lots of regions and try to place them on the limited number of machines
    primaryRSPlacement(600, null, 10, 10, 10);
  }

  @Test
  public void testRoundRobinAssignmentsWithUnevenSizedRacks() {
    // In the case of uneven racks, the regions should be distributed
    // proportionately to the rack sizes
    primaryRSPlacement(6, null, 10, 10, 10);
    primaryRSPlacement(600, null, 10, 10, 5);
    primaryRSPlacement(600, null, 10, 5, 10);
    primaryRSPlacement(600, null, 5, 10, 10);
    primaryRSPlacement(500, null, 10, 10, 5);
    primaryRSPlacement(500, null, 10, 5, 10);
    primaryRSPlacement(500, null, 5, 10, 10);
    primaryRSPlacement(500, null, 9, 7, 8);
    primaryRSPlacement(500, null, 8, 7, 9);
    primaryRSPlacement(500, null, 7, 9, 8);
    primaryRSPlacement(459, null, 7, 9, 8);
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithSingleRack() {
    // Test the case where there is a single rack and we need to choose
    // Primary/Secondary/Tertiary from a single rack.
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 10);
    // have lots of regions to test with
    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
      List<RegionInfo>> primaryRSMapAndHelper =
        secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    // although we created lots of regions we should have no overlap on the
    // primary/secondary/tertiary for any given region
    for (RegionInfo region : regions) {
      ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
      assertNotNull(secondaryAndTertiaryServers);
      assertTrue(primaryRSMap.containsKey(region));
      assertNotEquals(secondaryAndTertiaryServers[0], primaryRSMap.get(region));
      assertNotEquals(secondaryAndTertiaryServers[1], primaryRSMap.get(region));
      assertNotEquals(secondaryAndTertiaryServers[0], secondaryAndTertiaryServers[1]);
    }
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithSingleServer() {
    // Test the case where we have a single node in the cluster. In this case
    // the primary can be assigned but the secondary/tertiary would be null
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 1);
    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
      List<RegionInfo>> primaryRSMapAndHelper =
        secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();

    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    // no secondary/tertiary placement in case of a single RegionServer
    assertNull(secondaryAndTertiaryMap.get(regions.get(0)));
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithMultipleRacks() {
    // Test the case where we have multiple racks and the region servers
    // belong to multiple racks
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 10);
    rackToServerCount.put("rack2", 10);

    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
      List<RegionInfo>> primaryRSMapAndHelper =
        secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();

    assertEquals(60000, primaryRSMap.size());
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    assertEquals(60000, secondaryAndTertiaryMap.size());
    // for every region, the primary should be on one rack and the secondary/tertiary
    // on another (we create a lot of regions just to increase probability of failure)
    for (Map.Entry<RegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) {
      ServerName[] allServersForRegion = entry.getValue();
      String primaryRSRack = rackManager.getRack(primaryRSMap.get(entry.getKey()));
      String secondaryRSRack = rackManager.getRack(allServersForRegion[0]);
      String tertiaryRSRack = rackManager.getRack(allServersForRegion[1]);
      Set<String> racks = Sets.newHashSet(primaryRSRack);
      racks.add(secondaryRSRack);
      racks.add(tertiaryRSRack);
      assertTrue(racks.size() >= 2);
    }
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
    // Test the case where we have two racks but with less than two servers in each
    // We will not have enough machines to select secondary/tertiary
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 1);
    rackToServerCount.put("rack2", 1);
    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
      List<RegionInfo>> primaryRSMapAndHelper =
        secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
    assertEquals(6, primaryRSMap.size());
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    for (RegionInfo region : regions) {
      // not enough secondary/tertiary room to place the regions
      assertNull(secondaryAndTertiaryMap.get(region));
    }
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack() {
    // Test the case where there is only one server in one rack and another rack
    // has more servers. We try to choose secondary/tertiary on different
    // racks than what the primary is on. But if the other rack doesn't have
    // enough nodes to have both secondary/tertiary RSs, the tertiary is placed
    // on the same rack as the primary server is on
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 2);
    rackToServerCount.put("rack2", 1);
    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
      List<RegionInfo>> primaryRSMapAndHelper =
        secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
    assertEquals(6, primaryRSMap.size());
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    // JUnit's assertEquals takes (expected, actual); the expected value goes first.
    assertEquals(regions.size(), secondaryAndTertiaryMap.size());
    for (RegionInfo region : regions) {
      ServerName s = primaryRSMap.get(region);
      ServerName secondaryRS = secondaryAndTertiaryMap.get(region)[0];
      ServerName tertiaryRS = secondaryAndTertiaryMap.get(region)[1];
      Set<String> racks = Sets.newHashSet(rackManager.getRack(s));
      racks.add(rackManager.getRack(secondaryRS));
      racks.add(rackManager.getRack(tertiaryRS));
      assertTrue(racks.size() >= 2);
    }
  }

  /**
   * Builds a helper over the requested rack/server layout, creates
   * {@code regionCount} regions and round-robins their primaries.
   * @return the primary placement map, the helper, and the created regions
   */
  private Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
    secondaryAndTertiaryRSPlacementHelper(int regionCount, Map<String, Integer> rackToServerCount) {
    Map<RegionInfo, ServerName> primaryRSMap = new HashMap<>();
    List<ServerName> servers = getServersFromRack(rackToServerCount);
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<>();
    helper.initialize();
    // create regions
    List<RegionInfo> regions = new ArrayList<>(regionCount);
    for (int i = 0; i < regionCount; i++) {
      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(methodName))
        .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build());
    }
    // place the regions
    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
    return new Triple<>(primaryRSMap, helper, regions);
  }

  private void primaryRSPlacement(int regionCount, Map<RegionInfo, ServerName> primaryRSMap,
    int firstRackSize, int secondRackSize, int thirdRackSize) {
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", firstRackSize);
    rackToServerCount.put("rack2", secondRackSize);
    rackToServerCount.put("rack3", thirdRackSize);
    List<ServerName> servers = getServersFromRack(rackToServerCount);
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();

    assertTrue(helper.canPlaceFavoredNodes());

    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<>();
    if (primaryRSMap == null) {
      primaryRSMap = new HashMap<>();
    }
    // create some regions
    List<RegionInfo> regions = new ArrayList<>(regionCount);
    for (int i = 0; i < regionCount; i++) {
      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf("foobar"))
        .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build());
    }
    // place those regions in primary RSs
    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);

    // we should have all the regions nicely spread across the racks
    int regionsOnRack1 = 0;
    int regionsOnRack2 = 0;
    int regionsOnRack3 = 0;
    for (RegionInfo region : regions) {
      if (rackManager.getRack(primaryRSMap.get(region)).equals("rack1")) {
        regionsOnRack1++;
      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack2")) {
        regionsOnRack2++;
      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack3")) {
        regionsOnRack3++;
      }
    }
    // Verify that the regions got placed in the way we expect (documented in
    // FavoredNodeAssignmentHelper#placePrimaryRSAsRoundRobin)
    checkNumRegions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1, regionsOnRack2,
      regionsOnRack3);
  }

  private void checkNumRegions(int firstRackSize, int secondRackSize, int thirdRackSize,
    int regionsOnRack1, int regionsOnRack2, int regionsOnRack3) {
    // The regions should be distributed proportionately to the racksizes
    // Verify the ordering was as expected by inserting the racks and regions
    // in sorted maps. The keys being the racksize and numregions; values are
    // the relative positions of the racksizes and numregions respectively
    NavigableMap<Integer, Integer> rackMap = new TreeMap<>();
    rackMap.put(firstRackSize, 1);
    rackMap.put(secondRackSize, 2);
    rackMap.put(thirdRackSize, 3);
    NavigableMap<Integer, Integer> regionMap = new TreeMap<>();
    regionMap.put(regionsOnRack1, 1);
    regionMap.put(regionsOnRack2, 2);
    regionMap.put(regionsOnRack3, 3);
    assertEquals(rackMap.get(firstRackSize).intValue(), regionMap.get(regionsOnRack1).intValue(),
      printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1, regionsOnRack2,
        regionsOnRack3));
    assertEquals(rackMap.get(secondRackSize).intValue(), regionMap.get(regionsOnRack2).intValue(),
      printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1, regionsOnRack2,
        regionsOnRack3));
    assertEquals(rackMap.get(thirdRackSize).intValue(), regionMap.get(regionsOnRack3).intValue(),
      printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1, regionsOnRack2,
        regionsOnRack3));
  }

  private String printProportions(int firstRackSize, int secondRackSize, int thirdRackSize,
    int regionsOnRack1, int regionsOnRack2, int regionsOnRack3) {
    return "The rack sizes " + firstRackSize + " " + secondRackSize + " " + thirdRackSize + " "
      + regionsOnRack1 + " " + regionsOnRack2 + " " + regionsOnRack3;
  }

  @Test
  public void testConstrainedPlacement() throws Exception {
    List<ServerName> servers = Lists.newArrayList();
    servers.add(ServerName.valueOf("foo" + 1 + ":1234", -1));
    servers.add(ServerName.valueOf("foo" + 2 + ":1234", -1));
    servers.add(ServerName.valueOf("foo" + 15 + ":1234", -1));
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    List<RegionInfo> regions = new ArrayList<>(20);
    for (int i = 0; i < 20; i++) {
      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(methodName))
        .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build());
    }
    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<>();
    Map<RegionInfo, ServerName> primaryRSMap = new HashMap<>();
    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
    // JUnit's assertEquals takes (expected, actual); the expected value goes first.
    assertEquals(regions.size(), primaryRSMap.size());
    Map<RegionInfo, ServerName[]> secondaryAndTertiary =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    assertEquals(regions.size(), secondaryAndTertiary.size());
  }

  @Test
  public void testGetOneRandomRack() throws IOException {

    Map<String, Integer> rackToServerCount = new HashMap<>();
    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
    for (String rack : rackList) {
      rackToServerCount.put(rack, 2);
    }
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    // Check we don't get a bad rack on any number of attempts
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      assertTrue(rackList.contains(helper.getOneRandomRack(Sets.newHashSet())));
    }

    // Check skipRack multiple times when an invalid rack is specified
    Set<String> skipRacks = Sets.newHashSet("rack");
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      assertTrue(rackList.contains(helper.getOneRandomRack(skipRacks)));
    }

    // Check skipRack multiple times when an valid rack is specified
    skipRacks = Sets.newHashSet("rack1");
    Set<String> validRacks = Sets.newHashSet("rack2", "rack3");
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      assertTrue(validRacks.contains(helper.getOneRandomRack(skipRacks)));
    }
  }

  @Test
  public void testGetRandomServerSingleRack() throws IOException {

    Map<String, Integer> rackToServerCount = new HashMap<>();
    final String rack = "rack1";
    rackToServerCount.put(rack, 4);
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    // Check we don't get a bad node on any number of attempts
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet());
      assertTrue(servers.contains(sn), "Server:" + sn + " does not belong to list: " + servers);
    }

    // Check skipServers multiple times when an invalid server is specified
    Set<ServerName> skipServers =
      Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE));
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName sn = helper.getOneRandomServer(rack, skipServers);
      assertTrue(servers.contains(sn), "Server:" + sn + " does not belong to list: " + servers);
    }

    // Check skipRack multiple times when an valid servers are specified
    ServerName skipSN = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
    skipServers = Sets.newHashSet(skipSN);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName sn = helper.getOneRandomServer(rack, skipServers);
      assertNotEquals(skipSN.getAddress(), sn.getAddress(), "Skip server should not be selected ");
      assertTrue(servers.contains(sn), "Server:" + sn + " does not belong to list: " + servers);
    }
  }

  @Test
  public void testGetRandomServerMultiRack() throws IOException {
    Map<String, Integer> rackToServerCount = new HashMap<>();
    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
    for (String rack : rackList) {
      rackToServerCount.put(rack, 4);
    }
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    // Check we don't get a bad node on any number of attempts
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      for (String rack : rackList) {
        ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet());
        assertTrue(rackToServers.get(rack).contains(sn),
          "Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack));
      }
    }

    // Check skipServers multiple times when an invalid server is specified
    Set<ServerName> skipServers =
      Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE));
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      for (String rack : rackList) {
        ServerName sn = helper.getOneRandomServer(rack, skipServers);
        assertTrue(rackToServers.get(rack).contains(sn),
          "Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack));
      }
    }

    // Check skipRack multiple times when an valid servers are specified
    ServerName skipSN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
    ServerName skipSN2 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE);
    ServerName skipSN3 = ServerName.valueOf("foo20:1234", ServerName.NON_STARTCODE);
    skipServers = Sets.newHashSet(skipSN1, skipSN2, skipSN3);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      for (String rack : rackList) {
        ServerName sn = helper.getOneRandomServer(rack, skipServers);
        assertFalse(skipServers.contains(sn), "Skip server should not be selected ");
        assertTrue(rackToServers.get(rack).contains(sn),
          "Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack));
      }
    }
  }

  @Test
  public void testGetFavoredNodes() throws IOException {
    Map<String, Integer> rackToServerCount = new HashMap<>();
    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
    for (String rack : rackList) {
      rackToServerCount.put(rack, 4);
    }
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(methodName))
      .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).build();

    for (int maxattempts = 0; maxattempts < MAX_ATTEMPTS; maxattempts++) {
      List<ServerName> fn = helper.generateFavoredNodes(region);
      checkDuplicateFN(fn);
      checkFNRacks(fn);
    }
  }

  @Test
  public void testGenMissingFavoredNodeOneRack() throws IOException {
    Map<String, Integer> rackToServerCount = new HashMap<>();
    final String rack = "rack1";
    rackToServerCount.put(rack, 6);
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    ServerName snRack1SN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
    ServerName snRack1SN2 = ServerName.valueOf("foo2:1234", ServerName.NON_STARTCODE);
    ServerName snRack1SN3 = ServerName.valueOf("foo3:1234", ServerName.NON_STARTCODE);

    List<ServerName> fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      checkDuplicateFN(fn, helper.generateMissingFavoredNode(fn));
    }

    fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
    List<ServerName> skipServers = Lists.newArrayList(snRack1SN3);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName genSN = helper.generateMissingFavoredNode(fn, skipServers);
      checkDuplicateFN(fn, genSN);
      assertNotEquals(snRack1SN3, genSN, "Generated FN should not match excluded one");
    }
  }

  @Test
  public void testGenMissingFavoredNodeMultiRack() throws IOException {

    ServerName snRack1SN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
    ServerName snRack1SN2 = ServerName.valueOf("foo2:1234", ServerName.NON_STARTCODE);
    ServerName snRack2SN1 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE);
    ServerName snRack2SN2 = ServerName.valueOf("foo11:1234", ServerName.NON_STARTCODE);

    Map<String, Integer> rackToServerCount = new HashMap<>();
    Set<String> rackList = Sets.newHashSet("rack1", "rack2");
    for (String rack : rackList) {
      rackToServerCount.put(rack, 4);
    }
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    List<ServerName> fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName genSN = helper.generateMissingFavoredNode(fn);
      checkDuplicateFN(fn, genSN);
      checkFNRacks(fn, genSN);
    }

    fn = Lists.newArrayList(snRack1SN1, snRack2SN1);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName genSN = helper.generateMissingFavoredNode(fn);
      checkDuplicateFN(fn, genSN);
      checkFNRacks(fn, genSN);
    }

    fn = Lists.newArrayList(snRack1SN1, snRack2SN1);
    List<ServerName> skipServers = Lists.newArrayList(snRack2SN2);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName genSN = helper.generateMissingFavoredNode(fn, skipServers);
      checkDuplicateFN(fn, genSN);
      checkFNRacks(fn, genSN);
      assertNotEquals(snRack2SN2, genSN, "Generated FN should not match excluded one");
    }
  }

  /** Asserts that fnList plus the generated node form a full, duplicate-free FN set. */
  private void checkDuplicateFN(List<ServerName> fnList, ServerName genFN) {
    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
    assertNotNull(genFN, "Generated FN can't be null");
    favoredNodes.add(genFN);
    assertEquals(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size(),
      "Did not find expected number of favored nodes");
  }

  /** Asserts that fnList is a full, duplicate-free FN set. */
  private void checkDuplicateFN(List<ServerName> fnList) {
    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
    assertEquals(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size(),
      "Did not find expected number of favored nodes");
  }

  /** Asserts that fnList plus the generated node span at least two racks. */
  private void checkFNRacks(List<ServerName> fnList, ServerName genFN) {
    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
    favoredNodes.add(genFN);
    Set<String> racks = Sets.newHashSet();
    for (ServerName sn : favoredNodes) {
      racks.add(rackManager.getRack(sn));
    }
    assertTrue(racks.size() >= 2, "FN should be spread atleast across 2 racks");
  }

  /** Asserts that fnList spans at least two racks. */
  private void checkFNRacks(List<ServerName> fnList) {
    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
    Set<String> racks = Sets.newHashSet();
    for (ServerName sn : favoredNodes) {
      racks.add(rackManager.getRack(sn));
    }
    assertTrue(racks.size() >= 2, "FN should be spread atleast across 2 racks");
  }
}