/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.favored;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Triple;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.mockito.Mockito;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

@Category({MasterTests.class, LargeTests.class})
public class TestFavoredNodeAssignmentHelper {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestFavoredNodeAssignmentHelper.class);

  private static List<ServerName> servers = new ArrayList<>();
  private static Map<String, List<ServerName>> rackToServers = new HashMap<>();
  private static RackManager rackManager = Mockito.mock(RackManager.class);

  // Some tests have randomness, so we run them multiple times
  private static final int MAX_ATTEMPTS = 100;

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    // Set up some server -> rack mappings
    // Have three racks in the cluster with 10 hosts each.
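    // Note: the loop below creates 40 servers in total, but only the first 30 are mapped
    // to racks (10 per rack); servers foo30..foo39 are left without a rack mapping.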
    for (int i = 0; i < 40; i++) {
      ServerName server = ServerName.valueOf("foo" + i + ":1234", -1);
      if (i < 10) {
        Mockito.when(rackManager.getRack(server)).thenReturn("rack1");
        if (rackToServers.get("rack1") == null) {
          List<ServerName> servers = new ArrayList<>();
          rackToServers.put("rack1", servers);
        }
        rackToServers.get("rack1").add(server);
      }
      if (i >= 10 && i < 20) {
        Mockito.when(rackManager.getRack(server)).thenReturn("rack2");
        if (rackToServers.get("rack2") == null) {
          List<ServerName> servers = new ArrayList<>();
          rackToServers.put("rack2", servers);
        }
        rackToServers.get("rack2").add(server);
      }
      if (i >= 20 && i < 30) {
        Mockito.when(rackManager.getRack(server)).thenReturn("rack3");
        if (rackToServers.get("rack3") == null) {
          List<ServerName> servers = new ArrayList<>();
          rackToServers.put("rack3", servers);
        }
        rackToServers.get("rack3").add(server);
      }
      servers.add(server);
    }
  }

  // The tests decide which racks to work with, and how many machines to
  // work with from any given rack.
  // Return 'count' servers from each requested rack (the first 'count' entries
  // in that rack's server list)
  private static List<ServerName> getServersFromRack(Map<String, Integer> rackToServerCount) {
    List<ServerName> chosenServers = new ArrayList<>();
    for (Map.Entry<String, Integer> entry : rackToServerCount.entrySet()) {
      List<ServerName> servers = rackToServers.get(entry.getKey());
      for (int i = 0; i < entry.getValue(); i++) {
        chosenServers.add(servers.get(i));
      }
    }
    return chosenServers;
  }

  @Test
  public void testSmallCluster() {
    // Test the case where we cannot assign favored nodes (because there are
    // too few nodes in the cluster)
    Map<String,Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 2);
    List<ServerName> servers = getServersFromRack(rackToServerCount);
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers,
        new Configuration());
    helper.initialize();
    assertFalse(helper.canPlaceFavoredNodes());
  }

  @Test
  public void testPlacePrimaryRSAsRoundRobin() {
    // Test the regular case where there are many servers in different racks
    // Test once for few regions and once for many regions
    primaryRSPlacement(6, null, 10, 10, 10);
    // now create lots of regions and try to place them on the limited number of machines
    primaryRSPlacement(600, null, 10, 10, 10);
  }

  @Test
  public void testRoundRobinAssignmentsWithUnevenSizedRacks() {
    // In the case of uneven racks, the regions should be distributed
    // proportionately to the rack sizes
    primaryRSPlacement(6, null, 10, 10, 10);
    primaryRSPlacement(600, null, 10, 10, 5);
    primaryRSPlacement(600, null, 10, 5, 10);
    primaryRSPlacement(600, null, 5, 10, 10);
    primaryRSPlacement(500, null, 10, 10, 5);
    primaryRSPlacement(500, null, 10, 5, 10);
    primaryRSPlacement(500, null, 5, 10, 10);
    primaryRSPlacement(500, null, 9, 7, 8);
    primaryRSPlacement(500, null, 8, 7, 9);
    primaryRSPlacement(500, null, 7, 9, 8);
    primaryRSPlacement(459, null, 7, 9, 8);
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithSingleRack() {
    // Test the case where there is a single rack and we need to choose
    // Primary/Secondary/Tertiary from a single rack.
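    // With only one rack available, secondary and tertiary cannot go to a different rack,
    // so the assertions below only check that the three servers picked for a region are distinct.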
    Map<String,Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 10);
    // have lots of regions to test with
    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
      primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    // although we created lots of regions we should have no overlap on the
    // primary/secondary/tertiary for any given region
    for (RegionInfo region : regions) {
      ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
      assertNotNull(secondaryAndTertiaryServers);
      assertTrue(primaryRSMap.containsKey(region));
      assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
      assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
      assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
    }
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithSingleServer() {
    // Test the case where we have a single node in the cluster. In this case
    // the primary can be assigned but the secondary/tertiary would be null
    Map<String,Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 1);
    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
      primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();

    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    // no secondary/tertiary placement in case of a single RegionServer
    assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithMultipleRacks() {
    // Test the case where we have multiple racks and the region servers
    // belong to multiple racks
    Map<String,Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 10);
    rackToServerCount.put("rack2", 10);

    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
      primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();

    assertTrue(primaryRSMap.size() == 60000);
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    assertTrue(secondaryAndTertiaryMap.size() == 60000);
    // for every region, the primary should be on one rack and the secondary/tertiary
    // on another (we create a lot of regions just to increase probability of failure)
    for (Map.Entry<RegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) {
      ServerName[] allServersForRegion = entry.getValue();
      String primaryRSRack = rackManager.getRack(primaryRSMap.get(entry.getKey()));
      String secondaryRSRack = rackManager.getRack(allServersForRegion[0]);
      String tertiaryRSRack = rackManager.getRack(allServersForRegion[1]);
      Set<String> racks = Sets.newHashSet(primaryRSRack);
      racks.add(secondaryRSRack);
      racks.add(tertiaryRSRack);
      assertTrue(racks.size() >= 2);
    }
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
    // Test the case where we have two racks but with less than two servers in each
    // We will not have enough machines to select secondary/tertiary
    Map<String,Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 1);
    rackToServerCount.put("rack2", 1);
    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
      primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
    assertTrue(primaryRSMap.size() == 6);
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    for (RegionInfo region : regions) {
      // not enough secondary/tertiary room to place the regions
      assertTrue(secondaryAndTertiaryMap.get(region) == null);
    }
  }

  @Test
  public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack() {
    // Test the case where there is only one server in one rack and another rack
    // has more servers. We try to choose secondary/tertiary on different
    // racks than what the primary is on. But if the other rack doesn't have
    // enough nodes to have both secondary/tertiary RSs, the tertiary is placed
    // on the same rack as the primary server is on
    Map<String,Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 2);
    rackToServerCount.put("rack2", 1);
    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
      primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
    assertTrue(primaryRSMap.size() == 6);
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    assertTrue(secondaryAndTertiaryMap.size() == regions.size());
    for (RegionInfo region : regions) {
      ServerName s = primaryRSMap.get(region);
      ServerName secondaryRS = secondaryAndTertiaryMap.get(region)[0];
      ServerName tertiaryRS = secondaryAndTertiaryMap.get(region)[1];
      Set<String> racks = Sets.newHashSet(rackManager.getRack(s));
      racks.add(rackManager.getRack(secondaryRS));
      racks.add(rackManager.getRack(tertiaryRS));
      assertTrue(racks.size() >= 2);
    }
  }

  private Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
      secondaryAndTertiaryRSPlacementHelper(
        int regionCount, Map<String, Integer> rackToServerCount) {
    Map<RegionInfo, ServerName> primaryRSMap = new HashMap<RegionInfo, ServerName>();
    List<ServerName> servers = getServersFromRack(rackToServerCount);
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    Map<ServerName, List<RegionInfo>> assignmentMap =
        new HashMap<ServerName, List<RegionInfo>>();
    helper.initialize();
    // create regions
    List<RegionInfo> regions = new ArrayList<>(regionCount);
    for (int i = 0; i < regionCount; i++) {
      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setStartKey(Bytes.toBytes(i))
        .setEndKey(Bytes.toBytes(i + 1))
        .build());
    }
    // place the regions
    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
    return new Triple<>(primaryRSMap, helper, regions);
  }

  private void primaryRSPlacement(int regionCount, Map<RegionInfo, ServerName> primaryRSMap,
      int firstRackSize, int secondRackSize, int thirdRackSize) {
    Map<String,Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", firstRackSize);
    rackToServerCount.put("rack2", secondRackSize);
    rackToServerCount.put("rack3", thirdRackSize);
    List<ServerName> servers = getServersFromRack(rackToServerCount);
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers,
        rackManager);
    helper.initialize();

    assertTrue(helper.canPlaceFavoredNodes());

    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<>();
    if (primaryRSMap == null) primaryRSMap = new HashMap<>();
    // create some regions
    List<RegionInfo> regions = new ArrayList<>(regionCount);
    for (int i = 0; i < regionCount; i++) {
      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf("foobar"))
        .setStartKey(Bytes.toBytes(i))
        .setEndKey(Bytes.toBytes(i + 1))
        .build());
    }
    // place those regions in primary RSs
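    // (the call below fills in both assignmentMap and primaryRSMap)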
    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);

    // we should have all the regions nicely spread across the racks
    int regionsOnRack1 = 0;
    int regionsOnRack2 = 0;
    int regionsOnRack3 = 0;
    for (RegionInfo region : regions) {
      if (rackManager.getRack(primaryRSMap.get(region)).equals("rack1")) {
        regionsOnRack1++;
      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack2")) {
        regionsOnRack2++;
      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack3")) {
        regionsOnRack3++;
      }
    }
    // Verify that the regions got placed in the way we expect (documented in
    // FavoredNodeAssignmentHelper#placePrimaryRSAsRoundRobin)
    checkNumRegions(regionCount, firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
      regionsOnRack2, regionsOnRack3, assignmentMap);
  }

  private void checkNumRegions(int regionCount, int firstRackSize, int secondRackSize,
      int thirdRackSize, int regionsOnRack1, int regionsOnRack2, int regionsOnRack3,
      Map<ServerName, List<RegionInfo>> assignmentMap) {
    // The regions should be distributed proportionately to the rack sizes.
    // Verify the ordering was as expected by inserting the rack sizes and region counts
    // into sorted maps: the keys are the rack sizes and region counts, and the values are
    // the rack indices (1, 2, 3) they belong to.
    SortedMap<Integer, Integer> rackMap = new TreeMap<>();
    rackMap.put(firstRackSize, 1);
    rackMap.put(secondRackSize, 2);
    rackMap.put(thirdRackSize, 3);
    SortedMap<Integer, Integer> regionMap = new TreeMap<>();
    regionMap.put(regionsOnRack1, 1);
    regionMap.put(regionsOnRack2, 2);
    regionMap.put(regionsOnRack3, 3);
    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize,
      regionsOnRack1, regionsOnRack2, regionsOnRack3),
      rackMap.get(firstRackSize) == regionMap.get(regionsOnRack1));
    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize,
      regionsOnRack1, regionsOnRack2, regionsOnRack3),
      rackMap.get(secondRackSize) == regionMap.get(regionsOnRack2));
    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize,
      regionsOnRack1, regionsOnRack2, regionsOnRack3),
      rackMap.get(thirdRackSize) == regionMap.get(regionsOnRack3));
  }

  private String printProportions(int firstRackSize, int secondRackSize,
      int thirdRackSize, int regionsOnRack1, int regionsOnRack2, int regionsOnRack3) {
    return "The rack sizes " + firstRackSize + " " + secondRackSize + " " + thirdRackSize
        + " and region counts " + regionsOnRack1 + " " + regionsOnRack2 + " " + regionsOnRack3;
  }

  @Test
  public void testConstrainedPlacement() throws Exception {
    List<ServerName> servers = Lists.newArrayList();
    servers.add(ServerName.valueOf("foo" + 1 + ":1234", -1));
    servers.add(ServerName.valueOf("foo" + 2 + ":1234", -1));
    servers.add(ServerName.valueOf("foo" + 15 + ":1234", -1));
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    List<RegionInfo> regions = new ArrayList<>(20);
    for (int i = 0; i < 20; i++) {
      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setStartKey(Bytes.toBytes(i))
        .setEndKey(Bytes.toBytes(i + 1))
        .build());
    }
    Map<ServerName, List<RegionInfo>> assignmentMap =
        new HashMap<ServerName, List<RegionInfo>>();
    Map<RegionInfo, ServerName> primaryRSMap = new HashMap<RegionInfo, ServerName>();
    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
    assertTrue(primaryRSMap.size() == regions.size());
    Map<RegionInfo, ServerName[]> secondaryAndTertiary =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    assertEquals(regions.size(), secondaryAndTertiary.size());
  }

  @Test
  public void testGetOneRandomRack() throws IOException {
    Map<String,Integer> rackToServerCount = new HashMap<>();
    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
    for (String rack : rackList) {
      rackToServerCount.put(rack, 2);
    }
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    // Check we don't get a bad rack on any number of attempts
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      assertTrue(rackList.contains(helper.getOneRandomRack(Sets.newHashSet())));
    }

    // Check skipRack multiple times when an invalid rack is specified
    Set<String> skipRacks = Sets.newHashSet("rack");
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      assertTrue(rackList.contains(helper.getOneRandomRack(skipRacks)));
    }

    // Check skipRack multiple times when a valid rack is specified
    skipRacks = Sets.newHashSet("rack1");
    Set<String> validRacks = Sets.newHashSet("rack2", "rack3");
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      assertTrue(validRacks.contains(helper.getOneRandomRack(skipRacks)));
    }
  }

  @Test
  public void testGetRandomServerSingleRack() throws IOException {
    Map<String,Integer> rackToServerCount = new HashMap<>();
    final String rack = "rack1";
    rackToServerCount.put(rack, 4);
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    // Check we don't get a bad node on any number of attempts
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet());
      assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn));
    }

    // Check skipServers multiple times when an invalid server is specified
    Set<ServerName> skipServers =
        Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE));
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName sn = helper.getOneRandomServer(rack, skipServers);
      assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn));
    }

    // Check skipServers multiple times when a valid server is specified
    ServerName skipSN = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
    skipServers = Sets.newHashSet(skipSN);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName sn = helper.getOneRandomServer(rack, skipServers);
      assertNotEquals("Skip server should not be selected ",
        skipSN.getHostAndPort(), sn.getHostAndPort());
      assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn));
    }
  }

  @Test
  public void
      testGetRandomServerMultiRack() throws IOException {
    Map<String,Integer> rackToServerCount = new HashMap<>();
    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
    for (String rack : rackList) {
      rackToServerCount.put(rack, 4);
    }
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    // Check we don't get a bad node on any number of attempts
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      for (String rack : rackList) {
        ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet());
        assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack),
          rackToServers.get(rack).contains(sn));
      }
    }

    // Check skipServers multiple times when an invalid server is specified
    Set<ServerName> skipServers =
        Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE));
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      for (String rack : rackList) {
        ServerName sn = helper.getOneRandomServer(rack, skipServers);
        assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack),
          rackToServers.get(rack).contains(sn));
      }
    }

    // Check skipServers multiple times when valid servers are specified
    ServerName skipSN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
    ServerName skipSN2 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE);
    ServerName skipSN3 = ServerName.valueOf("foo20:1234", ServerName.NON_STARTCODE);
    skipServers = Sets.newHashSet(skipSN1, skipSN2, skipSN3);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      for (String rack : rackList) {
        ServerName sn = helper.getOneRandomServer(rack, skipServers);
        assertFalse("Skip server should not be selected ", skipServers.contains(sn));
        assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack),
          rackToServers.get(rack).contains(sn));
      }
    }
  }

  @Test
  public void testGetFavoredNodes() throws IOException {
    Map<String,Integer> rackToServerCount = new HashMap<>();
    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
    for (String rack : rackList) {
      rackToServerCount.put(rack, 4);
    }
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
      .setStartKey(HConstants.EMPTY_START_ROW)
      .setEndKey(HConstants.EMPTY_END_ROW)
      .build();

    for (int maxattempts = 0; maxattempts < MAX_ATTEMPTS; maxattempts++) {
      List<ServerName> fn = helper.generateFavoredNodes(region);
      checkDuplicateFN(fn);
      checkFNRacks(fn);
    }
  }

  @Test
  public void testGenMissingFavoredNodeOneRack() throws IOException {
    Map<String, Integer> rackToServerCount = new HashMap<>();
    final String rack = "rack1";
    rackToServerCount.put(rack, 6);
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    ServerName snRack1SN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
    ServerName snRack1SN2 = ServerName.valueOf("foo2:1234", ServerName.NON_STARTCODE);
    ServerName snRack1SN3 = ServerName.valueOf("foo3:1234", ServerName.NON_STARTCODE);

    List<ServerName> fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      checkDuplicateFN(fn, helper.generateMissingFavoredNode(fn));
    }

    fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
    List<ServerName> skipServers = Lists.newArrayList(snRack1SN3);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName genSN = helper.generateMissingFavoredNode(fn, skipServers);
      checkDuplicateFN(fn, genSN);
      assertNotEquals("Generated FN should not match excluded one", snRack1SN3, genSN);
    }
  }

  @Test
  public void testGenMissingFavoredNodeMultiRack() throws IOException {
    ServerName snRack1SN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
    ServerName snRack1SN2 = ServerName.valueOf("foo2:1234", ServerName.NON_STARTCODE);
    ServerName snRack2SN1 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE);
    ServerName snRack2SN2 = ServerName.valueOf("foo11:1234", ServerName.NON_STARTCODE);

    Map<String,Integer> rackToServerCount = new HashMap<>();
    Set<String> rackList = Sets.newHashSet("rack1", "rack2");
    for (String rack : rackList) {
      rackToServerCount.put(rack, 4);
    }
    List<ServerName> servers = getServersFromRack(rackToServerCount);

    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();
    assertTrue(helper.canPlaceFavoredNodes());

    List<ServerName> fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName genSN = helper.generateMissingFavoredNode(fn);
      checkDuplicateFN(fn, genSN);
      checkFNRacks(fn, genSN);
    }

    fn = Lists.newArrayList(snRack1SN1, snRack2SN1);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName genSN = helper.generateMissingFavoredNode(fn);
      checkDuplicateFN(fn, genSN);
      checkFNRacks(fn, genSN);
    }

    fn = Lists.newArrayList(snRack1SN1, snRack2SN1);
    List<ServerName> skipServers = Lists.newArrayList(snRack2SN2);
    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
      ServerName genSN = helper.generateMissingFavoredNode(fn, skipServers);
      checkDuplicateFN(fn, genSN);
      checkFNRacks(fn, genSN);
      assertNotEquals("Generated FN should not match excluded one", snRack2SN2, genSN);
    }
  }

  private void checkDuplicateFN(List<ServerName> fnList, ServerName genFN) {
    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
    assertNotNull("Generated FN can't be null", genFN);
    favoredNodes.add(genFN);
    assertEquals("Did not find expected number of favored nodes",
      FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size());
  }

  private void checkDuplicateFN(List<ServerName> fnList) {
    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
    assertEquals("Did not find expected number of favored nodes",
      FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size());
  }
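  // Verify that the favored node list (plus the newly generated node, when given)
  // spans at least two racks.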
  private void checkFNRacks(List<ServerName> fnList, ServerName genFN) {
    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
    favoredNodes.add(genFN);
    Set<String> racks = Sets.newHashSet();
    for (ServerName sn : favoredNodes) {
      racks.add(rackManager.getRack(sn));
    }
    assertTrue("FN should be spread across at least 2 racks", racks.size() >= 2);
  }

  private void checkFNRacks(List<ServerName> fnList) {
    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
    Set<String> racks = Sets.newHashSet();
    for (ServerName sn : favoredNodes) {
      racks.add(rackManager.getRack(sn));
    }
    assertTrue("FN should be spread across at least 2 racks", racks.size() >= 2);
  }
}