001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.favored;
019
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
027
028import java.io.IOException;
029import java.util.ArrayList;
030import java.util.HashMap;
031import java.util.List;
032import java.util.Map;
033import java.util.Set;
034import java.util.SortedMap;
035import java.util.TreeMap;
036import org.apache.hadoop.hbase.HBaseClassTestRule;
037import org.apache.hadoop.hbase.HConstants;
038import org.apache.hadoop.hbase.ServerName;
039import org.apache.hadoop.hbase.TableName;
040import org.apache.hadoop.hbase.client.RegionInfo;
041import org.apache.hadoop.hbase.client.RegionInfoBuilder;
042import org.apache.hadoop.hbase.master.RackManager;
043import org.apache.hadoop.hbase.testclassification.MasterTests;
044import org.apache.hadoop.hbase.testclassification.MediumTests;
045import org.apache.hadoop.hbase.util.Bytes;
046import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
047import org.apache.hadoop.hbase.util.Triple;
048import org.junit.BeforeClass;
049import org.junit.ClassRule;
050import org.junit.Rule;
051import org.junit.Test;
052import org.junit.experimental.categories.Category;
053import org.junit.rules.TestName;
054import org.mockito.Mockito;
055
056import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
057import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
058
059@Category({ MasterTests.class, MediumTests.class })
060public class TestFavoredNodeAssignmentHelper {
061
062  @ClassRule
063  public static final HBaseClassTestRule CLASS_RULE =
064    HBaseClassTestRule.forClass(TestFavoredNodeAssignmentHelper.class);
065
066  private static List<ServerName> servers = new ArrayList<>();
067  private static Map<String, List<ServerName>> rackToServers = new HashMap<>();
068  private static RackManager rackManager = Mockito.mock(RackManager.class);
069
070  // Some tests have randomness, so we run them multiple times
071  private static final int MAX_ATTEMPTS = 100;
072
073  @Rule
074  public TestName name = new TestName();
075
076  private static String getRack(int index) {
077    if (index < 10) {
078      return "rack1";
079    } else if (index < 20) {
080      return "rack2";
081    } else if (index < 30) {
082      return "rack3";
083    } else {
084      return RackManager.UNKNOWN_RACK;
085    }
086  }
087
088  @BeforeClass
089  public static void setupBeforeClass() throws Exception {
090    // Set up some server -> rack mappings
091    // Have three racks in the cluster with 10 hosts each.
092    when(rackManager.getRack(any(ServerName.class))).then(invocation -> {
093      ServerName sn = invocation.getArgument(0, ServerName.class);
094      try {
095        int i = Integer.parseInt(sn.getHostname().substring("foo".length()));
096        return getRack(i);
097      } catch (NumberFormatException e) {
098        return RackManager.UNKNOWN_RACK;
099      }
100    });
101    for (int i = 0; i < 40; i++) {
102      ServerName server = ServerName.valueOf("foo" + i, 1234, EnvironmentEdgeManager.currentTime());
103      String rack = getRack(i);
104      if (!rack.equals(RackManager.UNKNOWN_RACK)) {
105        rackToServers.computeIfAbsent(rack, k -> new ArrayList<>()).add(server);
106      }
107      servers.add(server);
108    }
109  }
110
111  // The tests decide which racks to work with, and how many machines to
112  // work with from any given rack
113  // Return a random 'count' number of servers from 'rack'
114  private static List<ServerName> getServersFromRack(Map<String, Integer> rackToServerCount) {
115    List<ServerName> chosenServers = new ArrayList<>();
116    for (Map.Entry<String, Integer> entry : rackToServerCount.entrySet()) {
117      List<ServerName> servers = rackToServers.get(entry.getKey());
118      for (int i = 0; i < entry.getValue(); i++) {
119        chosenServers.add(servers.get(i));
120      }
121    }
122    return chosenServers;
123  }
124
125  @Test
126  public void testSmallCluster() {
127    // Test the case where we cannot assign favored nodes (because the number
128    // of nodes in the cluster is too less)
129    Map<String, Integer> rackToServerCount = new HashMap<>();
130    rackToServerCount.put("rack1", 2);
131    List<ServerName> servers = getServersFromRack(rackToServerCount);
132    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
133    helper.initialize();
134    assertFalse(helper.canPlaceFavoredNodes());
135  }
136
137  @Test
138  public void testPlacePrimaryRSAsRoundRobin() {
139    // Test the regular case where there are many servers in different racks
140    // Test once for few regions and once for many regions
141    primaryRSPlacement(6, null, 10, 10, 10);
142    // now create lots of regions and try to place them on the limited number of machines
143    primaryRSPlacement(600, null, 10, 10, 10);
144  }
145
146  @Test
147  public void testRoundRobinAssignmentsWithUnevenSizedRacks() {
148    // In the case of uneven racks, the regions should be distributed
149    // proportionately to the rack sizes
150    primaryRSPlacement(6, null, 10, 10, 10);
151    primaryRSPlacement(600, null, 10, 10, 5);
152    primaryRSPlacement(600, null, 10, 5, 10);
153    primaryRSPlacement(600, null, 5, 10, 10);
154    primaryRSPlacement(500, null, 10, 10, 5);
155    primaryRSPlacement(500, null, 10, 5, 10);
156    primaryRSPlacement(500, null, 5, 10, 10);
157    primaryRSPlacement(500, null, 9, 7, 8);
158    primaryRSPlacement(500, null, 8, 7, 9);
159    primaryRSPlacement(500, null, 7, 9, 8);
160    primaryRSPlacement(459, null, 7, 9, 8);
161  }
162
163  @Test
164  public void testSecondaryAndTertiaryPlacementWithSingleRack() {
165    // Test the case where there is a single rack and we need to choose
166    // Primary/Secondary/Tertiary from a single rack.
167    Map<String, Integer> rackToServerCount = new HashMap<>();
168    rackToServerCount.put("rack1", 10);
169    // have lots of regions to test with
170    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
171      List<RegionInfo>> primaryRSMapAndHelper =
172        secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
173    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
174    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
175    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
176    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
177      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
178    // although we created lots of regions we should have no overlap on the
179    // primary/secondary/tertiary for any given region
180    for (RegionInfo region : regions) {
181      ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
182      assertNotNull(secondaryAndTertiaryServers);
183      assertTrue(primaryRSMap.containsKey(region));
184      assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
185      assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
186      assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
187    }
188  }
189
190  @Test
191  public void testSecondaryAndTertiaryPlacementWithSingleServer() {
192    // Test the case where we have a single node in the cluster. In this case
193    // the primary can be assigned but the secondary/tertiary would be null
194    Map<String, Integer> rackToServerCount = new HashMap<>();
195    rackToServerCount.put("rack1", 1);
196    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
197      List<RegionInfo>> primaryRSMapAndHelper =
198        secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
199    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
200    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
201    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
202
203    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
204      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
205    // no secondary/tertiary placement in case of a single RegionServer
206    assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
207  }
208
209  @Test
210  public void testSecondaryAndTertiaryPlacementWithMultipleRacks() {
211    // Test the case where we have multiple racks and the region servers
212    // belong to multiple racks
213    Map<String, Integer> rackToServerCount = new HashMap<>();
214    rackToServerCount.put("rack1", 10);
215    rackToServerCount.put("rack2", 10);
216
217    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
218      List<RegionInfo>> primaryRSMapAndHelper =
219        secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
220    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
221    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
222
223    assertTrue(primaryRSMap.size() == 60000);
224    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
225      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
226    assertTrue(secondaryAndTertiaryMap.size() == 60000);
227    // for every region, the primary should be on one rack and the secondary/tertiary
228    // on another (we create a lot of regions just to increase probability of failure)
229    for (Map.Entry<RegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) {
230      ServerName[] allServersForRegion = entry.getValue();
231      String primaryRSRack = rackManager.getRack(primaryRSMap.get(entry.getKey()));
232      String secondaryRSRack = rackManager.getRack(allServersForRegion[0]);
233      String tertiaryRSRack = rackManager.getRack(allServersForRegion[1]);
234      Set<String> racks = Sets.newHashSet(primaryRSRack);
235      racks.add(secondaryRSRack);
236      racks.add(tertiaryRSRack);
237      assertTrue(racks.size() >= 2);
238    }
239  }
240
241  @Test
242  public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
243    // Test the case where we have two racks but with less than two servers in each
244    // We will not have enough machines to select secondary/tertiary
245    Map<String, Integer> rackToServerCount = new HashMap<>();
246    rackToServerCount.put("rack1", 1);
247    rackToServerCount.put("rack2", 1);
248    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
249      List<RegionInfo>> primaryRSMapAndHelper =
250        secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
251    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
252    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
253    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
254    assertTrue(primaryRSMap.size() == 6);
255    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
256      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
257    for (RegionInfo region : regions) {
258      // not enough secondary/tertiary room to place the regions
259      assertTrue(secondaryAndTertiaryMap.get(region) == null);
260    }
261  }
262
263  @Test
264  public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack() {
265    // Test the case where there is only one server in one rack and another rack
266    // has more servers. We try to choose secondary/tertiary on different
267    // racks than what the primary is on. But if the other rack doesn't have
268    // enough nodes to have both secondary/tertiary RSs, the tertiary is placed
269    // on the same rack as the primary server is on
270    Map<String, Integer> rackToServerCount = new HashMap<>();
271    rackToServerCount.put("rack1", 2);
272    rackToServerCount.put("rack2", 1);
273    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
274      List<RegionInfo>> primaryRSMapAndHelper =
275        secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
276    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
277    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
278    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
279    assertTrue(primaryRSMap.size() == 6);
280    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
281      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
282    assertTrue(secondaryAndTertiaryMap.size() == regions.size());
283    for (RegionInfo region : regions) {
284      ServerName s = primaryRSMap.get(region);
285      ServerName secondaryRS = secondaryAndTertiaryMap.get(region)[0];
286      ServerName tertiaryRS = secondaryAndTertiaryMap.get(region)[1];
287      Set<String> racks = Sets.newHashSet(rackManager.getRack(s));
288      racks.add(rackManager.getRack(secondaryRS));
289      racks.add(rackManager.getRack(tertiaryRS));
290      assertTrue(racks.size() >= 2);
291    }
292  }
293
294  private Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
295    secondaryAndTertiaryRSPlacementHelper(int regionCount, Map<String, Integer> rackToServerCount) {
296    Map<RegionInfo, ServerName> primaryRSMap = new HashMap<RegionInfo, ServerName>();
297    List<ServerName> servers = getServersFromRack(rackToServerCount);
298    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
299    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<ServerName, List<RegionInfo>>();
300    helper.initialize();
301    // create regions
302    List<RegionInfo> regions = new ArrayList<>(regionCount);
303    for (int i = 0; i < regionCount; i++) {
304      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
305        .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build());
306    }
307    // place the regions
308    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
309    return new Triple<>(primaryRSMap, helper, regions);
310  }
311
312  private void primaryRSPlacement(int regionCount, Map<RegionInfo, ServerName> primaryRSMap,
313    int firstRackSize, int secondRackSize, int thirdRackSize) {
314    Map<String, Integer> rackToServerCount = new HashMap<>();
315    rackToServerCount.put("rack1", firstRackSize);
316    rackToServerCount.put("rack2", secondRackSize);
317    rackToServerCount.put("rack3", thirdRackSize);
318    List<ServerName> servers = getServersFromRack(rackToServerCount);
319    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
320    helper.initialize();
321
322    assertTrue(helper.canPlaceFavoredNodes());
323
324    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<>();
325    if (primaryRSMap == null) primaryRSMap = new HashMap<>();
326    // create some regions
327    List<RegionInfo> regions = new ArrayList<>(regionCount);
328    for (int i = 0; i < regionCount; i++) {
329      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf("foobar"))
330        .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build());
331    }
332    // place those regions in primary RSs
333    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
334
335    // we should have all the regions nicely spread across the racks
336    int regionsOnRack1 = 0;
337    int regionsOnRack2 = 0;
338    int regionsOnRack3 = 0;
339    for (RegionInfo region : regions) {
340      if (rackManager.getRack(primaryRSMap.get(region)).equals("rack1")) {
341        regionsOnRack1++;
342      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack2")) {
343        regionsOnRack2++;
344      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack3")) {
345        regionsOnRack3++;
346      }
347    }
348    // Verify that the regions got placed in the way we expect (documented in
349    // FavoredNodeAssignmentHelper#placePrimaryRSAsRoundRobin)
350    checkNumRegions(regionCount, firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
351      regionsOnRack2, regionsOnRack3, assignmentMap);
352  }
353
354  private void checkNumRegions(int regionCount, int firstRackSize, int secondRackSize,
355    int thirdRackSize, int regionsOnRack1, int regionsOnRack2, int regionsOnRack3,
356    Map<ServerName, List<RegionInfo>> assignmentMap) {
357    // The regions should be distributed proportionately to the racksizes
358    // Verify the ordering was as expected by inserting the racks and regions
359    // in sorted maps. The keys being the racksize and numregions; values are
360    // the relative positions of the racksizes and numregions respectively
361    SortedMap<Integer, Integer> rackMap = new TreeMap<>();
362    rackMap.put(firstRackSize, 1);
363    rackMap.put(secondRackSize, 2);
364    rackMap.put(thirdRackSize, 3);
365    SortedMap<Integer, Integer> regionMap = new TreeMap<>();
366    regionMap.put(regionsOnRack1, 1);
367    regionMap.put(regionsOnRack2, 2);
368    regionMap.put(regionsOnRack3, 3);
369    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
370      regionsOnRack2, regionsOnRack3), rackMap.get(firstRackSize) == regionMap.get(regionsOnRack1));
371    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
372      regionsOnRack2, regionsOnRack3),
373      rackMap.get(secondRackSize) == regionMap.get(regionsOnRack2));
374    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
375      regionsOnRack2, regionsOnRack3), rackMap.get(thirdRackSize) == regionMap.get(regionsOnRack3));
376  }
377
378  private String printProportions(int firstRackSize, int secondRackSize, int thirdRackSize,
379    int regionsOnRack1, int regionsOnRack2, int regionsOnRack3) {
380    return "The rack sizes " + firstRackSize + " " + secondRackSize + " " + thirdRackSize + " "
381      + regionsOnRack1 + " " + regionsOnRack2 + " " + regionsOnRack3;
382  }
383
384  @Test
385  public void testConstrainedPlacement() throws Exception {
386    List<ServerName> servers = Lists.newArrayList();
387    servers.add(ServerName.valueOf("foo" + 1 + ":1234", -1));
388    servers.add(ServerName.valueOf("foo" + 2 + ":1234", -1));
389    servers.add(ServerName.valueOf("foo" + 15 + ":1234", -1));
390    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
391    helper.initialize();
392    assertTrue(helper.canPlaceFavoredNodes());
393
394    List<RegionInfo> regions = new ArrayList<>(20);
395    for (int i = 0; i < 20; i++) {
396      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
397        .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build());
398    }
399    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<ServerName, List<RegionInfo>>();
400    Map<RegionInfo, ServerName> primaryRSMap = new HashMap<RegionInfo, ServerName>();
401    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
402    assertTrue(primaryRSMap.size() == regions.size());
403    Map<RegionInfo, ServerName[]> secondaryAndTertiary =
404      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
405    assertEquals(regions.size(), secondaryAndTertiary.size());
406  }
407
408  @Test
409  public void testGetOneRandomRack() throws IOException {
410
411    Map<String, Integer> rackToServerCount = new HashMap<>();
412    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
413    for (String rack : rackList) {
414      rackToServerCount.put(rack, 2);
415    }
416    List<ServerName> servers = getServersFromRack(rackToServerCount);
417
418    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
419    helper.initialize();
420    assertTrue(helper.canPlaceFavoredNodes());
421
422    // Check we don't get a bad rack on any number of attempts
423    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
424      assertTrue(rackList.contains(helper.getOneRandomRack(Sets.newHashSet())));
425    }
426
427    // Check skipRack multiple times when an invalid rack is specified
428    Set<String> skipRacks = Sets.newHashSet("rack");
429    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
430      assertTrue(rackList.contains(helper.getOneRandomRack(skipRacks)));
431    }
432
433    // Check skipRack multiple times when an valid rack is specified
434    skipRacks = Sets.newHashSet("rack1");
435    Set<String> validRacks = Sets.newHashSet("rack2", "rack3");
436    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
437      assertTrue(validRacks.contains(helper.getOneRandomRack(skipRacks)));
438    }
439  }
440
441  @Test
442  public void testGetRandomServerSingleRack() throws IOException {
443
444    Map<String, Integer> rackToServerCount = new HashMap<>();
445    final String rack = "rack1";
446    rackToServerCount.put(rack, 4);
447    List<ServerName> servers = getServersFromRack(rackToServerCount);
448
449    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
450    helper.initialize();
451    assertTrue(helper.canPlaceFavoredNodes());
452
453    // Check we don't get a bad node on any number of attempts
454    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
455      ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet());
456      assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn));
457    }
458
459    // Check skipServers multiple times when an invalid server is specified
460    Set<ServerName> skipServers =
461      Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE));
462    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
463      ServerName sn = helper.getOneRandomServer(rack, skipServers);
464      assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn));
465    }
466
467    // Check skipRack multiple times when an valid servers are specified
468    ServerName skipSN = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
469    skipServers = Sets.newHashSet(skipSN);
470    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
471      ServerName sn = helper.getOneRandomServer(rack, skipServers);
472      assertNotEquals("Skip server should not be selected ", skipSN.getAddress(), sn.getAddress());
473      assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn));
474    }
475  }
476
477  @Test
478  public void testGetRandomServerMultiRack() throws IOException {
479    Map<String, Integer> rackToServerCount = new HashMap<>();
480    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
481    for (String rack : rackList) {
482      rackToServerCount.put(rack, 4);
483    }
484    List<ServerName> servers = getServersFromRack(rackToServerCount);
485
486    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
487    helper.initialize();
488    assertTrue(helper.canPlaceFavoredNodes());
489
490    // Check we don't get a bad node on any number of attempts
491    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
492      for (String rack : rackList) {
493        ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet());
494        assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack),
495          rackToServers.get(rack).contains(sn));
496      }
497    }
498
499    // Check skipServers multiple times when an invalid server is specified
500    Set<ServerName> skipServers =
501      Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE));
502    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
503      for (String rack : rackList) {
504        ServerName sn = helper.getOneRandomServer(rack, skipServers);
505        assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack),
506          rackToServers.get(rack).contains(sn));
507      }
508    }
509
510    // Check skipRack multiple times when an valid servers are specified
511    ServerName skipSN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
512    ServerName skipSN2 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE);
513    ServerName skipSN3 = ServerName.valueOf("foo20:1234", ServerName.NON_STARTCODE);
514    skipServers = Sets.newHashSet(skipSN1, skipSN2, skipSN3);
515    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
516      for (String rack : rackList) {
517        ServerName sn = helper.getOneRandomServer(rack, skipServers);
518        assertFalse("Skip server should not be selected ", skipServers.contains(sn));
519        assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack),
520          rackToServers.get(rack).contains(sn));
521      }
522    }
523  }
524
525  @Test
526  public void testGetFavoredNodes() throws IOException {
527    Map<String, Integer> rackToServerCount = new HashMap<>();
528    Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
529    for (String rack : rackList) {
530      rackToServerCount.put(rack, 4);
531    }
532    List<ServerName> servers = getServersFromRack(rackToServerCount);
533
534    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
535    helper.initialize();
536    assertTrue(helper.canPlaceFavoredNodes());
537
538    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
539      .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).build();
540
541    for (int maxattempts = 0; maxattempts < MAX_ATTEMPTS; maxattempts++) {
542      List<ServerName> fn = helper.generateFavoredNodes(region);
543      checkDuplicateFN(fn);
544      checkFNRacks(fn);
545    }
546  }
547
548  @Test
549  public void testGenMissingFavoredNodeOneRack() throws IOException {
550    Map<String, Integer> rackToServerCount = new HashMap<>();
551    final String rack = "rack1";
552    rackToServerCount.put(rack, 6);
553    List<ServerName> servers = getServersFromRack(rackToServerCount);
554
555    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
556    helper.initialize();
557    assertTrue(helper.canPlaceFavoredNodes());
558
559    ServerName snRack1SN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
560    ServerName snRack1SN2 = ServerName.valueOf("foo2:1234", ServerName.NON_STARTCODE);
561    ServerName snRack1SN3 = ServerName.valueOf("foo3:1234", ServerName.NON_STARTCODE);
562
563    List<ServerName> fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
564    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
565      checkDuplicateFN(fn, helper.generateMissingFavoredNode(fn));
566    }
567
568    fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
569    List<ServerName> skipServers = Lists.newArrayList(snRack1SN3);
570    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
571      ServerName genSN = helper.generateMissingFavoredNode(fn, skipServers);
572      checkDuplicateFN(fn, genSN);
573      assertNotEquals("Generated FN should not match excluded one", snRack1SN3, genSN);
574    }
575  }
576
577  @Test
578  public void testGenMissingFavoredNodeMultiRack() throws IOException {
579
580    ServerName snRack1SN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE);
581    ServerName snRack1SN2 = ServerName.valueOf("foo2:1234", ServerName.NON_STARTCODE);
582    ServerName snRack2SN1 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE);
583    ServerName snRack2SN2 = ServerName.valueOf("foo11:1234", ServerName.NON_STARTCODE);
584
585    Map<String, Integer> rackToServerCount = new HashMap<>();
586    Set<String> rackList = Sets.newHashSet("rack1", "rack2");
587    for (String rack : rackList) {
588      rackToServerCount.put(rack, 4);
589    }
590    List<ServerName> servers = getServersFromRack(rackToServerCount);
591
592    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
593    helper.initialize();
594    assertTrue(helper.canPlaceFavoredNodes());
595
596    List<ServerName> fn = Lists.newArrayList(snRack1SN1, snRack1SN2);
597    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
598      ServerName genSN = helper.generateMissingFavoredNode(fn);
599      checkDuplicateFN(fn, genSN);
600      checkFNRacks(fn, genSN);
601    }
602
603    fn = Lists.newArrayList(snRack1SN1, snRack2SN1);
604    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
605      ServerName genSN = helper.generateMissingFavoredNode(fn);
606      checkDuplicateFN(fn, genSN);
607      checkFNRacks(fn, genSN);
608    }
609
610    fn = Lists.newArrayList(snRack1SN1, snRack2SN1);
611    List<ServerName> skipServers = Lists.newArrayList(snRack2SN2);
612    for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) {
613      ServerName genSN = helper.generateMissingFavoredNode(fn, skipServers);
614      checkDuplicateFN(fn, genSN);
615      checkFNRacks(fn, genSN);
616      assertNotEquals("Generated FN should not match excluded one", snRack2SN2, genSN);
617    }
618  }
619
620  private void checkDuplicateFN(List<ServerName> fnList, ServerName genFN) {
621    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
622    assertNotNull("Generated FN can't be null", genFN);
623    favoredNodes.add(genFN);
624    assertEquals("Did not find expected number of favored nodes",
625      FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size());
626  }
627
628  private void checkDuplicateFN(List<ServerName> fnList) {
629    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
630    assertEquals("Did not find expected number of favored nodes",
631      FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size());
632  }
633
634  private void checkFNRacks(List<ServerName> fnList, ServerName genFN) {
635    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
636    favoredNodes.add(genFN);
637    Set<String> racks = Sets.newHashSet();
638    for (ServerName sn : favoredNodes) {
639      racks.add(rackManager.getRack(sn));
640    }
641    assertTrue("FN should be spread atleast across 2 racks", racks.size() >= 2);
642  }
643
644  private void checkFNRacks(List<ServerName> fnList) {
645    Set<ServerName> favoredNodes = Sets.newHashSet(fnList);
646    Set<String> racks = Sets.newHashSet();
647    for (ServerName sn : favoredNodes) {
648      racks.add(rackManager.getRack(sn));
649    }
650    assertTrue("FN should be spread atleast across 2 racks", racks.size() >= 2);
651  }
652}