/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.balancer;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

@Category({MasterTests.class, MediumTests.class})
public class TestBaseLoadBalancer extends BalancerTestBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestBaseLoadBalancer.class);

  private static LoadBalancer loadBalancer;
  private static final Logger LOG = LoggerFactory.getLogger(TestBaseLoadBalancer.class);
  private static final ServerName master = ServerName.valueOf("fake-master", 0, 1L);
  private static RackManager rackManager;
  private static final int NUM_SERVERS = 15;
  private static ServerName[] servers = new ServerName[NUM_SERVERS];

  int[][] regionsAndServersMocks = new int[][] {
      // { num regions, num servers }
      new int[] { 0, 0 }, new int[] { 0, 1 }, new int[] { 1, 1 }, new int[] { 2, 1 },
      new int[] { 10, 1 }, new int[] { 1, 2 }, new int[] { 2, 2 }, new int[] { 3, 2 },
      new int[] { 1, 3 }, new int[] { 2, 3 }, new int[] { 3, 3 }, new int[] { 25, 3 },
      new int[] { 2, 10 }, new int[] { 2, 100 }, new int[] { 12, 10 }, new int[] { 12, 100 }, };

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void beforeAllTests() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
    loadBalancer = new MockBalancer();
    loadBalancer.setClusterInfoProvider(new DummyClusterInfoProvider(conf));

    // Set up the rack topologies (5 machines per rack)
    rackManager = mock(RackManager.class);
    for (int i = 0; i < NUM_SERVERS; i++) {
      servers[i] = ServerName.valueOf("foo" + i + ":1234", -1);
      if (i < 5) {
        when(rackManager.getRack(servers[i])).thenReturn("rack1");
      }
      if (i >= 5 && i < 10) {
        when(rackManager.getRack(servers[i])).thenReturn("rack2");
      }
      if (i >= 10) {
        when(rackManager.getRack(servers[i])).thenReturn("rack3");
      }
    }
  }

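  /**
   * Minimal balancer stub used by these tests: balanceTable returns null because the tests below
   * exercise only the assignment and cluster-state logic inherited from BaseLoadBalancer.
   */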
  public static class MockBalancer extends BaseLoadBalancer {

    @Override
    protected List<RegionPlan> balanceTable(TableName tableName,
        Map<ServerName, List<RegionInfo>> loadOfOneTable) {
      return null;
    }
  }

  /**
   * Tests the bulk assignment used during cluster startup.
   * <p>
   * Round-robin assignment should yield a balanced cluster, so the same invariant as for the load
   * balancer holds: every server ends up with either floor(avg) or ceil(avg) regions.
   */
  @Test
  public void testBulkAssignment() throws Exception {
    List<ServerName> tmp = getListOfServerNames(randomServers(5, 0));
    List<RegionInfo> hris = randomRegions(20);
    hris.add(RegionInfoBuilder.FIRST_META_REGIONINFO);
    tmp.add(master);
    Map<ServerName, List<RegionInfo>> plans = loadBalancer.roundRobinAssignment(hris, tmp);
    int totalRegion = 0;
    for (List<RegionInfo> regions : plans.values()) {
      totalRegion += regions.size();
    }
    assertEquals(hris.size(), totalRegion);
    for (int[] mock : regionsAndServersMocks) {
      LOG.debug("testBulkAssignment with " + mock[0] + " regions and " + mock[1] + " servers");
      List<RegionInfo> regions = randomRegions(mock[0]);
      List<ServerAndLoad> servers = randomServers(mock[1], 0);
      List<ServerName> list = getListOfServerNames(servers);
      Map<ServerName, List<RegionInfo>> assignments =
          loadBalancer.roundRobinAssignment(regions, list);
      float average = (float) regions.size() / servers.size();
      int min = (int) Math.floor(average);
      int max = (int) Math.ceil(average);
      if (assignments != null && !assignments.isEmpty()) {
        for (List<RegionInfo> regionList : assignments.values()) {
          assertTrue(regionList.size() == min || regionList.size() == max);
        }
      }
      returnRegions(regions);
      returnServers(list);
    }
  }
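
  /**
   * Illustrative sketch only (not invoked by the tests): the round-robin invariant checked inline
   * above, factored into a helper. The helper name and parameters are hypothetical and not part of
   * any balancer API.
   */
  private static void assertRoundRobinBalanced(Map<ServerName, List<RegionInfo>> assignments,
      int numRegions, int numServers) {
    float average = (float) numRegions / numServers;
    int min = (int) Math.floor(average);
    int max = (int) Math.ceil(average);
    for (List<RegionInfo> regionList : assignments.values()) {
      // every server must hold either floor(avg) or ceil(avg) regions
      assertTrue(regionList.size() == min || regionList.size() == max);
    }
  }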

  /**
   * Tests the cluster startup bulk assignment which attempts to retain assignment info.
   */
  @Test
  public void testRetainAssignment() throws Exception {
    // Test the simple case where all of the same servers are still present
    List<ServerAndLoad> servers = randomServers(10, 10);
    List<RegionInfo> regions = randomRegions(100);
    Map<RegionInfo, ServerName> existing = new TreeMap<>(RegionInfo.COMPARATOR);
    for (int i = 0; i < regions.size(); i++) {
      ServerName sn = servers.get(i % servers.size()).getServerName();
      // The old server would have had the same host and port, but a different start code!
      ServerName snWithOldStartCode =
          ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 10);
      existing.put(regions.get(i), snWithOldStartCode);
    }
    List<ServerName> listOfServerNames = getListOfServerNames(servers);
    Map<ServerName, List<RegionInfo>> assignment =
        loadBalancer.retainAssignment(existing, listOfServerNames);
    assertRetainedAssignment(existing, listOfServerNames, assignment);

    // Include two new servers that were not there before
    List<ServerAndLoad> servers2 = new ArrayList<>(servers);
    servers2.add(randomServer(10));
    servers2.add(randomServer(10));
    listOfServerNames = getListOfServerNames(servers2);
    assignment = loadBalancer.retainAssignment(existing, listOfServerNames);
    assertRetainedAssignment(existing, listOfServerNames, assignment);

    // Remove two of the servers that were previously there
    List<ServerAndLoad> servers3 = new ArrayList<>(servers);
    servers3.remove(0);
    servers3.remove(0);
    listOfServerNames = getListOfServerNames(servers3);
    assignment = loadBalancer.retainAssignment(existing, listOfServerNames);
    assertRetainedAssignment(existing, listOfServerNames, assignment);
  }

  @Test
  public void testRandomAssignment() throws Exception {
    for (int i = 1; i != 5; ++i) {
      LOG.info("run testRandomAssignment() with idle servers: " + i);
      testRandomAssignment(i);
    }
  }

  private void testRandomAssignment(int numberOfIdleServers) throws Exception {
    assert numberOfIdleServers > 0;
    List<ServerName> idleServers = new ArrayList<>(numberOfIdleServers);
    for (int i = 0; i != numberOfIdleServers; ++i) {
      idleServers.add(ServerName.valueOf("server-" + i, 1000, 1L));
    }
    List<ServerName> allServers = new ArrayList<>(idleServers.size() + 1);
    allServers.add(ServerName.valueOf("server-" + numberOfIdleServers, 1000, 1L));
    allServers.addAll(idleServers);
    LoadBalancer balancer = new MockBalancer();
    Configuration conf = HBaseConfiguration.create();
    conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
    balancer.setClusterInfoProvider(new DummyClusterInfoProvider(conf) {

      @Override
      public List<ServerName> getOnlineServersListWithPredicator(List<ServerName> servers,
        Predicate<ServerMetrics> filter) {
        return idleServers;
      }
    });
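    // The stubbed provider above reports only the idle servers, so randomAssignment below is
    // expected to choose from idleServers whenever the candidate list is non-empty.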
    RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setStartKey(Bytes.toBytes("key1"))
        .setEndKey(Bytes.toBytes("key2"))
        .setSplit(false)
        .setRegionId(100)
        .build();
    assertNull(balancer.randomAssignment(hri1, Collections.emptyList()));
    assertNull(balancer.randomAssignment(hri1, null));
    for (int i = 0; i != 3; ++i) {
      ServerName sn = balancer.randomAssignment(hri1, allServers);
      assertTrue("actual: " + sn + ", expected one of: " + idleServers, idleServers.contains(sn));
    }
  }

  @Test
  public void testRegionAvailability() throws Exception {
    // Create a cluster with a few servers, assign them to specific racks
    // then assign some regions. The test checks whether moving a replica
    // from one node to another node or rack lowers the availability of the
    // region or not

    List<RegionInfo> list0 = new ArrayList<>();
    List<RegionInfo> list1 = new ArrayList<>();
    List<RegionInfo> list2 = new ArrayList<>();
    // create a region (region1)
    RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setStartKey(Bytes.toBytes("key1"))
        .setEndKey(Bytes.toBytes("key2"))
        .setSplit(false)
        .setRegionId(100)
        .build();
    // create a replica of the region (replica_of_region1)
    RegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
    // create a second region (region2)
    RegionInfo hri3 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setStartKey(Bytes.toBytes("key2"))
        .setEndKey(Bytes.toBytes("key3"))
        .setSplit(false)
        .setRegionId(101)
        .build();
    list0.add(hri1); // only region1
    list1.add(hri2); // only replica_of_region1
    list2.add(hri3); // only region2
    Map<ServerName, List<RegionInfo>> clusterState = new LinkedHashMap<>();
    clusterState.put(servers[0], list0); // servers[0] hosts region1
    clusterState.put(servers[1], list1); // servers[1] hosts replica_of_region1
    clusterState.put(servers[2], list2); // servers[2] hosts region2
    // create a cluster with the above clusterState. The constructor assigns server indices in the
    // insertion order of the clusterState map (hence the LinkedHashMap), and the same holds for
    // the region lists
    BalancerClusterState cluster = new BalancerClusterState(clusterState, null, null, rackManager);
    // check whether a move of region1 from servers[0] to servers[1] would lower
    // the availability of region1
    assertTrue(cluster.wouldLowerAvailability(hri1, servers[1]));
    // check whether a move of region1 from servers[0] to servers[2] would lower
    // the availability of region1
    assertFalse(cluster.wouldLowerAvailability(hri1, servers[2]));
    // check whether a move of replica_of_region1 from servers[1] to servers[2] would lower
    // the availability of replica_of_region1
    assertFalse(cluster.wouldLowerAvailability(hri2, servers[2]));
    // check whether a move of region2 from servers[2] to servers[1] would lower
    // the availability of region2
    assertFalse(cluster.wouldLowerAvailability(hri3, servers[1]));

    // now let's have servers[1] host replica_of_region2 as well
    list1.add(RegionReplicaUtil.getRegionInfoForReplica(hri3, 1));
    // create a new clusterState with the above change
    cluster = new BalancerClusterState(clusterState, null, null, rackManager);
    // now check whether a move of region2 from servers[2] to servers[1] would lower
    // the availability of region2
    assertTrue(cluster.wouldLowerAvailability(hri3, servers[1]));

    // start over again
    clusterState.clear();
    clusterState.put(servers[0], list0); // servers[0], rack1 hosts region1
    clusterState.put(servers[5], list1); // servers[5], rack2 hosts replica_of_region1 and
                                         // replica_of_region2
    clusterState.put(servers[6], list2); // servers[6], rack2 hosts region2
    clusterState.put(servers[10], new ArrayList<>()); // servers[10], rack3 hosts no regions
    // create a cluster with the above clusterState
    cluster = new BalancerClusterState(clusterState, null, null, rackManager);
    // check whether a move of region1 from servers[0],rack1 to servers[6],rack2 would
    // lower the availability, since rack2 already hosts replica_of_region1
    assertTrue(cluster.wouldLowerAvailability(hri1, servers[6]));

    // now create a cluster without the rack manager
    cluster = new BalancerClusterState(clusterState, null, null, null);
    // now repeat the check: without rack information, a move of region1 from servers[0] to
    // servers[6] would not lower the availability
    assertFalse(cluster.wouldLowerAvailability(hri1, servers[6]));
  }
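
  // Hedged summary of the rule the checks above exercise: placing a region on a server whose
  // host (or, when rack information is available and there is more than one rack, whose rack)
  // already holds another replica of that region lowers its availability; without a RackManager
  // only server/host colocation counts.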

  @Test
  public void testRegionAvailabilityWithRegionMoves() throws Exception {
    List<RegionInfo> list0 = new ArrayList<>();
    List<RegionInfo> list1 = new ArrayList<>();
    List<RegionInfo> list2 = new ArrayList<>();
    // create a region (region1)
    RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setStartKey(Bytes.toBytes("key1"))
        .setEndKey(Bytes.toBytes("key2"))
        .setSplit(false)
        .setRegionId(100)
        .build();
    // create a replica of the region (replica_of_region1)
    RegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
    // create a second region (region2)
    RegionInfo hri3 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setStartKey(Bytes.toBytes("key2"))
        .setEndKey(Bytes.toBytes("key3"))
        .setSplit(false)
        .setRegionId(101)
        .build();
    list0.add(hri1); // only region1
    list1.add(hri2); // only replica_of_region1
    list2.add(hri3); // only region2
    Map<ServerName, List<RegionInfo>> clusterState = new LinkedHashMap<>();
    clusterState.put(servers[0], list0); // servers[0] hosts region1
    clusterState.put(servers[1], list1); // servers[1] hosts replica_of_region1
    clusterState.put(servers[2], list2); // servers[2] hosts region2
    // create a cluster with the above clusterState. The constructor assigns server indices in the
    // insertion order of the clusterState map (hence the LinkedHashMap).
    BalancerClusterState cluster = new BalancerClusterState(clusterState, null, null, rackManager);
    // check whether moving region1 from servers[0] to servers[2] would lower availability
    assertFalse(cluster.wouldLowerAvailability(hri1, servers[2]));

    // now move region1 from servers[0] to servers[2]
    cluster.doAction(new MoveRegionAction(0, 0, 2));
    // repeat the check: region1 now lives on servers[2], so placing it there would lower
    // availability
    assertTrue(cluster.wouldLowerAvailability(hri1, servers[2]));

    // start over again
    clusterState.clear();
    List<RegionInfo> list3 = new ArrayList<>();
    RegionInfo hri4 = RegionReplicaUtil.getRegionInfoForReplica(hri3, 1);
    list3.add(hri4);
    clusterState.put(servers[0], list0); // servers[0], rack1 hosts region1
    clusterState.put(servers[5], list1); // servers[5], rack2 hosts replica_of_region1
    clusterState.put(servers[6], list2); // servers[6], rack2 hosts region2
    clusterState.put(servers[12], list3); // servers[12], rack3 hosts replica_of_region2
    // create a cluster with the above clusterState
    cluster = new BalancerClusterState(clusterState, null, null, rackManager);
    // check whether a move of replica_of_region2 from servers[12],rack3 to servers[0],rack1 would
    // lower the availability
    assertFalse(cluster.wouldLowerAvailability(hri4, servers[0]));
    // now move region2 from servers[6],rack2 to servers[0],rack1
    cluster.doAction(new MoveRegionAction(2, 2, 0));
    // now repeat the check: a move of replica_of_region2 from servers[12],rack3 to
    // servers[0],rack1 would lower the availability, since region2 now lives on servers[0]
    assertTrue(cluster.wouldLowerAvailability(hri4, servers[0]));
  }

  private List<ServerName> getListOfServerNames(final List<ServerAndLoad> sals) {
    return sals.stream().map(ServerAndLoad::getServerName).collect(Collectors.toList());
  }

  /**
   * Asserts a valid retained assignment plan.
   * <p>
   * Must meet the following conditions:
   * <ul>
   * <li>Every input region has an assignment, and to an online server
   * <li>If a region had an existing assignment to a server with the same address as a currently
   * online server, it will be assigned to it
   * </ul>
   */
  private void assertRetainedAssignment(Map<RegionInfo, ServerName> existing,
      List<ServerName> servers, Map<ServerName, List<RegionInfo>> assignment) {
    // Verify condition 1: every region is assigned, and to an online server
    Set<ServerName> onlineServerSet = new TreeSet<>(servers);
    Set<RegionInfo> assignedRegions = new TreeSet<>(RegionInfo.COMPARATOR);
    for (Map.Entry<ServerName, List<RegionInfo>> a : assignment.entrySet()) {
      assertTrue("Region assigned to server that was not listed as online",
        onlineServerSet.contains(a.getKey()));
      assignedRegions.addAll(a.getValue());
    }
    assertEquals(existing.size(), assignedRegions.size());

    // Verify condition 2: if a region's previous host is still online, it is assigned back to
    // that host
    Set<String> onlineHostNames = new TreeSet<>();
    for (ServerName s : servers) {
      onlineHostNames.add(s.getHostname());
    }

    for (Map.Entry<ServerName, List<RegionInfo>> a : assignment.entrySet()) {
      ServerName assignedTo = a.getKey();
      for (RegionInfo r : a.getValue()) {
        ServerName address = existing.get(r);
        if (address != null && onlineHostNames.contains(address.getHostname())) {
          // this region was previously assigned somewhere, and that host is still online,
          // so it should be re-assigned to the same host
          assertEquals(address.getHostname(), assignedTo.getHostname());
        }
      }
    }
  }

  @Test
  public void testClusterServersWithSameHostPort() {
    // tests whether a BalancerClusterState can be constructed with servers
    // sharing the same host and port
    List<ServerName> servers = getListOfServerNames(randomServers(10, 10));
    List<RegionInfo> regions = randomRegions(101);
    Map<ServerName, List<RegionInfo>> clusterState = new TreeMap<>();

    assignRegions(regions, servers, clusterState);

    // construct another list of servers, but sharing the same hosts and ports
    List<ServerName> oldServers = new ArrayList<>(servers.size());
    for (ServerName sn : servers) {
      // The old server would have had the same host and port, but a different start code!
      oldServers.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 10));
    }

    regions = randomRegions(9); // some more regions
    assignRegions(regions, oldServers, clusterState);

    // should not throw an exception:
    BalancerClusterState cluster = new BalancerClusterState(clusterState, null, null, null);
    assertEquals(101 + 9, cluster.numRegions);
    assertEquals(10, cluster.numServers); // only 10 servers because they share the same host + port

    // test a move
    ServerName sn = oldServers.get(0);
    int r0 = ArrayUtils.indexOf(cluster.regions, clusterState.get(sn).get(0));
    int f0 = cluster.serversToIndex.get(sn.getAddress());
    int t0 = cluster.serversToIndex.get(servers.get(1).getAddress());
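    // serversToIndex is keyed by the server's Address (host:port), so the old ServerName above
    // resolves to the same server slot as its currently-online counterpart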
    cluster.doAction(new MoveRegionAction(r0, f0, t0));
  }

  private void assignRegions(List<RegionInfo> regions, List<ServerName> servers,
      Map<ServerName, List<RegionInfo>> clusterState) {
    for (int i = 0; i < regions.size(); i++) {
      // round-robin the regions onto the servers, merging into any existing clusterState
      ServerName sn = servers.get(i % servers.size());
      clusterState.computeIfAbsent(sn, k -> new ArrayList<>()).add(regions.get(i));
    }
  }

  @Test
  public void testClusterRegionLocations() {
    // tests whether region locations are handled correctly in BalancerClusterState
    List<ServerName> servers = getListOfServerNames(randomServers(10, 10));
    List<RegionInfo> regions = randomRegions(101);
    Map<ServerName, List<RegionInfo>> clusterState = new HashMap<>();

    assignRegions(regions, servers, clusterState);

    // mock block locality for some regions
    RegionHDFSBlockLocationFinder locationFinder = mock(RegionHDFSBlockLocationFinder.class);
    // block locality: region:0  => {server:0}
    //                 region:1  => {server:0, server:1}
    //                 region:42 => {server:4, server:9, server:5}
    when(locationFinder.getTopBlockLocations(regions.get(0))).thenReturn(
        Lists.newArrayList(servers.get(0)));
    when(locationFinder.getTopBlockLocations(regions.get(1))).thenReturn(
        Lists.newArrayList(servers.get(0), servers.get(1)));
    when(locationFinder.getTopBlockLocations(regions.get(42))).thenReturn(
        Lists.newArrayList(servers.get(4), servers.get(9), servers.get(5)));
    // this server does not exist in the clusterState
    when(locationFinder.getTopBlockLocations(regions.get(43)))
      .thenReturn(Lists.newArrayList(ServerName.valueOf("foo", 0, 0)));

    BalancerClusterState cluster =
      new BalancerClusterState(clusterState, null, locationFinder, null);

    // linear index lookups are ok, this is just a test
    int r0 = ArrayUtils.indexOf(cluster.regions, regions.get(0));
    int r1 = ArrayUtils.indexOf(cluster.regions, regions.get(1));
    int r10 = ArrayUtils.indexOf(cluster.regions, regions.get(10));
    int r42 = ArrayUtils.indexOf(cluster.regions, regions.get(42));
    int r43 = ArrayUtils.indexOf(cluster.regions, regions.get(43));

    int s0 = cluster.serversToIndex.get(servers.get(0).getAddress());
    int s1 = cluster.serversToIndex.get(servers.get(1).getAddress());
    int s4 = cluster.serversToIndex.get(servers.get(4).getAddress());
    int s5 = cluster.serversToIndex.get(servers.get(5).getAddress());
    int s9 = cluster.serversToIndex.get(servers.get(9).getAddress());

    // region 0 locations
    assertEquals(1, cluster.regionLocations[r0].length);
    assertEquals(s0, cluster.regionLocations[r0][0]);

    // region 1 locations
    assertEquals(2, cluster.regionLocations[r1].length);
    assertEquals(s0, cluster.regionLocations[r1][0]);
    assertEquals(s1, cluster.regionLocations[r1][1]);

    // region 10 locations (no locality was mocked for it)
    assertEquals(0, cluster.regionLocations[r10].length);

    // region 42 locations
    assertEquals(3, cluster.regionLocations[r42].length);
    assertEquals(s4, cluster.regionLocations[r42][0]);
    assertEquals(s9, cluster.regionLocations[r42][1]);
    assertEquals(s5, cluster.regionLocations[r42][2]);

    // region 43 locations: its only favored server is not in the cluster, hence index -1
    assertEquals(1, cluster.regionLocations[r43].length);
    assertEquals(-1, cluster.regionLocations[r43][0]);
  }
}