/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.balancer;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.stream.Collectors;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

@Category({ MasterTests.class, MediumTests.class })
public class TestBaseLoadBalancer extends BalancerTestBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestBaseLoadBalancer.class);

  private static MockBalancer loadBalancer;
  private static final Logger LOG = LoggerFactory.getLogger(TestBaseLoadBalancer.class);
  private static final ServerName master = ServerName.valueOf("fake-master", 0, 1L);
  private static RackManager rackManager;
  private static final int NUM_SERVERS = 15;
  private static ServerName[] servers = new ServerName[NUM_SERVERS];

  int[][] regionsAndServersMocks = new int[][] {
    // { num regions, num servers }
    new int[] { 0, 0 }, new int[] { 0, 1 }, new int[] { 1, 1 }, new int[] { 2, 1 },
    new int[] { 10, 1 }, new int[] { 1, 2 }, new int[] { 2, 2 }, new int[] { 3, 2 },
    new int[] { 1, 3 }, new int[] { 2, 3 }, new int[] { 3, 3 }, new int[] { 25, 3 },
    new int[] { 2, 10 }, new int[] { 2, 100 }, new int[] { 12, 10 }, new int[] { 12, 100 }, };

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void beforeAllTests() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
    loadBalancer = new MockBalancer();
    MasterServices st = mock(MasterServices.class);
    when(st.getServerName()).thenReturn(master);
    when(st.getConfiguration()).thenReturn(conf);
    loadBalancer.setMasterServices(st);

    // Set up the rack topologies (5 machines per rack)
    rackManager = mock(RackManager.class);
    for (int i = 0; i < NUM_SERVERS; i++) {
      servers[i] = ServerName.valueOf("foo" + i + ":1234", -1);
      if (i < 5) {
        when(rackManager.getRack(servers[i])).thenReturn("rack1");
      }
      if (i >= 5 && i < 10) {
        when(rackManager.getRack(servers[i])).thenReturn("rack2");
      }
      if (i >= 10) {
        when(rackManager.getRack(servers[i])).thenReturn("rack3");
      }
    }
  }

  public static class MockBalancer extends BaseLoadBalancer {

    @Override
    protected List<RegionPlan> balanceTable(TableName tableName,
      Map<ServerName, List<RegionInfo>> loadOfOneTable) {
      return null;
    }
  }

  /**
   * Tests the bulk assignment used during cluster startup (round-robin). It should yield a
   * balanced cluster, so the same invariant as the load balancer holds: every server ends up
   * holding either floor(avg) or ceil(avg) regions.
   */
  @Test
  public void testBulkAssignment() throws Exception {
    List<ServerName> tmp = getListOfServerNames(randomServers(5, 0));
    List<RegionInfo> hris = randomRegions(20);
    hris.add(RegionInfoBuilder.FIRST_META_REGIONINFO);
    tmp.add(master);
    Map<ServerName, List<RegionInfo>> plans = loadBalancer.roundRobinAssignment(hris, tmp);
    if (LoadBalancer.isTablesOnMaster(loadBalancer.getConf())) {
      assertTrue(plans.get(master).contains(RegionInfoBuilder.FIRST_META_REGIONINFO));
      assertEquals(1, plans.get(master).size());
    }
    int totalRegion = 0;
    for (List<RegionInfo> regions : plans.values()) {
      totalRegion += regions.size();
    }
    assertEquals(hris.size(), totalRegion);
    for (int[] mock : regionsAndServersMocks) {
      LOG.debug("testBulkAssignment with " + mock[0] + " regions and " + mock[1] + " servers");
      List<RegionInfo> regions = randomRegions(mock[0]);
      List<ServerAndLoad> servers = randomServers(mock[1], 0);
      List<ServerName> list = getListOfServerNames(servers);
      Map<ServerName, List<RegionInfo>> assignments =
        loadBalancer.roundRobinAssignment(regions, list);
      float average = (float) regions.size() / servers.size();
      int min = (int) Math.floor(average);
      int max = (int) Math.ceil(average);
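      // Worked example: 25 regions over 3 servers gives an average of 8.33, so a balanced
      // round-robin assignment should leave every server with either 8 or 9 regions.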
      if (assignments != null && !assignments.isEmpty()) {
        for (List<RegionInfo> regionList : assignments.values()) {
          assertTrue(regionList.size() == min || regionList.size() == max);
        }
      }
      returnRegions(regions);
      returnServers(list);
    }
  }

  /**
   * Tests the cluster startup bulk assignment, which attempts to retain existing assignment info.
   */
  @Test
  public void testRetainAssignment() throws Exception {
    // Simple case: all of the same servers are still there
    List<ServerAndLoad> servers = randomServers(10, 10);
    List<RegionInfo> regions = randomRegions(100);
    Map<RegionInfo, ServerName> existing = new TreeMap<>(RegionInfo.COMPARATOR);
    for (int i = 0; i < regions.size(); i++) {
      ServerName sn = servers.get(i % servers.size()).getServerName();
      // The old server would have had the same host and port, but a different
      // start code!
      ServerName snWithOldStartCode =
        ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 10);
      existing.put(regions.get(i), snWithOldStartCode);
    }
    List<ServerName> listOfServerNames = getListOfServerNames(servers);
    Map<ServerName, List<RegionInfo>> assignment =
      loadBalancer.retainAssignment(existing, listOfServerNames);
    assertRetainedAssignment(existing, listOfServerNames, assignment);

    // Include two new servers that were not there before
    List<ServerAndLoad> servers2 = new ArrayList<>(servers);
    servers2.add(randomServer(10));
    servers2.add(randomServer(10));
    listOfServerNames = getListOfServerNames(servers2);
    assignment = loadBalancer.retainAssignment(existing, listOfServerNames);
    assertRetainedAssignment(existing, listOfServerNames, assignment);

    // Remove two of the servers that were previously there
    List<ServerAndLoad> servers3 = new ArrayList<>(servers);
    servers3.remove(0);
    servers3.remove(0);
    listOfServerNames = getListOfServerNames(servers3);
    assignment = loadBalancer.retainAssignment(existing, listOfServerNames);
    assertRetainedAssignment(existing, listOfServerNames, assignment);
  }

  @Test
  public void testRandomAssignment() throws Exception {
    for (int i = 1; i != 5; ++i) {
      LOG.info("run testRandomAssignment() with idle servers:" + i);
      testRandomAssignment(i);
    }
  }

  private void testRandomAssignment(int numberOfIdleServers) throws Exception {
    assert numberOfIdleServers > 0;
    List<ServerName> idleServers = new ArrayList<>(numberOfIdleServers);
    for (int i = 0; i != numberOfIdleServers; ++i) {
      idleServers.add(ServerName.valueOf("server-" + i, 1000, 1L));
    }
    List<ServerName> allServers = new ArrayList<>(idleServers.size() + 1);
    allServers.add(ServerName.valueOf("server-" + numberOfIdleServers, 1000, 1L));
    allServers.addAll(idleServers);
    LoadBalancer balancer = new MockBalancer() {
      @Override
      public boolean shouldBeOnMaster(RegionInfo region) {
        return false;
      }
    };
    Configuration conf = HBaseConfiguration.create();
    conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
    ServerManager sm = mock(ServerManager.class);
    when(sm.getOnlineServersListWithPredicator(anyList(), any())).thenReturn(idleServers);
    MasterServices services = mock(MasterServices.class);
    when(services.getServerManager()).thenReturn(sm);
    when(services.getConfiguration()).thenReturn(conf);
    balancer.setMasterServices(services);
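    // The mocked ServerManager above reports only the idle servers, so the random assignments
    // below are expected to land on one of them rather than on the extra, non-idle server.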
    RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
      .setStartKey("key1".getBytes()).setEndKey("key2".getBytes()).setSplit(false).setRegionId(100)
      .build();
    assertNull(balancer.randomAssignment(hri1, Collections.emptyList()));
    assertNull(balancer.randomAssignment(hri1, null));
    for (int i = 0; i != 3; ++i) {
      ServerName sn = balancer.randomAssignment(hri1, allServers);
      assertTrue("actual:" + sn + ", expected one of:" + idleServers, idleServers.contains(sn));
    }
  }

  @Test
  public void testRegionAvailability() throws Exception {
    // Create a cluster with a few servers, assign them to specific racks, and
    // then assign some regions. The test checks whether moving a replica from
    // one node to another specific node or rack lowers the availability of the
    // region or not.

    List<RegionInfo> list0 = new ArrayList<>();
    List<RegionInfo> list1 = new ArrayList<>();
    List<RegionInfo> list2 = new ArrayList<>();
    // create a region (region1)
    RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
      .setStartKey("key1".getBytes()).setEndKey("key2".getBytes()).setSplit(false).setRegionId(100)
      .build();
    // create a replica of the region (replica_of_region1)
    RegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
    // create a second region (region2)
    RegionInfo hri3 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
      .setStartKey("key2".getBytes()).setEndKey("key3".getBytes()).setSplit(false).setRegionId(101)
      .build();
    list0.add(hri1); // only region1
    list1.add(hri2); // only replica_of_region1
    list2.add(hri3); // only region2
    Map<ServerName, List<RegionInfo>> clusterState = new LinkedHashMap<>();
    clusterState.put(servers[0], list0); // servers[0] hosts region1
    clusterState.put(servers[1], list1); // servers[1] hosts replica_of_region1
    clusterState.put(servers[2], list2); // servers[2] hosts region2
    // create a cluster with the above clusterState. The way the cluster is
    // created (constructor code) ensures that the server indices follow the
    // order in which the servers are inserted into the clusterState map
    // (using a LinkedHashMap is important). The same applies to the region lists.
    BalancerClusterState cluster = new BalancerClusterState(clusterState, null, null, rackManager);
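    // wouldLowerAvailability(region, server) is expected to return true when placing the region
    // on that server would co-locate it with another replica of the same region on the same
    // server, host or rack (when rack information is available); the checks below exercise this.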
    // check whether a move of region1 from servers[0] to servers[1] would lower
    // the availability of region1
    assertTrue(cluster.wouldLowerAvailability(hri1, servers[1]));
    // check whether a move of region1 from servers[0] to servers[2] would lower
    // the availability of region1
    assertTrue(!cluster.wouldLowerAvailability(hri1, servers[2]));
    // check whether a move of replica_of_region1 from servers[1] to servers[2] would lower
    // the availability of replica_of_region1
    assertTrue(!cluster.wouldLowerAvailability(hri2, servers[2]));
    // check whether a move of region2 from servers[2] to servers[1] would lower
    // the availability of region2
    assertTrue(!cluster.wouldLowerAvailability(hri3, servers[1]));

    // now let's have servers[1] host replica_of_region2
    list1.add(RegionReplicaUtil.getRegionInfoForReplica(hri3, 1));
    // create a new clusterState with the above change
    cluster = new BalancerClusterState(clusterState, null, null, rackManager);
    // now check whether a move of region2 from servers[2] to servers[1] would lower
    // the availability of region2, now that servers[1] hosts replica_of_region2
    assertTrue(cluster.wouldLowerAvailability(hri3, servers[1]));

    // start over again
    clusterState.clear();
    clusterState.put(servers[0], list0); // servers[0], rack1 hosts region1
    clusterState.put(servers[5], list1); // servers[5], rack2 hosts replica_of_region1 and
                                         // replica_of_region2
    clusterState.put(servers[6], list2); // servers[6], rack2 hosts region2
    clusterState.put(servers[10], new ArrayList<>()); // servers[10], rack3 hosts no region
    // create a cluster with the above clusterState
    cluster = new BalancerClusterState(clusterState, null, null, rackManager);
    // check whether a move of region1 from servers[0],rack1 to servers[6],rack2 would
    // lower the availability
    assertTrue(cluster.wouldLowerAvailability(hri1, servers[6]));

    // now create a cluster without the rack manager
    cluster = new BalancerClusterState(clusterState, null, null, null);
    // now repeat check whether a move of region1 from servers[0] to servers[6] would
    // lower the availability
    assertTrue(!cluster.wouldLowerAvailability(hri1, servers[6]));
  }

  @Test
  public void testRegionAvailabilityWithRegionMoves() throws Exception {
    List<RegionInfo> list0 = new ArrayList<>();
    List<RegionInfo> list1 = new ArrayList<>();
    List<RegionInfo> list2 = new ArrayList<>();
    // create a region (region1)
    RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
      .setStartKey("key1".getBytes()).setEndKey("key2".getBytes()).setSplit(false).setRegionId(100)
      .build();
    // create a replica of the region (replica_of_region1)
    RegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
    // create a second region (region2)
    RegionInfo hri3 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
      .setStartKey("key2".getBytes()).setEndKey("key3".getBytes()).setSplit(false).setRegionId(101)
      .build();
    list0.add(hri1); // only region1
    list1.add(hri2); // only replica_of_region1
    list2.add(hri3); // only region2
    Map<ServerName, List<RegionInfo>> clusterState = new LinkedHashMap<>();
    clusterState.put(servers[0], list0); // servers[0] hosts region1
    clusterState.put(servers[1], list1); // servers[1] hosts replica_of_region1
    clusterState.put(servers[2], list2); // servers[2] hosts region2
    // create a cluster with the above clusterState. The way the cluster is
    // created (constructor code) ensures that the server indices follow the
    // order in which the servers are inserted into the clusterState map
    // (using a LinkedHashMap is important).
    BalancerClusterState cluster = new BalancerClusterState(clusterState, null, null, rackManager);
    // check whether moving region1 from servers[0] to servers[2] would lower availability
    assertTrue(!cluster.wouldLowerAvailability(hri1, servers[2]));

    // now move region1 from servers[0] to servers[2]
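    // (MoveRegionAction takes the cluster's internal indices for region, source server and
    // destination server; given the LinkedHashMap insertion order noted above, region 0 is
    // region1, server 0 is servers[0] and server 2 is servers[2])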
    cluster.doAction(new MoveRegionAction(0, 0, 2));
    // now that region1 lives on servers[2], repeat the check: moving region1 to servers[2]
    // would lower availability
    assertTrue(cluster.wouldLowerAvailability(hri1, servers[2]));

    // start over again
    clusterState.clear();
    List<RegionInfo> list3 = new ArrayList<>();
    RegionInfo hri4 = RegionReplicaUtil.getRegionInfoForReplica(hri3, 1);
    list3.add(hri4);
    clusterState.put(servers[0], list0); // servers[0], rack1 hosts region1
    clusterState.put(servers[5], list1); // servers[5], rack2 hosts replica_of_region1
    clusterState.put(servers[6], list2); // servers[6], rack2 hosts region2
    clusterState.put(servers[12], list3); // servers[12], rack3 hosts replica_of_region2
    // create a cluster with the above clusterState
    cluster = new BalancerClusterState(clusterState, null, null, rackManager);
    // check whether a move of replica_of_region2 from servers[12],rack3 to servers[0],rack1 would
    // lower the availability
    assertTrue(!cluster.wouldLowerAvailability(hri4, servers[0]));
    // now move region2 from servers[6],rack2 to servers[0],rack1
    cluster.doAction(new MoveRegionAction(2, 2, 0));
    // now repeat the check: a move of replica_of_region2 from servers[12],rack3 to
    // servers[0],rack1 would lower the availability
    assertTrue(cluster.wouldLowerAvailability(hri4, servers[0]));
  }

  private List<ServerName> getListOfServerNames(final List<ServerAndLoad> sals) {
    return sals.stream().map(ServerAndLoad::getServerName).collect(Collectors.toList());
  }

  /**
   * Asserts a valid retained assignment plan.
   * <p>
   * Must meet the following conditions:
   * <ul>
   * <li>Every input region has an assignment, and to an online server
   * <li>If a region had an existing assignment to a server with the same address as a currently
   * online server, it will be assigned to it
   * </ul>
   */
  private void assertRetainedAssignment(Map<RegionInfo, ServerName> existing,
    List<ServerName> servers, Map<ServerName, List<RegionInfo>> assignment) {
    // Verify condition 1, every region assigned, and to an online server
    Set<ServerName> onlineServerSet = new TreeSet<>(servers);
    Set<RegionInfo> assignedRegions = new TreeSet<>(RegionInfo.COMPARATOR);
    for (Map.Entry<ServerName, List<RegionInfo>> a : assignment.entrySet()) {
      assertTrue("Region assigned to server that was not listed as online",
        onlineServerSet.contains(a.getKey()));
      assignedRegions.addAll(a.getValue());
    }
    assertEquals(existing.size(), assignedRegions.size());

    // Verify condition 2, a region whose old host is still online keeps that host
    Set<String> onlineHostNames = new TreeSet<>();
    for (ServerName s : servers) {
      onlineHostNames.add(s.getHostname());
    }

    for (Map.Entry<ServerName, List<RegionInfo>> a : assignment.entrySet()) {
      ServerName assignedTo = a.getKey();
      for (RegionInfo r : a.getValue()) {
        ServerName address = existing.get(r);
        if (address != null && onlineHostNames.contains(address.getHostname())) {
          // this region was previously assigned somewhere, and that
          // host is still around, so it should be re-assigned to the
          // same host
          assertEquals(address.getHostname(), assignedTo.getHostname());
        }
      }
    }
  }

  @Test
  public void testClusterServersWithSameHostPort() {
    // tests whether a BalancerClusterState can be constructed with servers
    // sharing the same host and port
    List<ServerName> servers = getListOfServerNames(randomServers(10, 10));
    List<RegionInfo> regions = randomRegions(101);
    Map<ServerName, List<RegionInfo>> clusterState = new TreeMap<>();

    assignRegions(regions, servers, clusterState);

    // construct another list of servers, but sharing the same hosts and ports
    List<ServerName> oldServers = new ArrayList<>(servers.size());
    for (ServerName sn : servers) {
      // The old server would have had the same host and port, but a different start code!
      oldServers.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 10));
    }

    regions = randomRegions(9); // some more regions
    assignRegions(regions, oldServers, clusterState);

    // should not throw exception:
    BalancerClusterState cluster = new BalancerClusterState(clusterState, null, null, null);
    assertEquals(101 + 9, cluster.numRegions);
    assertEquals(10, cluster.numServers); // only 10 servers because they share the same host + port

    // test that a move resolves region/server indices through the shared host:port address
    // and applies without throwing
    ServerName sn = oldServers.get(0);
    int r0 = ArrayUtils.indexOf(cluster.regions, clusterState.get(sn).get(0));
    int f0 = cluster.serversToIndex.get(sn.getAddress());
    int t0 = cluster.serversToIndex.get(servers.get(1).getAddress());
    cluster.doAction(new MoveRegionAction(r0, f0, t0));
  }

  private void assignRegions(List<RegionInfo> regions, List<ServerName> servers,
    Map<ServerName, List<RegionInfo>> clusterState) {
    for (int i = 0; i < regions.size(); i++) {
      ServerName sn = servers.get(i % servers.size());
      List<RegionInfo> regionsOfServer = clusterState.get(sn);
      if (regionsOfServer == null) {
        regionsOfServer = new ArrayList<>(10);
        clusterState.put(sn, regionsOfServer);
      }

      regionsOfServer.add(regions.get(i));
    }
  }

  @Test
  public void testClusterRegionLocations() {
    // tests whether region locations are handled correctly in Cluster
    List<ServerName> servers = getListOfServerNames(randomServers(10, 10));
    List<RegionInfo> regions = randomRegions(101);
    Map<ServerName, List<RegionInfo>> clusterState = new HashMap<>();

    assignRegions(regions, servers, clusterState);

    // mock block locality for some regions
    RegionLocationFinder locationFinder = mock(RegionLocationFinder.class);
    // block locality: region:0 => {server:0}
    // region:1 => {server:0, server:1}
    // region:42 => {server:4, server:9, server:5}
    when(locationFinder.getTopBlockLocations(regions.get(0)))
      .thenReturn(Lists.newArrayList(servers.get(0)));
    when(locationFinder.getTopBlockLocations(regions.get(1)))
      .thenReturn(Lists.newArrayList(servers.get(0), servers.get(1)));
    when(locationFinder.getTopBlockLocations(regions.get(42)))
      .thenReturn(Lists.newArrayList(servers.get(4), servers.get(9), servers.get(5)));
    // this server does not exist in clusterState
    when(locationFinder.getTopBlockLocations(regions.get(43)))
      .thenReturn(Lists.newArrayList(ServerName.valueOf("foo", 0, 0)));

    BalancerClusterState cluster =
      new BalancerClusterState(clusterState, null, locationFinder, null);
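    // cluster.regionLocations[region] is expected to hold the server indices of the top block
    // locations in locality order; a location whose server is unknown to the cluster maps to -1,
    // which is what the region 43 checks at the end verify.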

    // linear scans to resolve the internal indices are ok here, this is just a test
    int r0 = ArrayUtils.indexOf(cluster.regions, regions.get(0));
    int r1 = ArrayUtils.indexOf(cluster.regions, regions.get(1));
    int r10 = ArrayUtils.indexOf(cluster.regions, regions.get(10));
    int r42 = ArrayUtils.indexOf(cluster.regions, regions.get(42));
    int r43 = ArrayUtils.indexOf(cluster.regions, regions.get(43));

    int s0 = cluster.serversToIndex.get(servers.get(0).getAddress());
    int s1 = cluster.serversToIndex.get(servers.get(1).getAddress());
    int s4 = cluster.serversToIndex.get(servers.get(4).getAddress());
    int s5 = cluster.serversToIndex.get(servers.get(5).getAddress());
    int s9 = cluster.serversToIndex.get(servers.get(9).getAddress());

    // region 0 locations
    assertEquals(1, cluster.regionLocations[r0].length);
    assertEquals(s0, cluster.regionLocations[r0][0]);

    // region 1 locations
    assertEquals(2, cluster.regionLocations[r1].length);
    assertEquals(s0, cluster.regionLocations[r1][0]);
    assertEquals(s1, cluster.regionLocations[r1][1]);

    // region 10 locations
    assertEquals(0, cluster.regionLocations[r10].length);

    // region 42 locations
    assertEquals(3, cluster.regionLocations[r42].length);
    assertEquals(s4, cluster.regionLocations[r42][0]);
    assertEquals(s9, cluster.regionLocations[r42][1]);
    assertEquals(s5, cluster.regionLocations[r42][2]);

    // region 43 locations
    assertEquals(1, cluster.regionLocations[r43].length);
    assertEquals(-1, cluster.regionLocations[r43][0]);
  }
}