001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.master.balancer;
019
020import static org.junit.Assert.assertNotNull;
021import static org.junit.Assert.assertNull;
022import static org.junit.Assert.assertTrue;
023import static org.mockito.Mockito.mock;
024import static org.mockito.Mockito.when;
025
026import java.util.ArrayList;
027import java.util.HashMap;
028import java.util.HashSet;
029import java.util.LinkedList;
030import java.util.List;
031import java.util.Map;
032import java.util.Map.Entry;
033import java.util.Queue;
034import java.util.Random;
035import java.util.Set;
036import java.util.SortedSet;
037import java.util.TreeMap;
038import java.util.TreeSet;
039import java.util.concurrent.ThreadLocalRandom;
040import java.util.stream.Collectors;
041import java.util.stream.Stream;
042import org.apache.hadoop.conf.Configuration;
043import org.apache.hadoop.hbase.HBaseConfiguration;
044import org.apache.hadoop.hbase.ServerName;
045import org.apache.hadoop.hbase.TableName;
046import org.apache.hadoop.hbase.client.RegionInfo;
047import org.apache.hadoop.hbase.client.RegionInfoBuilder;
048import org.apache.hadoop.hbase.client.RegionReplicaUtil;
049import org.apache.hadoop.hbase.master.MasterServices;
050import org.apache.hadoop.hbase.master.RackManager;
051import org.apache.hadoop.hbase.master.RegionPlan;
052import org.apache.hadoop.hbase.util.Bytes;
053import org.apache.hadoop.net.DNSToSwitchMapping;
054import org.junit.Assert;
055import org.junit.BeforeClass;
056import org.slf4j.Logger;
057import org.slf4j.LoggerFactory;
058
059/**
060 * Class used to be the base of unit tests on load balancers. It gives helper methods to create maps
061 * of {@link ServerName} to lists of {@link RegionInfo} and to check list of region plans.
062 */
063public class BalancerTestBase {
064  private static final Logger LOG = LoggerFactory.getLogger(BalancerTestBase.class);
065  static int regionId = 0;
066  protected static Configuration conf;
067  protected static StochasticLoadBalancer loadBalancer;
068
069  protected static DummyMetricsStochasticBalancer dummyMetricsStochasticBalancer =
070    new DummyMetricsStochasticBalancer();
071
  @BeforeClass
  public static void beforeAllTests() throws Exception {
    conf = HBaseConfiguration.create();
    // Use MockMapping so tests do not pay the cost of real IP-to-rack resolution.
    conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
    // Zero out the locality cost so balancing decisions depend only on region counts.
    conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
    loadBalancer = new StochasticLoadBalancer(dummyMetricsStochasticBalancer);
    // The balancer only reads the configuration from the mocked master services.
    MasterServices services = mock(MasterServices.class);
    when(services.getConfiguration()).thenReturn(conf);
    loadBalancer.setMasterServices(services);
    loadBalancer.initialize();
  }
083
084  protected int[] largeCluster = new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
085    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
086    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
087    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
088    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
089    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
090    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
091    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
092    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
093    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
094    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
095    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
096    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 };
097
098  // int[testnum][servernumber] -> numregions
099  protected int[][] clusterStateMocks = new int[][] {
100    // 1 node
101    new int[] { 0 }, new int[] { 1 }, new int[] { 10 },
102    // 2 node
103    new int[] { 0, 0 }, new int[] { 2, 0 }, new int[] { 2, 1 }, new int[] { 2, 2 },
104    new int[] { 2, 3 }, new int[] { 2, 4 }, new int[] { 1, 1 }, new int[] { 0, 1 },
105    new int[] { 10, 1 }, new int[] { 514, 1432 }, new int[] { 48, 53 },
106    // 3 node
107    new int[] { 0, 1, 2 }, new int[] { 1, 2, 3 }, new int[] { 0, 2, 2 }, new int[] { 0, 3, 0 },
108    new int[] { 0, 4, 0 }, new int[] { 20, 20, 0 },
109    // 4 node
110    new int[] { 0, 1, 2, 3 }, new int[] { 4, 0, 0, 0 }, new int[] { 5, 0, 0, 0 },
111    new int[] { 6, 6, 0, 0 }, new int[] { 6, 2, 0, 0 }, new int[] { 6, 1, 0, 0 },
112    new int[] { 6, 0, 0, 0 }, new int[] { 4, 4, 4, 7 }, new int[] { 4, 4, 4, 8 },
113    new int[] { 0, 0, 0, 7 },
114    // 5 node
115    new int[] { 1, 1, 1, 1, 4 },
116    // 6 nodes
117    new int[] { 1500, 500, 500, 500, 10, 0 }, new int[] { 1500, 500, 500, 500, 500, 0 },
118    // more nodes
119    new int[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
120    new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 }, new int[] { 6, 6, 5, 6, 6, 6, 6, 6, 6, 1 },
121    new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 54 }, new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 55 },
122    new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 }, new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 16 },
123    new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 8 }, new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 9 },
124    new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 10 }, new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 123 },
125    new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 155 }, new int[] { 10, 7, 12, 8, 11, 10, 9, 14 },
126    new int[] { 13, 14, 6, 10, 10, 10, 8, 10 }, new int[] { 130, 14, 60, 10, 100, 10, 80, 10 },
127    new int[] { 130, 140, 60, 100, 100, 100, 80, 100 }, new int[] { 0, 5, 5, 5, 5 }, largeCluster,
128
129  };
130
131  // int[testnum][servernumber] -> numregions
132  protected int[][] clusterStateMocksWithNoSlop = new int[][] {
133    // 1 node
134    new int[] { 0 }, new int[] { 1 }, new int[] { 10 },
135    // 2 node
136    new int[] { 0, 0 }, new int[] { 2, 1 }, new int[] { 2, 2 }, new int[] { 2, 3 },
137    new int[] { 1, 1 }, new int[] { 80, 120 }, new int[] { 1428, 1432 },
138    // more nodes
139    new int[] { 100, 90, 120, 90, 110, 100, 90, 120 }, };
140
141  // int[testnum][servernumber] -> numregions
142  protected int[][] clusterStateMocksWithSlop = new int[][] {
143    // 2 node
144    new int[] { 1, 4 }, new int[] { 10, 20 }, new int[] { 80, 123 },
145    // more nodes
146    new int[] { 100, 100, 100, 100, 100, 100, 100, 100, 100, 200 },
147    new int[] { 10, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
148      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
149      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
150      5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }, };
151
152  // This class is introduced because IP to rack resolution can be lengthy.
153  public static class MockMapping implements DNSToSwitchMapping {
154    public MockMapping(Configuration conf) {
155    }
156
157    @Override
158    public List<String> resolve(List<String> names) {
159      return Stream.generate(() -> "rack").limit(names.size()).collect(Collectors.toList());
160    }
161
162    // do not add @Override annotations here. It mighty break compilation with earlier Hadoops
163    public void reloadCachedMappings() {
164    }
165
166    // do not add @Override annotations here. It mighty break compilation with earlier Hadoops
167    public void reloadCachedMappings(List<String> arg0) {
168    }
169  }
170
171  /**
172   * Invariant is that all servers have between floor(avg) and ceiling(avg) number of regions.
173   */
174  public void assertClusterAsBalanced(List<ServerAndLoad> servers) {
175    int numServers = servers.size();
176    int numRegions = 0;
177    int maxRegions = 0;
178    int minRegions = Integer.MAX_VALUE;
179    for (ServerAndLoad server : servers) {
180      int nr = server.getLoad();
181      if (nr > maxRegions) {
182        maxRegions = nr;
183      }
184      if (nr < minRegions) {
185        minRegions = nr;
186      }
187      numRegions += nr;
188    }
189    if (maxRegions - minRegions < 2) {
190      // less than 2 between max and min, can't balance
191      return;
192    }
193    int min = numRegions / numServers;
194    int max = numRegions % numServers == 0 ? min : min + 1;
195
196    for (ServerAndLoad server : servers) {
197      assertTrue("All servers should have a positive load. " + server, server.getLoad() >= 0);
198      assertTrue("All servers should have load no more than " + max + ". " + server,
199        server.getLoad() <= max);
200      assertTrue("All servers should have load no less than " + min + ". " + server,
201        server.getLoad() >= min);
202    }
203  }
204
205  /**
206   * Invariant is that all servers have between acceptable range number of regions.
207   */
208  public boolean assertClusterOverallAsBalanced(List<ServerAndLoad> servers, int tablenum) {
209    int numServers = servers.size();
210    int numRegions = 0;
211    int maxRegions = 0;
212    int minRegions = Integer.MAX_VALUE;
213    for (ServerAndLoad server : servers) {
214      int nr = server.getLoad();
215      if (nr > maxRegions) {
216        maxRegions = nr;
217      }
218      if (nr < minRegions) {
219        minRegions = nr;
220      }
221      numRegions += nr;
222    }
223    if (maxRegions - minRegions < 2) {
224      // less than 2 between max and min, can't balance
225      return true;
226    }
227    int min = numRegions / numServers;
228    int max = numRegions % numServers == 0 ? min : min + 1;
229
230    for (ServerAndLoad server : servers) {
231      // The '5' in below is arbitrary.
232      if (
233        server.getLoad() < 0 || server.getLoad() > max + (tablenum / 2 + 5)
234          || server.getLoad() < (min - tablenum / 2 - 5)
235      ) {
236        LOG.warn("server={}, load={}, max={}, tablenum={}, min={}", server.getServerName(),
237          server.getLoad(), max, tablenum, min);
238        return false;
239      }
240    }
241    return true;
242  }
243
244  /**
245   * Checks whether region replicas are not hosted on the same host.
246   */
247  public void assertRegionReplicaPlacement(Map<ServerName, List<RegionInfo>> serverMap,
248    RackManager rackManager) {
249    TreeMap<String, Set<RegionInfo>> regionsPerHost = new TreeMap<>();
250    TreeMap<String, Set<RegionInfo>> regionsPerRack = new TreeMap<>();
251
252    for (Entry<ServerName, List<RegionInfo>> entry : serverMap.entrySet()) {
253      String hostname = entry.getKey().getHostname();
254      Set<RegionInfo> infos = regionsPerHost.get(hostname);
255      if (infos == null) {
256        infos = new HashSet<>();
257        regionsPerHost.put(hostname, infos);
258      }
259
260      for (RegionInfo info : entry.getValue()) {
261        RegionInfo primaryInfo = RegionReplicaUtil.getRegionInfoForDefaultReplica(info);
262        if (!infos.add(primaryInfo)) {
263          Assert.fail("Two or more region replicas are hosted on the same host after balance");
264        }
265      }
266    }
267
268    if (rackManager == null) {
269      return;
270    }
271
272    for (Entry<ServerName, List<RegionInfo>> entry : serverMap.entrySet()) {
273      String rack = rackManager.getRack(entry.getKey());
274      Set<RegionInfo> infos = regionsPerRack.get(rack);
275      if (infos == null) {
276        infos = new HashSet<>();
277        regionsPerRack.put(rack, infos);
278      }
279
280      for (RegionInfo info : entry.getValue()) {
281        RegionInfo primaryInfo = RegionReplicaUtil.getRegionInfoForDefaultReplica(info);
282        if (!infos.add(primaryInfo)) {
283          Assert.fail("Two or more region replicas are hosted on the same rack after balance");
284        }
285      }
286    }
287  }
288
289  protected String printStats(List<ServerAndLoad> servers) {
290    int numServers = servers.size();
291    int totalRegions = 0;
292    for (ServerAndLoad server : servers) {
293      totalRegions += server.getLoad();
294    }
295    float average = (float) totalRegions / numServers;
296    int max = (int) Math.ceil(average);
297    int min = (int) Math.floor(average);
298    return "[srvr=" + numServers + " rgns=" + totalRegions + " avg=" + average + " max=" + max
299      + " min=" + min + "]";
300  }
301
302  protected List<ServerAndLoad> convertToList(final Map<ServerName, List<RegionInfo>> servers) {
303    List<ServerAndLoad> list = new ArrayList<>(servers.size());
304    for (Map.Entry<ServerName, List<RegionInfo>> e : servers.entrySet()) {
305      list.add(new ServerAndLoad(e.getKey(), e.getValue().size()));
306    }
307    return list;
308  }
309
310  protected String printMock(List<ServerAndLoad> balancedCluster) {
311    SortedSet<ServerAndLoad> sorted = new TreeSet<>(balancedCluster);
312    ServerAndLoad[] arr = sorted.toArray(new ServerAndLoad[sorted.size()]);
313    StringBuilder sb = new StringBuilder(sorted.size() * 4 + 4);
314    sb.append("{ ");
315    for (int i = 0; i < arr.length; i++) {
316      if (i != 0) {
317        sb.append(" , ");
318      }
319      sb.append(arr[i].getServerName().getHostname());
320      sb.append(":");
321      sb.append(arr[i].getLoad());
322    }
323    sb.append(" }");
324    return sb.toString();
325  }
326
327  /**
328   * This assumes the RegionPlan HSI instances are the same ones in the map, so actually no need to
329   * even pass in the map, but I think it's clearer. nn * @return a list of all added
330   * {@link ServerAndLoad} values.
331   */
332  protected List<ServerAndLoad> reconcile(List<ServerAndLoad> list, List<RegionPlan> plans,
333    Map<ServerName, List<RegionInfo>> servers) {
334    List<ServerAndLoad> result = new ArrayList<>(list.size());
335
336    Map<ServerName, ServerAndLoad> map = new HashMap<>(list.size());
337    for (ServerAndLoad sl : list) {
338      map.put(sl.getServerName(), sl);
339    }
340    if (plans != null) {
341      for (RegionPlan plan : plans) {
342        ServerName source = plan.getSource();
343
344        updateLoad(map, source, -1);
345        ServerName destination = plan.getDestination();
346        updateLoad(map, destination, +1);
347
348        servers.get(source).remove(plan.getRegionInfo());
349        servers.get(destination).add(plan.getRegionInfo());
350      }
351    }
352    result.clear();
353    result.addAll(map.values());
354    return result;
355  }
356
357  protected void updateLoad(final Map<ServerName, ServerAndLoad> map, final ServerName sn,
358    final int diff) {
359    ServerAndLoad sal = map.get(sn);
360    if (sal == null) sal = new ServerAndLoad(sn, 0);
361    sal = new ServerAndLoad(sn, sal.getLoad() + diff);
362    map.put(sn, sal);
363  }
364
365  protected TreeMap<ServerName, List<RegionInfo>> mockClusterServers(int[][] mockCluster) {
366    // dimension1: table, dimension2: regions per server
367    int numTables = mockCluster.length;
368    TreeMap<ServerName, List<RegionInfo>> servers = new TreeMap<>();
369    for (int i = 0; i < numTables; i++) {
370      TableName tableName = TableName.valueOf("table" + i);
371      for (int j = 0; j < mockCluster[i].length; j++) {
372        ServerName serverName = ServerName.valueOf("server" + j, 1000, -1);
373        int numRegions = mockCluster[i][j];
374        List<RegionInfo> regions = createRegions(numRegions, tableName);
375        servers.putIfAbsent(serverName, new ArrayList<>());
376        servers.get(serverName).addAll(regions);
377      }
378    }
379    return servers;
380  }
381
  /**
   * Builds a server map for the given per-server region counts with {@code numTables} of -1,
   * which makes region i of each batch land in its own table ("table" + i).
   */
  protected TreeMap<ServerName, List<RegionInfo>> mockClusterServers(int[] mockCluster) {
    return mockClusterServers(mockCluster, -1);
  }
385
  /**
   * Wraps the mocked server map in a {@link BalancerClusterState}; no unassigned regions,
   * rack manager, or extra info are supplied (all null).
   */
  protected BalancerClusterState mockCluster(int[] mockCluster) {
    return new BalancerClusterState(mockClusterServers(mockCluster, -1), null, null, null);
  }
389
390  protected TreeMap<ServerName, List<RegionInfo>> mockClusterServers(int[] mockCluster,
391    int numTables) {
392    int numServers = mockCluster.length;
393    TreeMap<ServerName, List<RegionInfo>> servers = new TreeMap<>();
394    for (int i = 0; i < numServers; i++) {
395      int numRegions = mockCluster[i];
396      ServerAndLoad sal = randomServer(0);
397      List<RegionInfo> regions = randomRegions(numRegions, numTables);
398      servers.put(sal.getServerName(), regions);
399    }
400    return servers;
401  }
402
403  protected TreeMap<ServerName, List<RegionInfo>> mockUniformClusterServers(int[] mockCluster) {
404    int numServers = mockCluster.length;
405    TreeMap<ServerName, List<RegionInfo>> servers = new TreeMap<>();
406    for (int i = 0; i < numServers; i++) {
407      int numRegions = mockCluster[i];
408      ServerAndLoad sal = randomServer(0);
409      List<RegionInfo> regions = uniformRegions(numRegions);
410      servers.put(sal.getServerName(), regions);
411    }
412    return servers;
413  }
414
415  protected HashMap<TableName, TreeMap<ServerName, List<RegionInfo>>>
416    mockClusterServersWithTables(Map<ServerName, List<RegionInfo>> clusterServers) {
417    HashMap<TableName, TreeMap<ServerName, List<RegionInfo>>> result = new HashMap<>();
418    for (Map.Entry<ServerName, List<RegionInfo>> entry : clusterServers.entrySet()) {
419      ServerName sal = entry.getKey();
420      List<RegionInfo> regions = entry.getValue();
421      for (RegionInfo hri : regions) {
422        TreeMap<ServerName, List<RegionInfo>> servers = result.get(hri.getTable());
423        if (servers == null) {
424          servers = new TreeMap<>();
425          result.put(hri.getTable(), servers);
426        }
427        List<RegionInfo> hrilist = servers.get(sal);
428        if (hrilist == null) {
429          hrilist = new ArrayList<>();
430          servers.put(sal, hrilist);
431        }
432        hrilist.add(hri);
433      }
434    }
435    for (Map.Entry<TableName, TreeMap<ServerName, List<RegionInfo>>> entry : result.entrySet()) {
436      for (ServerName srn : clusterServers.keySet()) {
437        if (!entry.getValue().containsKey(srn)) entry.getValue().put(srn, new ArrayList<>());
438      }
439    }
440    return result;
441  }
442
443  private Queue<RegionInfo> regionQueue = new LinkedList<>();
444
  /** Returns {@code numRegions} random regions, one table per region ({@code numTables} = -1). */
  protected List<RegionInfo> randomRegions(int numRegions) {
    return randomRegions(numRegions, -1);
  }
448
449  protected List<RegionInfo> createRegions(int numRegions, TableName tableName) {
450    List<RegionInfo> regions = new ArrayList<>(numRegions);
451    byte[] start = new byte[16];
452    Bytes.random(start);
453    byte[] end = new byte[16];
454    Bytes.random(end);
455    for (int i = 0; i < numRegions; i++) {
456      Bytes.putInt(start, 0, numRegions << 1);
457      Bytes.putInt(end, 0, (numRegions << 1) + 1);
458      RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end)
459        .setSplit(false).build();
460      regions.add(hri);
461    }
462    return regions;
463  }
464
465  protected List<RegionInfo> randomRegions(int numRegions, int numTables) {
466    List<RegionInfo> regions = new ArrayList<>(numRegions);
467    byte[] start = new byte[16];
468    Bytes.random(start);
469    byte[] end = new byte[16];
470    Bytes.random(end);
471    for (int i = 0; i < numRegions; i++) {
472      if (!regionQueue.isEmpty()) {
473        regions.add(regionQueue.poll());
474        continue;
475      }
476      Bytes.putInt(start, 0, numRegions << 1);
477      Bytes.putInt(end, 0, (numRegions << 1) + 1);
478      TableName tableName = TableName
479        .valueOf("table" + (numTables > 0 ? ThreadLocalRandom.current().nextInt(numTables) : i));
480      RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end)
481        .setSplit(false).setRegionId(regionId++).build();
482      regions.add(hri);
483    }
484    return regions;
485  }
486
487  protected List<RegionInfo> uniformRegions(int numRegions) {
488    List<RegionInfo> regions = new ArrayList<>(numRegions);
489    byte[] start = new byte[16];
490    Bytes.random(start);
491    byte[] end = new byte[16];
492    Bytes.random(end);
493    for (int i = 0; i < numRegions; i++) {
494      Bytes.putInt(start, 0, numRegions << 1);
495      Bytes.putInt(end, 0, (numRegions << 1) + 1);
496      TableName tableName = TableName.valueOf("table" + i);
497      RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end)
498        .setSplit(false).build();
499      regions.add(hri);
500    }
501    return regions;
502  }
503
  /** Hands regions back so later {@link #randomRegions(int, int)} calls can reuse them. */
  protected void returnRegions(List<RegionInfo> regions) {
    regionQueue.addAll(regions);
  }
507
508  private Queue<ServerName> serverQueue = new LinkedList<>();
509
510  protected ServerAndLoad randomServer(final int numRegionsPerServer) {
511    if (!this.serverQueue.isEmpty()) {
512      ServerName sn = this.serverQueue.poll();
513      return new ServerAndLoad(sn, numRegionsPerServer);
514    }
515    Random rand = ThreadLocalRandom.current();
516    String host = "srv" + rand.nextInt(Integer.MAX_VALUE);
517    int port = rand.nextInt(60000);
518    long startCode = rand.nextLong();
519    ServerName sn = ServerName.valueOf(host, port, startCode);
520    return new ServerAndLoad(sn, numRegionsPerServer);
521  }
522
523  protected List<ServerAndLoad> randomServers(int numServers, int numRegionsPerServer) {
524    List<ServerAndLoad> servers = new ArrayList<>(numServers);
525    for (int i = 0; i < numServers; i++) {
526      servers.add(randomServer(numRegionsPerServer));
527    }
528    return servers;
529  }
530
  /** Hands a server back so a later {@link #randomServer(int)} call can reuse it. */
  protected void returnServer(ServerName server) {
    serverQueue.add(server);
  }
534
  /** Hands a batch of servers back so later {@link #randomServer(int)} calls can reuse them. */
  protected void returnServers(List<ServerName> servers) {
    this.serverQueue.addAll(servers);
  }
538
539  protected void testWithCluster(int numNodes, int numRegions, int numRegionsPerServer,
540    int replication, int numTables, boolean assertFullyBalanced,
541    boolean assertFullyBalancedForReplicas) {
542    Map<ServerName, List<RegionInfo>> serverMap =
543      createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
544    testWithCluster(serverMap, null, assertFullyBalanced, assertFullyBalancedForReplicas);
545  }
546
547  protected void testWithClusterWithIteration(int numNodes, int numRegions, int numRegionsPerServer,
548    int replication, int numTables, boolean assertFullyBalanced,
549    boolean assertFullyBalancedForReplicas) {
550    Map<ServerName, List<RegionInfo>> serverMap =
551      createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
552    testWithClusterWithIteration(serverMap, null, assertFullyBalanced,
553      assertFullyBalancedForReplicas);
554  }
555
556  protected void testWithCluster(Map<ServerName, List<RegionInfo>> serverMap,
557    RackManager rackManager, boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) {
558    List<ServerAndLoad> list = convertToList(serverMap);
559    LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));
560
561    loadBalancer.setRackManager(rackManager);
562    // Run the balancer.
563    Map<TableName, Map<ServerName, List<RegionInfo>>> LoadOfAllTable =
564      (Map) mockClusterServersWithTables(serverMap);
565    List<RegionPlan> plans = loadBalancer.balanceCluster(LoadOfAllTable);
566    assertNotNull("Initial cluster balance should produce plans.", plans);
567
568    // Check to see that this actually got to a stable place.
569    if (assertFullyBalanced || assertFullyBalancedForReplicas) {
570      // Apply the plan to the mock cluster.
571      List<ServerAndLoad> balancedCluster = reconcile(list, plans, serverMap);
572
573      // Print out the cluster loads to make debugging easier.
574      LOG.info("Mock after Balance : " + printMock(balancedCluster));
575
576      if (assertFullyBalanced) {
577        assertClusterAsBalanced(balancedCluster);
578        LoadOfAllTable = (Map) mockClusterServersWithTables(serverMap);
579        List<RegionPlan> secondPlans = loadBalancer.balanceCluster(LoadOfAllTable);
580        assertNull("Given a requirement to be fully balanced, second attempt at plans should "
581          + "produce none.", secondPlans);
582      }
583
584      if (assertFullyBalancedForReplicas) {
585        assertRegionReplicaPlacement(serverMap, rackManager);
586      }
587    }
588  }
589
590  protected void testWithClusterWithIteration(Map<ServerName, List<RegionInfo>> serverMap,
591    RackManager rackManager, boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) {
592    List<ServerAndLoad> list = convertToList(serverMap);
593    LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));
594
595    loadBalancer.setRackManager(rackManager);
596    // Run the balancer.
597    Map<TableName, Map<ServerName, List<RegionInfo>>> LoadOfAllTable =
598      (Map) mockClusterServersWithTables(serverMap);
599    List<RegionPlan> plans = loadBalancer.balanceCluster(LoadOfAllTable);
600    assertNotNull("Initial cluster balance should produce plans.", plans);
601
602    List<ServerAndLoad> balancedCluster = null;
603    // Run through iteration until done. Otherwise will be killed as test time out
604    while (plans != null && (assertFullyBalanced || assertFullyBalancedForReplicas)) {
605      // Apply the plan to the mock cluster.
606      balancedCluster = reconcile(list, plans, serverMap);
607
608      // Print out the cluster loads to make debugging easier.
609      LOG.info("Mock after balance: " + printMock(balancedCluster));
610
611      LoadOfAllTable = (Map) mockClusterServersWithTables(serverMap);
612      plans = loadBalancer.balanceCluster(LoadOfAllTable);
613    }
614
615    // Print out the cluster loads to make debugging easier.
616    LOG.info("Mock Final balance: " + printMock(balancedCluster));
617
618    if (assertFullyBalanced) {
619      assertNull("Given a requirement to be fully balanced, second attempt at plans should "
620        + "produce none.", plans);
621    }
622    if (assertFullyBalancedForReplicas) {
623      assertRegionReplicaPlacement(serverMap, rackManager);
624    }
625  }
626
627  protected Map<ServerName, List<RegionInfo>> createServerMap(int numNodes, int numRegions,
628    int numRegionsPerServer, int replication, int numTables) {
629    // construct a cluster of numNodes, having a total of numRegions. Each RS will hold
630    // numRegionsPerServer many regions except for the last one, which will host all the
631    // remaining regions
632    int[] cluster = new int[numNodes];
633    for (int i = 0; i < numNodes; i++) {
634      cluster[i] = numRegionsPerServer;
635    }
636    cluster[cluster.length - 1] = numRegions - ((cluster.length - 1) * numRegionsPerServer);
637    Map<ServerName, List<RegionInfo>> clusterState = mockClusterServers(cluster, numTables);
638    if (replication > 0) {
639      // replicate the regions to the same servers
640      for (List<RegionInfo> regions : clusterState.values()) {
641        int length = regions.size();
642        for (int i = 0; i < length; i++) {
643          for (int r = 1; r < replication; r++) {
644            regions.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(i), r));
645          }
646        }
647      }
648    }
649
650    return clusterState;
651  }
652
653}