/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.balancer;

import edu.umd.cs.findbugs.annotations.NonNull;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

/**
 * The base class for load balancers. It provides the functions used by {@code AssignmentManager}
 * to assign regions in edge cases. It does not provide an implementation of the actual balancing
 * algorithm.
 * <p/>
 * Since 3.0.0, all balancers are wrapped inside a {@code RSGroupBasedLoadBalancer}, which is in
 * charge of synchronizing balancing and configuration changes, so we do not need to synchronize
 * ourselves.
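 * <p/>
 * A concrete balancer only needs to implement {@link #balanceTable(TableName, Map)}. A minimal,
 * purely illustrative sketch (the class name is hypothetical) of a balancer that never moves
 * regions could look like:
 *
 * <pre>
 * public class NoopLoadBalancer extends BaseLoadBalancer {
 *   &#64;Override
 *   protected List&lt;RegionPlan&gt; balanceTable(TableName tableName,
 *     Map&lt;ServerName, List&lt;RegionInfo&gt;&gt; loadOfOneTable) {
 *     // Returning an empty list (or null) means the table is considered balanced.
 *     return Collections.emptyList();
 *   }
 * }
 * </pre>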
 */
@InterfaceAudience.Private
public abstract class BaseLoadBalancer implements LoadBalancer {

  private static final Logger LOG = LoggerFactory.getLogger(BaseLoadBalancer.class);

  public static final String BALANCER_DECISION_BUFFER_ENABLED =
    "hbase.master.balancer.decision.buffer.enabled";
  public static final boolean DEFAULT_BALANCER_DECISION_BUFFER_ENABLED = false;

  public static final String BALANCER_REJECTION_BUFFER_ENABLED =
    "hbase.master.balancer.rejection.buffer.enabled";
  public static final boolean DEFAULT_BALANCER_REJECTION_BUFFER_ENABLED = false;

  public static final boolean DEFAULT_HBASE_MASTER_LOADBALANCE_BYTABLE = false;

  protected static final int MIN_SERVER_BALANCE = 2;
  private volatile boolean stopped = false;

  protected volatile RegionHDFSBlockLocationFinder regionFinder;
  protected boolean useRegionFinder;
  protected boolean isByTable = DEFAULT_HBASE_MASTER_LOADBALANCE_BYTABLE;

  // slop for regions
  protected float slop;
  protected volatile RackManager rackManager;
  protected MetricsBalancer metricsBalancer = null;
  protected ClusterMetrics clusterStatus = null;
  protected ServerName masterServerName;
  protected ClusterInfoProvider provider;

  /**
   * The constructor that uses the basic MetricsBalancer
   */
  protected BaseLoadBalancer() {
    this(null);
  }

  /**
   * This constructor accepts an instance of MetricsBalancer, which will be used instead of creating
   * a new one.
   */
  protected BaseLoadBalancer(MetricsBalancer metricsBalancer) {
    this.metricsBalancer = (metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
  }

  protected final Configuration getConf() {
    return provider.getConfiguration();
  }

  @Override
  public void updateClusterMetrics(ClusterMetrics st) {
    this.clusterStatus = st;
    if (useRegionFinder) {
      regionFinder.setClusterMetrics(st);
    }
  }

  @Override
  public void setClusterInfoProvider(ClusterInfoProvider provider) {
    this.provider = provider;
  }

  @Override
  public void postMasterStartupInitialize() {
    if (provider != null && regionFinder != null) {
      regionFinder.refreshAndWait(provider.getAssignedRegions());
    }
  }

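  /**
   * Checks whether the cluster has an idle region server that could take on load: returns true
   * only if at least one server currently hosts no regions while some other server hosts more
   * than one region.
   */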
  protected final boolean idleRegionServerExist(BalancerClusterState c) {
    boolean isServerExistsWithMoreRegions = false;
    boolean isServerExistsWithZeroRegions = false;
    for (int[] serverList : c.regionsPerServer) {
      if (serverList.length > 1) {
        isServerExistsWithMoreRegions = true;
      }
      if (serverList.length == 0) {
        isServerExistsWithZeroRegions = true;
      }
    }
    return isServerExistsWithMoreRegions && isServerExistsWithZeroRegions;
  }

  protected final boolean sloppyRegionServerExist(ClusterLoadState cs) {
    if (slop < 0) {
      LOG.debug("Slop is less than zero, not checking for sloppiness.");
      return false;
    }
    float average = cs.getLoadAverage(); // for logging
    int floor = (int) Math.floor(average * (1 - slop));
    int ceiling = (int) Math.ceil(average * (1 + slop));
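    // For example, with an average load of 10 regions per server and the default slop of 0.2,
    // floor = floor(10 * 0.8) = 8 and ceiling = ceil(10 * 1.2) = 12, so the cluster is only
    // considered sloppy if some server holds more than 12 regions or fewer than 8.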
    if (!(cs.getMaxLoad() > ceiling || cs.getMinLoad() < floor)) {
      NavigableMap<ServerAndLoad, List<RegionInfo>> serversByLoad = cs.getServersByLoad();
      if (LOG.isTraceEnabled()) {
        // If nothing to balance, then don't say anything unless trace-level logging.
        LOG.trace("Skipping load balancing because balanced cluster; " + "servers="
          + cs.getNumServers() + " regions=" + cs.getNumRegions() + " average=" + average
          + " mostloaded=" + serversByLoad.lastKey().getLoad() + " leastloaded="
          + serversByLoad.firstKey().getLoad());
      }
      return false;
    }
    return true;
  }

  /**
   * Generates a bulk assignment plan to be used on cluster startup using a simple round-robin
   * assignment.
   * <p/>
   * Takes a list of all the regions and all the servers in the cluster and returns a map of each
   * server to the regions that it should be assigned.
   * <p/>
   * Currently implemented as a round-robin assignment. Same invariant as load balancing: every
   * server ends up holding either floor(avg) or ceil(avg) regions. TODO: Use block locations from
   * HDFS to place regions with their blocks.
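   * <p/>
   * For example, with 5 regions, 2 servers and a single replica per region, one server receives 3
   * regions and the other 2. A usage sketch (variable names such as {@code balancer} and
   * {@code allRegions} are illustrative only):
   *
   * <pre>
   * Map&lt;ServerName, List&lt;RegionInfo&gt;&gt; plan =
   *   balancer.roundRobinAssignment(allRegions, onlineServers);
   * // With at least one online server, every region appears exactly once in the plan.
   * int total = plan.values().stream().mapToInt(List::size).sum(); // equals allRegions.size()
   * </pre>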
   * @param regions all regions
   * @param servers all servers
   * @return map of server to the regions it should take, or an empty map if no assignment is
   *         possible (i.e. no servers)
   */
  @Override
  @NonNull
  public Map<ServerName, List<RegionInfo>> roundRobinAssignment(List<RegionInfo> regions,
    List<ServerName> servers) throws HBaseIOException {
    metricsBalancer.incrMiscInvocations();
    int numServers = servers == null ? 0 : servers.size();
    if (numServers == 0) {
      LOG.warn("Wanted to do round robin assignment but no servers to assign to");
      return Collections.singletonMap(BOGUS_SERVER_NAME, new ArrayList<>(regions));
    }

    // TODO: instead of retainAssignment() and roundRobinAssignment(), we should just run the
    // normal LB.balancerCluster() with unassignedRegions. We only need to have a candidate
    // generator for AssignRegionAction. The LB will ensure the regions are mostly local
    // and balanced. This should also run fast with fewer iterations.

    if (numServers == 1) { // Only one server, nothing fancy we can do here
      return Collections.singletonMap(servers.get(0), new ArrayList<>(regions));
    }

    BalancerClusterState cluster = createCluster(servers, regions);
    Map<ServerName, List<RegionInfo>> assignments = new HashMap<>();
    roundRobinAssignment(cluster, regions, servers, assignments);
    return Collections.unmodifiableMap(assignments);
  }

  private BalancerClusterState createCluster(List<ServerName> servers,
    Collection<RegionInfo> regions) throws HBaseIOException {
    boolean hasRegionReplica = false;
    try {
      if (provider != null) {
        hasRegionReplica = provider.hasRegionReplica(regions);
      }
    } catch (IOException ioe) {
      throw new HBaseIOException(ioe);
    }

    // Get the snapshot of the current assignments for the regions in question, and then create
    // a cluster out of it. Note that we might have replicas already assigned to some servers
    // earlier. So we want to get the snapshot to see those assignments, but this will only contain
    // replicas of the regions that are passed (for performance).
    Map<ServerName, List<RegionInfo>> clusterState = null;
    if (!hasRegionReplica) {
      clusterState = getRegionAssignmentsByServer(regions);
    } else {
      // When there are region replicas, it is better to get the entire cluster's snapshot.
      clusterState = getRegionAssignmentsByServer(null);
    }

    for (ServerName server : servers) {
      if (!clusterState.containsKey(server)) {
        clusterState.put(server, Collections.emptyList());
      }
    }
    return new BalancerClusterState(regions, clusterState, null, this.regionFinder, rackManager,
      null);
  }

  private List<ServerName> findIdleServers(List<ServerName> servers) {
    return provider.getOnlineServersListWithPredicator(servers,
      metrics -> metrics.getRegionMetrics().isEmpty());
  }

  /**
   * Used to assign a single region to a random server.
   */
  @Override
  public ServerName randomAssignment(RegionInfo regionInfo, List<ServerName> servers)
    throws HBaseIOException {
    metricsBalancer.incrMiscInvocations();
    int numServers = servers == null ? 0 : servers.size();
    if (numServers == 0) {
      LOG.warn("Wanted to do random assignment but no servers to assign to");
      return null;
    }
    if (numServers == 1) { // Only one server, nothing fancy we can do here
      return servers.get(0);
    }
    List<ServerName> idleServers = findIdleServers(servers);
    if (idleServers.size() == 1) {
      return idleServers.get(0);
    }
    final List<ServerName> finalServers = idleServers.isEmpty() ? servers : idleServers;
    List<RegionInfo> regions = Lists.newArrayList(regionInfo);
    BalancerClusterState cluster = createCluster(finalServers, regions);
    return randomAssignment(cluster, regionInfo, finalServers);
  }

  /**
   * Generates a bulk assignment startup plan, attempting to reuse the existing assignment
   * information from META, but adjusting for the specified list of available/online servers.
   * <p>
   * Takes a map of all regions to their existing assignment from META. Also takes a list of online
   * servers for regions to be assigned to. Attempts to retain all assignments, so in some instances
   * initial assignment will not be completely balanced.
   * <p>
   * Any leftover regions without an existing server to be assigned to will be assigned randomly to
   * available servers.
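   * <p>
   * For example (illustrative server names only): if a region was hosted on
   * {@code host-a,16020,1600000000000} before a restart and the restarted cluster registers
   * {@code host-a,16020,1600000000099}, the region is retained on host-a, because old assignments
   * are matched by hostname (and, when several servers share a host, by port) rather than by the
   * full ServerName with its start code.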
   * @param regions regions and existing assignment from meta
   * @param servers available servers
   * @return map of servers and regions to be assigned to them, or an empty map if no assignment
   *         is possible (i.e. no servers)
   */
  @Override
  @NonNull
  public Map<ServerName, List<RegionInfo>> retainAssignment(Map<RegionInfo, ServerName> regions,
    List<ServerName> servers) throws HBaseIOException {
    // Update metrics
    metricsBalancer.incrMiscInvocations();
    int numServers = servers == null ? 0 : servers.size();
    if (numServers == 0) {
      LOG.warn("Wanted to do retain assignment but no servers to assign to");
      return Collections.singletonMap(BOGUS_SERVER_NAME, new ArrayList<>(regions.keySet()));
    }

    if (numServers == 1) { // Only one server, nothing fancy we can do here
      return Collections.singletonMap(servers.get(0), new ArrayList<>(regions.keySet()));
    }

    // Group all the old assignments by their hostname.
    // We can't group directly by ServerName since the servers all have
    // new start-codes.

    // Group the servers by their hostname. It's possible we have multiple
    // servers on the same host on different ports.
    Map<ServerName, List<RegionInfo>> assignments = new HashMap<>();
    ArrayListMultimap<String, ServerName> serversByHostname = ArrayListMultimap.create();
    for (ServerName server : servers) {
      assignments.put(server, new ArrayList<>());
      serversByHostname.put(server.getHostnameLowerCase(), server);
    }

    // Collection of the hostnames that used to have regions
    // assigned, but for which we no longer have any RS running
    // after the cluster restart.
    Set<String> oldHostsNoLongerPresent = Sets.newTreeSet();

    // If the old servers aren't present, let's assign those regions later.
    List<RegionInfo> randomAssignRegions = Lists.newArrayList();

    int numRandomAssignments = 0;
    int numRetainedAssignments = 0;
    for (Map.Entry<RegionInfo, ServerName> entry : regions.entrySet()) {
      RegionInfo region = entry.getKey();
      ServerName oldServerName = entry.getValue();
      List<ServerName> localServers = new ArrayList<>();
      if (oldServerName != null) {
        localServers = serversByHostname.get(oldServerName.getHostnameLowerCase());
      }
      if (localServers.isEmpty()) {
        // No servers on the new cluster match up with this hostname, assign randomly, later.
        randomAssignRegions.add(region);
        if (oldServerName != null) {
          oldHostsNoLongerPresent.add(oldServerName.getHostnameLowerCase());
        }
      } else if (localServers.size() == 1) {
        // the usual case - one new server on same host
        ServerName target = localServers.get(0);
        assignments.get(target).add(region);
        numRetainedAssignments++;
      } else {
        // multiple new servers in the cluster on this same host
        if (localServers.contains(oldServerName)) {
          assignments.get(oldServerName).add(region);
          numRetainedAssignments++;
        } else {
          ServerName target = null;
          for (ServerName tmp : localServers) {
            if (tmp.getPort() == oldServerName.getPort()) {
              target = tmp;
              assignments.get(tmp).add(region);
              numRetainedAssignments++;
              break;
            }
          }
          if (target == null) {
            randomAssignRegions.add(region);
          }
        }
      }
    }

    // If servers from the prior assignment aren't present, then let's do random assignment on
    // those regions.
    if (randomAssignRegions.size() > 0) {
      BalancerClusterState cluster = createCluster(servers, regions.keySet());
      for (Map.Entry<ServerName, List<RegionInfo>> entry : assignments.entrySet()) {
        ServerName sn = entry.getKey();
        for (RegionInfo region : entry.getValue()) {
          cluster.doAssignRegion(region, sn);
        }
      }
      for (RegionInfo region : randomAssignRegions) {
        ServerName target = randomAssignment(cluster, region, servers);
        assignments.get(target).add(region);
        numRandomAssignments++;
      }
    }

    String randomAssignMsg = "";
    if (numRandomAssignments > 0) {
      randomAssignMsg = numRandomAssignments + " regions were assigned "
        + "to random hosts, since the old hosts for these regions are no "
        + "longer present in the cluster. These hosts were:\n  "
        + Joiner.on("\n  ").join(oldHostsNoLongerPresent);
    }

    LOG.info("Reassigned " + regions.size() + " regions. " + numRetainedAssignments
      + " retained the pre-restart assignment. " + randomAssignMsg);
    return Collections.unmodifiableMap(assignments);
  }

  protected float getDefaultSlop() {
    return 0.2f;
  }

  private RegionHDFSBlockLocationFinder createRegionLocationFinder(Configuration conf) {
    RegionHDFSBlockLocationFinder finder = new RegionHDFSBlockLocationFinder();
    finder.setConf(conf);
    finder.setClusterInfoProvider(provider);
    return finder;
  }

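  /**
   * Reads the balancer's base settings from the given configuration:
   * <ul>
   * <li>{@code hbase.regions.slop} - allowed load deviation from the average (default
   * {@link #getDefaultSlop()}, i.e. 0.2 for this base class)</li>
   * <li>{@code hbase.master.balancer.uselocality} - whether to use HDFS block locations when
   * balancing (default true)</li>
   * <li>{@link HConstants#HBASE_MASTER_LOADBALANCE_BYTABLE} - whether to balance each table
   * separately (default false)</li>
   * </ul>
   * For example, an operator could set {@code hbase.regions.slop=0.1} in hbase-site.xml to make
   * the sloppiness check stricter (the value here is only an illustration, not a recommendation).
   */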
  protected void loadConf(Configuration conf) {
    this.slop = conf.getFloat("hbase.regions.slop", getDefaultSlop());
    this.rackManager = new RackManager(conf);
    useRegionFinder = conf.getBoolean("hbase.master.balancer.uselocality", true);
    if (useRegionFinder) {
      regionFinder = createRegionLocationFinder(conf);
    } else {
      regionFinder = null;
    }
    this.isByTable = conf.getBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE,
      DEFAULT_HBASE_MASTER_LOADBALANCE_BYTABLE);
    // Print out base configs. Don't print overallSlop since it is for the simple balancer
    // exclusively.
    LOG.info("slop={}", this.slop);
  }

  @Override
  public void initialize() {
    loadConf(getConf());
  }

  @Override
  public void regionOnline(RegionInfo regionInfo, ServerName sn) {
  }

  @Override
  public void regionOffline(RegionInfo regionInfo) {
  }

  @Override
  public boolean isStopped() {
    return stopped;
  }

  @Override
  public void stop(String why) {
    LOG.info("Load Balancer stop requested: {}", why);
    stopped = true;
  }

  /**
   * Updates the balancer status tag reported to JMX
   */
  @Override
  public void updateBalancerStatus(boolean status) {
    metricsBalancer.balancerStatus(status);
  }

  /**
   * Used to assign a single region to a random server.
   */
  private ServerName randomAssignment(BalancerClusterState cluster, RegionInfo regionInfo,
    List<ServerName> servers) {
    int numServers = servers.size(); // servers is not null, numServers > 1
    ServerName sn = null;
    final int maxIterations = numServers * 4;
    int iterations = 0;
    List<ServerName> usedSNs = new ArrayList<>(servers.size());
    Random rand = ThreadLocalRandom.current();
    do {
      int i = rand.nextInt(numServers);
      sn = servers.get(i);
      if (!usedSNs.contains(sn)) {
        usedSNs.add(sn);
      }
    } while (cluster.wouldLowerAvailability(regionInfo, sn) && iterations++ < maxIterations);
    if (iterations >= maxIterations) {
      // We have reached the max number of iterations, which means every server we tried so far
      // would still lower the availability of this region.
      for (ServerName unusedServer : servers) {
        if (!usedSNs.contains(unusedServer)) {
          // Check if any other unused server is there for us to use. If so, use it. Otherwise we
          // have no option but to go with one of the servers we already tried.
          if (!cluster.wouldLowerAvailability(regionInfo, unusedServer)) {
            sn = unusedServer;
            break;
          }
        }
      }
    }
    cluster.doAssignRegion(regionInfo, sn);
    return sn;
  }

  /**
   * Round-robin a list of regions to a list of servers
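   * <p/>
   * For example, with 5 regions, 2 servers and no replica constraints, the first server picked
   * receives the regions at indices 0, 2 and 4 and the second those at 1 and 3 (the starting
   * server is chosen at random). Regions that would lower availability are retried on the other
   * servers one by one and, failing that, sprinkled onto random servers.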
   */
  private void roundRobinAssignment(BalancerClusterState cluster, List<RegionInfo> regions,
    List<ServerName> servers, Map<ServerName, List<RegionInfo>> assignments) {
    Random rand = ThreadLocalRandom.current();
    List<RegionInfo> unassignedRegions = new ArrayList<>();
    int numServers = servers.size();
    int numRegions = regions.size();
    int max = (int) Math.ceil((float) numRegions / numServers);
    int serverIdx = 0;
    if (numServers > 1) {
      serverIdx = rand.nextInt(numServers);
    }
    int regionIdx = 0;
    for (int j = 0; j < numServers; j++) {
      ServerName server = servers.get((j + serverIdx) % numServers);
      List<RegionInfo> serverRegions = new ArrayList<>(max);
      for (int i = regionIdx; i < numRegions; i += numServers) {
        RegionInfo region = regions.get(i % numRegions);
        if (cluster.wouldLowerAvailability(region, server)) {
          unassignedRegions.add(region);
        } else {
          serverRegions.add(region);
          cluster.doAssignRegion(region, server);
        }
      }
      assignments.put(server, serverRegions);
      regionIdx++;
    }

    List<RegionInfo> lastFewRegions = new ArrayList<>();
    // Assign the remaining regions by going through the list and trying servers one by one.
    serverIdx = rand.nextInt(numServers);
    for (RegionInfo region : unassignedRegions) {
      boolean assigned = false;
      for (int j = 0; j < numServers; j++) { // try all servers one by one
        ServerName server = servers.get((j + serverIdx) % numServers);
        if (cluster.wouldLowerAvailability(region, server)) {
          continue;
        } else {
          assignments.computeIfAbsent(server, k -> new ArrayList<>()).add(region);
          cluster.doAssignRegion(region, server);
          serverIdx = (j + serverIdx + 1) % numServers; // resume from the next server
          assigned = true;
          break;
        }
      }
      if (!assigned) {
        lastFewRegions.add(region);
      }
    }
    // Just sprinkle the rest of the regions on random region servers. The balanceCluster will
    // make it optimal later. We can end up with this if numReplicas > numServers.
    for (RegionInfo region : lastFewRegions) {
      int i = rand.nextInt(numServers);
      ServerName server = servers.get(i);
      assignments.computeIfAbsent(server, k -> new ArrayList<>()).add(region);
      cluster.doAssignRegion(region, server);
    }
  }

  // Return a modifiable map, as we may add more entries into the returned map.
  private Map<ServerName, List<RegionInfo>>
    getRegionAssignmentsByServer(Collection<RegionInfo> regions) {
    return provider != null
      ? new HashMap<>(provider.getSnapShotOfAssignment(regions))
      : new HashMap<>();
  }

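  /**
   * Flattens the per-table view of the cluster load into a single "ensemble" view, merging the
   * region lists of every table under the server that hosts them. Used when the balancer operates
   * on the whole cluster rather than table by table.
   */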
  protected final Map<ServerName, List<RegionInfo>>
    toEnsumbleTableLoad(Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable) {
    Map<ServerName, List<RegionInfo>> returnMap = new TreeMap<>();
    for (Map<ServerName, List<RegionInfo>> serverNameListMap : loadOfAllTable.values()) {
      serverNameListMap.forEach((serverName, regionInfoList) -> {
        List<RegionInfo> regionInfos =
          returnMap.computeIfAbsent(serverName, k -> new ArrayList<>());
        regionInfos.addAll(regionInfoList);
      });
    }
    return returnMap;
  }

  /**
   * Perform the major balance operation for a table. All subclasses must implement this method.
   * <p/>
   * Will be invoked by {@link #balanceCluster(Map)}. If
   * {@link HConstants#HBASE_MASTER_LOADBALANCE_BYTABLE} is enabled, we will call this method
   * multiple times, one table at a time, where we will only pass in the regions for a single table
   * each time. If not, we will pass in all the regions at once, and the {@code tableName} will be
   * {@link HConstants#ENSEMBLE_TABLE_NAME}.
   * @param tableName      the table to be balanced
   * @param loadOfOneTable region load of servers for the specific table
   * @return list of region plans, or null/empty if the table is already balanced
   */
  protected abstract List<RegionPlan> balanceTable(TableName tableName,
    Map<ServerName, List<RegionInfo>> loadOfOneTable);

  /**
   * Called before actually executing balanceCluster. Subclasses can override this method to do
   * some initialization work.
   */
  protected void
    preBalanceCluster(Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable) {
  }

  /**
   * Perform the major balance operation for the cluster. It will invoke
   * {@link #balanceTable(TableName, Map)} to do the actual balancing.
   * <p/>
   * This method is marked as final, which means you should not override it. See the javadoc for
   * {@link #balanceTable(TableName, Map)} for more details.
   * @param loadOfAllTable region load of servers for all tables
   * @return a list of regions to be moved, including source and destination, or null if the
   *         cluster is already balanced
   * @see #balanceTable(TableName, Map)
   */
  @Override
  public final List<RegionPlan>
    balanceCluster(Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable) {
    preBalanceCluster(loadOfAllTable);
    if (isByTable) {
      List<RegionPlan> result = new ArrayList<>();
      loadOfAllTable.forEach((tableName, loadOfOneTable) -> {
        LOG.info("Start generating balance plan for table {}", tableName);
        List<RegionPlan> partialPlans = balanceTable(tableName, loadOfOneTable);
        if (partialPlans != null) {
          result.addAll(partialPlans);
        }
      });
      return result;
    } else {
      LOG.debug("Start generating balance plan for the cluster.");
      return balanceTable(HConstants.ENSEMBLE_TABLE_NAME, toEnsumbleTableLoad(loadOfAllTable));
    }
  }

  @Override
  public void onConfigurationChange(Configuration conf) {
    loadConf(conf);
  }
}