/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClockOutOfSyncException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler;
import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Triple;

import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ServiceException;

/**
 * The ServerManager class manages info about region servers.
 * <p>
 * Maintains lists of online and dead servers.  Processes the startups,
 * shutdowns, and deaths of region servers.
 * <p>
 * Servers are distinguished in two different ways.  A given server has a
 * location, specified by hostname and port, of which there can only be one
 * online at any given time.  A server instance is specified by the location
 * (hostname and port) as well as the startcode (timestamp from when the server
 * was started).  This is used to differentiate a restarted instance of a given
 * server from the original instance.
 * <p>
 * If a server is known not to be running any more, it is called dead. The dead
 * server needs to be handled by a ServerShutdownHandler.  If the handler is not
 * enabled yet, the server can't be handled right away so it is queued up.
 * After the handler is enabled, the server will be submitted to a handler to handle.
 * However, the handler may be only partially enabled.  If so,
 * the server cannot be fully processed, and is queued up for further processing.
 * A server is fully processed only after the handler is fully enabled
 * and has completed the handling.
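 * <p>
 * For example (illustrative hostname, port and startcodes), a restarted server
 * shares the location but carries a newer startcode:
 * <pre>
 * ServerName original  = ServerName.valueOf("rs1.example.com", 60020, 1396046800000L);
 * ServerName restarted = ServerName.valueOf("rs1.example.com", 60020, 1396046900000L);
 * // Same hostname and port; the higher startcode marks the restarted instance,
 * // so the original instance is considered stale and gets expired.
 * </pre>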
 */
@InterfaceAudience.Private
public class ServerManager {
  public static final String WAIT_ON_REGIONSERVERS_MAXTOSTART =
      "hbase.master.wait.on.regionservers.maxtostart";

  public static final String WAIT_ON_REGIONSERVERS_MINTOSTART =
      "hbase.master.wait.on.regionservers.mintostart";

  public static final String WAIT_ON_REGIONSERVERS_TIMEOUT =
      "hbase.master.wait.on.regionservers.timeout";

  public static final String WAIT_ON_REGIONSERVERS_INTERVAL =
      "hbase.master.wait.on.regionservers.interval";

  private static final Log LOG = LogFactory.getLog(ServerManager.class);

  // Set if we are to shutdown the cluster.
  private volatile boolean clusterShutdown = false;

  private final SortedMap<byte[], Long> flushedSequenceIdByRegion =
    new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);

  /** Map of registered servers to their current load */
  private final ConcurrentHashMap<ServerName, ServerLoad> onlineServers =
    new ConcurrentHashMap<ServerName, ServerLoad>();

  /**
   * Map of admin interfaces per registered regionserver; these are the
   * interfaces we use to control regionservers out on the cluster
   */
  private final Map<ServerName, AdminService.BlockingInterface> rsAdmins =
    new HashMap<ServerName, AdminService.BlockingInterface>();

  /**
   * List of region servers ({@link ServerName}) that should not get any more new
   * regions.
   */
  private final ArrayList<ServerName> drainingServers =
    new ArrayList<ServerName>();

  private final Server master;
  private final MasterServices services;
  private final HConnection connection;

  private final DeadServer deadservers = new DeadServer();

  private final long maxSkew;
  private final long warningSkew;

  /**
   * Set of region servers which are dead but not processed immediately. If one
   * server died before the master enabled ServerShutdownHandler, the server will be
   * added to this set and will be processed through calling
   * {@link ServerManager#processQueuedDeadServers()} by the master.
   * <p>
   * A dead server is a server instance known to be dead, not listed in the /hbase/rs
   * znode any more. It may not have been submitted to ServerShutdownHandler yet
   * because the handler is not enabled.
   * <p>
   * A dead server which has been submitted to ServerShutdownHandler while the
   * handler is not enabled is queued up.
   * <p>
   * So this is a set of region servers known to be dead but not submitted to
   * ServerShutdownHandler for processing yet.
   */
  private Set<ServerName> queuedDeadServers = new HashSet<ServerName>();

  /**
   * Set of region servers which are dead and submitted to ServerShutdownHandler to process but not
   * fully processed immediately.
   * <p>
   * If one server died before the assignment manager finished the failover cleanup, the server
   * will be added to this set and will be processed through calling
   * {@link ServerManager#processQueuedDeadServers()} by the assignment manager.
   * <p>
   * The Boolean value indicates whether log split is needed inside ServerShutdownHandler.
   * <p>
   * ServerShutdownHandler processes a dead server submitted to the handler after the handler is
   * enabled. It may not be able to complete the processing because meta is not yet online or the
   * master is currently in startup mode. In this case, the dead server will be parked in this set
   * temporarily.
   */
  private Map<ServerName, Boolean> requeuedDeadServers = new HashMap<ServerName, Boolean>();

  /**
   * Constructor.
   * @param master
   * @param services
   * @throws ZooKeeperConnectionException
   */
  public ServerManager(final Server master, final MasterServices services)
      throws IOException {
    this(master, services, true);
  }

  @SuppressWarnings("deprecation")
  ServerManager(final Server master, final MasterServices services,
      final boolean connect) throws IOException {
    this.master = master;
    this.services = services;
    Configuration c = master.getConfiguration();
    maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
    warningSkew = c.getLong("hbase.master.warningclockskew", 10000);
    this.connection = connect ? HConnectionManager.getConnection(c) : null;
  }

  /**
   * Let the server manager know a new regionserver has come online
   * @param ia The remote address
   * @param port The remote port
   * @param serverStartcode
   * @param serverCurrentTime The current time of the region server in ms
   * @return The ServerName we know this server as.
   * @throws IOException
   */
  ServerName regionServerStartup(final InetAddress ia, final int port,
    final long serverStartcode, long serverCurrentTime)
  throws IOException {
    // Test for the case where we get a region startup message from a regionserver
    // that has been quickly restarted but whose znode expiration handler has
    // not yet run, or from a server whose fail we are currently processing.
    // Test if its host+port combo is present in serverAddressToServerInfo.  If it
    // is, reject the server and trigger its expiration. The next time it comes
    // in, it should have been removed from serverAddressToServerInfo and queued
    // for processing by ProcessServerShutdown.
    ServerName sn = ServerName.valueOf(ia.getHostName(), port, serverStartcode);
    checkClockSkew(sn, serverCurrentTime);
    checkIsDead(sn, "STARTUP");
    if (!checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
      LOG.warn("THIS SHOULD NOT HAPPEN, RegionServerStartup"
        + " could not record the server: " + sn);
    }
    return sn;
  }

  /**
   * Updates last flushed sequence Ids for the regions on server sn
   * @param sn
   * @param hsl
   */
  private void updateLastFlushedSequenceIds(ServerName sn, ServerLoad hsl) {
    Map<byte[], RegionLoad> regionsLoad = hsl.getRegionsLoad();
    for (Entry<byte[], RegionLoad> entry : regionsLoad.entrySet()) {
      Long existingValue = flushedSequenceIdByRegion.get(entry.getKey());
      long l = entry.getValue().getCompleteSequenceId();
      if (existingValue != null) {
        if (l != -1 && l < existingValue) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("RegionServer " + sn +
                " indicates a last flushed sequence id (" + l +
                ") that is less than the previous last flushed sequence id (" +
                existingValue + ") for region " +
                Bytes.toString(entry.getKey()) + ". Ignoring.");
          }
          // Don't let smaller sequence ids override greater sequence ids.
          continue;
        }
      }
      flushedSequenceIdByRegion.put(entry.getKey(), l);
    }
  }

  void regionServerReport(ServerName sn,
      ServerLoad sl) throws YouAreDeadException {
    checkIsDead(sn, "REPORT");
    if (null == this.onlineServers.replace(sn, sl)) {
      // Already have this host+port combo and it's just a different start code?
      // Just let the server in. Presume master joining a running cluster.
      // recordNewServer is what happens at the end of reportServerStartup.
      // The only thing we are skipping is passing back to the regionserver
      // the ServerName to use. Here we presume a master has already done
      // that so we'll press on with whatever it gave us for ServerName.
      if (!checkAndRecordNewServer(sn, sl)) {
        LOG.info("RegionServerReport ignored, could not record the server: " + sn);
        return; // Not recorded, so no need to move on
      }
    }
    updateLastFlushedSequenceIds(sn, sl);
  }

  /**
   * Checks if a server with the same hostname and port already exists.
   * If not, or if the existing one has a smaller start code, records the new server.
   *
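   * For example (illustrative startcodes): if "rs1,60020,5" is registered and
   * "rs1,60020,3" checks in, the newcomer is rejected; if "rs1,60020,7" checks
   * in, it is recorded and the now-stale "rs1,60020,5" is expired.
   *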
   * @param serverName the server to check and record
   * @param sl the server load on the server
   * @return true if the server is recorded, otherwise, false
   */
  boolean checkAndRecordNewServer(
      final ServerName serverName, final ServerLoad sl) {
    ServerName existingServer = null;
    synchronized (this.onlineServers) {
      existingServer = findServerWithSameHostnamePortWithLock(serverName);
      if (existingServer != null && (existingServer.getStartcode() > serverName.getStartcode())) {
        LOG.info("Server serverName=" + serverName + " rejected; we already have "
            + existingServer.toString() + " registered with same hostname and port");
        return false;
      }
      recordNewServerWithLock(serverName, sl);
    }
    // Note that we assume that same ts means same server, and don't expire in that case.
    // TODO: ts can theoretically collide due to clock shifts, so this is a bit hacky.
    if (existingServer != null && (existingServer.getStartcode() < serverName.getStartcode())) {
      LOG.info("Triggering server recovery; existingServer " +
          existingServer + " looks stale, new server:" + serverName);
      expireServer(existingServer);
    }
    return true;
  }

  /**
   * Checks the clock skew between the server and the master. If the skew exceeds the
   * configured max, it will throw an exception; if it exceeds the configured warning threshold,
   * it will log a warning but start normally.
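   * <p>
   * The thresholds are read in the constructor from configuration (values in
   * milliseconds; defaults shown):
   * <pre>
   * hbase.master.maxclockskew     = 30000
   * hbase.master.warningclockskew = 10000
   * </pre>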
   * @param serverName Incoming server's name
   * @param serverCurrentTime
   * @throws ClockOutOfSyncException if the skew exceeds the configured max value
   */
  private void checkClockSkew(final ServerName serverName, final long serverCurrentTime)
  throws ClockOutOfSyncException {
    long skew = Math.abs(System.currentTimeMillis() - serverCurrentTime);
    if (skew > maxSkew) {
      String message = "Server " + serverName + " has been " +
        "rejected; Reported time is too far out of sync with master.  " +
        "Time difference of " + skew + "ms > max allowed of " + maxSkew + "ms";
      LOG.warn(message);
      throw new ClockOutOfSyncException(message);
    } else if (skew > warningSkew) {
      String message = "Reported time for server " + serverName + " is out of sync with master " +
        "by " + skew + "ms. (Warning threshold is " + warningSkew + "ms; " +
        "error threshold is " + maxSkew + "ms)";
      LOG.warn(message);
    }
  }

  /**
   * If this server is on the dead list, reject it with a YouAreDeadException.
   * If it was dead but came back with a new start code, remove the old entry
   * from the dead list.
   * @param serverName
   * @param what START or REPORT
   * @throws org.apache.hadoop.hbase.YouAreDeadException
   */
  private void checkIsDead(final ServerName serverName, final String what)
      throws YouAreDeadException {
    if (this.deadservers.isDeadServer(serverName)) {
      // Host name, port and start code all match an existing entry in the
      // dead servers list. So, this server must be dead.
      String message = "Server " + what + " rejected; currently processing " +
          serverName + " as dead server";
      LOG.debug(message);
      throw new YouAreDeadException(message);
    }
    // Remove any dead server with the same hostname and port as the newly checking-in
    // regionserver after master initialization. See HBASE-5916 for more information.
    if ((this.services == null || ((HMaster) this.services).isInitialized())
        && this.deadservers.cleanPreviousInstance(serverName)) {
      // This server has now become alive after we marked it as dead.
      // We removed its previous entry from the dead list to reflect it.
      LOG.debug(what + ":" + " Server " + serverName + " came back up," +
          " removed it from the dead servers list");
    }
  }

  /**
   * Assumes onlineServers is locked.
   * @return ServerName with matching hostname and port.
   */
  private ServerName findServerWithSameHostnamePortWithLock(
      final ServerName serverName) {
    for (ServerName sn: this.onlineServers.keySet()) {
      if (ServerName.isSameHostnameAndPort(serverName, sn)) return sn;
    }
    return null;
  }

  /**
   * Adds to the onlineServers list. onlineServers should be locked.
   * @param serverName The remote server's name.
   * @param sl
   */
  @VisibleForTesting
  void recordNewServerWithLock(final ServerName serverName, final ServerLoad sl) {
    LOG.info("Registering server=" + serverName);
    this.onlineServers.put(serverName, sl);
    this.rsAdmins.remove(serverName);
  }

  public long getLastFlushedSequenceId(byte[] regionName) {
    // A single get is sufficient; entries are only ever added to this map.
    Long seqId = flushedSequenceIdByRegion.get(regionName);
    return seqId == null ? -1 : seqId.longValue();
  }

  /**
   * @param serverName
   * @return ServerLoad if serverName is known else null
   */
  public ServerLoad getLoad(final ServerName serverName) {
    return this.onlineServers.get(serverName);
  }

  /**
   * Compute the average load across all region servers.
   * Currently, this uses a very naive computation - just uses the number of
   * regions being served, ignoring stats about number of requests.
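   * For example (illustrative numbers), three servers carrying 10, 20 and 30
   * regions average to (10 + 20 + 30) / 3 = 20.0.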
   * @return the average load
   */
  public double getAverageLoad() {
    int totalLoad = 0;
    int numServers = 0;
    double averageLoad;
    for (ServerLoad sl: this.onlineServers.values()) {
      numServers++;
      totalLoad += sl.getNumberOfRegions();
    }
    // Guard against division by zero (NaN) when no servers have checked in yet.
    averageLoad = numServers == 0 ? 0.0 : (double)totalLoad / (double)numServers;
    return averageLoad;
  }

  /** @return the count of active regionservers */
  int countOfRegionServers() {
    // Presumes onlineServers is a concurrent map
    return this.onlineServers.size();
  }

  /**
   * @return Read-only map of servers to serverinfo
   */
  public Map<ServerName, ServerLoad> getOnlineServers() {
    // Presumption is that iterating the returned Map is OK.
    synchronized (this.onlineServers) {
      return Collections.unmodifiableMap(this.onlineServers);
    }
  }

  public DeadServer getDeadServers() {
    return this.deadservers;
  }

  /**
   * Checks if any dead servers are currently in progress.
   * @return true if any RS are being processed as dead, false if not
   */
  public boolean areDeadServersInProgress() {
    return this.deadservers.areDeadServersInProgress();
  }

  void letRegionServersShutdown() {
    long previousLogTime = 0;
    while (!onlineServers.isEmpty()) {
      if (System.currentTimeMillis() > (previousLogTime + 1000)) {
        StringBuilder sb = new StringBuilder();
        // It's ok here to not sync on onlineServers - merely logging
        for (ServerName key : this.onlineServers.keySet()) {
          if (sb.length() > 0) {
            sb.append(", ");
          }
          sb.append(key);
        }
        LOG.info("Waiting on regionserver(s) to go down " + sb.toString());
        previousLogTime = System.currentTimeMillis();
      }

      synchronized (onlineServers) {
        try {
          onlineServers.wait(100);
        } catch (InterruptedException ignored) {
          // continue
        }
      }
    }
  }

  /*
   * Expire the passed server.  Add it to list of dead servers and queue a
   * shutdown processing.
   */
  public synchronized void expireServer(final ServerName serverName) {
    if (!services.isServerShutdownHandlerEnabled()) {
      LOG.info("Master has not yet enabled ServerShutdownHandler during initialization; "
          + "delaying expiration of server " + serverName);
      this.queuedDeadServers.add(serverName);
      return;
    }
    if (this.deadservers.isDeadServer(serverName)) {
      // TODO: Can this happen?  It shouldn't be online in this case?
      LOG.warn("Expiration of " + serverName +
          " but server shutdown already in progress");
      return;
    }
    synchronized (onlineServers) {
      if (!this.onlineServers.containsKey(serverName)) {
        LOG.warn("Expiration of " + serverName + " but server not online");
      }
      // Remove the server from the known servers lists and update load info BUT
      // add to deadservers first; do this so it'll show in dead servers list if
      // not in online servers list.
      this.deadservers.add(serverName);
      this.onlineServers.remove(serverName);
      onlineServers.notifyAll();
    }
    this.rsAdmins.remove(serverName);
    // If cluster is going down, yes, servers are going to be expiring; don't
    // process as a dead server
    if (this.clusterShutdown) {
      LOG.info("Cluster shutdown set; " + serverName +
        " expired; onlineServers=" + this.onlineServers.size());
      if (this.onlineServers.isEmpty()) {
        master.stop("Cluster shutdown set; onlineServer=0");
      }
      return;
    }

    boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
    if (carryingMeta) {
      this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
        this.services, this.deadservers, serverName));
    } else {
      this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
        this.services, this.deadservers, serverName, true));
    }
    LOG.debug("Added=" + serverName +
      " to dead servers, submitted shutdown handler to be executed, meta=" + carryingMeta);
  }

  public synchronized void processDeadServer(final ServerName serverName) {
    this.processDeadServer(serverName, false);
  }

  public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitHlog) {
    // When assignment manager is cleaning up the zookeeper nodes and rebuilding the
    // in-memory region states, region servers could be down. Meta table can and
    // should be re-assigned, log splitting can be done too. However, it is better to
    // wait till the cleanup is done before re-assigning user regions.
    //
    // We should not wait in the server shutdown handler thread since it can clog
    // the handler threads and meta table could not be re-assigned in case
    // the corresponding server is down. So we queue them up here instead.
    if (!services.getAssignmentManager().isFailoverCleanupDone()) {
      requeuedDeadServers.put(serverName, shouldSplitHlog);
      return;
    }

    this.deadservers.add(serverName);
    this.services.getExecutorService().submit(
      new ServerShutdownHandler(this.master, this.services, this.deadservers, serverName,
          shouldSplitHlog));
  }

  /**
   * Process the servers which died during master's initialization. It will be
   * called after HMaster#assignMeta and AssignmentManager#joinCluster.
   */
  synchronized void processQueuedDeadServers() {
    if (!services.isServerShutdownHandlerEnabled()) {
      LOG.info("Master hasn't enabled ServerShutdownHandler");
    }
    Iterator<ServerName> serverIterator = queuedDeadServers.iterator();
    while (serverIterator.hasNext()) {
      ServerName tmpServerName = serverIterator.next();
      expireServer(tmpServerName);
      serverIterator.remove();
      requeuedDeadServers.remove(tmpServerName);
    }

    if (!services.getAssignmentManager().isFailoverCleanupDone()) {
      LOG.info("AssignmentManager hasn't finished failover cleanup; waiting");
    }

    for (ServerName tmpServerName : requeuedDeadServers.keySet()) {
      processDeadServer(tmpServerName, requeuedDeadServers.get(tmpServerName));
    }
    requeuedDeadServers.clear();
  }

  /*
   * Remove the server from the drain list.
   */
  public boolean removeServerFromDrainList(final ServerName sn) {
    // Warn if the server (sn) is not online.  ServerName is of the form:
    // <hostname> , <port> , <startcode>

    if (!this.isServerOnline(sn)) {
      LOG.warn("Server " + sn + " is not currently online. " +
               "Removing from draining list anyway, as requested.");
    }
    // Remove the server from the draining servers lists.
    return this.drainingServers.remove(sn);
  }

  /*
   * Add the server to the drain list.
   */
  public boolean addServerToDrainList(final ServerName sn) {
    // Warn if the server (sn) is not online.  ServerName is of the form:
    // <hostname> , <port> , <startcode>

    if (!this.isServerOnline(sn)) {
      LOG.warn("Server " + sn + " is not currently online. " +
               "Ignoring request to add it to draining list.");
      return false;
    }
    // Add the server to the draining servers lists, if it's not already in
    // it.
    if (this.drainingServers.contains(sn)) {
      LOG.warn("Server " + sn + " is already in the draining server list. " +
               "Ignoring request to add it again.");
      return false;
    }
    return this.drainingServers.add(sn);
  }
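
  // A sketch of how a caller might use the drain list when decommissioning a
  // regionserver (hypothetical caller code, not part of this class):
  //
  //   ServerName sn = ServerName.valueOf("rs1.example.com", 60020, startcode);
  //   serverManager.addServerToDrainList(sn);     // stop assigning new regions to sn
  //   // ... move regions off sn, then stop the regionserver ...
  //   serverManager.removeServerFromDrainList(sn);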

  // RPC methods to region servers

  /**
   * Sends an OPEN RPC to the specified server to open the specified region.
   * <p>
   * Open should not fail but can if server just crashed.
   * <p>
   * @param server server to open a region
   * @param region region to open
   * @param versionOfOfflineNode that needs to be present in the offline node
   * when RS tries to change the state from OFFLINE to other states.
   * @param favoredNodes
   */
  public RegionOpeningState sendRegionOpen(final ServerName server,
      HRegionInfo region, int versionOfOfflineNode, List<ServerName> favoredNodes)
  throws IOException {
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
      LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
        " failed because no RPC connection found to this server");
      return RegionOpeningState.FAILED_OPENING;
    }
    OpenRegionRequest request =
      RequestConverter.buildOpenRegionRequest(server, region, versionOfOfflineNode, favoredNodes);
    try {
      OpenRegionResponse response = admin.openRegion(null, request);
      return ResponseConverter.getRegionOpeningState(response);
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    }
  }

  /**
   * Sends an OPEN RPC to the specified server to open the specified regions.
   * <p>
   * Open should not fail but can if server just crashed.
   * <p>
   * @param server server to open regions
   * @param regionOpenInfos info of a list of regions to open
   * @return a list of region opening states
   */
  public List<RegionOpeningState> sendRegionOpen(ServerName server,
      List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos)
  throws IOException {
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
      LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
        " failed because no RPC connection found to this server");
      return null;
    }

    OpenRegionRequest request =
      RequestConverter.buildOpenRegionRequest(regionOpenInfos);
    try {
      OpenRegionResponse response = admin.openRegion(null, request);
      return ResponseConverter.getRegionOpeningStateList(response);
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    }
  }

  /**
   * Sends a CLOSE RPC to the specified server to close the specified region.
   * <p>
   * A region server could reject the close request because it either does not
   * have the specified region or the region is being split.
   * @param server server to close the region on
   * @param region region to close
   * @param versionOfClosingNode
   *   the version of znode to compare when RS transitions the znode from
   *   CLOSING state.
   * @param dest - if the region is moved to another server, the destination server. null otherwise.
   * @return true if server acknowledged close, false if not
   * @throws IOException
   */
  public boolean sendRegionClose(ServerName server, HRegionInfo region,
    int versionOfClosingNode, ServerName dest, boolean transitionInZK) throws IOException {
    if (server == null) throw new NullPointerException("Passed server is null");
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
      throw new IOException("Attempting to send CLOSE RPC to server " +
        server.toString() + " for region " +
        region.getRegionNameAsString() +
        " failed because no RPC connection found to this server");
    }
    return ProtobufUtil.closeRegion(admin, server, region.getRegionName(),
      versionOfClosingNode, dest, transitionInZK);
  }

  public boolean sendRegionClose(ServerName server,
      HRegionInfo region, int versionOfClosingNode) throws IOException {
    return sendRegionClose(server, region, versionOfClosingNode, null, true);
  }

  /**
   * Sends a MERGE REGIONS RPC to the specified server to merge the specified
   * regions.
   * <p>
   * A region server could reject the request if it does not have one of the
   * specified regions.
   * @param server server to merge regions
   * @param region_a region to merge
   * @param region_b region to merge
   * @param forcible true if do a compulsory merge, otherwise we will only merge
   *          two adjacent regions
   * @throws IOException
   */
  public void sendRegionsMerge(ServerName server, HRegionInfo region_a,
      HRegionInfo region_b, boolean forcible) throws IOException {
    if (server == null)
      throw new NullPointerException("Passed server is null");
    if (region_a == null || region_b == null)
      throw new NullPointerException("Passed region is null");
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
      throw new IOException("Attempting to send MERGE REGIONS RPC to server "
          + server.toString() + " for regions "
          + region_a.getRegionNameAsString() + ","
          + region_b.getRegionNameAsString()
          + " failed because no RPC connection found to this server");
    }
    ProtobufUtil.mergeRegions(admin, region_a, region_b, forcible);
  }

  /**
   * Check if a region server is reachable and has the expected start code
   */
  public boolean isServerReachable(ServerName server) {
    if (server == null) throw new NullPointerException("Passed server is null");
    int maximumAttempts = Math.max(1, master.getConfiguration().getInt(
      "hbase.master.maximum.ping.server.attempts", 10));
    for (int i = 0; i < maximumAttempts; i++) {
      try {
        AdminService.BlockingInterface admin = getRsAdmin(server);
        if (admin != null) {
          ServerInfo info = ProtobufUtil.getServerInfo(admin);
          return info != null && info.hasServerName()
            && server.getStartcode() == info.getServerName().getStartCode();
        }
      } catch (IOException ioe) {
        LOG.debug("Couldn't reach " + server + ", try=" + i
          + " of " + maximumAttempts, ioe);
      }
    }
    return false;
  }

  /**
   * @param sn
   * @return Admin interface for the remote regionserver named <code>sn</code>
   * @throws IOException
   * @throws RetriesExhaustedException wrapping a ConnectException if failed
   */
  private AdminService.BlockingInterface getRsAdmin(final ServerName sn)
  throws IOException {
    AdminService.BlockingInterface admin = this.rsAdmins.get(sn);
    if (admin == null) {
      LOG.debug("New admin connection to " + sn.toString());
      admin = this.connection.getAdmin(sn);
      this.rsAdmins.put(sn, admin);
    }
    return admin;
  }

  /**
   * Wait for the region servers to report in.
   * We will wait until one of these conditions is met:
   *  - the master is stopped
   *  - the 'hbase.master.wait.on.regionservers.maxtostart' number of
   *    region servers is reached
   *  - the 'hbase.master.wait.on.regionservers.mintostart' is reached AND
   *    there have been no new region servers checking in for
   *    'hbase.master.wait.on.regionservers.interval' time AND
   *    the 'hbase.master.wait.on.regionservers.timeout' is reached
   *
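   * As a sketch, the knobs and the defaults used in the method body below
   * (times in milliseconds):
   * <pre>
   * hbase.master.wait.on.regionservers.interval   = 1500
   * hbase.master.wait.on.regionservers.timeout    = 4500
   * hbase.master.wait.on.regionservers.mintostart = 1
   * hbase.master.wait.on.regionservers.maxtostart = Integer.MAX_VALUE
   * </pre>
   *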
   * @throws InterruptedException
   */
  public void waitForRegionServers(MonitoredTask status)
  throws InterruptedException {
    final long interval = this.master.getConfiguration().
      getLong(WAIT_ON_REGIONSERVERS_INTERVAL, 1500);
    final long timeout = this.master.getConfiguration().
      getLong(WAIT_ON_REGIONSERVERS_TIMEOUT, 4500);
    int minToStart = this.master.getConfiguration().
      getInt(WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
    if (minToStart < 1) {
      LOG.warn(String.format(
        "The value of '%s' (%d) can not be less than 1, ignoring.",
        WAIT_ON_REGIONSERVERS_MINTOSTART, minToStart));
      minToStart = 1;
    }
    int maxToStart = this.master.getConfiguration().
      getInt(WAIT_ON_REGIONSERVERS_MAXTOSTART, Integer.MAX_VALUE);
    if (maxToStart < minToStart) {
      LOG.warn(String.format(
        "The value of '%s' (%d) is set less than '%s' (%d), ignoring.",
        WAIT_ON_REGIONSERVERS_MAXTOSTART, maxToStart,
        WAIT_ON_REGIONSERVERS_MINTOSTART, minToStart));
      maxToStart = Integer.MAX_VALUE;
    }

    long now = System.currentTimeMillis();
    final long startTime = now;
    long slept = 0;
    long lastLogTime = 0;
    long lastCountChange = startTime;
    int count = countOfRegionServers();
    int oldCount = 0;
    while (!this.master.isStopped() &&
        count < maxToStart &&
        (lastCountChange + interval > now || timeout > slept || count < minToStart)) {
      // Log some info at every interval time or if there is a change
      if (oldCount != count || lastLogTime + interval < now) {
        lastLogTime = now;
        String msg =
          "Waiting for region servers count to settle; currently" +
            " checked in " + count + ", slept for " + slept + " ms," +
            " expecting minimum of " + minToStart + ", maximum of " + maxToStart +
            ", timeout of " + timeout + " ms, interval of " + interval + " ms.";
        LOG.info(msg);
        status.setStatus(msg);
      }

      // We sleep for some time
      final long sleepTime = 50;
      Thread.sleep(sleepTime);
      now = System.currentTimeMillis();
      slept = now - startTime;

      oldCount = count;
      count = countOfRegionServers();
      if (count != oldCount) {
        lastCountChange = now;
      }
    }

    LOG.info("Finished waiting for region servers count to settle;" +
      " checked in " + count + ", slept for " + slept + " ms," +
      " expecting minimum of " + minToStart + ", maximum of " + maxToStart + "," +
      " master is " + (this.master.isStopped() ? "stopped." : "running.")
    );
  }

  /**
   * @return A copy of the internal list of online servers.
   */
  public List<ServerName> getOnlineServersList() {
    // TODO: optimize the load balancer call so we don't need to make a new list
    // TODO: FIX. THIS IS POPULAR CALL.
    return new ArrayList<ServerName>(this.onlineServers.keySet());
  }

  /**
   * @return A copy of the internal list of draining servers.
   */
  public List<ServerName> getDrainingServersList() {
    return new ArrayList<ServerName>(this.drainingServers);
  }

  /**
   * @return A copy of the internal set of deadNotExpired servers.
   */
  Set<ServerName> getDeadNotExpiredServers() {
    return new HashSet<ServerName>(this.queuedDeadServers);
  }

  /**
   * @return A read-only view of the internal map of requeuedDeadServers and their
   *         corresponding split-log-needed flags.
   */
  Map<ServerName, Boolean> getRequeuedDeadServers() {
    return Collections.unmodifiableMap(this.requeuedDeadServers);
  }

  public boolean isServerOnline(ServerName serverName) {
    return serverName != null && onlineServers.containsKey(serverName);
  }

  /**
   * Check if a server is known to be dead.  A server can be online,
   * or known to be dead, or unknown to this manager (i.e., not online and
   * not known to be dead either; it is simply not tracked by the
   * master any more, for example, a very old previous instance).
   */
  public synchronized boolean isServerDead(ServerName serverName) {
    return serverName == null || deadservers.isDeadServer(serverName)
      || queuedDeadServers.contains(serverName)
      || requeuedDeadServers.containsKey(serverName);
  }

  public void shutdownCluster() {
    this.clusterShutdown = true;
    this.master.stop("Cluster shutdown requested");
  }

  public boolean isClusterShutdown() {
    return this.clusterShutdown;
  }

  /**
   * Stop the ServerManager.  Currently closes the shared cluster connection.
   */
  public void stop() {
    if (connection != null) {
      try {
        connection.close();
      } catch (IOException e) {
        LOG.error("Attempt to close connection failed", e);
      }
    }
  }

  /**
   * Creates a list of possible destinations for a region. It contains the online servers, but not
   * the draining or dying servers.
   * @param serverToExclude can be null if there is no server to exclude
   */
  public List<ServerName> createDestinationServersList(final ServerName serverToExclude) {
    final List<ServerName> destServers = getOnlineServersList();

    if (serverToExclude != null) {
      destServers.remove(serverToExclude);
    }

    // Loop through the draining server list and remove them from the server list
    final List<ServerName> drainingServersCopy = getDrainingServersList();
    if (!drainingServersCopy.isEmpty()) {
      for (final ServerName server: drainingServersCopy) {
        destServers.remove(server);
      }
    }

    // Remove the deadNotExpired servers from the server list.
    removeDeadNotExpiredServers(destServers);

    return destServers;
  }

  /**
   * Calls {@link #createDestinationServersList} without a server to exclude.
   */
  public List<ServerName> createDestinationServersList() {
    return createDestinationServersList(null);
  }

  /**
   * Loop through the deadNotExpired server list and remove them from the
   * servers.
   * This function should be used carefully outside of this class. You should use a high level
   * method such as {@link #createDestinationServersList()} instead of managing your own list.
   */
  void removeDeadNotExpiredServers(List<ServerName> servers) {
    Set<ServerName> deadNotExpiredServersCopy = this.getDeadNotExpiredServers();
    if (!deadNotExpiredServersCopy.isEmpty()) {
      for (ServerName server : deadNotExpiredServersCopy) {
        LOG.debug("Removing dead but not expired server: " + server
          + " from eligible server pool.");
        servers.remove(server);
      }
    }
  }

  /**
   * Clears any dead server that has the same host name and port as an online server.
   */
  void clearDeadServersWithSameHostNameAndPortOfOnlineServer() {
    for (ServerName serverName : getOnlineServersList()) {
      deadservers.cleanAllPreviousInstances(serverName);
    }
  }
}