1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.master;
20  
21  import java.io.IOException;
22  import java.net.InetAddress;
23  import java.util.ArrayList;
24  import java.util.Collections;
25  import java.util.HashMap;
26  import java.util.HashSet;
27  import java.util.Iterator;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.Map.Entry;
31  import java.util.Set;
32  import java.util.concurrent.ConcurrentHashMap;
33  import java.util.concurrent.ConcurrentNavigableMap;
34  import java.util.concurrent.ConcurrentSkipListMap;
35  import java.util.concurrent.CopyOnWriteArrayList;
36  
37  import org.apache.commons.logging.Log;
38  import org.apache.commons.logging.LogFactory;
39  import org.apache.hadoop.conf.Configuration;
40  import org.apache.hadoop.hbase.ClockOutOfSyncException;
41  import org.apache.hadoop.hbase.HConstants;
42  import org.apache.hadoop.hbase.HRegionInfo;
43  import org.apache.hadoop.hbase.NotServingRegionException;
44  import org.apache.hadoop.hbase.RegionLoad;
45  import org.apache.hadoop.hbase.ServerLoad;
46  import org.apache.hadoop.hbase.ServerName;
47  import org.apache.hadoop.hbase.YouAreDeadException;
48  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
49  import org.apache.hadoop.hbase.classification.InterfaceAudience;
50  import org.apache.hadoop.hbase.client.ClusterConnection;
51  import org.apache.hadoop.hbase.client.RetriesExhaustedException;
52  import org.apache.hadoop.hbase.ipc.HBaseRpcController;
53  import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
54  import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
55  import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
56  import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
57  import org.apache.hadoop.hbase.monitoring.MonitoredTask;
58  import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
59  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
60  import org.apache.hadoop.hbase.protobuf.RequestConverter;
61  import org.apache.hadoop.hbase.protobuf.ResponseConverter;
62  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
63  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
64  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
65  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
66  import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
67  import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
68  import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
69  import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
70  import org.apache.hadoop.hbase.regionserver.HRegionServer;
71  import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
72  import org.apache.hadoop.hbase.security.User;
73  import org.apache.hadoop.hbase.util.Bytes;
74  import org.apache.hadoop.hbase.util.Pair;
75  import org.apache.hadoop.hbase.util.RetryCounter;
76  import org.apache.hadoop.hbase.util.RetryCounterFactory;
77  import org.apache.hadoop.hbase.zookeeper.ZKUtil;
78  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
79  import org.apache.zookeeper.KeeperException;
80
81  import com.google.common.annotations.VisibleForTesting;
82  import com.google.protobuf.ByteString;
83  import com.google.protobuf.ServiceException;
84
85  /**
86   * The ServerManager class manages info about region servers.
87   * <p>
88   * Maintains lists of online and dead servers.  Processes the startups,
89   * shutdowns, and deaths of region servers.
90   * <p>
91   * Servers are distinguished in two different ways.  A given server has a
92   * location, specified by hostname and port, and of which there can only be one
93   * online at any given time.  A server instance is specified by the location
94   * (hostname and port) as well as the startcode (timestamp from when the server
95   * was started).  This is used to differentiate a restarted instance of a given
96   * server from the original instance.
97   * <p>
98   * If a server is known not to be running any more, it is called dead. The dead
99   * server needs to be handled by a ServerShutdownHandler.  If the handler is not
100  * enabled yet, the server can't be handled right away, so it is queued up.
101  * After the handler is enabled, the server is submitted to it for processing.
102  * However, the handler may be only partially enabled.  If so,
103  * the server cannot be fully processed and is queued up for further processing.
104  * A server is fully processed only after the handler is fully enabled
105  * and has completed the handling.
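     * <p>
     * As an illustration of the identity rules above (the hostname, port and startcodes
     * below are made up for this example), two ServerName instances that share a hostname
     * and port but carry different startcodes describe the same location but different
     * server instances; the instance with the newer startcode supersedes the older one:
     * <pre>{@code
     * ServerName original  = ServerName.valueOf("rs1.example.org", 16020, 1500000000000L);
     * ServerName restarted = ServerName.valueOf("rs1.example.org", 16020, 1500000123456L);
     * ServerName.isSameHostnameAndPort(original, restarted); // true: same location
     * original.equals(restarted);                            // false: different instances
     * }</pre>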
106  */
107 @InterfaceAudience.Private
108 public class ServerManager {
109   public static final String WAIT_ON_REGIONSERVERS_MAXTOSTART =
110       "hbase.master.wait.on.regionservers.maxtostart";
111
112   public static final String WAIT_ON_REGIONSERVERS_MINTOSTART =
113       "hbase.master.wait.on.regionservers.mintostart";
114
115   public static final String WAIT_ON_REGIONSERVERS_TIMEOUT =
116       "hbase.master.wait.on.regionservers.timeout";
117
118   public static final String WAIT_ON_REGIONSERVERS_INTERVAL =
119       "hbase.master.wait.on.regionservers.interval";
120
121   private static final Log LOG = LogFactory.getLog(ServerManager.class);
122
123   // Set if we are to shutdown the cluster.
124   private volatile boolean clusterShutdown = false;
125
126   /**
127    * The last flushed sequence id for a region.
128    */
129   private final ConcurrentNavigableMap<byte[], Long> flushedSequenceIdByRegion =
130     new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
131
132   /**
133    * The last flushed sequence id for a store in a region.
134    */
135   private final ConcurrentNavigableMap<byte[], ConcurrentNavigableMap<byte[], Long>>
136     storeFlushedSequenceIdsByRegion =
137     new ConcurrentSkipListMap<byte[], ConcurrentNavigableMap<byte[], Long>>(Bytes.BYTES_COMPARATOR);
138
139   /** Map of registered servers to their current load */
140   private final ConcurrentNavigableMap<ServerName, ServerLoad> onlineServers =
141     new ConcurrentSkipListMap<ServerName, ServerLoad>();
142
143   /**
144    * Map of admin interfaces per registered regionserver; these interfaces we use to control
145    * regionservers out on the cluster
146    */
147   private final Map<ServerName, AdminService.BlockingInterface> rsAdmins =
148     new HashMap<ServerName, AdminService.BlockingInterface>();
149
150   /** List of region servers that should not get any more new regions. */
151   private final ArrayList<ServerName> drainingServers =
152     new ArrayList<ServerName>();
153
154   private final MasterServices master;
155   private final ClusterConnection connection;
156
157   private final DeadServer deadservers = new DeadServer();
158
159   private final long maxSkew;
160   private final long warningSkew;
161
162   private final RetryCounterFactory pingRetryCounterFactory;
163   private final RpcControllerFactory rpcControllerFactory;
164
165   /**
166    * Set of region servers which are dead but not processed immediately. If a
167    * server dies before the master enables ServerShutdownHandler, the server will be
168    * added to this set and will be processed through calling
169    * {@link ServerManager#processQueuedDeadServers()} by the master.
170    * <p>
171    * A dead server is a server instance known to be dead, not listed in the /hbase/rs
172    * znode any more. It may not have been submitted to ServerShutdownHandler yet
173    * because the handler is not enabled.
174    * <p>
175    * A dead server, which has been submitted to ServerShutdownHandler while the
176    * handler is not enabled, is queued up.
177    * <p>
178    * So this is a set of region servers known to be dead but not submitted to
179    * ServerShutdownHandler for processing yet.
180    */
181   private Set<ServerName> queuedDeadServers = new HashSet<ServerName>();
182
183   /**
184    * Map of region servers which are dead and have been submitted to ServerShutdownHandler but are
185    * not fully processed immediately.
186    * <p>
187    * If one server died before assignment manager finished the failover cleanup, the server will be
188    * added to this set and will be processed through calling
189    * {@link ServerManager#processQueuedDeadServers()} by assignment manager.
190    * <p>
191    * The Boolean value indicates whether log split is needed inside ServerShutdownHandler
192    * <p>
193    * ServerShutdownHandler processes a dead server submitted to the handler after the handler is
194    * enabled. It may not be able to complete the processing because meta is not yet online or master
195    * is currently in startup mode. In this case, the dead server will be parked in this set
196    * temporarily.
197    */
198   private Map<ServerName, Boolean> requeuedDeadServers
199     = new ConcurrentHashMap<ServerName, Boolean>();
200
201   /** Listeners that are called on server events. */
202   private List<ServerListener> listeners = new CopyOnWriteArrayList<ServerListener>();
203
204   /**
205    * Constructor.
206    * @param master
207    * @throws ZooKeeperConnectionException
208    */
209   public ServerManager(final MasterServices master) throws IOException {
210     this(master, true);
211   }
212
213   ServerManager(final MasterServices master, final boolean connect) throws IOException {
214     this.master = master;
215     Configuration c = master.getConfiguration();
216     maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
217     warningSkew = c.getLong("hbase.master.warningclockskew", 10000);
218     this.connection = connect ? master.getClusterConnection() : null;
219     int pingMaxAttempts = Math.max(1, master.getConfiguration().getInt(
220       "hbase.master.maximum.ping.server.attempts", 10));
221     int pingSleepInterval = Math.max(1, master.getConfiguration().getInt(
222       "hbase.master.ping.server.retry.sleep.interval", 100));
223     this.pingRetryCounterFactory = new RetryCounterFactory(pingMaxAttempts, pingSleepInterval);
224     this.rpcControllerFactory = this.connection == null
225         ? null
226         : connection.getRpcControllerFactory();
227   }
228
229   /**
230    * Add the listener to the notification list.
231    * @param listener The ServerListener to register
232    */
233   public void registerListener(final ServerListener listener) {
234     this.listeners.add(listener);
235   }
236
237   /**
238    * Remove the listener from the notification list.
239    * @param listener The ServerListener to unregister
240    */
241   public boolean unregisterListener(final ServerListener listener) {
242     return this.listeners.remove(listener);
243   }
244
245   /**
246    * Let the server manager know a new regionserver has come online
247    * @param request the startup request
248    * @param ia the InetAddress from which request is received
249    * @return The ServerName we know this server as.
250    * @throws IOException
251    */
252   ServerName regionServerStartup(RegionServerStartupRequest request, InetAddress ia)
253       throws IOException {
254     // Test for case where we get a region startup message from a regionserver
255     // that has been quickly restarted but whose znode expiration handler has
256     // not yet run, or from a server whose fail we are currently processing.
257     // Test its host+port combo is present in serverAddresstoServerInfo.  If it
258     // is, reject the server and trigger its expiration. The next time it comes
259     // in, it should have been removed from serverAddressToServerInfo and queued
260     // for processing by ProcessServerShutdown.
261
262     final String hostname = request.hasUseThisHostnameInstead() ?
263         request.getUseThisHostnameInstead() : ia.getHostName();
264     ServerName sn = ServerName.valueOf(hostname, request.getPort(),
265       request.getServerStartCode());
266     checkClockSkew(sn, request.getServerCurrentTime());
267     checkIsDead(sn, "STARTUP");
268     if (!checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
269       LOG.warn("THIS SHOULD NOT HAPPEN, RegionServerStartup"
270         + " could not record the server: " + sn);
271     }
272     return sn;
273   }
274
275   private ConcurrentNavigableMap<byte[], Long> getOrCreateStoreFlushedSequenceId(
276     byte[] regionName) {
277     ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId =
278         storeFlushedSequenceIdsByRegion.get(regionName);
279     if (storeFlushedSequenceId != null) {
280       return storeFlushedSequenceId;
281     }
282     storeFlushedSequenceId = new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
283     ConcurrentNavigableMap<byte[], Long> alreadyPut =
284         storeFlushedSequenceIdsByRegion.putIfAbsent(regionName, storeFlushedSequenceId);
285     return alreadyPut == null ? storeFlushedSequenceId : alreadyPut;
286   }
287   /**
288    * Updates last flushed sequence Ids for the regions on server sn
289    * @param sn
290    * @param hsl
291    */
292   private void updateLastFlushedSequenceIds(ServerName sn, ServerLoad hsl) {
293     Map<byte[], RegionLoad> regionsLoad = hsl.getRegionsLoad();
294     for (Entry<byte[], RegionLoad> entry : regionsLoad.entrySet()) {
295       byte[] encodedRegionName = Bytes.toBytes(HRegionInfo.encodeRegionName(entry.getKey()));
296       Long existingValue = flushedSequenceIdByRegion.get(encodedRegionName);
297       long l = entry.getValue().getCompleteSequenceId();
298       // Don't let smaller sequence ids override greater sequence ids.
299       if (LOG.isTraceEnabled()) {
300         LOG.trace(Bytes.toString(encodedRegionName) + ", existingValue=" + existingValue +
301           ", completeSequenceId=" + l);
302       }
303       if (existingValue == null || (l != HConstants.NO_SEQNUM && l > existingValue)) {
304         flushedSequenceIdByRegion.put(encodedRegionName, l);
305       } else if (l != HConstants.NO_SEQNUM && l < existingValue) {
306         LOG.warn("RegionServer " + sn + " indicates a last flushed sequence id ("
307             + l + ") that is less than the previous last flushed sequence id ("
308             + existingValue + ") for region " + Bytes.toString(entry.getKey()) + ". Ignoring.");
309       }
310       ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId =
311           getOrCreateStoreFlushedSequenceId(encodedRegionName);
312       for (StoreSequenceId storeSeqId : entry.getValue().getStoreCompleteSequenceId()) {
313         byte[] family = storeSeqId.getFamilyName().toByteArray();
314         existingValue = storeFlushedSequenceId.get(family);
315         l = storeSeqId.getSequenceId();
316         if (LOG.isTraceEnabled()) {
317           LOG.trace(Bytes.toString(encodedRegionName) + ", family=" + Bytes.toString(family) +
318             ", existingValue=" + existingValue + ", completeSequenceId=" + l);
319         }
320         // Don't let smaller sequence ids override greater sequence ids.
321         if (existingValue == null || (l != HConstants.NO_SEQNUM && l > existingValue.longValue())) {
322           storeFlushedSequenceId.put(family, l);
323         }
324       }
325     }
326   }
327
328   void regionServerReport(ServerName sn,
329       ServerLoad sl) throws YouAreDeadException {
330     checkIsDead(sn, "REPORT");
331     if (null == this.onlineServers.replace(sn, sl)) {
332       // Already have this host+port combo and it's just a different start code?
333       // Just let the server in. Presume master joining a running cluster.
334       // recordNewServer is what happens at the end of reportServerStartup.
335       // The only thing we are skipping is passing back to the regionserver
336       // the ServerName to use. Here we presume a master has already done
337       // that so we'll press on with whatever it gave us for ServerName.
338       if (!checkAndRecordNewServer(sn, sl)) {
339         LOG.info("RegionServerReport ignored, could not record the server: " + sn);
340         return; // Not recorded, so no need to move on
341       }
342     }
343     updateLastFlushedSequenceIds(sn, sl);
344   }
345
346   /**
347    * Checks if a server with the same host and port already exists;
348    * if not, or if the existing one has a smaller start code, records the new server.
349    *
350    * @param serverName the server to check and record
351    * @param sl the server load on the server
352    * @return true if the server is recorded, otherwise, false
353    */
354   boolean checkAndRecordNewServer(
355       final ServerName serverName, final ServerLoad sl) {
356     ServerName existingServer = null;
357     synchronized (this.onlineServers) {
358       existingServer = findServerWithSameHostnamePortWithLock(serverName);
359       if (existingServer != null && (existingServer.getStartcode() > serverName.getStartcode())) {
360         LOG.info("Server serverName=" + serverName + " rejected; we already have "
361             + existingServer.toString() + " registered with same hostname and port");
362         return false;
363       }
364       recordNewServerWithLock(serverName, sl);
365     }
366
367     // Tell our listeners that a server was added
368     if (!this.listeners.isEmpty()) {
369       for (ServerListener listener : this.listeners) {
370         listener.serverAdded(serverName);
371       }
372     }
373
374     // Note that we assume that same ts means same server, and don't expire in that case.
375     //  TODO: ts can theoretically collide due to clock shifts, so this is a bit hacky.
376     if (existingServer != null && (existingServer.getStartcode() < serverName.getStartcode())) {
377       LOG.info("Triggering server recovery; existingServer " +
378           existingServer + " looks stale, new server:" + serverName);
379       expireServer(existingServer);
380     }
381     return true;
382   }
383
384   /**
385    * Checks the clock skew between the server and the master. If the clock skew exceeds the
386    * configured max, it will throw an exception; if it exceeds the configured warning threshold,
387    * it will log a warning but start normally.
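       * <p>
       * Both thresholds are read from configuration in the constructor; a minimal sketch of
       * setting them (the values shown are simply the code's defaults, not a recommendation):
       * <pre>{@code
       * Configuration conf = master.getConfiguration();
       * conf.setLong("hbase.master.maxclockskew", 30000);     // reject skews larger than 30s
       * conf.setLong("hbase.master.warningclockskew", 10000); // warn for skews larger than 10s
       * }</pre>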
388    * @param serverName Incoming server's name
389    * @param serverCurrentTime
390    * @throws ClockOutOfSyncException if the skew exceeds the configured max value
391    */
392   private void checkClockSkew(final ServerName serverName, final long serverCurrentTime)
393   throws ClockOutOfSyncException {
394     long skew = Math.abs(System.currentTimeMillis() - serverCurrentTime);
395     if (skew > maxSkew) {
396       String message = "Server " + serverName + " has been " +
397         "rejected; Reported time is too far out of sync with master.  " +
398         "Time difference of " + skew + "ms > max allowed of " + maxSkew + "ms";
399       LOG.warn(message);
400       throw new ClockOutOfSyncException(message);
401     } else if (skew > warningSkew){
402       String message = "Reported time for server " + serverName + " is out of sync with master " +
403         "by " + skew + "ms. (Warning threshold is " + warningSkew + "ms; " +
404         "error threshold is " + maxSkew + "ms)";
405       LOG.warn(message);
406     }
407   }
408
409   /**
410    * If this server is on the dead list, reject it with a YouAreDeadException.
411    * If it was dead but came back with a new start code, remove the old entry
412    * from the dead list.
413    * @param serverName
414    * @param what STARTUP or REPORT
415    * @throws org.apache.hadoop.hbase.YouAreDeadException
416    */
417   private void checkIsDead(final ServerName serverName, final String what)
418       throws YouAreDeadException {
419     if (this.deadservers.isDeadServer(serverName)) {
420       // host name, port and start code all match with existing one of the
421       // dead servers. So, this server must be dead.
422       String message = "Server " + what + " rejected; currently processing " +
423           serverName + " as dead server";
424       LOG.debug(message);
425       throw new YouAreDeadException(message);
426     }
427     // remove dead server with same hostname and port of newly checking in rs after master
428     // initialization. See HBASE-5916 for more information.
429     if ((this.master == null || this.master.isInitialized())
430         && this.deadservers.cleanPreviousInstance(serverName)) {
431       // This server has now become alive after we marked it as dead.
432       // We removed its previous entry from the dead list to reflect it.
433       LOG.debug(what + ":" + " Server " + serverName + " came back up," +
434           " removed it from the dead servers list");
435     }
436   }
437
438   /**
439    * Assumes onlineServers is locked.
440    * @return ServerName with matching hostname and port.
441    */
442   private ServerName findServerWithSameHostnamePortWithLock(
443       final ServerName serverName) {
444     ServerName end = ServerName.valueOf(serverName.getHostname(), serverName.getPort(),
445         Long.MAX_VALUE);
446
447     ServerName r = onlineServers.lowerKey(end);
448     if (r != null) {
449       if (ServerName.isSameHostnameAndPort(r, serverName)) {
450         return r;
451       }
452     }
453     return null;
454   }
455
456   /**
457    * Adds to the onlineServers list. onlineServers should be locked.
458    * @param serverName The remote server's name.
459    * @param sl the load on the server
460    */
461   @VisibleForTesting
462   void recordNewServerWithLock(final ServerName serverName, final ServerLoad sl) {
463     LOG.info("Registering server=" + serverName);
464     this.onlineServers.put(serverName, sl);
465     this.rsAdmins.remove(serverName);
466   }
467
468   public RegionStoreSequenceIds getLastFlushedSequenceId(byte[] encodedRegionName) {
469     RegionStoreSequenceIds.Builder builder = RegionStoreSequenceIds.newBuilder();
470     Long seqId = flushedSequenceIdByRegion.get(encodedRegionName);
471     builder.setLastFlushedSequenceId(seqId != null ? seqId.longValue() : HConstants.NO_SEQNUM);
472     Map<byte[], Long> storeFlushedSequenceId =
473         storeFlushedSequenceIdsByRegion.get(encodedRegionName);
474     if (storeFlushedSequenceId != null) {
475       for (Map.Entry<byte[], Long> entry : storeFlushedSequenceId.entrySet()) {
476         builder.addStoreSequenceId(StoreSequenceId.newBuilder()
477             .setFamilyName(ByteString.copyFrom(entry.getKey()))
478             .setSequenceId(entry.getValue().longValue()).build());
479       }
480     }
481     return builder.build();
482   }
483
484   /**
485    * @param serverName
486    * @return ServerLoad if serverName is known else null
487    */
488   public ServerLoad getLoad(final ServerName serverName) {
489     return this.onlineServers.get(serverName);
490   }
491
492   /**
493    * Compute the average load across all region servers.
494    * Currently, this uses a very naive computation - just uses the number of
495    * regions being served, ignoring stats about number of requests.
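       * For example, three online servers carrying 10, 20 and 30 regions give an
       * average load of (10 + 20 + 30) / 3 = 20 regions per server.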
496    * @return the average load
497    */
498   public double getAverageLoad() {
499     int totalLoad = 0;
500     int numServers = 0;
501     for (ServerLoad sl: this.onlineServers.values()) {
502         numServers++;
503         totalLoad += sl.getNumberOfRegions();
504     }
505     return numServers == 0 ? 0 :
506       (double)totalLoad / (double)numServers;
507   }
508
509   /** @return the count of active regionservers */
510   public int countOfRegionServers() {
511     // Presumes onlineServers is a concurrent map
512     return this.onlineServers.size();
513   }
514
515   /**
516    * @return Read-only map of servers to serverinfo
517    */
518   public Map<ServerName, ServerLoad> getOnlineServers() {
519     // Presumption is that iterating the returned Map is OK.
520     synchronized (this.onlineServers) {
521       return Collections.unmodifiableMap(this.onlineServers);
522     }
523   }
524
525
526   public DeadServer getDeadServers() {
527     return this.deadservers;
528   }
529
530   /**
531    * Checks if any dead servers are currently being processed.
532    * @return true if any RS are being processed as dead, false if not
533    */
534   public boolean areDeadServersInProgress() {
535     return this.deadservers.areDeadServersInProgress();
536   }
537
538   void letRegionServersShutdown() {
539     long previousLogTime = 0;
540     ServerName sn = master.getServerName();
541     ZooKeeperWatcher zkw = master.getZooKeeper();
542     int onlineServersCt;
543     while ((onlineServersCt = onlineServers.size()) > 0){
544
545       if (System.currentTimeMillis() > (previousLogTime + 1000)) {
546         Set<ServerName> remainingServers = onlineServers.keySet();
547         synchronized (onlineServers) {
548           if (remainingServers.size() == 1 && remainingServers.contains(sn)) {
549             // Master will delete itself later.
550             return;
551           }
552         }
553         StringBuilder sb = new StringBuilder();
554         // It's ok here to not sync on onlineServers - merely logging
555         for (ServerName key : remainingServers) {
556           if (sb.length() > 0) {
557             sb.append(", ");
558           }
559           sb.append(key);
560         }
561         LOG.info("Waiting on regionserver(s) to go down " + sb.toString());
562         previousLogTime = System.currentTimeMillis();
563       }
564
565       try {
566         List<String> servers = ZKUtil.listChildrenNoWatch(zkw, zkw.rsZNode);
567         if (servers == null || servers.size() == 0 || (servers.size() == 1
568             && servers.contains(sn.toString()))) {
569           LOG.info("ZK shows there is only the master self online, exiting now");
570           // Master could have lost some ZK events, no need to wait more.
571           break;
572         }
573       } catch (KeeperException ke) {
574         LOG.warn("Failed to list regionservers", ke);
575         // ZK is malfunctioning, don't hang here
576         break;
577       }
578       synchronized (onlineServers) {
579         try {
580           if (onlineServersCt == onlineServers.size()) onlineServers.wait(100);
581         } catch (InterruptedException ignored) {
582           // continue
583         }
584       }
585     }
586   }
587
588   /*
589    * Expire the passed server.  Add it to the list of dead servers and queue up its
590    * shutdown processing.
591    */
592   public synchronized void expireServer(final ServerName serverName) {
593     if (serverName.equals(master.getServerName())) {
594       if (!(master.isAborted() || master.isStopped())) {
595         master.stop("We lost our znode?");
596       }
597       return;
598     }
599     if (!master.isServerCrashProcessingEnabled()) {
600       LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
601           + "delay expiring server " + serverName);
602       this.queuedDeadServers.add(serverName);
603       return;
604     }
605     if (this.deadservers.isDeadServer(serverName)) {
606       // TODO: Can this happen?  It shouldn't be online in this case?
607       LOG.warn("Expiration of " + serverName +
608           " but server shutdown already in progress");
609       return;
610     }
611     moveFromOnelineToDeadServers(serverName);
612
613     // If cluster is going down, yes, servers are going to be expiring; don't
614     // process as a dead server
615     if (this.clusterShutdown) {
616       LOG.info("Cluster shutdown set; " + serverName +
617         " expired; onlineServers=" + this.onlineServers.size());
618       if (this.onlineServers.isEmpty()) {
619         master.stop("Cluster shutdown set; onlineServer=0");
620       }
621       return;
622     }
623
624     boolean carryingMeta = master.getAssignmentManager().isCarryingMeta(serverName);
625     ProcedureExecutor<MasterProcedureEnv> procExec = this.master.getMasterProcedureExecutor();
626     procExec.submitProcedure(new ServerCrashProcedure(
627       procExec.getEnvironment(), serverName, true, carryingMeta));
628     LOG.debug("Added=" + serverName +
629       " to dead servers, submitted shutdown handler to be executed meta=" + carryingMeta);
630
631     // Tell our listeners that a server was removed
632     if (!this.listeners.isEmpty()) {
633       for (ServerListener listener : this.listeners) {
634         listener.serverRemoved(serverName);
635       }
636     }
637   }
638
639   @VisibleForTesting
640   public void moveFromOnelineToDeadServers(final ServerName sn) {
641     synchronized (onlineServers) {
642       if (!this.onlineServers.containsKey(sn)) {
643         LOG.warn("Expiration of " + sn + " but server not online");
644       }
645       // Remove the server from the known servers lists and update load info BUT
646       // add to deadservers first; do this so it'll show in dead servers list if
647       // not in online servers list.
648       this.deadservers.add(sn);
649       this.onlineServers.remove(sn);
650       onlineServers.notifyAll();
651     }
652     this.rsAdmins.remove(sn);
653   }
654
655   public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitWal) {
656     // When assignment manager is cleaning up the zookeeper nodes and rebuilding the
657     // in-memory region states, region servers could be down. Meta table can and
658     // should be re-assigned, log splitting can be done too. However, it is better to
659     // wait till the cleanup is done before re-assigning user regions.
660     //
661     // We should not wait in the server shutdown handler thread since it can clog
662     // the handler threads and meta table could not be re-assigned in case
663     // the corresponding server is down. So we queue them up here instead.
664     if (!master.getAssignmentManager().isFailoverCleanupDone()) {
665       requeuedDeadServers.put(serverName, shouldSplitWal);
666       return;
667     }
668
669     this.deadservers.add(serverName);
670     ProcedureExecutor<MasterProcedureEnv> procExec = this.master.getMasterProcedureExecutor();
671     procExec.submitProcedure(new ServerCrashProcedure(
672       procExec.getEnvironment(), serverName, shouldSplitWal, false));
673   }
674
675   /**
676    * Process the servers which died during master's initialization. It will be
677    * called after HMaster#assignMeta and AssignmentManager#joinCluster.
678    */
679   synchronized void processQueuedDeadServers() {
680     if (!master.isServerCrashProcessingEnabled()) {
681       LOG.info("Master hasn't enabled ServerShutdownHandler");
682     }
683     Iterator<ServerName> serverIterator = queuedDeadServers.iterator();
684     while (serverIterator.hasNext()) {
685       ServerName tmpServerName = serverIterator.next();
686       expireServer(tmpServerName);
687       serverIterator.remove();
688       requeuedDeadServers.remove(tmpServerName);
689     }
690
691     if (!master.getAssignmentManager().isFailoverCleanupDone()) {
692       LOG.info("AssignmentManager hasn't finished failover cleanup; waiting");
693     }
694
695     for (Map.Entry<ServerName, Boolean> entry : requeuedDeadServers.entrySet()) {
696       processDeadServer(entry.getKey(), entry.getValue());
697     }
698     requeuedDeadServers.clear();
699   }
700
701   /*
702    * Remove the server from the drain list.
703    */
704   public boolean removeServerFromDrainList(final ServerName sn) {
705     // Warn if the server (sn) is not online.  ServerName is of the form:
706     // <hostname> , <port> , <startcode>
707
708     if (!this.isServerOnline(sn)) {
709       LOG.warn("Server " + sn + " is not currently online. " +
710                "Removing from draining list anyway, as requested.");
711     }
712     // Remove the server from the draining servers lists.
713     return this.drainingServers.remove(sn);
714   }
715
716   /*
717    * Add the server to the drain list.
718    */
719   public boolean addServerToDrainList(final ServerName sn) {
720     // Warn if the server (sn) is not online.  ServerName is of the form:
721     // <hostname> , <port> , <startcode>
722
723     if (!this.isServerOnline(sn)) {
724       LOG.warn("Server " + sn + " is not currently online. " +
725                "Ignoring request to add it to draining list.");
726       return false;
727     }
728     // Add the server to the draining servers lists, if it's not already in
729     // it.
730     if (this.drainingServers.contains(sn)) {
731       LOG.warn("Server " + sn + " is already in the draining server list." +
732                "Ignoring request to add it again.");
733       return false;
734     }
735     return this.drainingServers.add(sn);
736   }
737
738   // RPC methods to region servers
739
740   /**
741    * Sends an OPEN RPC to the specified server to open the specified region.
742    * <p>
743    * Open should not fail but can if server just crashed.
744    * <p>
745    * @param server server to open a region
746    * @param region region to open
747    * @param favoredNodes
748    */
749   public RegionOpeningState sendRegionOpen(final ServerName server,
750       HRegionInfo region, List<ServerName> favoredNodes)
751   throws IOException {
752     AdminService.BlockingInterface admin = getRsAdmin(server);
753     if (admin == null) {
754       throw new IOException("Attempting to send OPEN RPC to server " + server.toString() +
755         " failed because no RPC connection found to this server");
756     }
757     OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server,
758       region, favoredNodes,
759       (RecoveryMode.LOG_REPLAY == this.master.getMasterWalManager().getLogRecoveryMode()));
760     try {
761       OpenRegionResponse response = admin.openRegion(null, request);
762       return ResponseConverter.getRegionOpeningState(response);
763     } catch (ServiceException se) {
764       throw ProtobufUtil.getRemoteException(se);
765     }
766   }
767
768   /**
769    * Sends an OPEN RPC to the specified server to open the specified regions.
770    * <p>
771    * Open should not fail but can if server just crashed.
772    * <p>
773    * @param server server to open a region
774    * @param regionOpenInfos info of a list of regions to open
775    * @return a list of region opening states
776    */
777   public List<RegionOpeningState> sendRegionOpen(ServerName server,
778       List<Pair<HRegionInfo, List<ServerName>>> regionOpenInfos)
779   throws IOException {
780     AdminService.BlockingInterface admin = getRsAdmin(server);
781     if (admin == null) {
782       throw new IOException("Attempting to send OPEN RPC to server " + server.toString() +
783         " failed because no RPC connection found to this server");
784     }
785
786     OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, regionOpenInfos,
787       (RecoveryMode.LOG_REPLAY == this.master.getMasterWalManager().getLogRecoveryMode()));
788     try {
789       OpenRegionResponse response = admin.openRegion(null, request);
790       return ResponseConverter.getRegionOpeningStateList(response);
791     } catch (ServiceException se) {
792       throw ProtobufUtil.getRemoteException(se);
793     }
794   }
795
796   private HBaseRpcController newRpcController() {
797     return rpcControllerFactory == null ? null : rpcControllerFactory.newController();
798   }
799
800   /**
801    * Sends a CLOSE RPC to the specified server to close the specified region.
802    * <p>
803    * A region server could reject the close request because it either does not
804    * have the specified region or the region is being split.
805    * @param server server to close a region
806    * @param region region to close
807    * @param dest - if the region is moved to another server, the destination server. null otherwise.
808    * @throws IOException
809    */
810   public boolean sendRegionClose(ServerName server, HRegionInfo region,
811       ServerName dest) throws IOException {
812     if (server == null) throw new NullPointerException("Passed server is null");
813     AdminService.BlockingInterface admin = getRsAdmin(server);
814     if (admin == null) {
815       throw new IOException("Attempting to send CLOSE RPC to server " +
816         server.toString() + " for region " +
817         region.getRegionNameAsString() +
818         " failed because no RPC connection found to this server");
819     }
820     HBaseRpcController controller = newRpcController();
821     return ProtobufUtil.closeRegion(controller, admin, server, region.getRegionName(), dest);
822   }
823
824   public boolean sendRegionClose(ServerName server,
825       HRegionInfo region) throws IOException {
826     return sendRegionClose(server, region, null);
827   }
828
829   /**
830    * Sends a WARMUP RPC to the specified server to warmup the specified region.
831    * <p>
832    * A region server could reject the warmup request because it either does not
833    * have the specified region or the region is being split.
834    * @param server server to warmup a region
835    * @param region region to warmup
836    */
837   public void sendRegionWarmup(ServerName server,
838       HRegionInfo region) {
839     if (server == null) return;
840     try {
841       AdminService.BlockingInterface admin = getRsAdmin(server);
842       HBaseRpcController controller = newRpcController();
843       ProtobufUtil.warmupRegion(controller, admin, region);
844     } catch (IOException e) {
845       LOG.error("Received exception in RPC for warmup server:" +
846         server + "region: " + region +
847         "exception: " + e);
848     }
849   }
850
851   /**
852    * Contacts a region server and waits up to timeout ms
853    * to close the region.  This bypasses the active hmaster.
854    */
855   public static void closeRegionSilentlyAndWait(ClusterConnection connection,
856     ServerName server, HRegionInfo region, long timeout) throws IOException, InterruptedException {
857     AdminService.BlockingInterface rs = connection.getAdmin(server);
858     HBaseRpcController controller = connection.getRpcControllerFactory().newController();
859     try {
860       ProtobufUtil.closeRegion(controller, rs, server, region.getRegionName());
861     } catch (IOException e) {
862       LOG.warn("Exception when closing region: " + region.getRegionNameAsString(), e);
863     }
864     long expiration = timeout + System.currentTimeMillis();
865     while (System.currentTimeMillis() < expiration) {
866       controller.reset();
867       try {
868         HRegionInfo rsRegion =
869           ProtobufUtil.getRegionInfo(controller, rs, region.getRegionName());
870         if (rsRegion == null) return;
871       } catch (IOException ioe) {
872         if (ioe instanceof NotServingRegionException) // no need to retry again
873           return;
874         LOG.warn("Exception when retrieving regioninfo from: "
875           + region.getRegionNameAsString(), ioe);
876       }
877       Thread.sleep(1000);
878     }
879     throw new IOException("Region " + region + " failed to close within"
880         + " timeout " + timeout);
881   }
882
883   /**
884    * Sends a MERGE REGIONS RPC to the specified server to merge the specified
885    * regions.
886    * <p>
887    * A region server could reject the merge request because it does not
888    * have one of the specified regions.
889    * @param server server to merge regions
890    * @param region_a region to merge
891    * @param region_b region to merge
892    * @param forcible true to do a compulsory merge; otherwise we will only merge
893    *          two adjacent regions
894    * @throws IOException
895    */
896   public void sendRegionsMerge(ServerName server, HRegionInfo region_a,
897       HRegionInfo region_b, boolean forcible, final User user) throws IOException {
898     if (server == null)
899       throw new NullPointerException("Passed server is null");
900     if (region_a == null || region_b == null)
901       throw new NullPointerException("Passed region is null");
902     AdminService.BlockingInterface admin = getRsAdmin(server);
903     if (admin == null) {
904       throw new IOException("Attempting to send MERGE REGIONS RPC to server "
905           + server.toString() + " for region "
906           + region_a.getRegionNameAsString() + ","
907           + region_b.getRegionNameAsString()
908           + " failed because no RPC connection found to this server");
909     }
910     HBaseRpcController controller = newRpcController();
911     ProtobufUtil.mergeRegions(controller, admin, region_a, region_b, forcible, user);
912   }
913
914   /**
915    * Check if a region server is reachable and has the expected start code
916    */
917   public boolean isServerReachable(ServerName server) {
918     if (server == null) throw new NullPointerException("Passed server is null");
919
920
921     RetryCounter retryCounter = pingRetryCounterFactory.create();
922     while (retryCounter.shouldRetry()) {
923       try {
924         HBaseRpcController controller = newRpcController();
925         AdminService.BlockingInterface admin = getRsAdmin(server);
926         if (admin != null) {
927           ServerInfo info = ProtobufUtil.getServerInfo(controller, admin);
928           return info != null && info.hasServerName()
929             && server.getStartcode() == info.getServerName().getStartCode();
930         }
931       } catch (IOException ioe) {
932         LOG.debug("Couldn't reach " + server + ", try=" + retryCounter.getAttemptTimes()
933           + " of " + retryCounter.getMaxAttempts(), ioe);
934         try {
935           retryCounter.sleepUntilNextRetry();
936         } catch(InterruptedException ie) {
937           Thread.currentThread().interrupt();
938         }
939       }
940     }
941     return false;
942   }
943
944   /**
945    * @param sn
946    * @return Admin interface for the remote regionserver named <code>sn</code>
947    * @throws IOException
948    * @throws RetriesExhaustedException wrapping a ConnectException if failed
949    */
950   private AdminService.BlockingInterface getRsAdmin(final ServerName sn)
951   throws IOException {
952     AdminService.BlockingInterface admin = this.rsAdmins.get(sn);
953     if (admin == null) {
954       LOG.debug("New admin connection to " + sn.toString());
955       if (sn.equals(master.getServerName()) && master instanceof HRegionServer) {
956         // A master is also a region server now, see HBASE-10569 for details
957         admin = ((HRegionServer)master).getRSRpcServices();
958       } else {
959         admin = this.connection.getAdmin(sn);
960       }
961       this.rsAdmins.put(sn, admin);
962     }
963     return admin;
964   }
965
966   /**
967    * Wait for the region servers to report in.
968    * We will wait until one of these conditions is met:
969    *  - the master is stopped
970    *  - the 'hbase.master.wait.on.regionservers.maxtostart' number of
971    *    region servers is reached
972    *  - the 'hbase.master.wait.on.regionservers.mintostart' is reached AND
973    *   there has been no new region server checking in for
974    *      'hbase.master.wait.on.regionservers.interval' time AND
975    *   the 'hbase.master.wait.on.regionservers.timeout' is reached
976    *
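       * A hedged sketch of tuning these settings through the master configuration (the
       * specific values are illustrative only, not defaults or recommendations):
       * <pre>{@code
       * Configuration conf = master.getConfiguration();
       * conf.setInt(WAIT_ON_REGIONSERVERS_MINTOSTART, 3);    // wait for at least 3 region servers
       * conf.setInt(WAIT_ON_REGIONSERVERS_MAXTOSTART, 10);   // stop waiting once 10 have checked in
       * conf.setLong(WAIT_ON_REGIONSERVERS_TIMEOUT, 4500);   // overall wait timeout in ms (code default)
       * conf.setLong(WAIT_ON_REGIONSERVERS_INTERVAL, 1500);  // settle interval in ms (code default)
       * }</pre>
       *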
977    * @throws InterruptedException
978    */
979   public void waitForRegionServers(MonitoredTask status)
980   throws InterruptedException {
981     final long interval = this.master.getConfiguration().
982       getLong(WAIT_ON_REGIONSERVERS_INTERVAL, 1500);
983     final long timeout = this.master.getConfiguration().
984       getLong(WAIT_ON_REGIONSERVERS_TIMEOUT, 4500);
985     int defaultMinToStart = 1;
986     if (BaseLoadBalancer.tablesOnMaster(master.getConfiguration())) {
987       // If we assign regions to master, we'd like to start
988       // at least another region server so that we don't
989       // assign all regions to master if other region servers
990       // don't come up in time.
991       defaultMinToStart = 2;
992     }
993     int minToStart = this.master.getConfiguration().
994       getInt(WAIT_ON_REGIONSERVERS_MINTOSTART, defaultMinToStart);
995     if (minToStart < 1) {
996       LOG.warn(String.format(
997         "The value of '%s' (%d) can not be less than 1, ignoring.",
998         WAIT_ON_REGIONSERVERS_MINTOSTART, minToStart));
999       minToStart = 1;
1000     }
1001     int maxToStart = this.master.getConfiguration().
1002       getInt(WAIT_ON_REGIONSERVERS_MAXTOSTART, Integer.MAX_VALUE);
1003     if (maxToStart < minToStart) {
1004         LOG.warn(String.format(
1005             "The value of '%s' (%d) is set less than '%s' (%d), ignoring.",
1006             WAIT_ON_REGIONSERVERS_MAXTOSTART, maxToStart,
1007             WAIT_ON_REGIONSERVERS_MINTOSTART, minToStart));
1008         maxToStart = Integer.MAX_VALUE;
1009     }
1010
1011     long now =  System.currentTimeMillis();
1012     final long startTime = now;
1013     long slept = 0;
1014     long lastLogTime = 0;
1015     long lastCountChange = startTime;
1016     int count = countOfRegionServers();
1017     int oldCount = 0;
1018     while (!this.master.isStopped() && count < maxToStart
1019         && (lastCountChange+interval > now || timeout > slept || count < minToStart)) {
1020       // Log some info at every interval time or if there is a change
1021       if (oldCount != count || lastLogTime+interval < now){
1022         lastLogTime = now;
1023         String msg =
1024           "Waiting for region servers count to settle; currently"+
1025             " checked in " + count + ", slept for " + slept + " ms," +
1026             " expecting minimum of " + minToStart + ", maximum of "+ maxToStart+
1027             ", timeout of "+timeout+" ms, interval of "+interval+" ms.";
1028         LOG.info(msg);
1029         status.setStatus(msg);
1030       }
1031
1032       // We sleep for some time
1033       final long sleepTime = 50;
1034       Thread.sleep(sleepTime);
1035       now =  System.currentTimeMillis();
1036       slept = now - startTime;
1037
1038       oldCount = count;
1039       count = countOfRegionServers();
1040       if (count != oldCount) {
1041         lastCountChange = now;
1042       }
1043     }
1044
1045     LOG.info("Finished waiting for region servers count to settle;" +
1046       " checked in " + count + ", slept for " + slept + " ms," +
1047       " expecting minimum of " + minToStart + ", maximum of "+ maxToStart+","+
1048       " master is "+ (this.master.isStopped() ? "stopped.": "running")
1049     );
1050   }
1051
1052   /**
1053    * @return A copy of the internal list of online servers.
1054    */
1055   public List<ServerName> getOnlineServersList() {
1056     // TODO: optimize the load balancer call so we don't need to make a new list
1057     // TODO: FIX. THIS IS POPULAR CALL.
1058     return new ArrayList<ServerName>(this.onlineServers.keySet());
1059   }
1060
1061   /**
1062    * @return A copy of the internal list of draining servers.
1063    */
1064   public List<ServerName> getDrainingServersList() {
1065     return new ArrayList<ServerName>(this.drainingServers);
1066   }
1067
1068   /**
1069    * @return A copy of the internal set of deadNotExpired servers.
1070    */
1071   Set<ServerName> getDeadNotExpiredServers() {
1072     return new HashSet<ServerName>(this.queuedDeadServers);
1073   }
1074
1075   /**
1076    * During startup, if we figure it is not a failover, i.e. there are
1077    * no more WAL files to split, we won't try to recover these dead servers.
1078    * So we just remove them from the queue. Use caution in calling this.
1079    */
1080   void removeRequeuedDeadServers() {
1081     requeuedDeadServers.clear();
1082   }
1083
1084   /**
1085    * @return A copy of the internal map of requeuedDeadServers servers and their corresponding
1086    *         splitlog need flag.
1087    */
1088   Map<ServerName, Boolean> getRequeuedDeadServers() {
1089     return Collections.unmodifiableMap(this.requeuedDeadServers);
1090   }
1091
1092   public boolean isServerOnline(ServerName serverName) {
1093     return serverName != null && onlineServers.containsKey(serverName);
1094   }
1095
1096   /**
1097    * Check if a server is known to be dead.  A server can be online,
1098    * or known to be dead, or unknown to this manager (i.e., not online,
1099    * not known to be dead either; it is simply not tracked by the
1100    * master any more, for example, a very old previous instance).
1101    */
1102   public synchronized boolean isServerDead(ServerName serverName) {
1103     return serverName == null || deadservers.isDeadServer(serverName)
1104       || queuedDeadServers.contains(serverName)
1105       || requeuedDeadServers.containsKey(serverName);
1106   }
1107
1108   public void shutdownCluster() {
1109     this.clusterShutdown = true;
1110     this.master.stop("Cluster shutdown requested");
1111   }
1112
1113   public boolean isClusterShutdown() {
1114     return this.clusterShutdown;
1115   }
1116
1117   /**
1118    * Stop the ServerManager.  Currently closes the connection to the master.
1119    */
1120   public void stop() {
1121     if (connection != null) {
1122       try {
1123         connection.close();
1124       } catch (IOException e) {
1125         LOG.error("Attempt to close connection to master failed", e);
1126       }
1127     }
1128   }
1129
1130   /**
1131    * Creates a list of possible destinations for a region. It contains the online servers, but not
1132    *  the draining or dying servers.
1133    *  @param serverToExclude can be null if there is no server to exclude
1134    */
1135   public List<ServerName> createDestinationServersList(final ServerName serverToExclude){
1136     final List<ServerName> destServers = getOnlineServersList();
1137
1138     if (serverToExclude != null){
1139       destServers.remove(serverToExclude);
1140     }
1141
1142     // Loop through the draining server list and remove them from the server list
1143     final List<ServerName> drainingServersCopy = getDrainingServersList();
1144     if (!drainingServersCopy.isEmpty()) {
1145       for (final ServerName server: drainingServersCopy) {
1146         destServers.remove(server);
1147       }
1148     }
1149
1150     // Remove the deadNotExpired servers from the server list.
1151     removeDeadNotExpiredServers(destServers);
1152     return destServers;
1153   }
1154
1155   /**
1156    * Calls {@link #createDestinationServersList} without server to exclude.
1157    */
1158   public List<ServerName> createDestinationServersList(){
1159     return createDestinationServersList(null);
1160   }
1161
1162   /**
1163    * Loop through the deadNotExpired server list and remove them from the
1164    * servers.
1165    * This function should be used carefully outside of this class. You should use a high level
1166    * method such as {@link #createDestinationServersList()} instead of managing your own list.
1167    */
1168   void removeDeadNotExpiredServers(List<ServerName> servers) {
1169     Set<ServerName> deadNotExpiredServersCopy = this.getDeadNotExpiredServers();
1170     if (!deadNotExpiredServersCopy.isEmpty()) {
1171       for (ServerName server : deadNotExpiredServersCopy) {
1172         LOG.debug("Removing dead but not expired server: " + server
1173           + " from eligible server pool.");
1174         servers.remove(server);
1175       }
1176     }
1177   }
1178
1179   /**
1180    * Clears any dead server with the same host name and port as any online server
1181    */
1182   void clearDeadServersWithSameHostNameAndPortOfOnlineServer() {
1183     for (ServerName serverName : getOnlineServersList()) {
1184       deadservers.cleanAllPreviousInstances(serverName);
1185     }
1186   }
1187 }