1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.master;
20  
21  import java.io.IOException;
22  import java.net.InetAddress;
23  import java.util.ArrayList;
24  import java.util.Collections;
25  import java.util.HashMap;
26  import java.util.HashSet;
27  import java.util.Iterator;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.Map.Entry;
31  import java.util.Set;
32  import java.util.concurrent.ConcurrentHashMap;
33  import java.util.concurrent.ConcurrentNavigableMap;
34  import java.util.concurrent.ConcurrentSkipListMap;
35  import java.util.concurrent.CopyOnWriteArrayList;
36  
37  import org.apache.commons.logging.Log;
38  import org.apache.commons.logging.LogFactory;
39  import org.apache.hadoop.conf.Configuration;
40  import org.apache.hadoop.hbase.ClockOutOfSyncException;
41  import org.apache.hadoop.hbase.HConstants;
42  import org.apache.hadoop.hbase.HRegionInfo;
43  import org.apache.hadoop.hbase.NotServingRegionException;
44  import org.apache.hadoop.hbase.RegionLoad;
45  import org.apache.hadoop.hbase.Server;
46  import org.apache.hadoop.hbase.ServerLoad;
47  import org.apache.hadoop.hbase.ServerName;
48  import org.apache.hadoop.hbase.YouAreDeadException;
49  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
50  import org.apache.hadoop.hbase.classification.InterfaceAudience;
51  import org.apache.hadoop.hbase.client.ClusterConnection;
52  import org.apache.hadoop.hbase.client.ConnectionFactory;
53  import org.apache.hadoop.hbase.client.RetriesExhaustedException;
54  import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
55  import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
56  import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
57  import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
58  import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
59  import org.apache.hadoop.hbase.monitoring.MonitoredTask;
60  import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
61  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
62  import org.apache.hadoop.hbase.protobuf.RequestConverter;
63  import org.apache.hadoop.hbase.protobuf.ResponseConverter;
64  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
65  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
66  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
67  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
68  import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
69  import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
70  import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
71  import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
72  import org.apache.hadoop.hbase.regionserver.HRegionServer;
73  import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
74  import org.apache.hadoop.hbase.util.Bytes;
75  import org.apache.hadoop.hbase.util.Triple;
76  import org.apache.hadoop.hbase.util.RetryCounter;
77  import org.apache.hadoop.hbase.util.RetryCounterFactory;
78  import org.apache.hadoop.hbase.zookeeper.ZKUtil;
79  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
80  import org.apache.zookeeper.KeeperException;
81  
82  import com.google.common.annotations.VisibleForTesting;
83  import com.google.protobuf.ByteString;
84  import com.google.protobuf.ServiceException;
85  
86  /**
87   * The ServerManager class manages info about region servers.
88   * <p>
89   * Maintains lists of online and dead servers.  Processes the startups,
90   * shutdowns, and deaths of region servers.
91   * <p>
92   * Servers are distinguished in two different ways.  A given server has a
93   * location, specified by hostname and port, of which there can be only one
94   * online at any given time.  A server instance is specified by the location
95   * (hostname and port) as well as the startcode (timestamp from when the server
96   * was started).  This is used to differentiate a restarted instance of a given
97   * server from the original instance.
98   * <p>
99   * If a server is known not to be running any more, it is called dead. The dead
100  * server needs to be handled by a ServerShutdownHandler.  If the handler is not
101  * enabled yet, the server can't be handled right away, so it is queued up.
102  * After the handler is enabled, the server is submitted to it for processing.
103  * However, the handler may be only partially enabled.  If so, the server
104  * cannot be fully processed yet and is queued up again for further processing.
105  * A server is fully processed only after the handler is fully enabled
106  * and has completed the handling.
107  */
108 @InterfaceAudience.Private
109 public class ServerManager {
110   public static final String WAIT_ON_REGIONSERVERS_MAXTOSTART =
111       "hbase.master.wait.on.regionservers.maxtostart";
112 
113   public static final String WAIT_ON_REGIONSERVERS_MINTOSTART =
114       "hbase.master.wait.on.regionservers.mintostart";
115 
116   public static final String WAIT_ON_REGIONSERVERS_TIMEOUT =
117       "hbase.master.wait.on.regionservers.timeout";
118 
119   public static final String WAIT_ON_REGIONSERVERS_INTERVAL =
120       "hbase.master.wait.on.regionservers.interval";
121 
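  // The four keys above are consumed by waitForRegionServers(MonitoredTask) further
  // down, which falls back to a minimum of 1 region server (2 when regions may be
  // assigned to the master), an unbounded maximum, a 4500 ms timeout and a 1500 ms
  // interval. A minimal hbase-site.xml sketch overriding two of them might look like
  // the following; the values shown are illustrative, not recommended defaults:
  //
  //   <property>
  //     <name>hbase.master.wait.on.regionservers.mintostart</name>
  //     <value>3</value>
  //   </property>
  //   <property>
  //     <name>hbase.master.wait.on.regionservers.timeout</name>
  //     <value>10000</value>
  //   </property>
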
122   private static final Log LOG = LogFactory.getLog(ServerManager.class);
123 
124   // Set if we are to shut down the cluster.
125   private volatile boolean clusterShutdown = false;
126 
127   /**
128    * The last flushed sequence id for a region.
129    */
130   private final ConcurrentNavigableMap<byte[], Long> flushedSequenceIdByRegion =
131     new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
132 
133   /**
134    * The last flushed sequence id for a store in a region.
135    */
136   private final ConcurrentNavigableMap<byte[], ConcurrentNavigableMap<byte[], Long>>
137     storeFlushedSequenceIdsByRegion =
138     new ConcurrentSkipListMap<byte[], ConcurrentNavigableMap<byte[], Long>>(Bytes.BYTES_COMPARATOR);
139 
140   /** Map of registered servers to their current load */
141   private final ConcurrentHashMap<ServerName, ServerLoad> onlineServers =
142     new ConcurrentHashMap<ServerName, ServerLoad>();
143 
144   /**
145    * Map of admin interfaces per registered regionserver; these are the interfaces we use
146    * to control regionservers out on the cluster
147    */
148   private final Map<ServerName, AdminService.BlockingInterface> rsAdmins =
149     new HashMap<ServerName, AdminService.BlockingInterface>();
150 
151   /**
152    * List of region servers ({@link ServerName}) that should not get any more new
153    * regions.
154    */
155   private final ArrayList<ServerName> drainingServers =
156     new ArrayList<ServerName>();
157 
158   private final Server master;
159   private final MasterServices services;
160   private final ClusterConnection connection;
161 
162   private final DeadServer deadservers = new DeadServer();
163 
164   private final long maxSkew;
165   private final long warningSkew;
166 
167   private final RetryCounterFactory pingRetryCounterFactory;
168   private final RpcControllerFactory rpcControllerFactory;
169 
170   /**
171    * Set of region servers which are dead but not processed immediately. If one
172    * server died before master enables ServerShutdownHandler, the server will be
173    * added to this set and will be processed through calling
174    * {@link ServerManager#processQueuedDeadServers()} by master.
175    * <p>
176    * A dead server is a server instance known to be dead, not listed in the /hbase/rs
177   * znode any more. It may not have been submitted to ServerShutdownHandler yet
178    * because the handler is not enabled.
179    * <p>
180    * A dead server, which has been submitted to ServerShutdownHandler while the
181    * handler is not enabled, is queued up.
182    * <p>
183    * So this is a set of region servers known to be dead but not submitted to
184   * ServerShutdownHandler for processing yet.
185    */
186   private Set<ServerName> queuedDeadServers = new HashSet<ServerName>();
187 
188   /**
189    * Set of region servers which are dead and submitted to ServerShutdownHandler to process but not
190    * fully processed immediately.
191    * <p>
192    * If one server died before assignment manager finished the failover cleanup, the server will be
193    * added to this set and will be processed through calling
194    * {@link ServerManager#processQueuedDeadServers()} by assignment manager.
195    * <p>
196    * The Boolean value indicates whether log split is needed inside ServerShutdownHandler
197    * <p>
198    * ServerShutdownHandler processes a dead server submitted to the handler after the handler is
199    * enabled. It may not be able to complete the processing because meta is not yet online or master
200    * is currently in startup mode. In this case, the dead server will be parked in this set
201    * temporarily.
202    */
203   private Map<ServerName, Boolean> requeuedDeadServers
204     = new ConcurrentHashMap<ServerName, Boolean>();
205 
206   /** Listeners that are called on server events. */
207   private List<ServerListener> listeners = new CopyOnWriteArrayList<ServerListener>();
208 
209   /**
210    * Constructor.
211    * @param master
212    * @param services
213    * @throws ZooKeeperConnectionException
214    */
215   public ServerManager(final Server master, final MasterServices services)
216       throws IOException {
217     this(master, services, true);
218   }
219 
220   ServerManager(final Server master, final MasterServices services,
221       final boolean connect) throws IOException {
222     this.master = master;
223     this.services = services;
224     Configuration c = master.getConfiguration();
225     maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
226     warningSkew = c.getLong("hbase.master.warningclockskew", 10000);
227     this.connection = connect ? (ClusterConnection)ConnectionFactory.createConnection(c) : null;
228     int pingMaxAttempts = Math.max(1, master.getConfiguration().getInt(
229       "hbase.master.maximum.ping.server.attempts", 10));
230     int pingSleepInterval = Math.max(1, master.getConfiguration().getInt(
231       "hbase.master.ping.server.retry.sleep.interval", 100));
232     this.pingRetryCounterFactory = new RetryCounterFactory(pingMaxAttempts, pingSleepInterval);
233     this.rpcControllerFactory = this.connection == null
234         ? null
235         : connection.getRpcControllerFactory();
236   }
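  // The constructor above pulls its tunables from the master configuration: the maximum
  // and warning clock skew (defaults 30000 ms and 10000 ms) and the ping retry policy
  // used by isServerReachable() (defaults: 10 attempts, 100 ms between attempts). A
  // sketch of hbase-site.xml entries tightening the skew check; the values are
  // illustrative only:
  //
  //   <property>
  //     <name>hbase.master.maxclockskew</name>
  //     <value>10000</value>
  //   </property>
  //   <property>
  //     <name>hbase.master.warningclockskew</name>
  //     <value>5000</value>
  //   </property>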
237 
238   /**
239    * Add the listener to the notification list.
240    * @param listener The ServerListener to register
241    */
242   public void registerListener(final ServerListener listener) {
243     this.listeners.add(listener);
244   }
245 
246   /**
247    * Remove the listener from the notification list.
248    * @param listener The ServerListener to unregister
249    */
250   public boolean unregisterListener(final ServerListener listener) {
251     return this.listeners.remove(listener);
252   }
253 
254   /**
255    * Let the server manager know a new regionserver has come online
256    * @param request the startup request
257    * @param ia the InetAddress from which request is received
258    * @return The ServerName we know this server as.
259    * @throws IOException
260    */
261   ServerName regionServerStartup(RegionServerStartupRequest request, InetAddress ia)
262       throws IOException {
263     // Test for case where we get a region startup message from a regionserver
264     // that has been quickly restarted but whose znode expiration handler has
265     // not yet run, or from a server whose fail we are currently processing.
266     // Check whether its host+port combo is present in serverAddressToServerInfo.  If it
267     // is, reject the server and trigger its expiration. The next time it comes
268     // in, it should have been removed from serverAddressToServerInfo and queued
269     // for processing by ProcessServerShutdown.
270 
271     final String hostname = request.hasUseThisHostnameInstead() ?
272         request.getUseThisHostnameInstead() : ia.getHostName();
273     ServerName sn = ServerName.valueOf(hostname, request.getPort(),
274       request.getServerStartCode());
275     checkClockSkew(sn, request.getServerCurrentTime());
276     checkIsDead(sn, "STARTUP");
277     if (!checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
278       LOG.warn("THIS SHOULD NOT HAPPEN, RegionServerStartup"
279         + " could not record the server: " + sn);
280     }
281     return sn;
282   }
283 
284   private ConcurrentNavigableMap<byte[], Long> getOrCreateStoreFlushedSequenceId(
285     byte[] regionName) {
286     ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId =
287         storeFlushedSequenceIdsByRegion.get(regionName);
288     if (storeFlushedSequenceId != null) {
289       return storeFlushedSequenceId;
290     }
291     storeFlushedSequenceId = new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
292     ConcurrentNavigableMap<byte[], Long> alreadyPut =
293         storeFlushedSequenceIdsByRegion.putIfAbsent(regionName, storeFlushedSequenceId);
294     return alreadyPut == null ? storeFlushedSequenceId : alreadyPut;
295   }
296   /**
297    * Updates last flushed sequence Ids for the regions on server sn
298    * @param sn
299    * @param hsl
300    */
301   private void updateLastFlushedSequenceIds(ServerName sn, ServerLoad hsl) {
302     Map<byte[], RegionLoad> regionsLoad = hsl.getRegionsLoad();
303     for (Entry<byte[], RegionLoad> entry : regionsLoad.entrySet()) {
304       byte[] encodedRegionName = Bytes.toBytes(HRegionInfo.encodeRegionName(entry.getKey()));
305       Long existingValue = flushedSequenceIdByRegion.get(encodedRegionName);
306       long l = entry.getValue().getCompleteSequenceId();
307       // Don't let smaller sequence ids override greater sequence ids.
308       if (LOG.isTraceEnabled()) {
309         LOG.trace(Bytes.toString(encodedRegionName) + ", existingValue=" + existingValue +
310           ", completeSequenceId=" + l);
311       }
312       if (existingValue == null || (l != HConstants.NO_SEQNUM && l > existingValue)) {
313         flushedSequenceIdByRegion.put(encodedRegionName, l);
314       } else if (l != HConstants.NO_SEQNUM && l < existingValue) {
315         LOG.warn("RegionServer " + sn + " indicates a last flushed sequence id ("
316             + l + ") that is less than the previous last flushed sequence id ("
317             + existingValue + ") for region " + Bytes.toString(entry.getKey()) + ". Ignoring.");
318       }
319       ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId =
320           getOrCreateStoreFlushedSequenceId(encodedRegionName);
321       for (StoreSequenceId storeSeqId : entry.getValue().getStoreCompleteSequenceId()) {
322         byte[] family = storeSeqId.getFamilyName().toByteArray();
323         existingValue = storeFlushedSequenceId.get(family);
324         l = storeSeqId.getSequenceId();
325         if (LOG.isTraceEnabled()) {
326           LOG.trace(Bytes.toString(encodedRegionName) + ", family=" + Bytes.toString(family) +
327             ", existingValue=" + existingValue + ", completeSequenceId=" + l);
328         }
329         // Don't let smaller sequence ids override greater sequence ids.
330         if (existingValue == null || (l != HConstants.NO_SEQNUM && l > existingValue.longValue())) {
331           storeFlushedSequenceId.put(family, l);
332         }
333       }
334     }
335   }
336 
337   void regionServerReport(ServerName sn,
338       ServerLoad sl) throws YouAreDeadException {
339     checkIsDead(sn, "REPORT");
340     if (null == this.onlineServers.replace(sn, sl)) {
341       // Already have this host+port combo and it's just a different start code?
342       // Just let the server in. Presume master joining a running cluster.
343       // recordNewServer is what happens at the end of reportServerStartup.
344       // The only thing we are skipping is passing back to the regionserver
345       // the ServerName to use. Here we presume a master has already done
346       // that so we'll press on with whatever it gave us for ServerName.
347       if (!checkAndRecordNewServer(sn, sl)) {
348         LOG.info("RegionServerReport ignored, could not record the server: " + sn);
349         return; // Not recorded, so no need to move on
350       }
351     }
352     updateLastFlushedSequenceIds(sn, sl);
353   }
354 
355   /**
356    * Checks if a server with the same host and port already exists;
357    * if not, or if the existing one has a smaller start code, records the new server.
358    *
359    * @param serverName the server to check and record
360    * @param sl the server load on the server
361    * @return true if the server is recorded, otherwise, false
362    */
363   boolean checkAndRecordNewServer(
364       final ServerName serverName, final ServerLoad sl) {
365     ServerName existingServer = null;
366     synchronized (this.onlineServers) {
367       existingServer = findServerWithSameHostnamePortWithLock(serverName);
368       if (existingServer != null && (existingServer.getStartcode() > serverName.getStartcode())) {
369         LOG.info("Server serverName=" + serverName + " rejected; we already have "
370             + existingServer.toString() + " registered with same hostname and port");
371         return false;
372       }
373       recordNewServerWithLock(serverName, sl);
374     }
375 
376     // Tell our listeners that a server was added
377     if (!this.listeners.isEmpty()) {
378       for (ServerListener listener : this.listeners) {
379         listener.serverAdded(serverName);
380       }
381     }
382 
383     // Note that we assume that same ts means same server, and don't expire in that case.
384     //  TODO: ts can theoretically collide due to clock shifts, so this is a bit hacky.
385     if (existingServer != null && (existingServer.getStartcode() < serverName.getStartcode())) {
386       LOG.info("Triggering server recovery; existingServer " +
387           existingServer + " looks stale, new server:" + serverName);
388       expireServer(existingServer);
389     }
390     return true;
391   }
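  // Illustrative walk-through of the method above: if host1,16020,100 is already online
  // and host1,16020,200 (same host and port, newer startcode) checks in, the new instance
  // is recorded and expireServer() is triggered for the stale host1,16020,100 entry. With
  // the order reversed, the report from the older startcode is rejected and false is
  // returned. The server names used here are hypothetical.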
392 
393   /**
394    * Checks the clock skew between the server and the master. If the clock skew exceeds the
395    * configured max, it will throw an exception; if it exceeds the configured warning threshold,
396    * it will log a warning but start normally.
397    * @param serverName Incoming server's name
398    * @param serverCurrentTime
399    * @throws ClockOutOfSyncException if the skew exceeds the configured max value
400    */
401   private void checkClockSkew(final ServerName serverName, final long serverCurrentTime)
402   throws ClockOutOfSyncException {
403     long skew = Math.abs(System.currentTimeMillis() - serverCurrentTime);
404     if (skew > maxSkew) {
405       String message = "Server " + serverName + " has been " +
406         "rejected; Reported time is too far out of sync with master.  " +
407         "Time difference of " + skew + "ms > max allowed of " + maxSkew + "ms";
408       LOG.warn(message);
409       throw new ClockOutOfSyncException(message);
410     } else if (skew > warningSkew){
411       String message = "Reported time for server " + serverName + " is out of sync with master " +
412         "by " + skew + "ms. (Warning threshold is " + warningSkew + "ms; " +
413         "error threshold is " + maxSkew + "ms)";
414       LOG.warn(message);
415     }
416   }
417 
418   /**
419    * If this server is on the dead list, reject it with a YouAreDeadException.
420    * If it was dead but came back with a new start code, remove the old entry
421    * from the dead list.
422    * @param serverName
423    * @param what STARTUP or REPORT
424    * @throws org.apache.hadoop.hbase.YouAreDeadException
425    */
426   private void checkIsDead(final ServerName serverName, final String what)
427       throws YouAreDeadException {
428     if (this.deadservers.isDeadServer(serverName)) {
429       // host name, port and start code all match with existing one of the
430       // dead servers. So, this server must be dead.
431       String message = "Server " + what + " rejected; currently processing " +
432           serverName + " as dead server";
433       LOG.debug(message);
434       throw new YouAreDeadException(message);
435     }
436     // Remove any dead server with the same hostname and port as the newly checking-in
437     // regionserver after master initialization. See HBASE-5916 for more information.
438     if ((this.services == null || ((HMaster) this.services).isInitialized())
439         && this.deadservers.cleanPreviousInstance(serverName)) {
440       // This server has now become alive after we marked it as dead.
441       // We removed its previous entry from the dead list to reflect it.
442       LOG.debug(what + ":" + " Server " + serverName + " came back up," +
443           " removed it from the dead servers list");
444     }
445   }
446 
447   /**
448    * Assumes onlineServers is locked.
449    * @return ServerName with matching hostname and port.
450    */
451   private ServerName findServerWithSameHostnamePortWithLock(
452       final ServerName serverName) {
453     for (ServerName sn: this.onlineServers.keySet()) {
454       if (ServerName.isSameHostnameAndPort(serverName, sn)) return sn;
455     }
456     return null;
457   }
458 
459   /**
460    * Adds to the onlineServers list. onlineServers should be locked.
461    * @param serverName The remote servers name.
462    * @param sl The load reported by the server; replaces any previous load entry
463    *   recorded for this server name.
464    */
465   @VisibleForTesting
466   void recordNewServerWithLock(final ServerName serverName, final ServerLoad sl) {
467     LOG.info("Registering server=" + serverName);
468     this.onlineServers.put(serverName, sl);
469     this.rsAdmins.remove(serverName);
470   }
471 
472   public RegionStoreSequenceIds getLastFlushedSequenceId(byte[] encodedRegionName) {
473     RegionStoreSequenceIds.Builder builder = RegionStoreSequenceIds.newBuilder();
474     Long seqId = flushedSequenceIdByRegion.get(encodedRegionName);
475     builder.setLastFlushedSequenceId(seqId != null ? seqId.longValue() : HConstants.NO_SEQNUM);
476     Map<byte[], Long> storeFlushedSequenceId =
477         storeFlushedSequenceIdsByRegion.get(encodedRegionName);
478     if (storeFlushedSequenceId != null) {
479       for (Map.Entry<byte[], Long> entry : storeFlushedSequenceId.entrySet()) {
480         builder.addStoreSequenceId(StoreSequenceId.newBuilder()
481             .setFamilyName(ByteString.copyFrom(entry.getKey()))
482             .setSequenceId(entry.getValue().longValue()).build());
483       }
484     }
485     return builder.build();
486   }
487 
488   /**
489    * @param serverName
490    * @return ServerLoad if serverName is known else null
491    */
492   public ServerLoad getLoad(final ServerName serverName) {
493     return this.onlineServers.get(serverName);
494   }
495 
496   /**
497    * Compute the average load across all region servers.
498    * Currently, this uses a very naive computation - just uses the number of
499    * regions being served, ignoring stats about number of requests.
500    * @return the average load
501    */
502   public double getAverageLoad() {
503     int totalLoad = 0;
504     int numServers = 0;
505     for (ServerLoad sl: this.onlineServers.values()) {
506       numServers++;
507       totalLoad += sl.getNumberOfRegions();
508     }
509     return numServers == 0 ? 0 :
510       (double)totalLoad / (double)numServers;
511   }
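  // For example, three online servers carrying 10, 25 and 40 regions yield an average
  // load of (10 + 25 + 40) / 3 = 25.0; with no servers online the method returns 0
  // rather than dividing by zero.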
512 
513   /** @return the count of active regionservers */
514   public int countOfRegionServers() {
515     // Presumes onlineServers is a concurrent map
516     return this.onlineServers.size();
517   }
518 
519   /**
520    * @return Read-only map of servers to serverinfo
521    * @return Read-only map of servers to their current load
522   public Map<ServerName, ServerLoad> getOnlineServers() {
523     // Presumption is that iterating the returned Map is OK.
524     synchronized (this.onlineServers) {
525       return Collections.unmodifiableMap(this.onlineServers);
526     }
527   }
528 
529 
530   public DeadServer getDeadServers() {
531     return this.deadservers;
532   }
533 
534   /**
535    * Checks if any dead servers are currently being processed.
536    * @return true if any RS are being processed as dead, false if not
537    */
538   public boolean areDeadServersInProgress() {
539     return this.deadservers.areDeadServersInProgress();
540   }
541 
542   void letRegionServersShutdown() {
543     long previousLogTime = 0;
544     ServerName sn = master.getServerName();
545     ZooKeeperWatcher zkw = master.getZooKeeper();
546     int onlineServersCt;
547     while ((onlineServersCt = onlineServers.size()) > 0){
548 
549       if (System.currentTimeMillis() > (previousLogTime + 1000)) {
550         Set<ServerName> remainingServers = onlineServers.keySet();
551         synchronized (onlineServers) {
552           if (remainingServers.size() == 1 && remainingServers.contains(sn)) {
553             // Master will delete itself later.
554             return;
555           }
556         }
557         StringBuilder sb = new StringBuilder();
558         // It's ok here to not sync on onlineServers - merely logging
559         for (ServerName key : remainingServers) {
560           if (sb.length() > 0) {
561             sb.append(", ");
562           }
563           sb.append(key);
564         }
565         LOG.info("Waiting on regionserver(s) to go down " + sb.toString());
566         previousLogTime = System.currentTimeMillis();
567       }
568 
569       try {
570         List<String> servers = ZKUtil.listChildrenNoWatch(zkw, zkw.rsZNode);
571         if (servers == null || servers.size() == 0 || (servers.size() == 1
572             && servers.contains(sn.toString()))) {
573           LOG.info("ZK shows there is only the master self online, exiting now");
574           // Master could have lost some ZK events, no need to wait more.
575           break;
576         }
577       } catch (KeeperException ke) {
578         LOG.warn("Failed to list regionservers", ke);
579         // ZK is malfunctioning, don't hang here
580         break;
581       }
582       synchronized (onlineServers) {
583         try {
584           if (onlineServersCt == onlineServers.size()) onlineServers.wait(100);
585         } catch (InterruptedException ignored) {
586           // continue
587         }
588       }
589     }
590   }
591 
592   /*
593    * Expire the passed server.  Add it to the list of dead servers and queue
594    * shutdown processing for it.
595    */
596   public synchronized void expireServer(final ServerName serverName) {
597     if (serverName.equals(master.getServerName())) {
598       if (!(master.isAborted() || master.isStopped())) {
599         master.stop("We lost our znode?");
600       }
601       return;
602     }
603     if (!services.isServerCrashProcessingEnabled()) {
604       LOG.info("Master hasn't enabled ServerShutdownHandler during initialization; "
605           + "delaying expiration of server " + serverName);
606       this.queuedDeadServers.add(serverName);
607       return;
608     }
609     if (this.deadservers.isDeadServer(serverName)) {
610       // TODO: Can this happen?  It shouldn't be online in this case?
611       LOG.warn("Expiration of " + serverName +
612           " but server shutdown already in progress");
613       return;
614     }
615     moveFromOnelineToDeadServers(serverName);
616 
617     // If cluster is going down, yes, servers are going to be expiring; don't
618     // process as a dead server
619     if (this.clusterShutdown) {
620       LOG.info("Cluster shutdown set; " + serverName +
621         " expired; onlineServers=" + this.onlineServers.size());
622       if (this.onlineServers.isEmpty()) {
623         master.stop("Cluster shutdown set; onlineServer=0");
624       }
625       return;
626     }
627 
628     boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName) ==
629         AssignmentManager.ServerHostRegion.HOSTING_REGION;
630     ProcedureExecutor<MasterProcedureEnv> procExec = this.services.getMasterProcedureExecutor();
631     procExec.submitProcedure(new ServerCrashProcedure(
632       procExec.getEnvironment(), serverName, true, carryingMeta));
633     LOG.debug("Added=" + serverName +
634       " to dead servers; submitted shutdown handler to be executed, carryingMeta=" + carryingMeta);
635 
636     // Tell our listeners that a server was removed
637     if (!this.listeners.isEmpty()) {
638       for (ServerListener listener : this.listeners) {
639         listener.serverRemoved(serverName);
640       }
641     }
642   }
643 
644   @VisibleForTesting
645   public void moveFromOnelineToDeadServers(final ServerName sn) {
646     synchronized (onlineServers) {
647       if (!this.onlineServers.containsKey(sn)) {
648         LOG.warn("Expiration of " + sn + " but server not online");
649       }
650       // Remove the server from the known servers lists and update load info BUT
651       // add to deadservers first; do this so it'll show in dead servers list if
652       // not in online servers list.
653       this.deadservers.add(sn);
654       this.onlineServers.remove(sn);
655       onlineServers.notifyAll();
656     }
657     this.rsAdmins.remove(sn);
658   }
659 
660   public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitWal) {
661     // When assignment manager is cleaning up the zookeeper nodes and rebuilding the
662     // in-memory region states, region servers could be down. Meta table can and
663     // should be re-assigned, log splitting can be done too. However, it is better to
664     // wait till the cleanup is done before re-assigning user regions.
665     //
666     // We should not wait in the server shutdown handler thread since it can clog
667     // the handler threads and meta table could not be re-assigned in case
668     // the corresponding server is down. So we queue them up here instead.
669     if (!services.getAssignmentManager().isFailoverCleanupDone()) {
670       requeuedDeadServers.put(serverName, shouldSplitWal);
671       return;
672     }
673 
674     this.deadservers.add(serverName);
675     ProcedureExecutor<MasterProcedureEnv> procExec = this.services.getMasterProcedureExecutor();
676     procExec.submitProcedure(new ServerCrashProcedure(
677       procExec.getEnvironment(), serverName, shouldSplitWal, false));
678   }
679 
680   /**
681    * Process the servers which died during master's initialization. It will be
682    * called after HMaster#assignMeta and AssignmentManager#joinCluster.
683    */
684   synchronized void processQueuedDeadServers() {
685     if (!services.isServerCrashProcessingEnabled()) {
686       LOG.info("Master hasn't enabled ServerShutdownHandler");
687     }
688     Iterator<ServerName> serverIterator = queuedDeadServers.iterator();
689     while (serverIterator.hasNext()) {
690       ServerName tmpServerName = serverIterator.next();
691       expireServer(tmpServerName);
692       serverIterator.remove();
693       requeuedDeadServers.remove(tmpServerName);
694     }
695 
696     if (!services.getAssignmentManager().isFailoverCleanupDone()) {
697       LOG.info("AssignmentManager hasn't finished failover cleanup; waiting");
698     }
699 
700     for(ServerName tmpServerName : requeuedDeadServers.keySet()){
701       processDeadServer(tmpServerName, requeuedDeadServers.get(tmpServerName));
702     }
703     requeuedDeadServers.clear();
704   }
705 
706   /*
707    * Remove the server from the drain list.
708    */
709   public boolean removeServerFromDrainList(final ServerName sn) {
710     // Warn if the server (sn) is not online.  ServerName is of the form:
711     // <hostname> , <port> , <startcode>
712 
713     if (!this.isServerOnline(sn)) {
714       LOG.warn("Server " + sn + " is not currently online. " +
715                "Removing from draining list anyway, as requested.");
716     }
717     // Remove the server from the draining servers lists.
718     return this.drainingServers.remove(sn);
719   }
720 
721   /*
722    * Add the server to the drain list.
723    */
724   public boolean addServerToDrainList(final ServerName sn) {
725     // Warn if the server (sn) is not online.  ServerName is of the form:
726     // <hostname> , <port> , <startcode>
727 
728     if (!this.isServerOnline(sn)) {
729       LOG.warn("Server " + sn + " is not currently online. " +
730                "Ignoring request to add it to draining list.");
731       return false;
732     }
733     // Add the server to the draining servers lists, if it's not already in
734     // it.
735     if (this.drainingServers.contains(sn)) {
736       LOG.warn("Server " + sn + " is already in the draining server list. " +
737                "Ignoring request to add it again.");
738       return false;
739     }
740     return this.drainingServers.add(sn);
741   }
742 
743   // RPC methods to region servers
744 
745   /**
746    * Sends an OPEN RPC to the specified server to open the specified region.
747    * <p>
748    * Open should not fail but can if server just crashed.
749    * <p>
750    * @param server server to open a region
751    * @param region region to open
752    * @param versionOfOfflineNode that needs to be present in the offline node
753    * when RS tries to change the state from OFFLINE to other states.
754    * @param favoredNodes
755    */
756   public RegionOpeningState sendRegionOpen(final ServerName server,
757       HRegionInfo region, int versionOfOfflineNode, List<ServerName> favoredNodes)
758   throws IOException {
759     AdminService.BlockingInterface admin = getRsAdmin(server);
760     if (admin == null) {
761       LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
762         " failed because no RPC connection found to this server");
763       return RegionOpeningState.FAILED_OPENING;
764     }
765     OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, 
766       region, versionOfOfflineNode, favoredNodes, 
767       (RecoveryMode.LOG_REPLAY == this.services.getMasterFileSystem().getLogRecoveryMode()));
768     try {
769       OpenRegionResponse response = admin.openRegion(null, request);
770       return ResponseConverter.getRegionOpeningState(response);
771     } catch (ServiceException se) {
772       throw ProtobufUtil.getRemoteException(se);
773     }
774   }
775 
776   /**
777    * Sends an OPEN RPC to the specified server to open the specified region.
778    * <p>
779    * Open should not fail but can if server just crashed.
780    * <p>
781    * @param server server to open a region
782    * @param regionOpenInfos info of a list of regions to open
783    * @return a list of region opening states
784    */
785   public List<RegionOpeningState> sendRegionOpen(ServerName server,
786       List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos)
787   throws IOException {
788     AdminService.BlockingInterface admin = getRsAdmin(server);
789     if (admin == null) {
790       LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
791         " failed because no RPC connection found to this server");
792       return null;
793     }
794 
795     OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, regionOpenInfos,
796       (RecoveryMode.LOG_REPLAY == this.services.getMasterFileSystem().getLogRecoveryMode()));
797     try {
798       OpenRegionResponse response = admin.openRegion(null, request);
799       return ResponseConverter.getRegionOpeningStateList(response);
800     } catch (ServiceException se) {
801       throw ProtobufUtil.getRemoteException(se);
802     }
803   }
804 
805   private PayloadCarryingRpcController newRpcController() {
806     return rpcControllerFactory == null ? null : rpcControllerFactory.newController();
807   }
808 
809   /**
810    * Sends a CLOSE RPC to the specified server to close the specified region.
811    * <p>
812    * A region server could reject the close request because it either does not
813    * have the specified region or the region is being split.
814    * @param server server to close a region
815    * @param region region to close
816    * @param versionOfClosingNode
817    *   the version of znode to compare when RS transitions the znode from
818    *   CLOSING state.
819    * @param dest - if the region is moved to another server, the destination server. null otherwise.
820    * @return true if server acknowledged close, false if not
821    * @throws IOException
822    */
823   public boolean sendRegionClose(ServerName server, HRegionInfo region,
824     int versionOfClosingNode, ServerName dest, boolean transitionInZK) throws IOException {
825     if (server == null) throw new NullPointerException("Passed server is null");
826     AdminService.BlockingInterface admin = getRsAdmin(server);
827     if (admin == null) {
828       throw new IOException("Attempting to send CLOSE RPC to server " +
829         server.toString() + " for region " +
830         region.getRegionNameAsString() +
831         " failed because no RPC connection found to this server");
832     }
833     PayloadCarryingRpcController controller = newRpcController();
834     return ProtobufUtil.closeRegion(controller, admin, server, region.getRegionName(),
835       versionOfClosingNode, dest, transitionInZK);
836   }
837 
838   public boolean sendRegionClose(ServerName server,
839       HRegionInfo region, int versionOfClosingNode) throws IOException {
840     return sendRegionClose(server, region, versionOfClosingNode, null, true);
841   }
842 
843   /**
844    * Sends a WARMUP RPC to the specified server to warmup the specified region.
845    * <p>
846    * A region server could reject the warmup request because it does not
847    * have the specified region or the region is being split.
848    * @param server server to warmup a region
849    * @param region region to warmup
850    */
851   public void sendRegionWarmup(ServerName server,
852       HRegionInfo region) {
853     if (server == null) return;
854     try {
855       AdminService.BlockingInterface admin = getRsAdmin(server);
856       PayloadCarryingRpcController controller = newRpcController();
857       ProtobufUtil.warmupRegion(controller, admin, region);
858     } catch (IOException e) {
859       LOG.error("Received exception in RPC for warmup server: " +
860         server + ", region: " + region +
861         ", exception: " + e);
862     }
863   }
864 
865   /**
866    * Contacts a region server and waits up to timeout ms
867    * to close the region.  This bypasses the active hmaster.
868    */
869   public static void closeRegionSilentlyAndWait(ClusterConnection connection,
870     ServerName server, HRegionInfo region, long timeout) throws IOException, InterruptedException {
871     AdminService.BlockingInterface rs = connection.getAdmin(server);
872     PayloadCarryingRpcController controller = connection.getRpcControllerFactory().newController();
873     try {
874       ProtobufUtil.closeRegion(controller, rs, server, region.getRegionName(), false);
875     } catch (IOException e) {
876       LOG.warn("Exception when closing region: " + region.getRegionNameAsString(), e);
877     }
878     long expiration = timeout + System.currentTimeMillis();
879     while (System.currentTimeMillis() < expiration) {
880       try {
881         HRegionInfo rsRegion =
882           ProtobufUtil.getRegionInfo(controller, rs, region.getRegionName());
883         if (rsRegion == null) return;
884       } catch (IOException ioe) {
885         if (ioe instanceof NotServingRegionException) // no need to retry again
886           return;
887         LOG.warn("Exception when retrieving regioninfo from: "
888           + region.getRegionNameAsString(), ioe);
889       }
890       Thread.sleep(1000);
891     }
892     throw new IOException("Region " + region + " failed to close within"
893         + " timeout " + timeout + " ms");
894   }
895 
896   /**
897    * Sends a MERGE REGIONS RPC to the specified server to merge the specified
898    * regions.
899    * <p>
900    * A region server could reject the merge request because it does not
901    * have one of the specified regions.
902    * @param server server to merge regions
903    * @param region_a region to merge
904    * @param region_b region to merge
905    * @param forcible true to do a compulsory merge; otherwise we will only merge
906    *          two adjacent regions
907    * @throws IOException
908    */
909   public void sendRegionsMerge(ServerName server, HRegionInfo region_a,
910       HRegionInfo region_b, boolean forcible) throws IOException {
911     if (server == null)
912       throw new NullPointerException("Passed server is null");
913     if (region_a == null || region_b == null)
914       throw new NullPointerException("Passed region is null");
915     AdminService.BlockingInterface admin = getRsAdmin(server);
916     if (admin == null) {
917       throw new IOException("Attempting to send MERGE REGIONS RPC to server "
918           + server.toString() + " for region "
919           + region_a.getRegionNameAsString() + ","
920           + region_b.getRegionNameAsString()
921           + " failed because no RPC connection found to this server");
922     }
923     PayloadCarryingRpcController controller = newRpcController();
924     ProtobufUtil.mergeRegions(controller, admin, region_a, region_b, forcible);
925   }
926 
927   /**
928    * Check if a region server is reachable and has the expected start code
929    */
930   public boolean isServerReachable(ServerName server) {
931     if (server == null) throw new NullPointerException("Passed server is null");
932 
933 
934     RetryCounter retryCounter = pingRetryCounterFactory.create();
935     while (retryCounter.shouldRetry()) {
936       synchronized (this.onlineServers) {
937         if (this.deadservers.isDeadServer(server)) {
938           return false;
939         }
940       }
941       try {
942         PayloadCarryingRpcController controller = newRpcController();
943         AdminService.BlockingInterface admin = getRsAdmin(server);
944         if (admin != null) {
945           ServerInfo info = ProtobufUtil.getServerInfo(controller, admin);
946           return info != null && info.hasServerName()
947             && server.getStartcode() == info.getServerName().getStartCode();
948         }
949       } catch (IOException ioe) {
950         if (LOG.isDebugEnabled()) {
951           LOG.debug("Couldn't reach " + server + ", try=" + retryCounter.getAttemptTimes() + " of "
952               + retryCounter.getMaxAttempts(), ioe);
953         }
954         try {
955           retryCounter.sleepUntilNextRetry();
956         } catch(InterruptedException ie) {
957           Thread.currentThread().interrupt();
958           break;
959         }
960       }
961     }
962     return false;
963   }
964 
965   /**
966    * @param sn
967    * @return Admin interface for the remote regionserver named <code>sn</code>
968    * @throws IOException
969    * @throws RetriesExhaustedException wrapping a ConnectException if failed
970    */
971   private AdminService.BlockingInterface getRsAdmin(final ServerName sn)
972   throws IOException {
973     AdminService.BlockingInterface admin = this.rsAdmins.get(sn);
974     if (admin == null) {
975       LOG.debug("New admin connection to " + sn.toString());
976       if (sn.equals(master.getServerName()) && master instanceof HRegionServer) {
977         // A master is also a region server now, see HBASE-10569 for details
978         admin = ((HRegionServer)master).getRSRpcServices();
979       } else {
980         admin = this.connection.getAdmin(sn);
981       }
982       this.rsAdmins.put(sn, admin);
983     }
984     return admin;
985   }
986 
987   /**
988    * Wait for the region servers to report in.
989    * We will wait until one of these conditions is met:
990    *  - the master is stopped
991    *  - the 'hbase.master.wait.on.regionservers.maxtostart' number of
992    *    region servers is reached
993    *  - the 'hbase.master.wait.on.regionservers.mintostart' count is reached AND
994    *    no new region server has checked in for
995    *    'hbase.master.wait.on.regionservers.interval' time AND
996    *    the 'hbase.master.wait.on.regionservers.timeout' is reached
997    *
998    * @throws InterruptedException
999    */
1000   public void waitForRegionServers(MonitoredTask status)
1001   throws InterruptedException {
1002     final long interval = this.master.getConfiguration().
1003       getLong(WAIT_ON_REGIONSERVERS_INTERVAL, 1500);
1004     final long timeout = this.master.getConfiguration().
1005       getLong(WAIT_ON_REGIONSERVERS_TIMEOUT, 4500);
1006     int defaultMinToStart = 1;
1007     if (BaseLoadBalancer.tablesOnMaster(master.getConfiguration())) {
1008       // If we assign regions to master, we'd like to start
1009       // at least another region server so that we don't
1010       // assign all regions to master if other region servers
1011       // don't come up in time.
1012       defaultMinToStart = 2;
1013     }
1014     int minToStart = this.master.getConfiguration().
1015       getInt(WAIT_ON_REGIONSERVERS_MINTOSTART, defaultMinToStart);
1016     if (minToStart < 1) {
1017       LOG.warn(String.format(
1018         "The value of '%s' (%d) cannot be less than 1, ignoring.",
1019         WAIT_ON_REGIONSERVERS_MINTOSTART, minToStart));
1020       minToStart = 1;
1021     }
1022     int maxToStart = this.master.getConfiguration().
1023       getInt(WAIT_ON_REGIONSERVERS_MAXTOSTART, Integer.MAX_VALUE);
1024     if (maxToStart < minToStart) {
1025         LOG.warn(String.format(
1026             "The value of '%s' (%d) is set less than '%s' (%d), ignoring.",
1027             WAIT_ON_REGIONSERVERS_MAXTOSTART, maxToStart,
1028             WAIT_ON_REGIONSERVERS_MINTOSTART, minToStart));
1029         maxToStart = Integer.MAX_VALUE;
1030     }
1031 
1032     long now =  System.currentTimeMillis();
1033     final long startTime = now;
1034     long slept = 0;
1035     long lastLogTime = 0;
1036     long lastCountChange = startTime;
1037     int count = countOfRegionServers();
1038     int oldCount = 0;
1039     while (!this.master.isStopped() && count < maxToStart
1040         && (lastCountChange+interval > now || timeout > slept || count < minToStart)) {
1041       // Log some info at every interval time or if there is a change
1042       if (oldCount != count || lastLogTime+interval < now){
1043         lastLogTime = now;
1044         String msg =
1045           "Waiting for region servers count to settle; currently"+
1046             " checked in " + count + ", slept for " + slept + " ms," +
1047             " expecting minimum of " + minToStart + ", maximum of "+ maxToStart+
1048             ", timeout of "+timeout+" ms, interval of "+interval+" ms.";
1049         LOG.info(msg);
1050         status.setStatus(msg);
1051       }
1052 
1053       // We sleep for some time
1054       final long sleepTime = 50;
1055       Thread.sleep(sleepTime);
1056       now =  System.currentTimeMillis();
1057       slept = now - startTime;
1058 
1059       oldCount = count;
1060       count = countOfRegionServers();
1061       if (count != oldCount) {
1062         lastCountChange = now;
1063       }
1064     }
1065 
1066     LOG.info("Finished waiting for region servers count to settle;" +
1067       " checked in " + count + ", slept for " + slept + " ms," +
1068       " expecting minimum of " + minToStart + ", maximum of "+ maxToStart+","+
1069       " master is "+ (this.master.isStopped() ? "stopped.": "running")
1070     );
1071   }
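  // A worked example of the loop above, assuming the defaults (interval = 1500 ms,
  // timeout = 4500 ms, minToStart = 1, maxToStart unbounded): if a single region server
  // checks in at t = 0 and no others follow, the wait ends at roughly t = 4500 ms, once
  // the timeout has elapsed, at least 'interval' ms have passed since the count last
  // changed, and count >= minToStart all hold. If minToStart were 3, the loop would keep
  // waiting for a third server (or for the master to stop) regardless of the timeout.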
1072 
1073   /**
1074    * @return A copy of the internal list of online servers.
1075    */
1076   public List<ServerName> getOnlineServersList() {
1077     // TODO: optimize the load balancer call so we don't need to make a new list
1078     // TODO: FIX. THIS IS POPULAR CALL.
1079     return new ArrayList<ServerName>(this.onlineServers.keySet());
1080   }
1081 
1082   /**
1083    * @return A copy of the internal list of draining servers.
1084    */
1085   public List<ServerName> getDrainingServersList() {
1086     return new ArrayList<ServerName>(this.drainingServers);
1087   }
1088 
1089   /**
1090    * @return A copy of the internal set of deadNotExpired servers.
1091    */
1092   Set<ServerName> getDeadNotExpiredServers() {
1093     return new HashSet<ServerName>(this.queuedDeadServers);
1094   }
1095 
1096   /**
1097    * During startup, if we figure it is not a failover, i.e. there are
1098    * no more WAL files to split, we won't try to recover these dead servers.
1099    * So we just remove them from the queue. Use caution in calling this.
1100    */
1101   void removeRequeuedDeadServers() {
1102     requeuedDeadServers.clear();
1103   }
1104 
1105   /**
1106    * @return An unmodifiable view of the internal map of requeuedDeadServers and their
1107    *         corresponding splitlog need flag.
1108    */
1109   Map<ServerName, Boolean> getRequeuedDeadServers() {
1110     return Collections.unmodifiableMap(this.requeuedDeadServers);
1111   }
1112 
1113   public boolean isServerOnline(ServerName serverName) {
1114     return serverName != null && onlineServers.containsKey(serverName);
1115   }
1116 
1117   /**
1118    * Check if a server is known to be dead.  A server can be online,
1119    * or known to be dead, or unknown to this manager (i.e., not online and
1120    * not known to be dead either; it is simply not tracked by the
1121    * master any more, for example, a very old previous instance).
1122    */
1123   public synchronized boolean isServerDead(ServerName serverName) {
1124     return serverName == null || deadservers.isDeadServer(serverName)
1125       || queuedDeadServers.contains(serverName)
1126       || requeuedDeadServers.containsKey(serverName);
1127   }
1128 
1129   public void shutdownCluster() {
1130     this.clusterShutdown = true;
1131     this.master.stop("Cluster shutdown requested");
1132   }
1133 
1134   public boolean isClusterShutdown() {
1135     return this.clusterShutdown;
1136   }
1137 
1138   /**
1139    * Stop the ServerManager.  Currently closes the cluster connection created in the constructor.
1140    */
1141   public void stop() {
1142     if (connection != null) {
1143       try {
1144         connection.close();
1145       } catch (IOException e) {
1146         LOG.error("Attempt to close connection to master failed", e);
1147       }
1148     }
1149   }
1150 
1151   /**
1152    * Creates a list of possible destinations for a region. It contains the online servers, but not
1153    *  the draining or dying servers.
1154    *  @param serverToExclude can be null if there is no server to exclude
1155    */
1156   public List<ServerName> createDestinationServersList(final ServerName serverToExclude){
1157     final List<ServerName> destServers = getOnlineServersList();
1158 
1159     if (serverToExclude != null){
1160       destServers.remove(serverToExclude);
1161     }
1162 
1163     // Loop through the draining server list and remove them from the server list
1164     final List<ServerName> drainingServersCopy = getDrainingServersList();
1165     if (!drainingServersCopy.isEmpty()) {
1166       for (final ServerName server: drainingServersCopy) {
1167         destServers.remove(server);
1168       }
1169     }
1170 
1171     // Remove the deadNotExpired servers from the server list.
1172     removeDeadNotExpiredServers(destServers);
1173     return destServers;
1174   }
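  // Illustrative walk-through: with online servers {A, B, C, D}, serverToExclude = A,
  // a draining list of [B] and C queued as dead-but-not-expired, the returned list
  // contains only D. A server must be online, not excluded, not draining and not queued
  // as dead to remain an eligible destination; the names used here are hypothetical.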
1175 
1176   /**
1177    * Calls {@link #createDestinationServersList} without server to exclude.
1178    */
1179   public List<ServerName> createDestinationServersList(){
1180     return createDestinationServersList(null);
1181   }
1182 
1183   /**
1184    * Loop through the deadNotExpired server list and remove them from the
1185    * given servers list.
1186    * This function should be used carefully outside of this class. You should use a high-level
1187    * method such as {@link #createDestinationServersList()} instead of managing your own list.
1188    */
1189   void removeDeadNotExpiredServers(List<ServerName> servers) {
1190     Set<ServerName> deadNotExpiredServersCopy = this.getDeadNotExpiredServers();
1191     if (!deadNotExpiredServersCopy.isEmpty()) {
1192       for (ServerName server : deadNotExpiredServersCopy) {
1193         LOG.debug("Removing dead but not expired server: " + server
1194           + " from eligible server pool.");
1195         servers.remove(server);
1196       }
1197     }
1198   }
1199 
1200   /**
1201    * Clears any dead server record that has the same host name and port as an online server.
1202    */
1203   void clearDeadServersWithSameHostNameAndPortOfOnlineServer() {
1204     for (ServerName serverName : getOnlineServersList()) {
1205       deadservers.cleanAllPreviousInstances(serverName);
1206     }
1207   }
1208 
1209   /**
1210    * Called by delete table and similar to notify the ServerManager that a region was removed.
1211    */
1212   public void removeRegion(final HRegionInfo regionInfo) {
1213     final byte[] encodedName = regionInfo.getEncodedNameAsBytes();
1214     storeFlushedSequenceIdsByRegion.remove(encodedName);
1215     flushedSequenceIdByRegion.remove(encodedName);
1216   }
1217 
1218   /**
1219    * Called by delete table and similar to notify the ServerManager that a region was removed.
1220    */
1221   public void removeRegions(final List<HRegionInfo> regions) {
1222     for (HRegionInfo hri: regions) {
1223       removeRegion(hri);
1224     }
1225   }
1226 }