/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.CopyOnWriteArrayList;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClockOutOfSyncException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;
import org.apache.hadoop.hbase.util.Triple;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ByteString;
import com.google.protobuf.ServiceException;

/**
 * The ServerManager class manages info about region servers.
 * <p>
 * Maintains lists of online and dead servers.  Processes the startups,
 * shutdowns, and deaths of region servers.
 * <p>
 * Servers are distinguished in two different ways.  A given server has a
 * location, specified by hostname and port, and of which there can only be one
 * online at any given time.  A server instance is specified by the location
 * (hostname and port) as well as the startcode (timestamp from when the server
 * was started).  This is used to differentiate a restarted instance of a given
 * server from the original instance.
 * <p>
 * If a server is known not to be running any more, it is called dead. A dead
 * server needs to be handled by a ServerShutdownHandler.  If the handler is not
 * enabled yet, the server cannot be handled right away, so it is queued up.
 * Once the handler is enabled, the server is submitted to it for processing.
 * The handler may, however, be only partially enabled.  If so, the server
 * cannot be fully processed and is queued up for further processing.  A server
 * is fully processed only after the handler is fully enabled and has completed
 * the handling.
 */
@InterfaceAudience.Private
public class ServerManager {
  public static final String WAIT_ON_REGIONSERVERS_MAXTOSTART =
      "hbase.master.wait.on.regionservers.maxtostart";

  public static final String WAIT_ON_REGIONSERVERS_MINTOSTART =
      "hbase.master.wait.on.regionservers.mintostart";

  public static final String WAIT_ON_REGIONSERVERS_TIMEOUT =
      "hbase.master.wait.on.regionservers.timeout";

  public static final String WAIT_ON_REGIONSERVERS_INTERVAL =
      "hbase.master.wait.on.regionservers.interval";

  private static final Log LOG = LogFactory.getLog(ServerManager.class);

  // Set if we are to shutdown the cluster.
  private volatile boolean clusterShutdown = false;

  /**
   * The last flushed sequence id for a region.
   */
  private final ConcurrentNavigableMap<byte[], Long> flushedSequenceIdByRegion =
    new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);

  /**
   * The last flushed sequence id for a store in a region.
   */
  private final ConcurrentNavigableMap<byte[], ConcurrentNavigableMap<byte[], Long>>
    storeFlushedSequenceIdsByRegion =
    new ConcurrentSkipListMap<byte[], ConcurrentNavigableMap<byte[], Long>>(Bytes.BYTES_COMPARATOR);

  /** Map of registered servers to their current load */
  private final ConcurrentHashMap<ServerName, ServerLoad> onlineServers =
    new ConcurrentHashMap<ServerName, ServerLoad>();

  /**
   * Map of admin interfaces per registered regionserver; these interfaces we use to control
   * regionservers out on the cluster
   */
  private final Map<ServerName, AdminService.BlockingInterface> rsAdmins =
    new HashMap<ServerName, AdminService.BlockingInterface>();

  /**
   * List of region servers that should not get any more new regions.
   */
  private final ArrayList<ServerName> drainingServers =
    new ArrayList<ServerName>();

  private final Server master;
  private final MasterServices services;
  private final ClusterConnection connection;

  private final DeadServer deadservers = new DeadServer();

  private final long maxSkew;
  private final long warningSkew;

  private final RetryCounterFactory pingRetryCounterFactory;
  private final RpcControllerFactory rpcControllerFactory;

  /**
   * Set of region servers which are dead but not processed immediately. If one
   * server died before the master enables ServerShutdownHandler, the server will be
   * added to this set and will be processed through calling
   * {@link ServerManager#processQueuedDeadServers()} by the master.
   * <p>
   * A dead server is a server instance known to be dead, not listed in the /hbase/rs
   * znode any more. It may not have been submitted to ServerShutdownHandler yet
   * because the handler is not enabled.
   * <p>
   * A dead server, which has been submitted to ServerShutdownHandler while the
   * handler is not enabled, is queued up.
   * <p>
   * So this is a set of region servers known to be dead but not submitted to
   * ServerShutdownHandler for processing yet.
   */
  private Set<ServerName> queuedDeadServers = new HashSet<ServerName>();

  /**
   * Set of region servers which are dead and submitted to ServerShutdownHandler to process but not
   * fully processed immediately.
   * <p>
   * If one server died before the assignment manager finished the failover cleanup, the server
   * will be added to this set and will be processed through calling
   * {@link ServerManager#processQueuedDeadServers()} by the assignment manager.
   * <p>
   * The Boolean value indicates whether log split is needed inside ServerShutdownHandler.
   * <p>
   * ServerShutdownHandler processes a dead server submitted to the handler after the handler is
   * enabled. It may not be able to complete the processing because meta is not yet online or the
   * master is currently in startup mode. In this case, the dead server will be parked in this set
   * temporarily.
   */
  private Map<ServerName, Boolean> requeuedDeadServers
    = new ConcurrentHashMap<ServerName, Boolean>();

  /** Listeners that are called on server events. */
  private List<ServerListener> listeners = new CopyOnWriteArrayList<ServerListener>();

  /**
   * Constructor.
   * @param master the master Server
   * @param services the master services
   * @throws IOException if the connection to the cluster cannot be set up
   */
  public ServerManager(final Server master, final MasterServices services)
      throws IOException {
    this(master, services, true);
  }

  ServerManager(final Server master, final MasterServices services,
      final boolean connect) throws IOException {
    this.master = master;
    this.services = services;
    Configuration c = master.getConfiguration();
    maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
    warningSkew = c.getLong("hbase.master.warningclockskew", 10000);
    this.connection = connect ? (ClusterConnection)ConnectionFactory.createConnection(c) : null;
    int pingMaxAttempts = Math.max(1, master.getConfiguration().getInt(
      "hbase.master.maximum.ping.server.attempts", 10));
    int pingSleepInterval = Math.max(1, master.getConfiguration().getInt(
      "hbase.master.ping.server.retry.sleep.interval", 100));
    this.pingRetryCounterFactory = new RetryCounterFactory(pingMaxAttempts, pingSleepInterval);
    this.rpcControllerFactory = this.connection == null
        ? null
        : connection.getRpcControllerFactory();
  }
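
  // A minimal tuning sketch, illustrative only: the settings read by the
  // constructor above and their defaults. "conf" is assumed to be the
  // Configuration handed to the master.
  //
  //   conf.setLong("hbase.master.maxclockskew", 30000);      // reject startup if skew > 30s
  //   conf.setLong("hbase.master.warningclockskew", 10000);  // warn if skew > 10s
  //   conf.setInt("hbase.master.maximum.ping.server.attempts", 10);
  //   conf.setInt("hbase.master.ping.server.retry.sleep.interval", 100); // ms between pings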

  /**
   * Add the listener to the notification list.
   * @param listener The ServerListener to register
   */
  public void registerListener(final ServerListener listener) {
    this.listeners.add(listener);
  }

  /**
   * Remove the listener from the notification list.
   * @param listener The ServerListener to unregister
   */
  public boolean unregisterListener(final ServerListener listener) {
    return this.listeners.remove(listener);
  }
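
  // An illustrative sketch of listener registration; "serverManager" is a
  // hypothetical reference to this class. The anonymous class assumes
  // ServerListener declares the serverAdded/serverRemoved callbacks invoked
  // elsewhere in this file; any further interface methods would need stubs too.
  //
  //   serverManager.registerListener(new ServerListener() {
  //     @Override
  //     public void serverAdded(ServerName serverName) {
  //       LOG.info("Server joined cluster: " + serverName);
  //     }
  //     @Override
  //     public void serverRemoved(ServerName serverName) {
  //       LOG.info("Server left cluster: " + serverName);
  //     }
  //   });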

  /**
   * Let the server manager know a new regionserver has come online.
   * @param request the startup request
   * @param ia the InetAddress from which the request was received
   * @return The ServerName we know this server as.
   * @throws IOException
   */
  ServerName regionServerStartup(RegionServerStartupRequest request, InetAddress ia)
      throws IOException {
    // Test for case where we get a region startup message from a regionserver
    // that has been quickly restarted but whose znode expiration handler has
    // not yet run, or from a server whose fail we are currently processing.
    // Test its host+port combo is present in serverAddressToServerInfo.  If it
    // is, reject the server and trigger its expiration. The next time it comes
    // in, it should have been removed from serverAddressToServerInfo and queued
    // for processing by ProcessServerShutdown.

    final String hostname = request.hasUseThisHostnameInstead() ?
        request.getUseThisHostnameInstead() : ia.getHostName();
    ServerName sn = ServerName.valueOf(hostname, request.getPort(),
      request.getServerStartCode());
    checkClockSkew(sn, request.getServerCurrentTime());
    checkIsDead(sn, "STARTUP");
    if (!checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
      LOG.warn("THIS SHOULD NOT HAPPEN, RegionServerStartup"
        + " could not record the server: " + sn);
    }
    return sn;
  }

  private ConcurrentNavigableMap<byte[], Long> getOrCreateStoreFlushedSequenceId(
    byte[] regionName) {
    ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId =
        storeFlushedSequenceIdsByRegion.get(regionName);
    if (storeFlushedSequenceId != null) {
      return storeFlushedSequenceId;
    }
    storeFlushedSequenceId = new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
    ConcurrentNavigableMap<byte[], Long> alreadyPut =
        storeFlushedSequenceIdsByRegion.putIfAbsent(regionName, storeFlushedSequenceId);
    return alreadyPut == null ? storeFlushedSequenceId : alreadyPut;
  }
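
  // The method above uses the standard race-safe ConcurrentMap idiom: build a
  // candidate map, publish it with putIfAbsent, and keep whichever instance
  // won the race. Two reporting threads may both construct a map for the same
  // region, but all callers end up sharing a single instance, e.g.:
  //
  //   ConcurrentNavigableMap<byte[], Long> m1 = getOrCreateStoreFlushedSequenceId(region);
  //   ConcurrentNavigableMap<byte[], Long> m2 = getOrCreateStoreFlushedSequenceId(region);
  //   assert m1 == m2;  // both callers see the same map instance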

  /**
   * Updates the last flushed sequence ids for the regions on server sn.
   * @param sn the server name
   * @param hsl the load reported by that server
   */
  private void updateLastFlushedSequenceIds(ServerName sn, ServerLoad hsl) {
    Map<byte[], RegionLoad> regionsLoad = hsl.getRegionsLoad();
    for (Entry<byte[], RegionLoad> entry : regionsLoad.entrySet()) {
      byte[] encodedRegionName = Bytes.toBytes(HRegionInfo.encodeRegionName(entry.getKey()));
      Long existingValue = flushedSequenceIdByRegion.get(encodedRegionName);
      long l = entry.getValue().getCompleteSequenceId();
      // Don't let smaller sequence ids override greater sequence ids.
      if (LOG.isTraceEnabled()) {
        LOG.trace(Bytes.toString(encodedRegionName) + ", existingValue=" + existingValue +
          ", completeSequenceId=" + l);
      }
      if (existingValue == null || (l != HConstants.NO_SEQNUM && l > existingValue)) {
        flushedSequenceIdByRegion.put(encodedRegionName, l);
      } else if (l != HConstants.NO_SEQNUM && l < existingValue) {
        LOG.warn("RegionServer " + sn + " indicates a last flushed sequence id ("
            + l + ") that is less than the previous last flushed sequence id ("
            + existingValue + ") for region " + Bytes.toString(entry.getKey()) + ". Ignoring.");
      }
      ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId =
          getOrCreateStoreFlushedSequenceId(encodedRegionName);
      for (StoreSequenceId storeSeqId : entry.getValue().getStoreCompleteSequenceId()) {
        byte[] family = storeSeqId.getFamilyName().toByteArray();
        existingValue = storeFlushedSequenceId.get(family);
        l = storeSeqId.getSequenceId();
        if (LOG.isTraceEnabled()) {
          LOG.trace(Bytes.toString(encodedRegionName) + ", family=" + Bytes.toString(family) +
            ", existingValue=" + existingValue + ", completeSequenceId=" + l);
        }
        // Don't let smaller sequence ids override greater sequence ids.
        if (existingValue == null || (l != HConstants.NO_SEQNUM && l > existingValue.longValue())) {
          storeFlushedSequenceId.put(family, l);
        }
      }
    }
  }
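
  // Worked example of the monotonic update above, with illustrative values: if
  // the recorded last flushed sequence id for a region is 100, a report of 120
  // replaces it, a report of 90 only triggers the warning and is ignored, and
  // a report of HConstants.NO_SEQNUM never overwrites a recorded value.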

  void regionServerReport(ServerName sn,
      ServerLoad sl) throws YouAreDeadException {
    checkIsDead(sn, "REPORT");
    if (null == this.onlineServers.replace(sn, sl)) {
      // Already have this host+port combo and it's just a different start code?
      // Just let the server in. Presume master joining a running cluster.
      // recordNewServer is what happens at the end of reportServerStartup.
      // The only thing we are skipping is passing back to the regionserver
      // the ServerName to use. Here we presume a master has already done
      // that so we'll press on with whatever it gave us for ServerName.
      if (!checkAndRecordNewServer(sn, sl)) {
        LOG.info("RegionServerReport ignored, could not record the server: " + sn);
        return; // Not recorded, so no need to move on
      }
    }
    updateLastFlushedSequenceIds(sn, sl);
  }

  /**
   * Checks if a server of the same host and port already exists; if not, or if
   * the existing one has a smaller start code, records the new server.
   *
   * @param serverName the server to check and record
   * @param sl the server load on the server
   * @return true if the server is recorded, otherwise, false
   */
  boolean checkAndRecordNewServer(
      final ServerName serverName, final ServerLoad sl) {
    ServerName existingServer = null;
    synchronized (this.onlineServers) {
      existingServer = findServerWithSameHostnamePortWithLock(serverName);
      if (existingServer != null && (existingServer.getStartcode() > serverName.getStartcode())) {
        LOG.info("Server serverName=" + serverName + " rejected; we already have "
            + existingServer.toString() + " registered with same hostname and port");
        return false;
      }
      recordNewServerWithLock(serverName, sl);
    }

    // Tell our listeners that a server was added
    if (!this.listeners.isEmpty()) {
      for (ServerListener listener : this.listeners) {
        listener.serverAdded(serverName);
      }
    }

    // Note that we assume that same ts means same server, and don't expire in that case.
    // TODO: ts can theoretically collide due to clock shifts, so this is a bit hacky.
    if (existingServer != null && (existingServer.getStartcode() < serverName.getStartcode())) {
      LOG.info("Triggering server recovery; existingServer " +
          existingServer + " looks stale, new server:" + serverName);
      expireServer(existingServer);
    }
    return true;
  }
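
  // Start-code arbitration in the method above, by example (hypothetical
  // values): with "host1,16020,1000" registered and "host1,16020,2000"
  // checking in, the newcomer has the larger start code, so it is recorded
  // and the stale instance is expired. If instead "host1,16020,500" checked
  // in, it would be rejected because an instance with a newer start code is
  // already online.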

  /**
   * Checks the clock skew between the server and the master. If the clock skew exceeds the
   * configured max, it will throw an exception; if it exceeds the configured warning threshold,
   * it will log a warning but start normally.
   * @param serverName Incoming server's name
   * @param serverCurrentTime the current time of the region server, in milliseconds
   * @throws ClockOutOfSyncException if the skew exceeds the configured max value
   */
  private void checkClockSkew(final ServerName serverName, final long serverCurrentTime)
  throws ClockOutOfSyncException {
    long skew = Math.abs(System.currentTimeMillis() - serverCurrentTime);
    if (skew > maxSkew) {
      String message = "Server " + serverName + " has been " +
        "rejected; Reported time is too far out of sync with master.  " +
        "Time difference of " + skew + "ms > max allowed of " + maxSkew + "ms";
      LOG.warn(message);
      throw new ClockOutOfSyncException(message);
    } else if (skew > warningSkew) {
      String message = "Reported time for server " + serverName + " is out of sync with master " +
        "by " + skew + "ms. (Warning threshold is " + warningSkew + "ms; " +
        "error threshold is " + maxSkew + "ms)";
      LOG.warn(message);
    }
  }
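
  // By example, with the defaults read in the constructor (maxSkew=30000,
  // warningSkew=10000): a reported skew of 15000 ms only logs a warning,
  // while a skew of 45000 ms exceeds maxSkew and the check-in is rejected
  // with ClockOutOfSyncException.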

  /**
   * If this server is on the dead list, reject it with a YouAreDeadException.
   * If it was dead but came back with a new start code, remove the old entry
   * from the dead list.
   * @param serverName the server to check
   * @param what STARTUP or REPORT
   * @throws org.apache.hadoop.hbase.YouAreDeadException
   */
  private void checkIsDead(final ServerName serverName, final String what)
      throws YouAreDeadException {
    if (this.deadservers.isDeadServer(serverName)) {
      // host name, port and start code all match with existing one of the
      // dead servers. So, this server must be dead.
      String message = "Server " + what + " rejected; currently processing " +
          serverName + " as dead server";
      LOG.debug(message);
      throw new YouAreDeadException(message);
    }
    // remove dead server with same hostname and port of newly checking in rs after master
    // initialization. See HBASE-5916 for more information.
    if ((this.services == null || ((HMaster) this.services).isInitialized())
        && this.deadservers.cleanPreviousInstance(serverName)) {
      // This server has now become alive after we marked it as dead.
      // We removed its previous entry from the dead list to reflect it.
      LOG.debug(what + ":" + " Server " + serverName + " came back up," +
          " removed it from the dead servers list");
    }
  }

  /**
   * Assumes onlineServers is locked.
   * @return ServerName with matching hostname and port.
   */
  private ServerName findServerWithSameHostnamePortWithLock(
      final ServerName serverName) {
    for (ServerName sn: this.onlineServers.keySet()) {
      if (ServerName.isSameHostnameAndPort(serverName, sn)) return sn;
    }
    return null;
  }

  /**
   * Adds to the onlineServers list. onlineServers should be locked.
   * @param serverName The remote server's name.
   * @param sl the load reported by the remote server
   */
  @VisibleForTesting
  void recordNewServerWithLock(final ServerName serverName, final ServerLoad sl) {
    LOG.info("Registering server=" + serverName);
    this.onlineServers.put(serverName, sl);
    this.rsAdmins.remove(serverName);
  }

  public RegionStoreSequenceIds getLastFlushedSequenceId(byte[] encodedRegionName) {
    RegionStoreSequenceIds.Builder builder = RegionStoreSequenceIds.newBuilder();
    Long seqId = flushedSequenceIdByRegion.get(encodedRegionName);
    builder.setLastFlushedSequenceId(seqId != null ? seqId.longValue() : HConstants.NO_SEQNUM);
    Map<byte[], Long> storeFlushedSequenceId =
        storeFlushedSequenceIdsByRegion.get(encodedRegionName);
    if (storeFlushedSequenceId != null) {
      for (Map.Entry<byte[], Long> entry : storeFlushedSequenceId.entrySet()) {
        builder.addStoreSequenceId(StoreSequenceId.newBuilder()
            .setFamilyName(ByteString.copyFrom(entry.getKey()))
            .setSequenceId(entry.getValue().longValue()).build());
      }
    }
    return builder.build();
  }

  /**
   * @param serverName the server to look up
   * @return ServerLoad if serverName is known else null
   */
  public ServerLoad getLoad(final ServerName serverName) {
    return this.onlineServers.get(serverName);
  }

  /**
   * Compute the average load across all region servers.
   * Currently, this uses a very naive computation - just uses the number of
   * regions being served, ignoring stats about number of requests.
   * @return the average load
   */
  public double getAverageLoad() {
    int totalLoad = 0;
    int numServers = 0;
    for (ServerLoad sl: this.onlineServers.values()) {
      numServers++;
      totalLoad += sl.getNumberOfRegions();
    }
    return numServers == 0 ? 0 :
      (double)totalLoad / (double)numServers;
  }
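
  // By example: three online servers carrying 10, 20, and 30 regions average
  // to (10 + 20 + 30) / 3 = 20.0; with no servers online the method returns 0
  // rather than dividing by zero.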

  /** @return the count of active regionservers */
  public int countOfRegionServers() {
    // Presumes onlineServers is a concurrent map
    return this.onlineServers.size();
  }

  /**
   * @return Read-only map of servers to serverinfo
   */
  public Map<ServerName, ServerLoad> getOnlineServers() {
    // Presumption is that iterating the returned Map is OK.
    synchronized (this.onlineServers) {
      return Collections.unmodifiableMap(this.onlineServers);
    }
  }

  public DeadServer getDeadServers() {
    return this.deadservers;
  }

  /**
   * Checks if any dead servers are currently in progress.
   * @return true if any RS are being processed as dead, false if not
   */
  public boolean areDeadServersInProgress() {
    return this.deadservers.areDeadServersInProgress();
  }

  void letRegionServersShutdown() {
    long previousLogTime = 0;
    ServerName sn = master.getServerName();
    ZooKeeperWatcher zkw = master.getZooKeeper();
    int onlineServersCt;
    while ((onlineServersCt = onlineServers.size()) > 0) {
      if (System.currentTimeMillis() > (previousLogTime + 1000)) {
        Set<ServerName> remainingServers = onlineServers.keySet();
        synchronized (onlineServers) {
          if (remainingServers.size() == 1 && remainingServers.contains(sn)) {
            // Master will delete itself later.
            return;
          }
        }
        StringBuilder sb = new StringBuilder();
        // It's ok here to not sync on onlineServers - merely logging
        for (ServerName key : remainingServers) {
          if (sb.length() > 0) {
            sb.append(", ");
          }
          sb.append(key);
        }
        LOG.info("Waiting on regionserver(s) to go down " + sb.toString());
        previousLogTime = System.currentTimeMillis();
      }

      try {
        List<String> servers = ZKUtil.listChildrenNoWatch(zkw, zkw.rsZNode);
        if (servers == null || servers.size() == 0 || (servers.size() == 1
            && servers.contains(sn.toString()))) {
          LOG.info("ZK shows there is only the master self online, exiting now");
          // Master could have lost some ZK events, no need to wait more.
          break;
        }
      } catch (KeeperException ke) {
        LOG.warn("Failed to list regionservers", ke);
        // ZK is malfunctioning, don't hang here
        break;
      }
      synchronized (onlineServers) {
        try {
          if (onlineServersCt == onlineServers.size()) onlineServers.wait(100);
        } catch (InterruptedException ignored) {
          // continue
        }
      }
    }
  }

  /*
   * Expire the passed server.  Add it to list of dead servers and queue a
   * shutdown processing.
   */
  public synchronized void expireServer(final ServerName serverName) {
    if (serverName.equals(master.getServerName())) {
      if (!(master.isAborted() || master.isStopped())) {
        master.stop("We lost our znode?");
      }
      return;
    }
    if (!services.isServerCrashProcessingEnabled()) {
      LOG.info("Master hasn't enabled ServerShutdownHandler during initialization; "
          + "delaying expiration of server " + serverName);
      this.queuedDeadServers.add(serverName);
      return;
    }
    if (this.deadservers.isDeadServer(serverName)) {
      // TODO: Can this happen?  It shouldn't be online in this case?
      LOG.warn("Expiration of " + serverName +
          " but server shutdown already in progress");
      return;
    }
    moveFromOnlineToDeadServers(serverName);

    // If cluster is going down, yes, servers are going to be expiring; don't
    // process as a dead server
    if (this.clusterShutdown) {
      LOG.info("Cluster shutdown set; " + serverName +
        " expired; onlineServers=" + this.onlineServers.size());
      if (this.onlineServers.isEmpty()) {
        master.stop("Cluster shutdown set; onlineServer=0");
      }
      return;
    }

    boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName) ==
        AssignmentManager.ServerHostRegion.HOSTING_REGION;
    ProcedureExecutor<MasterProcedureEnv> procExec = this.services.getMasterProcedureExecutor();
    procExec.submitProcedure(new ServerCrashProcedure(
      procExec.getEnvironment(), serverName, true, carryingMeta));
    LOG.debug("Added=" + serverName +
      " to dead servers, submitted shutdown handler to be executed meta=" + carryingMeta);

    // Tell our listeners that a server was removed
    if (!this.listeners.isEmpty()) {
      for (ServerListener listener : this.listeners) {
        listener.serverRemoved(serverName);
      }
    }
  }
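
  // The expiration flow above, summarized: the master's own name
  // short-circuits (stop, don't expire); before crash processing is enabled
  // the server is only parked in queuedDeadServers; during cluster shutdown
  // the server is dropped without a ServerCrashProcedure; otherwise a
  // ServerCrashProcedure is submitted (with WAL splitting requested) and the
  // listeners are notified.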

  @VisibleForTesting
  public void moveFromOnlineToDeadServers(final ServerName sn) {
    synchronized (onlineServers) {
      if (!this.onlineServers.containsKey(sn)) {
        LOG.warn("Expiration of " + sn + " but server not online");
      }
      // Remove the server from the known servers lists and update load info BUT
      // add to deadservers first; do this so it'll show in dead servers list if
      // not in online servers list.
      this.deadservers.add(sn);
      this.onlineServers.remove(sn);
      onlineServers.notifyAll();
    }
    this.rsAdmins.remove(sn);
  }

  public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitWal) {
    // When assignment manager is cleaning up the zookeeper nodes and rebuilding the
    // in-memory region states, region servers could be down. Meta table can and
    // should be re-assigned, log splitting can be done too. However, it is better to
    // wait till the cleanup is done before re-assigning user regions.
    //
    // We should not wait in the server shutdown handler thread since it can clog
    // the handler threads and meta table could not be re-assigned in case
    // the corresponding server is down. So we queue them up here instead.
    if (!services.getAssignmentManager().isFailoverCleanupDone()) {
      requeuedDeadServers.put(serverName, shouldSplitWal);
      return;
    }

    this.deadservers.add(serverName);
    ProcedureExecutor<MasterProcedureEnv> procExec = this.services.getMasterProcedureExecutor();
    procExec.submitProcedure(new ServerCrashProcedure(
      procExec.getEnvironment(), serverName, shouldSplitWal, false));
  }

  /**
   * Process the servers which died during master's initialization. It will be
   * called after HMaster#assignMeta and AssignmentManager#joinCluster.
   */
  synchronized void processQueuedDeadServers() {
    if (!services.isServerCrashProcessingEnabled()) {
      LOG.info("Master hasn't enabled ServerShutdownHandler");
    }
    Iterator<ServerName> serverIterator = queuedDeadServers.iterator();
    while (serverIterator.hasNext()) {
      ServerName tmpServerName = serverIterator.next();
      expireServer(tmpServerName);
      serverIterator.remove();
      requeuedDeadServers.remove(tmpServerName);
    }

    if (!services.getAssignmentManager().isFailoverCleanupDone()) {
      LOG.info("AssignmentManager hasn't finished failover cleanup; waiting");
    }
    for (Map.Entry<ServerName, Boolean> entry : requeuedDeadServers.entrySet()) {
      processDeadServer(entry.getKey(), entry.getValue());
    }
    requeuedDeadServers.clear();
  }

  /*
   * Remove the server from the drain list.
   */
  public boolean removeServerFromDrainList(final ServerName sn) {
    // Warn if the server (sn) is not online.  ServerName is of the form:
    // <hostname> , <port> , <startcode>

    if (!this.isServerOnline(sn)) {
      LOG.warn("Server " + sn + " is not currently online. " +
               "Removing from draining list anyway, as requested.");
    }
    // Remove the server from the draining servers lists.
    return this.drainingServers.remove(sn);
  }

  /*
   * Add the server to the drain list.
   */
  public boolean addServerToDrainList(final ServerName sn) {
    // Warn if the server (sn) is not online.  ServerName is of the form:
    // <hostname> , <port> , <startcode>

    if (!this.isServerOnline(sn)) {
      LOG.warn("Server " + sn + " is not currently online. " +
               "Ignoring request to add it to draining list.");
      return false;
    }
    // Add the server to the draining servers lists, if it's not already in
    // it.
    if (this.drainingServers.contains(sn)) {
      LOG.warn("Server " + sn + " is already in the draining server list. " +
               "Ignoring request to add it again.");
      return false;
    }
    return this.drainingServers.add(sn);
  }

  // RPC methods to region servers

  /**
   * Sends an OPEN RPC to the specified server to open the specified region.
   * <p>
   * Open should not fail but can if server just crashed.
   * <p>
   * @param server server to open a region
   * @param region region to open
   * @param versionOfOfflineNode the expected version of the offline node
   *   when the RS tries to change the state from OFFLINE to other states.
   * @param favoredNodes the favored nodes for the region
   */
  public RegionOpeningState sendRegionOpen(final ServerName server,
      HRegionInfo region, int versionOfOfflineNode, List<ServerName> favoredNodes)
  throws IOException {
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
      LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
        " failed because no RPC connection found to this server");
      return RegionOpeningState.FAILED_OPENING;
    }
    OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server,
      region, versionOfOfflineNode, favoredNodes,
      (RecoveryMode.LOG_REPLAY == this.services.getMasterFileSystem().getLogRecoveryMode()));
    try {
      OpenRegionResponse response = admin.openRegion(null, request);
      return ResponseConverter.getRegionOpeningState(response);
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    }
  }

  /**
   * Sends an OPEN RPC to the specified server to open the specified regions.
   * <p>
   * Open should not fail but can if server just crashed.
   * <p>
   * @param server server to open regions
   * @param regionOpenInfos info of a list of regions to open
   * @return a list of region opening states
   */
  public List<RegionOpeningState> sendRegionOpen(ServerName server,
      List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos)
  throws IOException {
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
      LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
        " failed because no RPC connection found to this server");
      return null;
    }

    OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, regionOpenInfos,
      (RecoveryMode.LOG_REPLAY == this.services.getMasterFileSystem().getLogRecoveryMode()));
    try {
      OpenRegionResponse response = admin.openRegion(null, request);
      return ResponseConverter.getRegionOpeningStateList(response);
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    }
  }

  private PayloadCarryingRpcController newRpcController() {
    return rpcControllerFactory == null ? null : rpcControllerFactory.newController();
  }

  /**
   * Sends a CLOSE RPC to the specified server to close the specified region.
   * <p>
   * A region server could reject the close request because it either does not
   * have the specified region or the region is being split.
   * @param server server to close a region
   * @param region region to close
   * @param versionOfClosingNode
   *   the version of znode to compare when RS transitions the znode from
   *   CLOSING state.
   * @param dest - if the region is moved to another server, the destination server. null otherwise.
   * @return true if server acknowledged close, false if not
   * @throws IOException
   */
  public boolean sendRegionClose(ServerName server, HRegionInfo region,
    int versionOfClosingNode, ServerName dest, boolean transitionInZK) throws IOException {
    if (server == null) throw new NullPointerException("Passed server is null");
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
      throw new IOException("Attempting to send CLOSE RPC to server " +
        server.toString() + " for region " +
        region.getRegionNameAsString() +
        " failed because no RPC connection found to this server");
    }
    PayloadCarryingRpcController controller = newRpcController();
    return ProtobufUtil.closeRegion(controller, admin, server, region.getRegionName(),
      versionOfClosingNode, dest, transitionInZK);
  }

  public boolean sendRegionClose(ServerName server,
      HRegionInfo region, int versionOfClosingNode) throws IOException {
    return sendRegionClose(server, region, versionOfClosingNode, null, true);
  }

  /**
   * Sends a WARMUP RPC to the specified server to warm up the specified region.
   * <p>
   * A region server could reject the warmup request because it either does not
   * have the specified region or the region is being split.
   * @param server server to warm up a region
   * @param region region to warm up
   */
  public void sendRegionWarmup(ServerName server,
      HRegionInfo region) {
    if (server == null) return;
    try {
      AdminService.BlockingInterface admin = getRsAdmin(server);
      PayloadCarryingRpcController controller = newRpcController();
      ProtobufUtil.warmupRegion(controller, admin, region);
    } catch (IOException e) {
      LOG.error("Received exception in RPC for warmup server: " + server +
        ", region: " + region +
        ", exception: " + e);
    }
  }

  /**
   * Contacts a region server and waits up to timeout ms
   * to close the region.  This bypasses the active hmaster.
   */
  public static void closeRegionSilentlyAndWait(ClusterConnection connection,
    ServerName server, HRegionInfo region, long timeout) throws IOException, InterruptedException {
    AdminService.BlockingInterface rs = connection.getAdmin(server);
    PayloadCarryingRpcController controller = connection.getRpcControllerFactory().newController();
    try {
      ProtobufUtil.closeRegion(controller, rs, server, region.getRegionName(), false);
    } catch (IOException e) {
      LOG.warn("Exception when closing region: " + region.getRegionNameAsString(), e);
    }
    long expiration = timeout + System.currentTimeMillis();
    while (System.currentTimeMillis() < expiration) {
      try {
        HRegionInfo rsRegion =
          ProtobufUtil.getRegionInfo(controller, rs, region.getRegionName());
        if (rsRegion == null) return;
      } catch (IOException ioe) {
        if (ioe instanceof NotServingRegionException) // no need to retry again
          return;
        LOG.warn("Exception when retrieving regioninfo from: "
          + region.getRegionNameAsString(), ioe);
      }
      Thread.sleep(1000);
    }
    throw new IOException("Region " + region + " failed to close within"
        + " timeout " + timeout);
  }

  /**
   * Sends a MERGE REGIONS RPC to the specified server to merge the specified
   * regions.
   * <p>
   * A region server could reject the merge request if it does not have the
   * specified regions.
   * @param server server to merge regions
   * @param region_a region to merge
   * @param region_b region to merge
   * @param forcible true to do a compulsory merge; otherwise we will only merge
   *          two adjacent regions
   * @throws IOException
   */
  public void sendRegionsMerge(ServerName server, HRegionInfo region_a,
      HRegionInfo region_b, boolean forcible) throws IOException {
    if (server == null)
      throw new NullPointerException("Passed server is null");
    if (region_a == null || region_b == null)
      throw new NullPointerException("Passed region is null");
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
      throw new IOException("Attempting to send MERGE REGIONS RPC to server "
          + server.toString() + " for region "
          + region_a.getRegionNameAsString() + ","
          + region_b.getRegionNameAsString()
          + " failed because no RPC connection found to this server");
    }
    PayloadCarryingRpcController controller = newRpcController();
    ProtobufUtil.mergeRegions(controller, admin, region_a, region_b, forcible);
  }

  /**
   * Check if a region server is reachable and has the expected start code.
   */
  public boolean isServerReachable(ServerName server) {
    if (server == null) throw new NullPointerException("Passed server is null");

    RetryCounter retryCounter = pingRetryCounterFactory.create();
    while (retryCounter.shouldRetry()) {
      synchronized (this.onlineServers) {
        if (this.deadservers.isDeadServer(server)) {
          return false;
        }
      }
      try {
        PayloadCarryingRpcController controller = newRpcController();
        AdminService.BlockingInterface admin = getRsAdmin(server);
        if (admin != null) {
          ServerInfo info = ProtobufUtil.getServerInfo(controller, admin);
          return info != null && info.hasServerName()
            && server.getStartcode() == info.getServerName().getStartCode();
        }
      } catch (IOException ioe) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Couldn't reach " + server + ", try=" + retryCounter.getAttemptTimes() + " of "
              + retryCounter.getMaxAttempts(), ioe);
        }
        try {
          retryCounter.sleepUntilNextRetry();
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          break;
        }
      }
    }
    return false;
  }

  /**
   * @param sn the server to connect to
   * @return Admin interface for the remote regionserver named <code>sn</code>
   * @throws IOException
   * @throws RetriesExhaustedException wrapping a ConnectException if failed
   */
  private AdminService.BlockingInterface getRsAdmin(final ServerName sn)
  throws IOException {
    AdminService.BlockingInterface admin = this.rsAdmins.get(sn);
    if (admin == null) {
      LOG.debug("New admin connection to " + sn.toString());
      if (sn.equals(master.getServerName()) && master instanceof HRegionServer) {
        // A master is also a region server now, see HBASE-10569 for details
        admin = ((HRegionServer)master).getRSRpcServices();
      } else {
        admin = this.connection.getAdmin(sn);
      }
      this.rsAdmins.put(sn, admin);
    }
    return admin;
  }

  /**
   * Wait for the region servers to report in.
   * We will wait until one of these conditions is met:
   *  - the master is stopped
   *  - the 'hbase.master.wait.on.regionservers.maxtostart' number of
   *    region servers is reached
   *  - the 'hbase.master.wait.on.regionservers.mintostart' is reached AND
   *    no new region servers have checked in for
   *    'hbase.master.wait.on.regionservers.interval' time AND
   *    the 'hbase.master.wait.on.regionservers.timeout' is reached
   *
   * @throws InterruptedException
   */
  public void waitForRegionServers(MonitoredTask status)
  throws InterruptedException {
    final long interval = this.master.getConfiguration().
      getLong(WAIT_ON_REGIONSERVERS_INTERVAL, 1500);
    final long timeout = this.master.getConfiguration().
      getLong(WAIT_ON_REGIONSERVERS_TIMEOUT, 4500);
    int defaultMinToStart = 1;
    if (BaseLoadBalancer.tablesOnMaster(master.getConfiguration())) {
      // If we assign regions to master, we'd like to start
      // at least another region server so that we don't
      // assign all regions to master if other region servers
      // don't come up in time.
      defaultMinToStart = 2;
    }
    int minToStart = this.master.getConfiguration().
      getInt(WAIT_ON_REGIONSERVERS_MINTOSTART, defaultMinToStart);
    if (minToStart < 1) {
      LOG.warn(String.format(
        "The value of '%s' (%d) cannot be less than 1, ignoring.",
        WAIT_ON_REGIONSERVERS_MINTOSTART, minToStart));
      minToStart = 1;
    }
    int maxToStart = this.master.getConfiguration().
      getInt(WAIT_ON_REGIONSERVERS_MAXTOSTART, Integer.MAX_VALUE);
    if (maxToStart < minToStart) {
      LOG.warn(String.format(
        "The value of '%s' (%d) is set less than '%s' (%d), ignoring.",
        WAIT_ON_REGIONSERVERS_MAXTOSTART, maxToStart,
        WAIT_ON_REGIONSERVERS_MINTOSTART, minToStart));
      maxToStart = Integer.MAX_VALUE;
    }

    long now = System.currentTimeMillis();
    final long startTime = now;
    long slept = 0;
    long lastLogTime = 0;
    long lastCountChange = startTime;
    int count = countOfRegionServers();
    int oldCount = 0;
    while (!this.master.isStopped() && count < maxToStart
        && (lastCountChange + interval > now || timeout > slept || count < minToStart)) {
      // Log some info at every interval time or if there is a change
      if (oldCount != count || lastLogTime + interval < now) {
        lastLogTime = now;
        String msg =
          "Waiting for region servers count to settle; currently" +
            " checked in " + count + ", slept for " + slept + " ms," +
            " expecting minimum of " + minToStart + ", maximum of " + maxToStart +
            ", timeout of " + timeout + " ms, interval of " + interval + " ms.";
        LOG.info(msg);
        status.setStatus(msg);
      }

      // We sleep for some time
      final long sleepTime = 50;
      Thread.sleep(sleepTime);
      now = System.currentTimeMillis();
      slept = now - startTime;

      oldCount = count;
      count = countOfRegionServers();
      if (count != oldCount) {
        lastCountChange = now;
      }
    }

    LOG.info("Finished waiting for region servers count to settle;" +
      " checked in " + count + ", slept for " + slept + " ms," +
      " expecting minimum of " + minToStart + ", maximum of " + maxToStart + "," +
      " master is " + (this.master.isStopped() ? "stopped." : "running"));
  }
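
  // A worked example of the loop above, using the defaults (interval=1500 ms,
  // timeout=4500 ms, minToStart=1, maxToStart=Integer.MAX_VALUE): if a single
  // server checks in at t=1000 ms and no other server follows, the wait ends
  // once the count has been stable for at least 1500 ms AND at least 4500 ms
  // have been slept, i.e. around t=4500 ms. Each additional check-in resets
  // the "no new region server" window.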

  /**
   * @return A copy of the internal list of online servers.
   */
  public List<ServerName> getOnlineServersList() {
    // TODO: optimize the load balancer call so we don't need to make a new list
    // TODO: FIX. THIS IS POPULAR CALL.
    return new ArrayList<ServerName>(this.onlineServers.keySet());
  }

  /**
   * @return A copy of the internal list of draining servers.
   */
  public List<ServerName> getDrainingServersList() {
    return new ArrayList<ServerName>(this.drainingServers);
  }

  /**
   * @return A copy of the internal set of deadNotExpired servers.
   */
  Set<ServerName> getDeadNotExpiredServers() {
    return new HashSet<ServerName>(this.queuedDeadServers);
  }

  /**
   * During startup, if we figure it is not a failover, i.e. there are
   * no more WAL files to split, we won't try to recover these dead servers.
   * So we just remove them from the queue. Use caution in calling this.
   */
  void removeRequeuedDeadServers() {
    requeuedDeadServers.clear();
  }

  /**
   * @return An unmodifiable view of the internal map of requeuedDeadServers and the
   *         corresponding flag for whether WAL splitting is needed.
   */
  Map<ServerName, Boolean> getRequeuedDeadServers() {
    return Collections.unmodifiableMap(this.requeuedDeadServers);
  }

  public boolean isServerOnline(ServerName serverName) {
    return serverName != null && onlineServers.containsKey(serverName);
  }

  /**
   * Check whether a server is online based on hostname and port.
   * @return true if a server with matching hostname and port is found.
   */
  public boolean isServerWithSameHostnamePortOnline(final ServerName serverName) {
    return findServerWithSameHostnamePortWithLock(serverName) != null;
  }

  /**
   * Check if a server is known to be dead.  A server can be online,
   * or known to be dead, or unknown to this manager (i.e., not online and
   * not known to be dead either; it is simply not tracked by the
   * master any more, for example, a very old previous instance).
   */
  public synchronized boolean isServerDead(ServerName serverName) {
    return serverName == null || deadservers.isDeadServer(serverName)
      || queuedDeadServers.contains(serverName)
      || requeuedDeadServers.containsKey(serverName);
  }

  public void shutdownCluster() {
    this.clusterShutdown = true;
    this.master.stop("Cluster shutdown requested");
  }

  public boolean isClusterShutdown() {
    return this.clusterShutdown;
  }

  /**
   * Stop the ServerManager.  Currently closes the connection to the master.
   */
  public void stop() {
    if (connection != null) {
      try {
        connection.close();
      } catch (IOException e) {
        LOG.error("Attempt to close connection to master failed", e);
      }
    }
  }

  /**
   * Creates a list of possible destinations for a region. It contains the online servers, but not
   * the draining or dying servers.
   * @param serverToExclude can be null if there is no server to exclude
   */
  public List<ServerName> createDestinationServersList(final ServerName serverToExclude) {
    final List<ServerName> destServers = getOnlineServersList();

    if (serverToExclude != null) {
      destServers.remove(serverToExclude);
    }

    // Loop through the draining server list and remove them from the server list
    final List<ServerName> drainingServersCopy = getDrainingServersList();
    if (!drainingServersCopy.isEmpty()) {
      for (final ServerName server: drainingServersCopy) {
        destServers.remove(server);
      }
    }

    // Remove the deadNotExpired servers from the server list.
    removeDeadNotExpiredServers(destServers);
    return destServers;
  }
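
  // Usage sketch, illustrative only: picking assignment candidates while a
  // node is being decommissioned. "decommissioning" stands in for whatever
  // ServerName the caller wants excluded.
  //
  //   List<ServerName> candidates =
  //       serverManager.createDestinationServersList(decommissioning);
  //   // candidates now holds the online servers minus the excluded server,
  //   // the draining servers, and the dead-but-not-yet-expired servers.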

  /**
   * Calls {@link #createDestinationServersList} without server to exclude.
   */
  public List<ServerName> createDestinationServersList() {
    return createDestinationServersList(null);
  }

  /**
   * Loop through the deadNotExpired server list and remove them from the
   * servers.
   * This function should be used carefully outside of this class. You should use a high level
   * method such as {@link #createDestinationServersList()} instead of managing your own list.
   */
  void removeDeadNotExpiredServers(List<ServerName> servers) {
    Set<ServerName> deadNotExpiredServersCopy = this.getDeadNotExpiredServers();
    if (!deadNotExpiredServersCopy.isEmpty()) {
      for (ServerName server : deadNotExpiredServersCopy) {
        LOG.debug("Removing dead but not expired server: " + server
          + " from eligible server pool.");
        servers.remove(server);
      }
    }
  }

  /**
   * Clears any dead server with the same host name and port as an online server.
   */
  void clearDeadServersWithSameHostNameAndPortOfOnlineServer() {
    for (ServerName serverName : getOnlineServersList()) {
      deadservers.cleanAllPreviousInstances(serverName);
    }
  }

  /**
   * Called by delete table and similar to notify the ServerManager that a region was removed.
   */
  public void removeRegion(final HRegionInfo regionInfo) {
    final byte[] encodedName = regionInfo.getEncodedNameAsBytes();
    storeFlushedSequenceIdsByRegion.remove(encodedName);
    flushedSequenceIdByRegion.remove(encodedName);
  }

  @VisibleForTesting
  public boolean isRegionInServerManagerStates(final HRegionInfo hri) {
    final byte[] encodedName = hri.getEncodedNameAsBytes();
    return (storeFlushedSequenceIdsByRegion.containsKey(encodedName)
        || flushedSequenceIdByRegion.containsKey(encodedName));
  }

  /**
   * Called by delete table and similar to notify the ServerManager that regions were removed.
   */
  public void removeRegions(final List<HRegionInfo> regions) {
    for (HRegionInfo hri: regions) {
      removeRegion(hri);
    }
  }
}