View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.master;
20  
21  import java.io.IOException;
22  import java.io.InterruptedIOException;
23  import java.lang.reflect.Constructor;
24  import java.lang.reflect.InvocationTargetException;
25  import java.net.InetAddress;
26  import java.net.InetSocketAddress;
27  import java.net.UnknownHostException;
28  import java.util.ArrayList;
29  import java.util.Arrays;
30  import java.util.Collection;
31  import java.util.Collections;
32  import java.util.Comparator;
33  import java.util.HashSet;
34  import java.util.Iterator;
35  import java.util.List;
36  import java.util.Map;
37  import java.util.Set;
38  import java.util.concurrent.TimeUnit;
39  import java.util.concurrent.atomic.AtomicReference;
40  import java.util.regex.Pattern;
41  
42  import javax.servlet.ServletException;
43  import javax.servlet.http.HttpServlet;
44  import javax.servlet.http.HttpServletRequest;
45  import javax.servlet.http.HttpServletResponse;
46  
47  import org.apache.commons.logging.Log;
48  import org.apache.commons.logging.LogFactory;
49  import org.apache.hadoop.conf.Configuration;
50  import org.apache.hadoop.fs.Path;
51  import org.apache.hadoop.hbase.ClusterStatus;
52  import org.apache.hadoop.hbase.CoordinatedStateException;
53  import org.apache.hadoop.hbase.CoordinatedStateManager;
54  import org.apache.hadoop.hbase.DoNotRetryIOException;
55  import org.apache.hadoop.hbase.HBaseIOException;
56  import org.apache.hadoop.hbase.HBaseInterfaceAudience;
57  import org.apache.hadoop.hbase.HColumnDescriptor;
58  import org.apache.hadoop.hbase.HConstants;
59  import org.apache.hadoop.hbase.HRegionInfo;
60  import org.apache.hadoop.hbase.HTableDescriptor;
61  import org.apache.hadoop.hbase.MasterNotRunningException;
62  import org.apache.hadoop.hbase.MetaTableAccessor;
63  import org.apache.hadoop.hbase.NamespaceDescriptor;
64  import org.apache.hadoop.hbase.NamespaceNotFoundException;
65  import org.apache.hadoop.hbase.PleaseHoldException;
66  import org.apache.hadoop.hbase.Server;
67  import org.apache.hadoop.hbase.ServerLoad;
68  import org.apache.hadoop.hbase.ServerName;
69  import org.apache.hadoop.hbase.TableDescriptors;
70  import org.apache.hadoop.hbase.TableName;
71  import org.apache.hadoop.hbase.TableNotDisabledException;
72  import org.apache.hadoop.hbase.TableNotFoundException;
73  import org.apache.hadoop.hbase.UnknownRegionException;
74  import org.apache.hadoop.hbase.classification.InterfaceAudience;
75  import org.apache.hadoop.hbase.client.MetaScanner;
76  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
77  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
78  import org.apache.hadoop.hbase.client.Result;
79  import org.apache.hadoop.hbase.client.TableState;
80  import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
81  import org.apache.hadoop.hbase.exceptions.DeserializationException;
82  import org.apache.hadoop.hbase.executor.ExecutorType;
83  import org.apache.hadoop.hbase.ipc.RequestContext;
84  import org.apache.hadoop.hbase.ipc.RpcServer;
85  import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
86  import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
87  import org.apache.hadoop.hbase.master.balancer.BalancerChore;
88  import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
89  import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
90  import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
91  import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
92  import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
93  import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
94  import org.apache.hadoop.hbase.master.handler.DeleteTableHandler;
95  import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
96  import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
97  import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
98  import org.apache.hadoop.hbase.master.handler.ModifyTableHandler;
99  import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
100 import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
101 import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
102 import org.apache.hadoop.hbase.master.handler.TruncateTableHandler;
103 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
104 import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
105 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
106 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
107 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
108 import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
109 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
110 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
111 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
112 import org.apache.hadoop.hbase.regionserver.HRegionServer;
113 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
114 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
115 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
116 import org.apache.hadoop.hbase.replication.regionserver.Replication;
117 import org.apache.hadoop.hbase.security.UserProvider;
118 import org.apache.hadoop.hbase.util.Addressing;
119 import org.apache.hadoop.hbase.util.Bytes;
120 import org.apache.hadoop.hbase.util.CompressionTest;
121 import org.apache.hadoop.hbase.util.EncryptionTest;
122 import org.apache.hadoop.hbase.util.FSUtils;
123 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
124 import org.apache.hadoop.hbase.util.HasThread;
125 import org.apache.hadoop.hbase.util.Pair;
126 import org.apache.hadoop.hbase.util.Threads;
127 import org.apache.hadoop.hbase.util.VersionInfo;
128 import org.apache.hadoop.hbase.util.ZKDataMigrator;
129 import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
130 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
131 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
132 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
133 import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
134 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
135 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
136 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
137 import org.apache.zookeeper.KeeperException;
138 import org.mortbay.jetty.Connector;
139 import org.mortbay.jetty.nio.SelectChannelConnector;
140 import org.mortbay.jetty.servlet.Context;
141 
142 import com.google.common.annotations.VisibleForTesting;
143 import com.google.common.collect.Maps;
144 import com.google.protobuf.Descriptors;
145 import com.google.protobuf.Service;
146 
147 /**
148  * HMaster is the "master server" for HBase. An HBase cluster has one active
149  * master.  If many masters are started, all compete.  Whichever wins goes on to
150  * run the cluster.  All others park themselves in their constructor until
151  * master or cluster shutdown or until the active master loses its lease in
152  * zookeeper.  Thereafter, all running master jostle to take over master role.
153  *
 * <p>The Master can be asked to shutdown the cluster. See {@link #shutdown()}.  In
155  * this case it will tell all regionservers to go down and then wait on them
156  * all reporting in that they are down.  This master will then shut itself down.
157  *
158  * <p>You can also shutdown just this master.  Call {@link #stopMaster()}.
159  *
160  * @see org.apache.zookeeper.Watcher
161  */
162 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
163 @SuppressWarnings("deprecation")
164 public class HMaster extends HRegionServer implements MasterServices, Server {
165   private static final Log LOG = LogFactory.getLog(HMaster.class.getName());
166 
167   /**
168    * Protection against zombie master. Started once Master accepts active responsibility and
169    * starts taking over responsibilities. Allows a finite time window before giving up ownership.
170    */
171   private static class InitializationMonitor extends HasThread {
172     /** The amount of time in milliseconds to sleep before checking initialization status. */
173     public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout";
174     public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES);
175 
176     /**
177      * When timeout expired and initialization has not complete, call {@link System#exit(int)} when
178      * true, do nothing otherwise.
179      */
180     public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout";
181     public static final boolean HALT_DEFAULT = false;
182 
183     private final HMaster master;
184     private final long timeout;
185     private final boolean haltOnTimeout;
186 
187     /** Creates a Thread that monitors the {@link #isInitialized()} state. */
188     InitializationMonitor(HMaster master) {
189       super("MasterInitializationMonitor");
190       this.master = master;
191       this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT);
192       this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT);
193       this.setDaemon(true);
194     }
195 
196     @Override
197     public void run() {
198       try {
199         while (!master.isStopped() && master.isActiveMaster()) {
200           Thread.sleep(timeout);
201           if (master.isInitialized()) {
202             LOG.debug("Initialization completed within allotted tolerance. Monitor exiting.");
203           } else {
204             LOG.error("Master failed to complete initialization after " + timeout + "ms. Please"
205                 + " consider submitting a bug report including a thread dump of this process.");
206             if (haltOnTimeout) {
207               LOG.error("Zombie Master exiting. Thread dump to stdout");
208               Threads.printThreadInfo(System.out, "Zombie HMaster");
209               System.exit(-1);
210             }
211           }
212         }
213       } catch (InterruptedException ie) {
214         LOG.trace("InitMonitor thread interrupted. Existing.");
215       }
216     }
217   }
218 
219   // MASTER is name of the webapp and the attribute name used stuffing this
220   //instance into web context.
221   public static final String MASTER = "master";
222 
223   // Manager and zk listener for master election
224   private final ActiveMasterManager activeMasterManager;
225   // Region server tracker
226   RegionServerTracker regionServerTracker;
227   // Draining region server tracker
228   private DrainingServerTracker drainingServerTracker;
229   // Tracker for load balancer state
230   LoadBalancerTracker loadBalancerTracker;
231 
232   /** Namespace stuff */
233   private TableNamespaceManager tableNamespaceManager;
234 
235   // Metrics for the HMaster
236   final MetricsMaster metricsMaster;
237   // file system manager for the master FS operations
238   private MasterFileSystem fileSystemManager;
239 
240   // server manager to deal with region server info
241   volatile ServerManager serverManager;
242 
243   // manager of assignment nodes in zookeeper
244   AssignmentManager assignmentManager;
245 
246   // buffer for "fatal error" notices from region servers
247   // in the cluster. This is only used for assisting
248   // operations/debugging.
249   MemoryBoundedLogMessageBuffer rsFatals;
250 
251   // flag set after we become the active master (used for testing)
252   private volatile boolean isActiveMaster = false;
253 
254   // flag set after we complete initialization once active,
255   // it is not private since it's used in unit tests
256   volatile boolean initialized = false;
257 
258   // flag set after master services are started,
259   // initialization may have not completed yet.
260   volatile boolean serviceStarted = false;
261 
262   // flag set after we complete assignMeta.
263   private volatile boolean serverShutdownHandlerEnabled = false;
264 
265   LoadBalancer balancer;
266   private BalancerChore balancerChore;
267   private ClusterStatusChore clusterStatusChore;
268   private ClusterStatusPublisher clusterStatusPublisherChore = null;
269 
270   CatalogJanitor catalogJanitorChore;
271   private LogCleaner logCleaner;
272   private HFileCleaner hfileCleaner;
273 
274   MasterCoprocessorHost cpHost;
275 
276   private final boolean preLoadTableDescriptors;
277 
278   // Time stamps for when a hmaster became active
279   private long masterActiveTime;
280 
281   //should we check the compression codec type at master side, default true, HBASE-6370
282   private final boolean masterCheckCompression;
283 
284   //should we check encryption settings at master side, default true
285   private final boolean masterCheckEncryption;
286 
287   Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();
288 
289   // monitor for snapshot of hbase tables
290   SnapshotManager snapshotManager;
291   // monitor for distributed procedures
292   MasterProcedureManagerHost mpmHost;
293 
294   private MasterQuotaManager quotaManager;
295 
296   // handle table states
297   private TableStateManager tableStateManager;
298 
299   /** flag used in test cases in order to simulate RS failures during master initialization */
300   private volatile boolean initializationBeforeMetaAssignment = false;
301 
302   /** jetty server for master to redirect requests to regionserver infoServer */
303   private org.mortbay.jetty.Server masterJettyServer;
304 
305   public static class RedirectServlet extends HttpServlet {
306     private static final long serialVersionUID = 2894774810058302472L;
307     private static int regionServerInfoPort;
308 
309     @Override
310     public void doGet(HttpServletRequest request,
311         HttpServletResponse response) throws ServletException, IOException {
312       String redirectUrl = request.getScheme() + "://"
313         + request.getServerName() + ":" + regionServerInfoPort
314         + request.getRequestURI();
315       response.sendRedirect(redirectUrl);
316     }
317   }
318 
319   /**
320    * Initializes the HMaster. The steps are as follows:
321    * <p>
322    * <ol>
323    * <li>Initialize the local HRegionServer
324    * <li>Start the ActiveMasterManager.
325    * </ol>
326    * <p>
327    * Remaining steps of initialization occur in
328    * #finishActiveMasterInitialization(MonitoredTask) after
329    * the master becomes the active one.
330    *
331    * @throws KeeperException
332    * @throws IOException
333    */
334   public HMaster(final Configuration conf, CoordinatedStateManager csm)
335       throws IOException, KeeperException {
336     super(conf, csm);
337     this.rsFatals = new MemoryBoundedLogMessageBuffer(
338       conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));
339 
340     LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
341         ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));
342 
343     Replication.decorateMasterConfiguration(this.conf);
344 
345     // Hack! Maps DFSClient => Master for logs.  HDFS made this
346     // config param for task trackers, but we can piggyback off of it.
347     if (this.conf.get("mapreduce.task.attempt.id") == null) {
348       this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
349     }
350 
351     // should we check the compression codec type at master side, default true, HBASE-6370
352     this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);
353 
354     // should we check encryption settings at master side, default true
355     this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);
356 
357     this.metricsMaster = new MetricsMaster( new MetricsMasterWrapperImpl(this));
358 
359     // preload table descriptor at startup
360     this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);
361 
362     // Do we publish the status?
363 
364     boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
365         HConstants.STATUS_PUBLISHED_DEFAULT);
366     Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
367         conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
368             ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
369             ClusterStatusPublisher.Publisher.class);
370 
371     if (shouldPublish) {
372       if (publisherClass == null) {
373         LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
374             ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
375             " is not set - not publishing status");
376       } else {
377         clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
378         Threads.setDaemonThreadRunning(clusterStatusPublisherChore.getThread());
379       }
380     }
381     activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
382     int infoPort = putUpJettyServer();
383     startActiveMasterManager(infoPort);
384   }
385 
386   // return the actual infoPort, -1 means disable info server.
387   private int putUpJettyServer() throws IOException {
388     if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
389       return -1;
390     }
391     int infoPort = conf.getInt("hbase.master.info.port.orig",
392       HConstants.DEFAULT_MASTER_INFOPORT);
393     // -1 is for disabling info server, so no redirecting
394     if (infoPort < 0 || infoServer == null) {
395       return -1;
396     }
397     String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
398     if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
399       String msg =
400           "Failed to start redirecting jetty server. Address " + addr
401               + " does not belong to this host. Correct configuration parameter: "
402               + "hbase.master.info.bindAddress";
403       LOG.error(msg);
404       throw new IOException(msg);
405     }
406 
407     RedirectServlet.regionServerInfoPort = infoServer.getPort();
408     masterJettyServer = new org.mortbay.jetty.Server();
409     Connector connector = new SelectChannelConnector();
410     connector.setHost(addr);
411     connector.setPort(infoPort);
412     masterJettyServer.addConnector(connector);
413     masterJettyServer.setStopAtShutdown(true);
414     Context context = new Context(masterJettyServer, "/", Context.NO_SESSIONS);
415     context.addServlet(RedirectServlet.class, "/*");
416     try {
417       masterJettyServer.start();
418     } catch (Exception e) {
419       throw new IOException("Failed to start redirecting jetty server", e);
420     }
421     return connector.getLocalPort();
422   }
423 
424   /**
425    * For compatibility, if failed with regionserver credentials, try the master one
426    */
427   @Override
428   protected void login(UserProvider user, String host) throws IOException {
429     try {
430       super.login(user, host);
431     } catch (IOException ie) {
432       user.login("hbase.master.keytab.file",
433         "hbase.master.kerberos.principal", host);
434     }
435   }
436 
437   /**
438    * If configured to put regions on active master,
439    * wait till a backup master becomes active.
440    * Otherwise, loop till the server is stopped or aborted.
441    */
442   @Override
443   protected void waitForMasterActive(){
444     boolean tablesOnMaster = BaseLoadBalancer.tablesOnMaster(conf);
445     while (!(tablesOnMaster && isActiveMaster)
446         && !isStopped() && !isAborted()) {
447       sleeper.sleep();
448     }
449   }
450 
451   @VisibleForTesting
452   public MasterRpcServices getMasterRpcServices() {
453     return (MasterRpcServices)rpcServices;
454   }
455 
456   public boolean balanceSwitch(final boolean b) throws IOException {
457     return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
458   }
459 
460   @Override
461   protected String getProcessName() {
462     return MASTER;
463   }
464 
465   @Override
466   protected boolean canCreateBaseZNode() {
467     return true;
468   }
469 
470   @Override
471   protected boolean canUpdateTableDescriptor() {
472     return true;
473   }
474 
475   @Override
476   protected RSRpcServices createRpcServices() throws IOException {
477     return new MasterRpcServices(this);
478   }
479 
480   @Override
481   protected void configureInfoServer() {
482     infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class);
483     infoServer.setAttribute(MASTER, this);
484     if (BaseLoadBalancer.tablesOnMaster(conf)) {
485       super.configureInfoServer();
486     }
487   }
488 
489   @Override
490   protected Class<? extends HttpServlet> getDumpServlet() {
491     return MasterDumpServlet.class;
492   }
493 
494   /**
495    * Emit the HMaster metrics, such as region in transition metrics.
496    * Surrounding in a try block just to be sure metrics doesn't abort HMaster.
497    */
498   @Override
499   protected void doMetrics() {
500     try {
501       if (assignmentManager != null) {
502         assignmentManager.updateRegionsInTransitionMetrics();
503       }
504     } catch (Throwable e) {
505       LOG.error("Couldn't update metrics: " + e.getMessage());
506     }
507   }
508 
509   MetricsMaster getMasterMetrics() {
510     return metricsMaster;
511   }
512 
513   /**
514    * Initialize all ZK based system trackers.
515    * @throws IOException
516    * @throws InterruptedException
517    * @throws KeeperException
518    * @throws CoordinatedStateException
519    */
520   void initializeZKBasedSystemTrackers() throws IOException,
521       InterruptedException, KeeperException, CoordinatedStateException {
522     this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
523     this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
524     this.loadBalancerTracker.start();
525     this.assignmentManager = new AssignmentManager(this, serverManager,
526       this.balancer, this.service, this.metricsMaster,
527       this.tableLockManager, tableStateManager);
528 
529     this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
530         this.serverManager);
531     this.regionServerTracker.start();
532 
533     this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
534       this.serverManager);
535     this.drainingServerTracker.start();
536 
537     // Set the cluster as up.  If new RSs, they'll be waiting on this before
538     // going ahead with their startup.
539     boolean wasUp = this.clusterStatusTracker.isClusterUp();
540     if (!wasUp) this.clusterStatusTracker.setClusterUp();
541 
542     LOG.info("Server active/primary master=" + this.serverName +
543         ", sessionid=0x" +
544         Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
545         ", setting cluster-up flag (Was=" + wasUp + ")");
546 
547     // create/initialize the snapshot manager and other procedure managers
548     this.snapshotManager = new SnapshotManager();
549     this.mpmHost = new MasterProcedureManagerHost();
550     this.mpmHost.register(this.snapshotManager);
551     this.mpmHost.register(new MasterFlushTableProcedureManager());
552     this.mpmHost.loadProcedures(conf);
553     this.mpmHost.initialize(this, this.metricsMaster);
554 
555     // migrating existent table state from zk
556     for (Map.Entry<TableName, TableState.State> entry : ZKDataMigrator
557         .queryForTableStates(getZooKeeper()).entrySet()) {
558       LOG.info("Converting state from zk to new states:" + entry);
559       tableStateManager.setTableState(entry.getKey(), entry.getValue());
560     }
561     ZKUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().tableZNode);
562   }
563 
564   /**
565    * Finish initialization of HMaster after becoming the primary master.
566    *
567    * <ol>
568    * <li>Initialize master components - file system manager, server manager,
569    *     assignment manager, region server tracker, etc</li>
570    * <li>Start necessary service threads - balancer, catalog janior,
571    *     executor services, etc</li>
572    * <li>Set cluster as UP in ZooKeeper</li>
573    * <li>Wait for RegionServers to check-in</li>
574    * <li>Split logs and perform data recovery, if necessary</li>
575    * <li>Ensure assignment of meta/namespace regions<li>
576    * <li>Handle either fresh cluster start or master failover</li>
577    * </ol>
578    *
579    * @throws IOException
580    * @throws InterruptedException
581    * @throws KeeperException
582    * @throws CoordinatedStateException
583    */
584   private void finishActiveMasterInitialization(MonitoredTask status)
585       throws IOException, InterruptedException, KeeperException, CoordinatedStateException {
586 
587     isActiveMaster = true;
588     Thread zombieDetector = new Thread(new InitializationMonitor(this));
589     zombieDetector.start();
590 
591     /*
592      * We are active master now... go initialize components we need to run.
593      * Note, there may be dross in zk from previous runs; it'll get addressed
594      * below after we determine if cluster startup or failover.
595      */
596 
597     status.setStatus("Initializing Master file system");
598 
599     this.masterActiveTime = System.currentTimeMillis();
600     // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
601     this.fileSystemManager = new MasterFileSystem(this, this);
602 
603     // enable table descriptors cache
604     this.tableDescriptors.setCacheOn();
605 
606     // warm-up HTDs cache on master initialization
607     if (preLoadTableDescriptors) {
608       status.setStatus("Pre-loading table descriptors");
609       this.tableDescriptors.getAll();
610     }
611 
612     // publish cluster ID
613     status.setStatus("Publishing Cluster ID in ZooKeeper");
614     ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
615     this.serverManager = createServerManager(this, this);
616 
617     setupClusterConnection();
618 
619     // Invalidate all write locks held previously
620     this.tableLockManager.reapWriteLocks();
621 
622     this.tableStateManager = new TableStateManager(this);
623     this.tableStateManager.start();
624 
625     status.setStatus("Initializing ZK system trackers");
626     initializeZKBasedSystemTrackers();
627 
628     // initialize master side coprocessors before we start handling requests
629     status.setStatus("Initializing master coprocessors");
630     this.cpHost = new MasterCoprocessorHost(this, this.conf);
631 
632     // start up all service threads.
633     status.setStatus("Initializing master service threads");
634     startServiceThreads();
635 
636     // Wake up this server to check in
637     sleeper.skipSleepCycle();
638 
639     // Wait for region servers to report in
640     this.serverManager.waitForRegionServers(status);
641     // Check zk for region servers that are up but didn't register
642     for (ServerName sn: this.regionServerTracker.getOnlineServers()) {
643       // The isServerOnline check is opportunistic, correctness is handled inside
644       if (!this.serverManager.isServerOnline(sn)
645           && serverManager.checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
646         LOG.info("Registered server found up in zk but who has not yet reported in: " + sn);
647       }
648     }
649 
650     // get a list for previously failed RS which need log splitting work
651     // we recover hbase:meta region servers inside master initialization and
652     // handle other failed servers in SSH in order to start up master node ASAP
653     Set<ServerName> previouslyFailedServers = this.fileSystemManager
654         .getFailedServersFromLogFolders();
655 
656     // remove stale recovering regions from previous run
657     this.fileSystemManager.removeStaleRecoveringRegionsFromZK(previouslyFailedServers);
658 
659     // log splitting for hbase:meta server
660     ServerName oldMetaServerLocation = metaTableLocator.getMetaRegionLocation(this.getZooKeeper());
661     if (oldMetaServerLocation != null && previouslyFailedServers.contains(oldMetaServerLocation)) {
662       splitMetaLogBeforeAssignment(oldMetaServerLocation);
663       // Note: we can't remove oldMetaServerLocation from previousFailedServers list because it
664       // may also host user regions
665     }
666     Set<ServerName> previouslyFailedMetaRSs = getPreviouselyFailedMetaServersFromZK();
667     // need to use union of previouslyFailedMetaRSs recorded in ZK and previouslyFailedServers
668     // instead of previouslyFailedMetaRSs alone to address the following two situations:
669     // 1) the chained failure situation(recovery failed multiple times in a row).
670     // 2) master get killed right before it could delete the recovering hbase:meta from ZK while the
671     // same server still has non-meta wals to be replayed so that
672     // removeStaleRecoveringRegionsFromZK can't delete the stale hbase:meta region
673     // Passing more servers into splitMetaLog is all right. If a server doesn't have hbase:meta wal,
674     // there is no op for the server.
675     previouslyFailedMetaRSs.addAll(previouslyFailedServers);
676 
677     this.initializationBeforeMetaAssignment = true;
678 
679     // Wait for regionserver to finish initialization.
680     if (BaseLoadBalancer.tablesOnMaster(conf)) {
681       waitForServerOnline();
682     }
683 
684     //initialize load balancer
685     this.balancer.setClusterStatus(getClusterStatus());
686     this.balancer.setMasterServices(this);
687     this.balancer.initialize();
688 
689     // Check if master is shutting down because of some issue
690     // in initializing the regionserver or the balancer.
691     if(isStopped()) return;
692 
693     // Make sure meta assigned before proceeding.
694     status.setStatus("Assigning Meta Region");
695     assignMeta(status, previouslyFailedMetaRSs);
696     // check if master is shutting down because above assignMeta could return even hbase:meta isn't
697     // assigned when master is shutting down
698     if(isStopped()) return;
699 
700     status.setStatus("Submitting log splitting work for previously failed region servers");
701     // Master has recovered hbase:meta region server and we put
702     // other failed region servers in a queue to be handled later by SSH
703     for (ServerName tmpServer : previouslyFailedServers) {
704       this.serverManager.processDeadServer(tmpServer, true);
705     }
706 
707     // Fix up assignment manager status
708     status.setStatus("Starting assignment manager");
709     this.assignmentManager.joinCluster();
710 
711     //set cluster status again after user regions are assigned
712     this.balancer.setClusterStatus(getClusterStatus());
713 
714     // Start balancer and meta catalog janitor after meta and regions have
715     // been assigned.
716     status.setStatus("Starting balancer and catalog janitor");
717     this.clusterStatusChore = new ClusterStatusChore(this, balancer);
718     Threads.setDaemonThreadRunning(clusterStatusChore.getThread());
719     this.balancerChore = new BalancerChore(this);
720     Threads.setDaemonThreadRunning(balancerChore.getThread());
721     this.catalogJanitorChore = new CatalogJanitor(this, this);
722     Threads.setDaemonThreadRunning(catalogJanitorChore.getThread());
723 
724     status.setStatus("Starting namespace manager");
725     initNamespace();
726 
727     status.setStatus("Starting quota manager");
728     initQuotaManager();
729 
730     if (this.cpHost != null) {
731       try {
732         this.cpHost.preMasterInitialization();
733       } catch (IOException e) {
734         LOG.error("Coprocessor preMasterInitialization() hook failed", e);
735       }
736     }
737 
738     status.markComplete("Initialization successful");
739     LOG.info("Master has completed initialization");
740     configurationManager.registerObserver(this.balancer);
741     initialized = true;
742     // clear the dead servers with same host name and port of online server because we are not
743     // removing dead server with same hostname and port of rs which is trying to check in before
744     // master initialization. See HBASE-5916.
745     this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();
746 
747     if (this.cpHost != null) {
748       // don't let cp initialization errors kill the master
749       try {
750         this.cpHost.postStartMaster();
751       } catch (IOException ioe) {
752         LOG.error("Coprocessor postStartMaster() hook failed", ioe);
753       }
754     }
755 
756     zombieDetector.interrupt();
757   }
758 
  /**
   * Create a {@link ServerManager} instance.
   * Kept as a standalone factory method (rather than inlined at the call site)
   * so tests can Mockito.spy the master and stub this out to return a mocked-up
   * ServerManager.
   * @param master the server the new ServerManager reports to
   * @param services master services handed to the new ServerManager
   * @return An instance of {@link ServerManager}
   * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
   * @throws IOException
   */
  ServerManager createServerManager(final Server master,
      final MasterServices services)
  throws IOException {
    // We put this out here in a method so can do a Mockito.spy and stub it out
    // w/ a mocked up ServerManager.
    return new ServerManager(master, services);
  }
774 
  /**
   * Check <code>hbase:meta</code> is assigned. If not, assign it.
   * @param status MonitoredTask used to report startup progress
   * @param previouslyFailedMetaRSs servers that previously hosted hbase:meta and
   *   failed; the current (dead) meta host may be added to this set here
   * @throws InterruptedException
   * @throws IOException
   * @throws KeeperException
   */
  void assignMeta(MonitoredTask status, Set<ServerName> previouslyFailedMetaRSs)
      throws InterruptedException, IOException, KeeperException {
    // Work on meta region
    int assigned = 0;
    // How long verifyMetaRegionLocation may take to confirm the recorded location.
    long timeout = this.conf.getLong("hbase.catalog.verification.timeout", 1000);
    status.setStatus("Assigning hbase:meta region");

    // Get current meta state from zk.
    RegionState metaState = MetaTableLocator.getMetaRegionState(getZooKeeper());

    // Seed the in-memory region states with meta's last known state/location.
    RegionStates regionStates = assignmentManager.getRegionStates();
    regionStates.createRegionState(HRegionInfo.FIRST_META_REGIONINFO,
      metaState.getState(), metaState.getServerName(), null);

    // Meta needs (re)assignment if ZK does not say OPENED, or the recorded
    // location cannot be verified within the timeout.
    if (!metaState.isOpened() || !metaTableLocator.verifyMetaRegionLocation(
        this.getConnection(), this.getZooKeeper(), timeout)) {
      ServerName currentMetaServer = metaState.getServerName();
      if (serverManager.isServerOnline(currentMetaServer)) {
        // Host is alive but meta is not verified open: treat it as a region in
        // transition and let the assignment manager drive it to completion.
        LOG.info("Meta was in transition on " + currentMetaServer);
        assignmentManager.processRegionsInTransition(Arrays.asList(metaState));
      } else {
        if (currentMetaServer != null) {
          // Old meta host is dead: split/mark its meta WAL before re-assigning
          // and remember the host for later meta-log handling below.
          splitMetaLogBeforeAssignment(currentMetaServer);
          regionStates.logSplit(HRegionInfo.FIRST_META_REGIONINFO);
          previouslyFailedMetaRSs.add(currentMetaServer);
        }
        LOG.info("Re-assigning hbase:meta, it was on " + currentMetaServer);
        assignmentManager.assignMeta();
      }
      assigned++;
    }

    // Make sure the meta table is flagged ENABLED in the table state manager.
    enableMeta(TableName.META_TABLE_NAME);

    if ((RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode())
        && (!previouslyFailedMetaRSs.isEmpty())) {
      // replay WAL edits mode need new hbase:meta RS is assigned firstly
      status.setStatus("replaying log for Meta Region");
      this.fileSystemManager.splitMetaLog(previouslyFailedMetaRSs);
    }

    // Make sure a hbase:meta location is set. We need to enable SSH here since
    // if the meta region server is died at this time, we need it to be re-assigned
    // by SSH so that system tables can be assigned.
    // No need to wait for meta is assigned = 0 when meta is just verified.
    enableServerShutdownHandler(assigned != 0);

    LOG.info("hbase:meta assigned=" + assigned + ", location="
      + metaTableLocator.getMetaRegionLocation(this.getZooKeeper()));
    status.setStatus("META assigned.");
  }
834 
  /** Create and start the {@link TableNamespaceManager} for this master. */
  void initNamespace() throws IOException {
    //create namespace manager
    tableNamespaceManager = new TableNamespaceManager(this);
    tableNamespaceManager.start();
  }
840 
  /** Create and start the {@link MasterQuotaManager} for this master. */
  void initQuotaManager() throws IOException {
    quotaManager = new MasterQuotaManager(this);
    quotaManager.start();
  }
845 
846   boolean isCatalogJanitorEnabled() {
847     return catalogJanitorChore != null ?
848       catalogJanitorChore.getEnabled() : false;
849   }
850 
851   private void splitMetaLogBeforeAssignment(ServerName currentMetaServer) throws IOException {
852     if (RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode()) {
853       // In log replay mode, we mark hbase:meta region as recovering in ZK
854       Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
855       regions.add(HRegionInfo.FIRST_META_REGIONINFO);
856       this.fileSystemManager.prepareLogReplay(currentMetaServer, regions);
857     } else {
858       // In recovered.edits mode: create recovered edits file for hbase:meta server
859       this.fileSystemManager.splitMetaLog(currentMetaServer);
860     }
861   }
862 
  /**
   * Turn on the ServerShutdownHandler (SSH) if it is not enabled yet, processing
   * any dead servers queued up while it was off; optionally wait for a
   * hbase:meta location to be published in ZK before returning.
   * @param waitForMeta if true, block until meta has a location
   * @throws IOException
   * @throws InterruptedException
   */
  private void enableServerShutdownHandler(
      final boolean waitForMeta) throws IOException, InterruptedException {
    // If ServerShutdownHandler is disabled, we enable it and expire those dead
    // but not expired servers. This is required so that if meta is assigning to
    // a server which dies after assignMeta starts assignment,
    // SSH can re-assign it. Otherwise, we will be
    // stuck here waiting forever if waitForMeta is specified.
    if (!serverShutdownHandlerEnabled) {
      serverShutdownHandlerEnabled = true;
      this.serverManager.processQueuedDeadServers();
    }

    if (waitForMeta) {
      metaTableLocator.waitMetaRegionLocation(this.getZooKeeper());
    }
  }
879 
880   private void enableMeta(TableName metaTableName) {
881     if (!this.tableStateManager.isTableState(metaTableName,
882             TableState.State.ENABLED)) {
883       this.assignmentManager.setEnabledTable(metaTableName);
884     }
885   }
886 
887   /**
888    * This function returns a set of region server names under hbase:meta recovering region ZK node
889    * @return Set of meta server names which were recorded in ZK
890    * @throws KeeperException
891    */
892   private Set<ServerName> getPreviouselyFailedMetaServersFromZK() throws KeeperException {
893     Set<ServerName> result = new HashSet<ServerName>();
894     String metaRecoveringZNode = ZKUtil.joinZNode(zooKeeper.recoveringRegionsZNode,
895       HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
896     List<String> regionFailedServers = ZKUtil.listChildrenNoWatch(zooKeeper, metaRecoveringZNode);
897     if (regionFailedServers == null) return result;
898 
899     for(String failedServer : regionFailedServers) {
900       ServerName server = ServerName.parseServerName(failedServer);
901       result.add(server);
902     }
903     return result;
904   }
905 
  /** @return this master's {@link TableDescriptors} */
  @Override
  public TableDescriptors getTableDescriptors() {
    return this.tableDescriptors;
  }
910 
  /** @return this master's {@link ServerManager} */
  @Override
  public ServerManager getServerManager() {
    return this.serverManager;
  }
915 
  /** @return this master's {@link MasterFileSystem} */
  @Override
  public MasterFileSystem getMasterFileSystem() {
    return this.fileSystemManager;
  }
920 
  /** @return this master's {@link TableStateManager} */
  @Override
  public TableStateManager getTableStateManager() {
    return tableStateManager;
  }
925 
  /*
   * Start up all services. If any of these threads gets an unhandled exception
   * then they just die with a logged message.  This should be fine because
   * in general, we do not expect the master to get such unhandled exceptions
   *  as OOMEs; it should be lightly loaded. See what HRegionServer does if
   *  need to install an unexpected exception handler.
   */
  private void startServiceThreads() throws IOException{
   // Start the executor service pools
   this.service.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
   this.service.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
   this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));
   // NOTE(review): meta server operations reuse the generic
   // "hbase.master.executor.serverops.threads" key -- confirm this sharing is
   // intentional rather than a copy/paste of the key above.
   this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));
   this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
      conf.getInt("hbase.master.executor.logreplayops.threads", 10));

   // We depend on there being only one instance of this executor running
   // at a time.  To do concurrency, would need fencing of enable/disable of
   // tables.
   // Any time changing this maxThreads to > 1, pls see the comment at
   // AccessController#postCreateTableHandler
   this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

   // Start log cleaner thread
   int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
   this.logCleaner =
      new LogCleaner(cleanerInterval,
         this, conf, getMasterFileSystem().getFileSystem(),
         getMasterFileSystem().getOldLogDir());
         Threads.setDaemonThreadRunning(logCleaner.getThread(),
           getServerName().toShortString() + ".oldLogCleaner");

   //start the hfile archive cleaner thread
    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    // The HFile cleaner runs on the same interval as the log cleaner.
    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
        .getFileSystem(), archiveDir);
    Threads.setDaemonThreadRunning(hfileCleaner.getThread(),
      getServerName().toShortString() + ".archivedHFileCleaner");

    // Flag consulted elsewhere to know the service threads are up.
    serviceStarted = true;
    if (LOG.isTraceEnabled()) {
      LOG.trace("Started service threads");
    }
  }
974 
  /**
   * Stop the master's service threads: info-server jetty, superclass threads,
   * chores, cleaners and the various managers. Null checks guard against a
   * master that stopped partway through initialization.
   */
  @Override
  protected void stopServiceThreads() {
    // Take down the embedded jetty info server first.
    if (masterJettyServer != null) {
      LOG.info("Stopping master jetty server");
      try {
        masterJettyServer.stop();
      } catch (Exception e) {
        LOG.error("Failed to stop master jetty server", e);
      }
    }
    super.stopServiceThreads();
    stopChores();
    // Wait for all the remaining region servers to report in IFF we were
    // running a cluster shutdown AND we were NOT aborting.
    if (!isAborted() && this.serverManager != null &&
        this.serverManager.isClusterShutdown()) {
      this.serverManager.letRegionServersShutdown();
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Stopping service threads");
    }
    // Clean up and close up shop
    if (this.logCleaner!= null) this.logCleaner.interrupt();
    if (this.hfileCleaner != null) this.hfileCleaner.interrupt();
    if (this.quotaManager != null) this.quotaManager.stop();
    if (this.activeMasterManager != null) this.activeMasterManager.stop();
    if (this.serverManager != null) this.serverManager.stop();
    if (this.assignmentManager != null) this.assignmentManager.stop();
    if (this.fileSystemManager != null) this.fileSystemManager.stop();
    if (this.mpmHost != null) this.mpmHost.stop("server shutting down.");
  }
1006 
1007   private void stopChores() {
1008     if (this.balancerChore != null) {
1009       this.balancerChore.interrupt();
1010     }
1011     if (this.clusterStatusChore != null) {
1012       this.clusterStatusChore.interrupt();
1013     }
1014     if (this.catalogJanitorChore != null) {
1015       this.catalogJanitorChore.interrupt();
1016     }
1017     if (this.clusterStatusPublisherChore != null){
1018       clusterStatusPublisherChore.interrupt();
1019     }
1020   }
1021 
1022   /**
1023    * @return Get remote side's InetAddress
1024    * @throws UnknownHostException
1025    */
1026   InetAddress getRemoteInetAddress(final int port,
1027       final long serverStartCode) throws UnknownHostException {
1028     // Do it out here in its own little method so can fake an address when
1029     // mocking up in tests.
1030     InetAddress ia = RpcServer.getRemoteIp();
1031 
1032     // The call could be from the local regionserver,
1033     // in which case, there is no remote address.
1034     if (ia == null && serverStartCode == startcode) {
1035       InetSocketAddress isa = rpcServices.getSocketAddress();
1036       if (isa != null && isa.getPort() == port) {
1037         ia = isa.getAddress();
1038       }
1039     }
1040     return ia;
1041   }
1042 
1043   /**
1044    * @return Maximum time we should run balancer for
1045    */
1046   private int getBalancerCutoffTime() {
1047     int balancerCutoffTime =
1048       getConfiguration().getInt("hbase.balancer.max.balancing", -1);
1049     if (balancerCutoffTime == -1) {
1050       // No time period set so create one
1051       int balancerPeriod =
1052         getConfiguration().getInt("hbase.balancer.period", 300000);
1053       balancerCutoffTime = balancerPeriod;
1054       // If nonsense period, set it to balancerPeriod
1055       if (balancerCutoffTime <= 0) balancerCutoffTime = balancerPeriod;
1056     }
1057     return balancerCutoffTime;
1058   }
1059 
  /**
   * Run the load balancer once, unless conditions prevent it: master not yet
   * initialized, balancer switched off, regions in transition, or dead servers
   * still in processing. Executes as many region plans as fit inside the
   * cutoff time from {@link #getBalancerCutoffTime()}.
   * @return false when the run was skipped or vetoed by a coprocessor; true
   *   once plans (possibly none) have been executed
   * @throws IOException
   */
  public boolean balance() throws IOException {
    // if master not initialized, don't run balancer.
    if (!this.initialized) {
      LOG.debug("Master has not been initialized, don't run balancer.");
      return false;
    }
    // Do this call outside of synchronized block.
    int maximumBalanceTime = getBalancerCutoffTime();
    synchronized (this.balancer) {
      // If balance not true, don't run balancer.
      if (!this.loadBalancerTracker.isBalancerOn()) return false;
      // Only allow one balance run at at time.
      if (this.assignmentManager.getRegionStates().isRegionsInTransition()) {
        Map<String, RegionState> regionsInTransition =
          this.assignmentManager.getRegionStates().getRegionsInTransition();
        // Abbreviate the RIT dump so the log line stays bounded.
        LOG.debug("Not running balancer because " + regionsInTransition.size() +
          " region(s) in transition: " + org.apache.commons.lang.StringUtils.
            abbreviate(regionsInTransition.toString(), 256));
        return false;
      }
      if (this.serverManager.areDeadServersInProgress()) {
        LOG.debug("Not running balancer because processing dead regionserver(s): " +
          this.serverManager.getDeadServers());
        return false;
      }

      // Give coprocessors a chance to veto the run.
      if (this.cpHost != null) {
        try {
          if (this.cpHost.preBalance()) {
            LOG.debug("Coprocessor bypassing balancer request");
            return false;
          }
        } catch (IOException ioe) {
          LOG.error("Error invoking master coprocessor preBalance()", ioe);
          return false;
        }
      }

      // Ask the balancer for a plan per table, then concatenate all plans.
      Map<TableName, Map<ServerName, List<HRegionInfo>>> assignmentsByTable =
        this.assignmentManager.getRegionStates().getAssignmentsByTable();

      List<RegionPlan> plans = new ArrayList<RegionPlan>();
      //Give the balancer the current cluster state.
      this.balancer.setClusterStatus(getClusterStatus());
      for (Map<ServerName, List<HRegionInfo>> assignments : assignmentsByTable.values()) {
        List<RegionPlan> partialPlans = this.balancer.balanceCluster(assignments);
        if (partialPlans != null) plans.addAll(partialPlans);
      }
      // Execute plans until the estimated time for the next one (average plan
      // execution time so far) would run past the cutoff.
      long cutoffTime = System.currentTimeMillis() + maximumBalanceTime;
      int rpCount = 0;  // number of RegionPlans balanced so far
      long totalRegPlanExecTime = 0;
      if (plans != null && !plans.isEmpty()) {
        for (RegionPlan plan: plans) {
          LOG.info("balance " + plan);
          long balStartTime = System.currentTimeMillis();
          //TODO: bulk assign
          this.assignmentManager.balance(plan);
          totalRegPlanExecTime += System.currentTimeMillis()-balStartTime;
          rpCount++;
          if (rpCount < plans.size() &&
              // if performing next balance exceeds cutoff time, exit the loop
              (System.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) {
            //TODO: After balance, there should not be a cutoff time (keeping it as a security net for now)
            LOG.debug("No more balancing till next balance run; maximumBalanceTime=" +
              maximumBalanceTime);
            break;
          }
        }
      }
      if (this.cpHost != null) {
        try {
          // Report only the plans actually executed if we broke out early.
          this.cpHost.postBalance(rpCount < plans.size() ? plans.subList(0, rpCount) : plans);
        } catch (IOException ioe) {
          // balancing already succeeded so don't change the result
          LOG.error("Error invoking master coprocessor postBalance()", ioe);
        }
      }
    }
    // If LoadBalancer did not generate any plans, it means the cluster is already balanced.
    // Return true indicating a success.
    return true;
  }
1142 
1143   /**
1144    * @return Client info for use as prefix on an audit log string; who did an action
1145    */
1146   String getClientIdAuditPrefix() {
1147     return "Client=" + RequestContext.getRequestUserName() + "/" +
1148       RequestContext.get().getRemoteAddress();
1149   }
1150 
  /**
   * Switch for the background CatalogJanitor thread.
   * Used for testing.  The thread will continue to run.  It will just be a noop
   * if disabled.
   * @param b If false, the catalog janitor won't do anything.
   */
  public void setCatalogJanitorEnabled(final boolean b) {
    // NOTE(review): unlike isCatalogJanitorEnabled(), no null guard here --
    // this will NPE if called before the chore is created during master
    // initialization; confirm callers only invoke it after startup.
    this.catalogJanitorChore.setEnabled(b);
  }
1160 
  /**
   * Submit a merge of two regions to the executor; the merge itself is carried
   * out asynchronously by a DispatchMergingRegionHandler.
   */
  @Override
  public void dispatchMergingRegions(final HRegionInfo region_a,
      final HRegionInfo region_b, final boolean forcible) throws IOException {
    checkInitialized();
    this.service.submit(new DispatchMergingRegionHandler(this,
        this.catalogJanitorChore, region_a, region_b, forcible));
  }
1168 
  /**
   * Move a region to a destination server, or to a randomly chosen server when
   * no destination is given. Silently returns (with a debug log) when no plan
   * can be made or the move would be a no-op.
   * @param encodedRegionName encoded name of the region to move
   * @param destServerName destination server name bytes; null/empty means pick
   *   a server at random
   * @throws HBaseIOException on unknown region or failure during the move
   */
  void move(final byte[] encodedRegionName,
      final byte[] destServerName) throws HBaseIOException {
    RegionState regionState = assignmentManager.getRegionStates().
      getRegionState(Bytes.toString(encodedRegionName));
    if (regionState == null) {
      throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
    }

    HRegionInfo hri = regionState.getRegion();
    ServerName dest;
    if (destServerName == null || destServerName.length == 0) {
      LOG.info("Passed destination servername is null/empty so " +
        "choosing a server at random");
      // Candidate list built relative to the current host (presumably excluding
      // it -- the argument name suggests so; verify in ServerManager).
      final List<ServerName> destServers = this.serverManager.createDestinationServersList(
        regionState.getServerName());
      dest = balancer.randomAssignment(hri, destServers);
      if (dest == null) {
        LOG.debug("Unable to determine a plan to assign " + hri);
        return;
      }
    } else {
      dest = ServerName.valueOf(Bytes.toString(destServerName));
      if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer
          && !((BaseLoadBalancer)balancer).shouldBeOnMaster(hri)) {
        // To avoid unnecessary region moving later by balancer. Don't put user
        // regions on master. Regions on master could be put on other region
        // server intentionally by test however.
        LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
          + " to avoid unnecessary region moving later by load balancer,"
          + " because it should not be on master");
        return;
      }
    }

    // No-op when the region already lives on the requested destination.
    if (dest.equals(regionState.getServerName())) {
      LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
        + " because region already assigned to the same server " + dest + ".");
      return;
    }

    // Now we can do the move
    RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);

    try {
      checkInitialized();
      if (this.cpHost != null) {
        // Coprocessor may veto the move.
        if (this.cpHost.preMove(hri, rp.getSource(), rp.getDestination())) {
          return;
        }
      }
      LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
      this.assignmentManager.balance(rp);
      if (this.cpHost != null) {
        this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
      }
    } catch (IOException ioe) {
      // Preserve HBaseIOException type for callers; wrap anything else.
      if (ioe instanceof HBaseIOException) {
        throw (HBaseIOException)ioe;
      }
      throw new HBaseIOException(ioe);
    }
  }
1231 
  /**
   * Create a table. Runs sanity checks and coprocessor hooks, then submits a
   * CreateTableHandler to the executor; region creation proceeds asynchronously.
   * @param hTableDescriptor descriptor of the table to create
   * @param splitKeys pre-split boundaries; null or empty means a single region
   * @throws IOException
   */
  @Override
  public void createTable(HTableDescriptor hTableDescriptor,
      byte [][] splitKeys) throws IOException {
    if (isStopped()) {
      throw new MasterNotRunningException();
    }

    String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
    ensureNamespaceExists(namespace);

    HRegionInfo[] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
    // NOTE(review): checkInitialized() runs only after the namespace lookup and
    // region-info construction above -- confirm those are safe pre-initialization.
    checkInitialized();
    sanityCheckTableDescriptor(hTableDescriptor);
    if (cpHost != null) {
      cpHost.preCreateTable(hTableDescriptor, newRegions);
    }
    LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
    // prepare() runs synchronously here, so its failures propagate to the
    // caller; the handler itself then runs on the executor.
    this.service.submit(new CreateTableHandler(this,
      this.fileSystemManager, hTableDescriptor, conf,
      newRegions, this).prepare());
    if (cpHost != null) {
      cpHost.postCreateTable(hTableDescriptor, newRegions);
    }

  }
1257 
  /**
   * Checks whether the table conforms to some sane limits, and configured
   * values (compression, etc) work. Throws an exception if something is wrong.
   * Can be bypassed cluster-wide by setting "hbase.table.sanity.checks" to
   * false in the configuration, or per-table via the same key set to false in
   * the table descriptor.
   * @param htd descriptor of the table being created or modified
   * @throws IOException (as DoNotRetryIOException) when a check fails
   */
  private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
    final String CONF_KEY = "hbase.table.sanity.checks";
    if (!conf.getBoolean(CONF_KEY, true)) {
      return;
    }
    String tableVal = htd.getConfigurationValue(CONF_KEY);
    if (tableVal != null && !Boolean.valueOf(tableVal)) {
      return;
    }

    // check max file size
    long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
    long maxFileSize = htd.getMaxFileSize();
    if (maxFileSize < 0) {
      // Descriptor does not set it: fall back to the cluster-wide value.
      maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
    }
    if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
      throw new DoNotRetryIOException("MAX_FILESIZE for table descriptor or "
        + "\"hbase.hregion.max.filesize\" (" + maxFileSize
        + ") is too small, which might cause over splitting into unmanageable "
        + "number of regions. Set " + CONF_KEY + " to false at conf or table descriptor "
          + "if you want to bypass sanity checks");
    }

    // check flush size
    long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
    long flushSize = htd.getMemStoreFlushSize();
    if (flushSize < 0) {
      // Descriptor does not set it: fall back to the cluster-wide value.
      flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
    }
    if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
      throw new DoNotRetryIOException("MEMSTORE_FLUSHSIZE for table descriptor or "
          + "\"hbase.hregion.memstore.flush.size\" ("+flushSize+") is too small, which might cause"
          + " very frequent flushing. Set " + CONF_KEY + " to false at conf or table descriptor "
          + "if you want to bypass sanity checks");
    }

    // check that coprocessors and other specified plugin classes can be loaded
    try {
      checkClassLoading(conf, htd);
    } catch (Exception ex) {
      throw new DoNotRetryIOException(ex);
    }

    // check compression can be loaded
    try {
      checkCompression(htd);
    } catch (IOException e) {
      throw new DoNotRetryIOException(e.getMessage(), e);
    }

    // check encryption can be loaded
    try {
      checkEncryption(conf, htd);
    } catch (IOException e) {
      throw new DoNotRetryIOException(e.getMessage(), e);
    }

    // check that we have at least 1 CF
    if (htd.getColumnFamilies().length == 0) {
      throw new DoNotRetryIOException("Table should have at least one column family "
          + "Set "+CONF_KEY+" at conf or table descriptor if you want to bypass sanity checks");
    }

    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
      if (hcd.getTimeToLive() <= 0) {
        throw new DoNotRetryIOException("TTL for column family " + hcd.getNameAsString()
          + "  must be positive. Set " + CONF_KEY + " to false at conf or table descriptor "
          + "if you want to bypass sanity checks");
      }

      // check blockSize
      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
        throw new DoNotRetryIOException("Block size for column family " + hcd.getNameAsString()
          + "  must be between 1K and 16MB Set "+CONF_KEY+" to false at conf or table descriptor "
          + "if you want to bypass sanity checks");
      }

      // check versions
      // NOTE(review): the check rejects only negatives, so min versions 0 is
      // accepted even though the message says "must be positive" -- confirm
      // 0 is a legal min-versions value.
      if (hcd.getMinVersions() < 0) {
        throw new DoNotRetryIOException("Min versions for column family " + hcd.getNameAsString()
          + "  must be positive. Set " + CONF_KEY + " to false at conf or table descriptor "
          + "if you want to bypass sanity checks");
      }
      // max versions already being checked

      // check replication scope
      // NOTE(review): same as above -- scope 0 passes the < 0 check despite the
      // "must be positive" wording.
      if (hcd.getScope() < 0) {
        throw new DoNotRetryIOException("Replication scope for column family "
          + hcd.getNameAsString() + "  must be positive. Set " + CONF_KEY + " to false at conf "
          + "or table descriptor if you want to bypass sanity checks");
      }

      // TODO: should we check coprocessors and encryption ?
    }
  }
1359 
  /**
   * Register this master as a backup master in ZK, then start a daemon thread
   * that blocks until this master becomes the active one (aborting the process
   * on unrecoverable failure).
   * @param infoPort the info-server port published with our address
   * @throws KeeperException
   */
  private void startActiveMasterManager(int infoPort) throws KeeperException {
    String backupZNode = ZKUtil.joinZNode(
      zooKeeper.backupMasterAddressesZNode, serverName.toString());
    /*
    * Add a ZNode for ourselves in the backup master directory since we
    * may not become the active master. If so, we want the actual active
    * master to know we are backup masters, so that it won't assign
    * regions to us if so configured.
    *
    * If we become the active master later, ActiveMasterManager will delete
    * this node explicitly.  If we crash before then, ZooKeeper will delete
    * this node for us since it is ephemeral.
    */
    LOG.info("Adding backup master ZNode " + backupZNode);
    if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode,
        serverName, infoPort)) {
      // Non-fatal: only logged; we continue even without the backup znode.
      LOG.warn("Failed create of " + backupZNode + " by " + serverName);
    }

    activeMasterManager.setInfoPort(infoPort);
    // Start a thread to try to become the active master, so we won't block here
    Threads.setDaemonThreadRunning(new Thread(new Runnable() {
      @Override
      public void run() {
        int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT,
          HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
        // If we're a backup master, stall until a primary to writes his address
        if (conf.getBoolean(HConstants.MASTER_TYPE_BACKUP,
            HConstants.DEFAULT_MASTER_TYPE_BACKUP)) {
          LOG.debug("HMaster started in backup mode. "
            + "Stalling until master znode is written.");
          // This will only be a minute or so while the cluster starts up,
          // so don't worry about setting watches on the parent znode
          while (!activeMasterManager.hasActiveMaster()) {
            LOG.debug("Waiting for master address ZNode to be written "
              + "(Also watching cluster state node)");
            Threads.sleep(timeout);
          }
        }
        MonitoredTask status = TaskMonitor.get().createStatus("Master startup");
        status.setDescription("Master startup");
        try {
          if (activeMasterManager.blockUntilBecomingActiveMaster(timeout, status)) {
            // We won mastership: run the full active-master initialization.
            finishActiveMasterInitialization(status);
          }
        } catch (Throwable t) {
          status.setStatus("Failed to become active: " + t.getMessage());
          LOG.fatal("Failed to become active master", t);
          // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility
          if (t instanceof NoClassDefFoundError &&
              t.getMessage().contains("org/apache/hadoop/hdfs/protocol/FSConstants$SafeModeAction")) {
            // improved error message for this special case
            abort("HBase is having a problem with its Hadoop jars.  You may need to "
              + "recompile HBase against Hadoop version "
              +  org.apache.hadoop.util.VersionInfo.getVersion()
              + " or change your hadoop jars to start properly", t);
          } else {
            abort("Unhandled exception. Starting shutdown.", t);
          }
        } finally {
          status.cleanup();
        }
      }
    }, getServerName().toShortString() + ".activeMasterManager"));
  }
1425 
1426   private void checkCompression(final HTableDescriptor htd)
1427   throws IOException {
1428     if (!this.masterCheckCompression) return;
1429     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1430       checkCompression(hcd);
1431     }
1432   }
1433 
1434   private void checkCompression(final HColumnDescriptor hcd)
1435   throws IOException {
1436     if (!this.masterCheckCompression) return;
1437     CompressionTest.testCompression(hcd.getCompression());
1438     CompressionTest.testCompression(hcd.getCompactionCompression());
1439   }
1440 
1441   private void checkEncryption(final Configuration conf, final HTableDescriptor htd)
1442   throws IOException {
1443     if (!this.masterCheckEncryption) return;
1444     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1445       checkEncryption(conf, hcd);
1446     }
1447   }
1448 
1449   private void checkEncryption(final Configuration conf, final HColumnDescriptor hcd)
1450   throws IOException {
1451     if (!this.masterCheckEncryption) return;
1452     EncryptionTest.testEncryption(conf, hcd.getEncryptionType(), hcd.getEncryptionKey());
1453   }
1454 
  /**
   * Verifies that the classes a table descriptor references can actually be
   * loaded: its region split policy class and any table coprocessor
   * attributes.  Lets a create/modify fail fast on the master rather than
   * failing later on region servers.
   * @param conf configuration used to resolve the classes
   * @param htd table descriptor naming the classes
   * @throws IOException if a referenced class cannot be loaded or validated
   */
  private void checkClassLoading(final Configuration conf, final HTableDescriptor htd)
  throws IOException {
    RegionSplitPolicy.getSplitPolicyClass(htd, conf);
    RegionCoprocessorHost.testTableCoprocessorAttrs(conf, htd);
  }
1460 
1461   private HRegionInfo[] getHRegionInfos(HTableDescriptor hTableDescriptor,
1462     byte[][] splitKeys) {
1463     long regionId = System.currentTimeMillis();
1464     HRegionInfo[] hRegionInfos = null;
1465     if (splitKeys == null || splitKeys.length == 0) {
1466       hRegionInfos = new HRegionInfo[]{new HRegionInfo(hTableDescriptor.getTableName(), null, null,
1467                 false, regionId)};
1468     } else {
1469       int numRegions = splitKeys.length + 1;
1470       hRegionInfos = new HRegionInfo[numRegions];
1471       byte[] startKey = null;
1472       byte[] endKey = null;
1473       for (int i = 0; i < numRegions; i++) {
1474         endKey = (i == splitKeys.length) ? null : splitKeys[i];
1475         hRegionInfos[i] =
1476              new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey,
1477                  false, regionId);
1478         startKey = endKey;
1479       }
1480     }
1481     return hRegionInfos;
1482   }
1483 
1484   private static boolean isCatalogTable(final TableName tableName) {
1485     return tableName.equals(TableName.META_TABLE_NAME);
1486   }
1487 
1488   @Override
1489   public void deleteTable(final TableName tableName) throws IOException {
1490     checkInitialized();
1491     if (cpHost != null) {
1492       cpHost.preDeleteTable(tableName);
1493     }
1494     LOG.info(getClientIdAuditPrefix() + " delete " + tableName);
1495     this.service.submit(new DeleteTableHandler(tableName, this, this).prepare());
1496     if (cpHost != null) {
1497       cpHost.postDeleteTable(tableName);
1498     }
1499   }
1500 
1501   @Override
1502   public void truncateTable(TableName tableName, boolean preserveSplits) throws IOException {
1503     checkInitialized();
1504     if (cpHost != null) {
1505       cpHost.preTruncateTable(tableName);
1506     }
1507     LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);
1508     TruncateTableHandler handler = new TruncateTableHandler(tableName, this, this, preserveSplits);
1509     handler.prepare();
1510     handler.process();
1511     if (cpHost != null) {
1512       cpHost.postTruncateTable(tableName);
1513     }
1514   }
1515 
1516   @Override
1517   public void addColumn(final TableName tableName, final HColumnDescriptor columnDescriptor)
1518       throws IOException {
1519     checkInitialized();
1520     checkCompression(columnDescriptor);
1521     checkEncryption(conf, columnDescriptor);
1522     if (cpHost != null) {
1523       if (cpHost.preAddColumn(tableName, columnDescriptor)) {
1524         return;
1525       }
1526     }
1527     //TODO: we should process this (and some others) in an executor
1528     new TableAddFamilyHandler(tableName, columnDescriptor, this, this).prepare().process();
1529     if (cpHost != null) {
1530       cpHost.postAddColumn(tableName, columnDescriptor);
1531     }
1532   }
1533 
1534   @Override
1535   public void modifyColumn(TableName tableName, HColumnDescriptor descriptor)
1536       throws IOException {
1537     checkInitialized();
1538     checkCompression(descriptor);
1539     checkEncryption(conf, descriptor);
1540     if (cpHost != null) {
1541       if (cpHost.preModifyColumn(tableName, descriptor)) {
1542         return;
1543       }
1544     }
1545     LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
1546     new TableModifyFamilyHandler(tableName, descriptor, this, this)
1547       .prepare().process();
1548     if (cpHost != null) {
1549       cpHost.postModifyColumn(tableName, descriptor);
1550     }
1551   }
1552 
1553   @Override
1554   public void deleteColumn(final TableName tableName, final byte[] columnName)
1555       throws IOException {
1556     checkInitialized();
1557     if (cpHost != null) {
1558       if (cpHost.preDeleteColumn(tableName, columnName)) {
1559         return;
1560       }
1561     }
1562     LOG.info(getClientIdAuditPrefix() + " delete " + Bytes.toString(columnName));
1563     new TableDeleteFamilyHandler(tableName, columnName, this, this).prepare().process();
1564     if (cpHost != null) {
1565       cpHost.postDeleteColumn(tableName, columnName);
1566     }
1567   }
1568 
1569   @Override
1570   public void enableTable(final TableName tableName) throws IOException {
1571     checkInitialized();
1572     if (cpHost != null) {
1573       cpHost.preEnableTable(tableName);
1574     }
1575     LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
1576     this.service.submit(new EnableTableHandler(this, tableName,
1577       assignmentManager, tableLockManager, false).prepare());
1578     if (cpHost != null) {
1579       cpHost.postEnableTable(tableName);
1580    }
1581   }
1582 
1583   @Override
1584   public void disableTable(final TableName tableName) throws IOException {
1585     checkInitialized();
1586     if (cpHost != null) {
1587       cpHost.preDisableTable(tableName);
1588     }
1589     LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
1590     this.service.submit(new DisableTableHandler(this, tableName,
1591       assignmentManager, tableLockManager, false).prepare());
1592     if (cpHost != null) {
1593       cpHost.postDisableTable(tableName);
1594     }
1595   }
1596 
1597   /**
1598    * Return the region and current deployment for the region containing
1599    * the given row. If the region cannot be found, returns null. If it
1600    * is found, but not currently deployed, the second element of the pair
1601    * may be null.
1602    */
1603   @VisibleForTesting // Used by TestMaster.
1604   Pair<HRegionInfo, ServerName> getTableRegionForRow(
1605       final TableName tableName, final byte [] rowKey)
1606   throws IOException {
1607     final AtomicReference<Pair<HRegionInfo, ServerName>> result =
1608       new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
1609 
1610     MetaScannerVisitor visitor =
1611       new MetaScannerVisitorBase() {
1612         @Override
1613         public boolean processRow(Result data) throws IOException {
1614           if (data == null || data.size() <= 0) {
1615             return true;
1616           }
1617           Pair<HRegionInfo, ServerName> pair = HRegionInfo.getHRegionInfoAndServerName(data);
1618           if (pair == null) {
1619             return false;
1620           }
1621           if (!pair.getFirst().getTable().equals(tableName)) {
1622             return false;
1623           }
1624           result.set(pair);
1625           return true;
1626         }
1627     };
1628 
1629     MetaScanner.metaScan(clusterConnection, visitor, tableName, rowKey, 1);
1630     return result.get();
1631   }
1632 
1633   @Override
1634   public void modifyTable(final TableName tableName, final HTableDescriptor descriptor)
1635       throws IOException {
1636     checkInitialized();
1637     sanityCheckTableDescriptor(descriptor);
1638     if (cpHost != null) {
1639       cpHost.preModifyTable(tableName, descriptor);
1640     }
1641     LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
1642     new ModifyTableHandler(tableName, descriptor, this, this).prepare().process();
1643     if (cpHost != null) {
1644       cpHost.postModifyTable(tableName, descriptor);
1645     }
1646   }
1647 
1648   @Override
1649   public void checkTableModifiable(final TableName tableName)
1650       throws IOException, TableNotFoundException, TableNotDisabledException {
1651     if (isCatalogTable(tableName)) {
1652       throw new IOException("Can't modify catalog tables");
1653     }
1654     if (!MetaTableAccessor.tableExists(getConnection(), tableName)) {
1655       throw new TableNotFoundException(tableName);
1656     }
1657     if (!getAssignmentManager().getTableStateManager().
1658         isTableState(tableName, TableState.State.DISABLED)) {
1659       throw new TableNotDisabledException(tableName);
1660     }
1661   }
1662 
1663   /**
1664    * @return cluster status
1665    */
1666   public ClusterStatus getClusterStatus() throws InterruptedIOException {
1667     // Build Set of backup masters from ZK nodes
1668     List<String> backupMasterStrings;
1669     try {
1670       backupMasterStrings = ZKUtil.listChildrenNoWatch(this.zooKeeper,
1671         this.zooKeeper.backupMasterAddressesZNode);
1672     } catch (KeeperException e) {
1673       LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
1674       backupMasterStrings = new ArrayList<String>(0);
1675     }
1676     List<ServerName> backupMasters = new ArrayList<ServerName>(
1677                                           backupMasterStrings.size());
1678     for (String s: backupMasterStrings) {
1679       try {
1680         byte [] bytes;
1681         try {
1682           bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
1683               this.zooKeeper.backupMasterAddressesZNode, s));
1684         } catch (InterruptedException e) {
1685           throw new InterruptedIOException();
1686         }
1687         if (bytes != null) {
1688           ServerName sn;
1689           try {
1690             sn = ServerName.parseFrom(bytes);
1691           } catch (DeserializationException e) {
1692             LOG.warn("Failed parse, skipping registering backup server", e);
1693             continue;
1694           }
1695           backupMasters.add(sn);
1696         }
1697       } catch (KeeperException e) {
1698         LOG.warn(this.zooKeeper.prefix("Unable to get information about " +
1699                  "backup servers"), e);
1700       }
1701     }
1702     Collections.sort(backupMasters, new Comparator<ServerName>() {
1703       @Override
1704       public int compare(ServerName s1, ServerName s2) {
1705         return s1.getServerName().compareTo(s2.getServerName());
1706       }});
1707 
1708     String clusterId = fileSystemManager != null ?
1709       fileSystemManager.getClusterId().toString() : null;
1710     Map<String, RegionState> regionsInTransition = assignmentManager != null ?
1711       assignmentManager.getRegionStates().getRegionsInTransition() : null;
1712     String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null;
1713     boolean balancerOn = loadBalancerTracker != null ?
1714       loadBalancerTracker.isBalancerOn() : false;
1715     Map<ServerName, ServerLoad> onlineServers = null;
1716     Set<ServerName> deadServers = null;
1717     if (serverManager != null) {
1718       deadServers = serverManager.getDeadServers().copyServerNames();
1719       onlineServers = serverManager.getOnlineServers();
1720     }
1721     return new ClusterStatus(VersionInfo.getVersion(), clusterId,
1722       onlineServers, deadServers, serverName, backupMasters,
1723       regionsInTransition, coprocessors, balancerOn);
1724   }
1725 
1726   /**
1727    * The set of loaded coprocessors is stored in a static set. Since it's
1728    * statically allocated, it does not require that HMaster's cpHost be
1729    * initialized prior to accessing it.
1730    * @return a String representation of the set of names of the loaded
1731    * coprocessors.
1732    */
1733   public static String getLoadedCoprocessors() {
1734     return CoprocessorHost.getLoadedCoprocessors().toString();
1735   }
1736 
1737   /**
1738    * @return timestamp in millis when HMaster was started.
1739    */
1740   public long getMasterStartTime() {
1741     return startcode;
1742   }
1743 
1744   /**
1745    * @return timestamp in millis when HMaster became the active master.
1746    */
1747   public long getMasterActiveTime() {
1748     return masterActiveTime;
1749   }
1750 
1751   public int getRegionServerInfoPort(final ServerName sn) {
1752     RegionServerInfo info = this.regionServerTracker.getRegionServerInfo(sn);
1753     if (info == null || info.getInfoPort() == 0) {
1754       return conf.getInt(HConstants.REGIONSERVER_INFO_PORT,
1755         HConstants.DEFAULT_REGIONSERVER_INFOPORT);
1756     }
1757     return info.getInfoPort();
1758   }
1759 
1760   /**
1761    * @return array of coprocessor SimpleNames.
1762    */
1763   public String[] getMasterCoprocessors() {
1764     Set<String> masterCoprocessors = getMasterCoprocessorHost().getCoprocessors();
1765     return masterCoprocessors.toArray(new String[masterCoprocessors.size()]);
1766   }
1767 
1768   @Override
1769   public void abort(final String msg, final Throwable t) {
1770     if (isAborted() || isStopped()) {
1771       return;
1772     }
1773     if (cpHost != null) {
1774       // HBASE-4014: dump a list of loaded coprocessors.
1775       LOG.fatal("Master server abort: loaded coprocessors are: " +
1776           getLoadedCoprocessors());
1777     }
1778     if (t != null) LOG.fatal(msg, t);
1779     stop(msg);
1780   }
1781 
  /** @return the ZooKeeper watcher this master uses. */
  @Override
  public ZooKeeperWatcher getZooKeeper() {
    return zooKeeper;
  }

  /** @return the master coprocessor host, or null before initialization. */
  @Override
  public MasterCoprocessorHost getMasterCoprocessorHost() {
    return cpHost;
  }

  /** @return the master quota manager. */
  @Override
  public MasterQuotaManager getMasterQuotaManager() {
    return quotaManager;
  }

  /** @return this master's server name. */
  @Override
  public ServerName getServerName() {
    return this.serverName;
  }

  /** @return the assignment manager. */
  @Override
  public AssignmentManager getAssignmentManager() {
    return this.assignmentManager;
  }

  /** @return buffer of fatal errors reported by region servers. */
  public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
    return rsFatals;
  }
1810 
1811   public void shutdown() {
1812     if (cpHost != null) {
1813       try {
1814         cpHost.preShutdown();
1815       } catch (IOException ioe) {
1816         LOG.error("Error call master coprocessor preShutdown()", ioe);
1817       }
1818     }
1819 
1820     if (this.serverManager != null) {
1821       this.serverManager.shutdownCluster();
1822     }
1823     if (this.clusterStatusTracker != null){
1824       try {
1825         this.clusterStatusTracker.setClusterDown();
1826       } catch (KeeperException e) {
1827         LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
1828       }
1829     }
1830   }
1831 
1832   public void stopMaster() {
1833     if (cpHost != null) {
1834       try {
1835         cpHost.preStopMaster();
1836       } catch (IOException ioe) {
1837         LOG.error("Error call master coprocessor preStopMaster()", ioe);
1838       }
1839     }
1840     stop("Stopped by " + Thread.currentThread().getName());
1841   }
1842 
  /**
   * Throws unless the RPC service layer has started.
   * @throws ServerNotRunningYetException if the server has not started yet
   */
  void checkServiceStarted() throws ServerNotRunningYetException {
    if (!serviceStarted) {
      throw new ServerNotRunningYetException("Server is not running yet");
    }
  }

  /**
   * Throws unless the master has started AND finished initialization.
   * @throws PleaseHoldException if started but still initializing
   * @throws ServerNotRunningYetException if the server has not started yet
   */
  void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException {
    checkServiceStarted();
    if (!this.initialized) {
      throw new PleaseHoldException("Master is initializing");
    }
  }

  /**
   * Throws unless the master is initialized and the table namespace manager
   * is up and its backing table is available.
   * @throws IOException if the namespace manager is not ready yet
   */
  void checkNamespaceManagerReady() throws IOException {
    checkInitialized();
    if (tableNamespaceManager == null ||
        !tableNamespaceManager.isTableAvailableAndInitialized()) {
      throw new IOException("Table Namespace Manager not ready yet, try again later");
    }
  }
1863   /**
1864    * Report whether this master is currently the active master or not.
1865    * If not active master, we are parked on ZK waiting to become active.
1866    *
1867    * This method is used for testing.
1868    *
1869    * @return true if active master, false if not.
1870    */
1871   public boolean isActiveMaster() {
1872     return isActiveMaster;
1873   }
1874 
1875   /**
1876    * Report whether this master has completed with its initialization and is
1877    * ready.  If ready, the master is also the active master.  A standby master
1878    * is never ready.
1879    *
1880    * This method is used for testing.
1881    *
1882    * @return true if master is ready to go, false if not.
1883    */
1884   @Override
1885   public boolean isInitialized() {
1886     return initialized;
1887   }
1888 
1889   /**
1890    * ServerShutdownHandlerEnabled is set false before completing
1891    * assignMeta to prevent processing of ServerShutdownHandler.
1892    * @return true if assignMeta has completed;
1893    */
1894   @Override
1895   public boolean isServerShutdownHandlerEnabled() {
1896     return this.serverShutdownHandlerEnabled;
1897   }
1898 
1899   /**
1900    * Report whether this master has started initialization and is about to do meta region assignment
1901    * @return true if master is in initialization & about to assign hbase:meta regions
1902    */
1903   public boolean isInitializationStartsMetaRegionAssignment() {
1904     return this.initializationBeforeMetaAssignment;
1905   }
1906 
1907   public void assignRegion(HRegionInfo hri) {
1908     assignmentManager.assign(hri);
1909   }
1910 
1911   /**
1912    * Compute the average load across all region servers.
1913    * Currently, this uses a very naive computation - just uses the number of
1914    * regions being served, ignoring stats about number of requests.
1915    * @return the average load
1916    */
1917   public double getAverageLoad() {
1918     if (this.assignmentManager == null) {
1919       return 0;
1920     }
1921 
1922     RegionStates regionStates = this.assignmentManager.getRegionStates();
1923     if (regionStates == null) {
1924       return 0;
1925     }
1926     return regionStates.getAverageLoad();
1927   }
1928 
1929   @Override
1930   public boolean registerService(Service instance) {
1931     /*
1932      * No stacking of instances is allowed for a single service name
1933      */
1934     Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType();
1935     if (coprocessorServiceHandlers.containsKey(serviceDesc.getFullName())) {
1936       LOG.error("Coprocessor service "+serviceDesc.getFullName()+
1937           " already registered, rejecting request from "+instance
1938       );
1939       return false;
1940     }
1941 
1942     coprocessorServiceHandlers.put(serviceDesc.getFullName(), instance);
1943     if (LOG.isDebugEnabled()) {
1944       LOG.debug("Registered master coprocessor service: service="+serviceDesc.getFullName());
1945     }
1946     return true;
1947   }
1948 
1949   /**
1950    * Utility for constructing an instance of the passed HMaster class.
1951    * @param masterClass
1952    * @param conf
1953    * @return HMaster instance.
1954    */
1955   public static HMaster constructMaster(Class<? extends HMaster> masterClass,
1956       final Configuration conf, final CoordinatedStateManager cp)  {
1957     try {
1958       Constructor<? extends HMaster> c =
1959         masterClass.getConstructor(Configuration.class, CoordinatedStateManager.class);
1960       return c.newInstance(conf, cp);
1961     } catch (InvocationTargetException ite) {
1962       Throwable target = ite.getTargetException() != null?
1963         ite.getTargetException(): ite;
1964       if (target.getCause() != null) target = target.getCause();
1965       throw new RuntimeException("Failed construction of Master: " +
1966         masterClass.toString(), target);
1967     } catch (Exception e) {
1968       throw new RuntimeException("Failed construction of Master: " +
1969         masterClass.toString() + ((e.getCause() != null)?
1970           e.getCause().getMessage(): ""), e);
1971     }
1972   }
1973 
1974   /**
1975    * @see org.apache.hadoop.hbase.master.HMasterCommandLine
1976    */
1977   public static void main(String [] args) {
1978     VersionInfo.logVersion();
1979     new HMasterCommandLine(HMaster.class).doMain(args);
1980   }
1981 
  /** @return the HFile cleaner chore used by this master. */
  public HFileCleaner getHFileCleaner() {
    return this.hfileCleaner;
  }

  /**
   * Exposed for TESTING!
   * @return the underlying snapshot manager
   */
  public SnapshotManager getSnapshotManagerForTesting() {
    return this.snapshotManager;
  }
1993 
1994   @Override
1995   public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
1996     TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
1997     checkNamespaceManagerReady();
1998     if (cpHost != null) {
1999       if (cpHost.preCreateNamespace(descriptor)) {
2000         return;
2001       }
2002     }
2003     LOG.info(getClientIdAuditPrefix() + " creating " + descriptor);
2004     tableNamespaceManager.create(descriptor);
2005     if (cpHost != null) {
2006       cpHost.postCreateNamespace(descriptor);
2007     }
2008   }
2009 
2010   @Override
2011   public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
2012     TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
2013     checkNamespaceManagerReady();
2014     if (cpHost != null) {
2015       if (cpHost.preModifyNamespace(descriptor)) {
2016         return;
2017       }
2018     }
2019     LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
2020     tableNamespaceManager.update(descriptor);
2021     if (cpHost != null) {
2022       cpHost.postModifyNamespace(descriptor);
2023     }
2024   }
2025 
2026   @Override
2027   public void deleteNamespace(String name) throws IOException {
2028     checkNamespaceManagerReady();
2029     if (cpHost != null) {
2030       if (cpHost.preDeleteNamespace(name)) {
2031         return;
2032       }
2033     }
2034     LOG.info(getClientIdAuditPrefix() + " delete " + name);
2035     tableNamespaceManager.remove(name);
2036     if (cpHost != null) {
2037       cpHost.postDeleteNamespace(name);
2038     }
2039   }
2040 
2041   /**
2042    * Ensure that the specified namespace exists, otherwise throws a NamespaceNotFoundException
2043    *
2044    * @param name the namespace to check
2045    * @throws IOException if the namespace manager is not ready yet.
2046    * @throws NamespaceNotFoundException if the namespace does not exists
2047    */
2048   private void ensureNamespaceExists(final String name)
2049       throws IOException, NamespaceNotFoundException {
2050     checkNamespaceManagerReady();
2051     NamespaceDescriptor nsd = tableNamespaceManager.get(name);
2052     if (nsd == null) {
2053       throw new NamespaceNotFoundException(name);
2054     }
2055   }
2056 
2057   @Override
2058   public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
2059     checkNamespaceManagerReady();
2060 
2061     if (cpHost != null) {
2062       cpHost.preGetNamespaceDescriptor(name);
2063     }
2064 
2065     NamespaceDescriptor nsd = tableNamespaceManager.get(name);
2066     if (nsd == null) {
2067       throw new NamespaceNotFoundException(name);
2068     }
2069 
2070     if (cpHost != null) {
2071       cpHost.postGetNamespaceDescriptor(nsd);
2072     }
2073 
2074     return nsd;
2075   }
2076 
2077   @Override
2078   public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
2079     checkNamespaceManagerReady();
2080 
2081     final List<NamespaceDescriptor> descriptors = new ArrayList<NamespaceDescriptor>();
2082     boolean bypass = false;
2083     if (cpHost != null) {
2084       bypass = cpHost.preListNamespaceDescriptors(descriptors);
2085     }
2086 
2087     if (!bypass) {
2088       descriptors.addAll(tableNamespaceManager.list());
2089 
2090       if (cpHost != null) {
2091         cpHost.postListNamespaceDescriptors(descriptors);
2092       }
2093     }
2094     return descriptors;
2095   }
2096 
  /**
   * Lists all table descriptors in the given namespace (system tables
   * included).
   * @param name namespace to list; must exist
   * @throws NamespaceNotFoundException if the namespace does not exist
   */
  @Override
  public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
    ensureNamespaceExists(name);
    return listTableDescriptors(name, null, null, true);
  }

  /**
   * Lists all table names in the given namespace (system tables included).
   * @param name namespace to list; must exist
   * @throws NamespaceNotFoundException if the namespace does not exist
   */
  @Override
  public List<TableName> listTableNamesByNamespace(String name) throws IOException {
    ensureNamespaceExists(name);
    return listTableNames(name, null, true);
  }
2108 
2109   /**
2110    * Returns the list of table descriptors that match the specified request
2111    *
2112    * @param namespace the namespace to query, or null if querying for all
2113    * @param regex The regular expression to match against, or null if querying for all
2114    * @param tableNameList the list of table names, or null if querying for all
2115    * @param includeSysTables False to match only against userspace tables
2116    * @return the list of table descriptors
2117    */
2118   public List<HTableDescriptor> listTableDescriptors(final String namespace, final String regex,
2119       final List<TableName> tableNameList, final boolean includeSysTables)
2120       throws IOException {
2121     final List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();
2122 
2123     boolean bypass = false;
2124     if (cpHost != null) {
2125       bypass = cpHost.preGetTableDescriptors(tableNameList, descriptors, regex);
2126     }
2127 
2128     if (!bypass) {
2129       if (tableNameList == null || tableNameList.size() == 0) {
2130         // request for all TableDescriptors
2131         Collection<HTableDescriptor> htds;
2132         if (namespace != null && namespace.length() > 0) {
2133           htds = tableDescriptors.getByNamespace(namespace).values();
2134         } else {
2135           htds = tableDescriptors.getAll().values();
2136         }
2137 
2138         for (HTableDescriptor desc: htds) {
2139           if (includeSysTables || !desc.getTableName().isSystemTable()) {
2140             descriptors.add(desc);
2141           }
2142         }
2143       } else {
2144         for (TableName s: tableNameList) {
2145           HTableDescriptor desc = tableDescriptors.get(s);
2146           if (desc != null) {
2147             descriptors.add(desc);
2148           }
2149         }
2150       }
2151 
2152       // Retains only those matched by regular expression.
2153       if (regex != null) {
2154         filterTablesByRegex(descriptors, Pattern.compile(regex));
2155       }
2156 
2157       if (cpHost != null) {
2158         cpHost.postGetTableDescriptors(tableNameList, descriptors, regex);
2159       }
2160     }
2161     return descriptors;
2162   }
2163 
2164   /**
2165    * Returns the list of table names that match the specified request
2166    * @param regex The regular expression to match against, or null if querying for all
2167    * @param namespace the namespace to query, or null if querying for all
2168    * @param includeSysTables False to match only against userspace tables
2169    * @return the list of table names
2170    */
2171   public List<TableName> listTableNames(final String namespace, final String regex,
2172       final boolean includeSysTables) throws IOException {
2173     final List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();
2174 
2175     boolean bypass = false;
2176     if (cpHost != null) {
2177       bypass = cpHost.preGetTableNames(descriptors, regex);
2178     }
2179 
2180     if (!bypass) {
2181       // get all descriptors
2182       Collection<HTableDescriptor> htds;
2183       if (namespace != null && namespace.length() > 0) {
2184         htds = tableDescriptors.getByNamespace(namespace).values();
2185       } else {
2186         htds = tableDescriptors.getAll().values();
2187       }
2188 
2189       for (HTableDescriptor htd: htds) {
2190         if (includeSysTables || !htd.getTableName().isSystemTable()) {
2191           descriptors.add(htd);
2192         }
2193       }
2194 
2195       // Retains only those matched by regular expression.
2196       if (regex != null) {
2197         filterTablesByRegex(descriptors, Pattern.compile(regex));
2198       }
2199 
2200       if (cpHost != null) {
2201         cpHost.postGetTableNames(descriptors, regex);
2202       }
2203     }
2204 
2205     List<TableName> result = new ArrayList<TableName>(descriptors.size());
2206     for (HTableDescriptor htd: descriptors) {
2207       result.add(htd.getTableName());
2208     }
2209     return result;
2210   }
2211 
2212 
2213   /**
2214    * Removes the table descriptors that don't match the pattern.
2215    * @param descriptors list of table descriptors to filter
2216    * @param pattern the regex to use
2217    */
2218   private static void filterTablesByRegex(final Collection<HTableDescriptor> descriptors,
2219       final Pattern pattern) {
2220     final String defaultNS = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR;
2221     Iterator<HTableDescriptor> itr = descriptors.iterator();
2222     while (itr.hasNext()) {
2223       HTableDescriptor htd = itr.next();
2224       String tableName = htd.getTableName().getNameAsString();
2225       boolean matched = pattern.matcher(tableName).matches();
2226       if (!matched && htd.getTableName().getNamespaceAsString().equals(defaultNS)) {
2227         matched = pattern.matcher(defaultNS + TableName.NAMESPACE_DELIM + tableName).matches();
2228       }
2229       if (!matched) {
2230         itr.remove();
2231       }
2232     }
2233   }
2234 }