1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.master;
20  
21  import java.io.IOException;
22  import java.io.InterruptedIOException;
23  import java.lang.reflect.Constructor;
24  import java.lang.reflect.InvocationTargetException;
25  import java.net.InetAddress;
26  import java.net.InetSocketAddress;
27  import java.net.UnknownHostException;
28  import java.util.ArrayList;
29  import java.util.Collections;
30  import java.util.Comparator;
31  import java.util.HashSet;
32  import java.util.List;
33  import java.util.Map;
34  import java.util.Set;
35  import java.util.concurrent.atomic.AtomicReference;
36  
37  import javax.servlet.ServletException;
38  import javax.servlet.http.HttpServlet;
39  import javax.servlet.http.HttpServletRequest;
40  import javax.servlet.http.HttpServletResponse;
41  
42  import org.apache.commons.logging.Log;
43  import org.apache.commons.logging.LogFactory;
44  import org.apache.hadoop.classification.InterfaceAudience;
45  import org.apache.hadoop.conf.Configuration;
46  import org.apache.hadoop.fs.Path;
47  import org.apache.hadoop.hbase.ClusterStatus;
48  import org.apache.hadoop.hbase.CoordinatedStateException;
49  import org.apache.hadoop.hbase.DoNotRetryIOException;
50  import org.apache.hadoop.hbase.HBaseIOException;
51  import org.apache.hadoop.hbase.HColumnDescriptor;
52  import org.apache.hadoop.hbase.HConstants;
53  import org.apache.hadoop.hbase.HRegionInfo;
54  import org.apache.hadoop.hbase.HTableDescriptor;
55  import org.apache.hadoop.hbase.MasterNotRunningException;
56  import org.apache.hadoop.hbase.NamespaceDescriptor;
57  import org.apache.hadoop.hbase.NamespaceNotFoundException;
58  import org.apache.hadoop.hbase.PleaseHoldException;
59  import org.apache.hadoop.hbase.Server;
60  import org.apache.hadoop.hbase.ServerLoad;
61  import org.apache.hadoop.hbase.ServerName;
62  import org.apache.hadoop.hbase.TableDescriptors;
63  import org.apache.hadoop.hbase.TableName;
64  import org.apache.hadoop.hbase.TableNotDisabledException;
65  import org.apache.hadoop.hbase.TableNotFoundException;
66  import org.apache.hadoop.hbase.UnknownRegionException;
67  import org.apache.hadoop.hbase.MetaTableAccessor;
68  import org.apache.hadoop.hbase.client.MetaScanner;
69  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
70  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
71  import org.apache.hadoop.hbase.client.Result;
72  import org.apache.hadoop.hbase.CoordinatedStateManager;
73  import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
74  import org.apache.hadoop.hbase.exceptions.DeserializationException;
75  import org.apache.hadoop.hbase.executor.ExecutorType;
76  import org.apache.hadoop.hbase.ipc.RequestContext;
77  import org.apache.hadoop.hbase.ipc.RpcServer;
78  import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
79  import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
80  import org.apache.hadoop.hbase.master.RegionState.State;
81  import org.apache.hadoop.hbase.master.balancer.BalancerChore;
82  import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
83  import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
84  import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
85  import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
86  import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
87  import org.apache.hadoop.hbase.master.handler.DeleteTableHandler;
88  import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
89  import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
90  import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
91  import org.apache.hadoop.hbase.master.handler.ModifyTableHandler;
92  import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
93  import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
94  import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
95  import org.apache.hadoop.hbase.master.handler.TruncateTableHandler;
96  import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
97  import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
98  import org.apache.hadoop.hbase.monitoring.MonitoredTask;
99  import org.apache.hadoop.hbase.monitoring.TaskMonitor;
100 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
101 import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
102 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
103 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
104 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
105 import org.apache.hadoop.hbase.regionserver.HRegionServer;
106 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
107 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
108 import org.apache.hadoop.hbase.replication.regionserver.Replication;
109 import org.apache.hadoop.hbase.security.UserProvider;
110 import org.apache.hadoop.hbase.util.Bytes;
111 import org.apache.hadoop.hbase.util.CompressionTest;
112 import org.apache.hadoop.hbase.util.FSUtils;
113 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
114 import org.apache.hadoop.hbase.util.Pair;
115 import org.apache.hadoop.hbase.util.Threads;
116 import org.apache.hadoop.hbase.util.VersionInfo;
117 import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
118 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
119 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
120 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
121 import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
122 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
123 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
124 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
125 import org.apache.zookeeper.KeeperException;
126 import org.apache.zookeeper.Watcher;
127 import org.mortbay.jetty.Connector;
128 import org.mortbay.jetty.nio.SelectChannelConnector;
129 import org.mortbay.jetty.servlet.Context;
130 
131 import com.google.common.annotations.VisibleForTesting;
132 import com.google.common.collect.Lists;
133 import com.google.common.collect.Maps;
134 import com.google.protobuf.Descriptors;
135 import com.google.protobuf.Service;
136 
137 /**
138  * HMaster is the "master server" for HBase. An HBase cluster has one active
139  * master.  If many masters are started, all compete.  Whichever wins goes on to
140  * run the cluster.  All others park themselves in their constructor until
141  * master or cluster shutdown or until the active master loses its lease in
142  * zookeeper.  Thereafter, all running masters jostle to take over the master role.
143  *
144  * <p>The Master can be asked to shutdown the cluster. See {@link #shutdown()}.  In
145  * this case it will tell all regionservers to go down and then wait on them
146  * all reporting in that they are down.  This master will then shut itself down.
147  *
148  * <p>You can also shutdown just this master.  Call {@link #stopMaster()}.
149  *
150  * @see Watcher
151  */
152 @InterfaceAudience.Private
153 @SuppressWarnings("deprecation")
154 public class HMaster extends HRegionServer implements MasterServices, Server {
155   private static final Log LOG = LogFactory.getLog(HMaster.class.getName());
156 
157   // MASTER is the name of the webapp and the attribute name used when stuffing this
158   // instance into the web context.
159   public static final String MASTER = "master";
160 
161   // Manager and zk listener for master election
162   private ActiveMasterManager activeMasterManager;
163   // Region server tracker
164   RegionServerTracker regionServerTracker;
165   // Draining region server tracker
166   private DrainingServerTracker drainingServerTracker;
167   // Tracker for load balancer state
168   LoadBalancerTracker loadBalancerTracker;
169 
170   /** Namespace manager and janitor chore */
171   private TableNamespaceManager tableNamespaceManager;
172   private NamespaceJanitor namespaceJanitorChore;
173 
174   // Metrics for the HMaster
175   final MetricsMaster metricsMaster;
176   // file system manager for the master FS operations
177   private MasterFileSystem fileSystemManager;
178 
179   // server manager to deal with region server info
180   volatile ServerManager serverManager;
181 
182   // manager of assignment nodes in zookeeper
183   AssignmentManager assignmentManager;
184 
185   // buffer for "fatal error" notices from region servers
186   // in the cluster. This is only used for assisting
187   // operations/debugging.
188   MemoryBoundedLogMessageBuffer rsFatals;
189 
190   // flag set after we become the active master (used for testing)
191   private volatile boolean isActiveMaster = false;
192 
193   // flag set after we complete initialization once active,
194   // it is not private since it's used in unit tests
195   volatile boolean initialized = false;
196 
197   // flag set after master services are started,
198   // initialization may not have completed yet.
199   volatile boolean serviceStarted = false;
200 
201   // flag set after we complete assignMeta.
202   private volatile boolean serverShutdownHandlerEnabled = false;
203 
204   LoadBalancer balancer;
205   private BalancerChore balancerChore;
206   private ClusterStatusChore clusterStatusChore;
207   private ClusterStatusPublisher clusterStatusPublisherChore = null;
208 
209   CatalogJanitor catalogJanitorChore;
210   private LogCleaner logCleaner;
211   private HFileCleaner hfileCleaner;
212 
213   MasterCoprocessorHost cpHost;
214 
215   // Time stamps for when a hmaster became active
216   private long masterActiveTime;
217 
218   // should we check the compression codec type on the master side; default true, HBASE-6370
219   private final boolean masterCheckCompression;
220 
221   Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();
222 
223   // monitor for snapshot of hbase tables
224   SnapshotManager snapshotManager;
225   // monitor for distributed procedures
226   MasterProcedureManagerHost mpmHost;
227 
228   /** flag used in test cases in order to simulate RS failures during master initialization */
229   private volatile boolean initializationBeforeMetaAssignment = false;
230 
231   /** jetty server for master to redirect requests to regionserver infoServer */
232   private org.mortbay.jetty.Server masterJettyServer;
233 
234   public static class RedirectServlet extends HttpServlet {
235     private static final long serialVersionUID = 2894774810058302472L;
236     private static int regionServerInfoPort;
237 
238     @Override
239     public void doGet(HttpServletRequest request,
240         HttpServletResponse response) throws ServletException, IOException {
241       String redirectUrl = request.getScheme() + "://"
242         + request.getServerName() + ":" + regionServerInfoPort
243         + request.getRequestURI();
244       response.sendRedirect(redirectUrl);
245     }
246   }
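
  /*
   * Illustrative behavior of the redirect (hostnames and ports here are hypothetical, assuming the
   * usual 16010/16030 defaults): a request such as
   *
   *   GET http://master.example.com:16010/master-status
   *
   * is answered with a 302 redirect to the master's embedded region server info server on the
   * same host, e.g.
   *
   *   http://master.example.com:16030/master-status
   *
   * regionServerInfoPort is filled in by putUpJettyServer() below from infoServer.getPort().
   */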
247 
248   /**
249    * Initializes the HMaster. The steps are as follows:
250    * <p>
251    * <ol>
252    * <li>Initialize the local HRegionServer
253    * <li>Start the ActiveMasterManager.
254    * </ol>
255    * <p>
256    * Remaining steps of initialization occur in
257    * {@link #finishActiveMasterInitialization(MonitoredTask)} after
258    * the master becomes the active one.
259    *
260    * @throws KeeperException
261    * @throws IOException
262    */
263   public HMaster(final Configuration conf, CoordinatedStateManager csm)
264       throws IOException, KeeperException {
265     super(conf, csm);
266     this.rsFatals = new MemoryBoundedLogMessageBuffer(
267       conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));
268 
269     LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
270         ", hbase.cluster.distributed=" + this.conf.getBoolean("hbase.cluster.distributed", false));
271 
272     Replication.decorateMasterConfiguration(this.conf);
273 
274     // Hack! Maps DFSClient => Master for logs.  HDFS made this
275     // config param for task trackers, but we can piggyback off of it.
276     if (this.conf.get("mapreduce.task.attempt.id") == null) {
277       this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
278     }
279 
280     // should we check the compression codec type on the master side; default true, HBASE-6370
281     this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);
282 
283     this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));
284 
285     // Do we publish the status?
286     boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
287         HConstants.STATUS_PUBLISHED_DEFAULT);
288     Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
289         conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
290             ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
291             ClusterStatusPublisher.Publisher.class);
292 
293     if (shouldPublish) {
294       if (publisherClass == null) {
295         LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
296             ClusterStatusPublisher.STATUS_PUBLISHER_CLASS +
297             " is not set - not publishing status");
298       } else {
299         clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
300         Threads.setDaemonThreadRunning(clusterStatusPublisherChore.getThread());
301       }
302     }
303     startActiveMasterManager();
304     putUpJettyServer();
305   }
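
  /*
   * A minimal sketch of standing up a master programmatically, assuming a fully populated
   * Configuration and the CoordinatedStateManagerFactory used by HMasterCommandLine (both
   * assumptions here); start()/join() are taken to come from the HasThread base of
   * HRegionServer. Illustrative only; the supported entry point is "bin/hbase master start":
   *
   *   Configuration conf = HBaseConfiguration.create();
   *   CoordinatedStateManager csm =
   *     CoordinatedStateManagerFactory.getCoordinatedStateManager(conf);
   *   HMaster master = new HMaster(conf, csm);
   *   master.start();   // kicks off master election via ActiveMasterManager
   *   master.join();
   */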
306 
307   private void putUpJettyServer() throws IOException {
308     if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
309       return;
310     }
311     int infoPort = conf.getInt("hbase.master.info.port.orig",
312       HConstants.DEFAULT_MASTER_INFOPORT);
313     // -1 is for disabling info server, so no redirecting
314     if (infoPort < 0 || infoServer == null) {
315       return;
316     }
317 
318     RedirectServlet.regionServerInfoPort = infoServer.getPort();
319     masterJettyServer = new org.mortbay.jetty.Server();
320     Connector connector = new SelectChannelConnector();
321     connector.setHost(conf.get("hbase.master.info.bindAddress", "0.0.0.0"));
322     connector.setPort(infoPort);
323     masterJettyServer.addConnector(connector);
324     masterJettyServer.setStopAtShutdown(true);
325     Context context = new Context(masterJettyServer, "/", Context.NO_SESSIONS);
326     context.addServlet(RedirectServlet.class, "/*");
327     try {
328       masterJettyServer.start();
329     } catch (Exception e) {
330       throw new IOException("Failed to start redirecting jetty server", e);
331     }
332   }
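
  /*
   * The redirect server above is driven entirely by configuration; a sketch of the relevant keys
   * (the values shown are illustrative, not authoritative defaults):
   *
   *   Configuration conf = HBaseConfiguration.create();
   *   conf.setBoolean("hbase.master.infoserver.redirect", true); // set false to skip the jetty redirect
   *   conf.setInt("hbase.master.info.port.orig", 16010);         // port the redirect listens on; -1 disables
   *   conf.set("hbase.master.info.bindAddress", "0.0.0.0");      // bind address for the redirect connector
   */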
333 
334   /**
335    * For compatibility: if logging in with the regionserver credentials fails, try the master ones
336    */
337   protected void login(UserProvider user, String host) throws IOException {
338     try {
339       super.login(user, host);
340     } catch (IOException ie) {
341       user.login("hbase.master.keytab.file",
342         "hbase.master.kerberos.principal", host);
343     }
344   }
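
  /*
   * When security is enabled, the fallback above looks for the master-specific credentials; a
   * sketch of the corresponding configuration (keytab path and principal are placeholders):
   *
   *   conf.set("hbase.master.keytab.file", "/etc/security/keytabs/hbase.keytab");
   *   conf.set("hbase.master.kerberos.principal", "hbase/_HOST@EXAMPLE.COM");
   */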
345 
346   @VisibleForTesting
347   public MasterRpcServices getMasterRpcServices() {
348     return (MasterRpcServices)rpcServices;
349   }
350 
351   public boolean balanceSwitch(final boolean b) throws IOException {
352     return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
353   }
354 
355   protected String getProcessName() {
356     return MASTER;
357   }
358 
359   protected boolean canCreateBaseZNode() {
360     return true;
361   }
362 
363   protected boolean canUpdateTableDescriptor() {
364     return true;
365   }
366 
367   protected RSRpcServices createRpcServices() throws IOException {
368     return new MasterRpcServices(this);
369   }
370 
371   protected void configureInfoServer() {
372     infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class);
373     infoServer.setAttribute(MASTER, this);
374     super.configureInfoServer();
375   }
376 
377   protected Class<? extends HttpServlet> getDumpServlet() {
378     return MasterDumpServlet.class;
379   }
380 
381   /**
382    * Emit the HMaster metrics, such as region in transition metrics.
383    * Wrapped in a try block just to be sure a metrics failure doesn't abort the HMaster.
384    */
385   protected void doMetrics() {
386     try {
387       if (assignmentManager != null) {
388         assignmentManager.updateRegionsInTransitionMetrics();
389       }
390     } catch (Throwable e) {
391       LOG.error("Couldn't update metrics: " + e.getMessage());
392     }
393   }
394 
395   MetricsMaster getMasterMetrics() {
396     return metricsMaster;
397   }
398 
399   /**
400    * Initialize all ZK based system trackers.
401    * @throws IOException
402    * @throws InterruptedException
403    * @throws KeeperException
404    * @throws CoordinatedStateException
405    */
406   void initializeZKBasedSystemTrackers() throws IOException,
407       InterruptedException, KeeperException, CoordinatedStateException {
408     this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
409     this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
410     this.loadBalancerTracker.start();
411     this.assignmentManager = new AssignmentManager(this, serverManager,
412       this.balancer, this.service, this.metricsMaster,
413       this.tableLockManager);
414 
415     this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
416         this.serverManager);
417     this.regionServerTracker.start();
418 
419     this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
420       this.serverManager);
421     this.drainingServerTracker.start();
422 
423     // Set the cluster as up.  If new RSs, they'll be waiting on this before
424     // going ahead with their startup.
425     boolean wasUp = this.clusterStatusTracker.isClusterUp();
426     if (!wasUp) this.clusterStatusTracker.setClusterUp();
427 
428     LOG.info("Server active/primary master=" + this.serverName +
429         ", sessionid=0x" +
430         Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
431         ", setting cluster-up flag (Was=" + wasUp + ")");
432 
433     // create/initialize the snapshot manager and other procedure managers
434     this.snapshotManager = new SnapshotManager();
435     this.mpmHost = new MasterProcedureManagerHost();
436     this.mpmHost.register(this.snapshotManager);
437     this.mpmHost.register(new MasterFlushTableProcedureManager());
438     this.mpmHost.loadProcedures(conf);
439     this.mpmHost.initialize(this, this.metricsMaster);
440   }
441 
442   /**
443    * Finish initialization of HMaster after becoming the primary master.
444    *
445    * <ol>
446    * <li>Initialize master components - file system manager, server manager,
447    *     assignment manager, region server tracker, etc</li>
448  * <li>Start necessary service threads - balancer, catalog janitor,
449    *     executor services, etc</li>
450    * <li>Set cluster as UP in ZooKeeper</li>
451    * <li>Wait for RegionServers to check-in</li>
452    * <li>Split logs and perform data recovery, if necessary</li>
453  * <li>Ensure assignment of meta/namespace regions</li>
454    * <li>Handle either fresh cluster start or master failover</li>
455    * </ol>
456    *
457    * @throws IOException
458    * @throws InterruptedException
459    * @throws KeeperException
460    * @throws CoordinatedStateException
461    */
462   private void finishActiveMasterInitialization(MonitoredTask status)
463       throws IOException, InterruptedException, KeeperException, CoordinatedStateException {
464 
465     isActiveMaster = true;
466 
467     /*
468      * We are active master now... go initialize components we need to run.
469      * Note, there may be dross in zk from previous runs; it'll get addressed
470      * below after we determine whether this is a cluster startup or a failover.
471      */
472 
473     status.setStatus("Initializing Master file system");
474 
475     this.masterActiveTime = System.currentTimeMillis();
476     // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
477     this.fileSystemManager = new MasterFileSystem(this, this);
478 
479     // publish cluster ID
480     status.setStatus("Publishing Cluster ID in ZooKeeper");
481     ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
482     this.serverManager = createServerManager(this, this);
483 
484     metaTableLocator = new MetaTableLocator();
485     shortCircuitConnection = createShortCircuitConnection();
486 
487     // Invalidate all write locks held previously
488     this.tableLockManager.reapWriteLocks();
489 
490     status.setStatus("Initializing ZK system trackers");
491     initializeZKBasedSystemTrackers();
492 
493     // initialize master side coprocessors before we start handling requests
494     status.setStatus("Initializing master coprocessors");
495     this.cpHost = new MasterCoprocessorHost(this, this.conf);
496 
497     // start up all service threads.
498     status.setStatus("Initializing master service threads");
499     startServiceThreads();
500 
501     // Wake up this server to check in
502     sleeper.skipSleepCycle();
503 
504     // Wait for region servers to report in
505     this.serverManager.waitForRegionServers(status);
506     // Check zk for region servers that are up but didn't register
507     for (ServerName sn: this.regionServerTracker.getOnlineServers()) {
508       // The isServerOnline check is opportunistic, correctness is handled inside
509       if (!this.serverManager.isServerOnline(sn)
510           && serverManager.checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
511         LOG.info("Registered server found up in zk but which has not yet reported in: " + sn);
512       }
513     }
514 
515     // get a list of previously failed RSs which need log splitting work
516     // we recover hbase:meta region servers inside master initialization and
517     // handle other failed servers in SSH in order to start up master node ASAP
518     Set<ServerName> previouslyFailedServers = this.fileSystemManager
519         .getFailedServersFromLogFolders();
520 
521     // remove stale recovering regions from previous run
522     this.fileSystemManager.removeStaleRecoveringRegionsFromZK(previouslyFailedServers);
523 
524     // log splitting for hbase:meta server
525     ServerName oldMetaServerLocation = metaTableLocator.getMetaRegionLocation(this.getZooKeeper());
526     if (oldMetaServerLocation != null && previouslyFailedServers.contains(oldMetaServerLocation)) {
527       splitMetaLogBeforeAssignment(oldMetaServerLocation);
528       // Note: we can't remove oldMetaServerLocation from the previouslyFailedServers list because it
529       // may also host user regions
530     }
531     Set<ServerName> previouslyFailedMetaRSs = getPreviouselyFailedMetaServersFromZK();
532     // need to use union of previouslyFailedMetaRSs recorded in ZK and previouslyFailedServers
533     // instead of previouslyFailedMetaRSs alone to address the following two situations:
534     // 1) the chained failure situation (recovery failed multiple times in a row).
535     // 2) the master gets killed right before it could delete the recovering hbase:meta from ZK
536     // while the same server still has non-meta wals to be replayed, so that
537     // removeStaleRecoveringRegionsFromZK can't delete the stale hbase:meta region.
538     // Passing more servers into splitMetaLog is all right. If a server doesn't have a hbase:meta
539     // wal, it is a no-op for that server.
540     previouslyFailedMetaRSs.addAll(previouslyFailedServers);
541 
542     this.initializationBeforeMetaAssignment = true;
543 
544     // Wait for the region server functionality to finish initialization.
545     synchronized (online) {
546       while (!isStopped() && !isOnline()) {
547         online.wait(100);
548       }
549     }
550 
551     //initialize load balancer
552     this.balancer.setClusterStatus(getClusterStatus());
553     this.balancer.setMasterServices(this);
554     this.balancer.initialize();
555 
556     // Check if master is shutting down because of some issue
557     // in initializing the regionserver or the balancer.
558     if(isStopped()) return;
559 
560     // Make sure meta is assigned before proceeding.
561     status.setStatus("Assigning Meta Region");
562     assignMeta(status, previouslyFailedMetaRSs);
563     // check if the master is shutting down, because the above assignMeta could return even if
564     // hbase:meta isn't assigned when the master is shutting down
565     if(isStopped()) return;
566 
567     status.setStatus("Submitting log splitting work for previously failed region servers");
568     // Master has recovered hbase:meta region server and we put
569     // other failed region servers in a queue to be handled later by SSH
570     for (ServerName tmpServer : previouslyFailedServers) {
571       this.serverManager.processDeadServer(tmpServer, true);
572     }
573 
574     // Fix up assignment manager status
575     status.setStatus("Starting assignment manager");
576     this.assignmentManager.joinCluster();
577 
578     //set cluster status again after user regions are assigned
579     this.balancer.setClusterStatus(getClusterStatus());
580 
581     // Start balancer and meta catalog janitor after meta and regions have
582     // been assigned.
583     status.setStatus("Starting balancer and catalog janitor");
584     this.clusterStatusChore = new ClusterStatusChore(this, balancer);
585     Threads.setDaemonThreadRunning(clusterStatusChore.getThread());
586     this.balancerChore = new BalancerChore(this);
587     Threads.setDaemonThreadRunning(balancerChore.getThread());
588     this.catalogJanitorChore = new CatalogJanitor(this, this);
589     Threads.setDaemonThreadRunning(catalogJanitorChore.getThread());
590 
591     status.setStatus("Starting namespace manager");
592     initNamespace();
593 
594     if (this.cpHost != null) {
595       try {
596         this.cpHost.preMasterInitialization();
597       } catch (IOException e) {
598         LOG.error("Coprocessor preMasterInitialization() hook failed", e);
599       }
600     }
601 
602     status.markComplete("Initialization successful");
603     LOG.info("Master has completed initialization");
604     initialized = true;
605     // clear dead servers that have the same host name and port as an online server, because we do
606     // not remove a dead server with the same hostname and port as an RS that is trying to check in
607     // before master initialization. See HBASE-5916.
608     this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();
609 
610     if (this.cpHost != null) {
611       // don't let cp initialization errors kill the master
612       try {
613         this.cpHost.postStartMaster();
614       } catch (IOException ioe) {
615         LOG.error("Coprocessor postStartMaster() hook failed", ioe);
616       }
617     }
618   }
619 
620   /**
621    * Useful for testing purposes, e.g. where we have
622    * master restart scenarios.
623    */
624   protected void startCatalogJanitorChore() {
625     Threads.setDaemonThreadRunning(catalogJanitorChore.getThread());
626   }
627 
628   /**
629    * Useful for testing purposes, e.g. where we have
630    * master restart scenarios.
631    */
632   protected void startNamespaceJanitorChore() {
633     Threads.setDaemonThreadRunning(namespaceJanitorChore.getThread());
634   }
635 
636   /**
637    * Create a {@link ServerManager} instance.
638    * @param master
639    * @param services
640    * @return An instance of {@link ServerManager}
641    * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
642    * @throws IOException
643    */
644   ServerManager createServerManager(final Server master,
645       final MasterServices services)
646   throws IOException {
647     // We put this out here in a method so can do a Mockito.spy and stub it out
648     // w/ a mocked up ServerManager.
649     return new ServerManager(master, services);
650   }
651 
652   /**
653    * Check that <code>hbase:meta</code> is assigned. If not, assign it.
654    * @param status MonitoredTask
655    * @param previouslyFailedMetaRSs
656    * @throws InterruptedException
657    * @throws IOException
658    * @throws KeeperException
659    */
660   void assignMeta(MonitoredTask status, Set<ServerName> previouslyFailedMetaRSs)
661       throws InterruptedException, IOException, KeeperException {
662     // Work on meta region
663     int assigned = 0;
664     long timeout = this.conf.getLong("hbase.catalog.verification.timeout", 1000);
665     status.setStatus("Assigning hbase:meta region");
666 
667     RegionStates regionStates = assignmentManager.getRegionStates();
668     regionStates.createRegionState(HRegionInfo.FIRST_META_REGIONINFO);
669     boolean metaRegionLocation = metaTableLocator.verifyMetaRegionLocation(
670       this.getShortCircuitConnection(), this.getZooKeeper(), timeout);
671     ServerName currentMetaServer = metaTableLocator.getMetaRegionLocation(this.getZooKeeper());
672     if (!metaRegionLocation) {
673       // Meta location is not verified. It should be in transition, or offline.
674       // We will wait for it to be assigned in enableServerShutdownHandler below.
675       if (currentMetaServer != null) {
676         // If the meta server is not known to be dead or online,
677         // just split the meta log, and don't expire it since this
678         // could be a full cluster restart. Otherwise, we will think
679         // this is a failover and lose previous region locations.
680         // If it is really a failover case, AM will find out in rebuilding
681         // user regions. Otherwise, we are good since all logs are split
682         // or known to be replayed before user regions are assigned.
683         if (serverManager.isServerOnline(currentMetaServer)) {
684           LOG.info("Forcing expire of " + currentMetaServer);
685           serverManager.expireServer(currentMetaServer);
686         }
687         splitMetaLogBeforeAssignment(currentMetaServer);
688         previouslyFailedMetaRSs.add(currentMetaServer);
689       }
690       assignmentManager.assignMeta();
691       assigned++;
692     } else {
693       // Region already assigned. We didn't assign it. Add to in-memory state.
694       regionStates.updateRegionState(
695         HRegionInfo.FIRST_META_REGIONINFO, State.OPEN, currentMetaServer);
696       this.assignmentManager.regionOnline(
697         HRegionInfo.FIRST_META_REGIONINFO, currentMetaServer);
698     }
699 
700     enableMeta(TableName.META_TABLE_NAME);
701 
702     if ((RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode())
703         && (!previouslyFailedMetaRSs.isEmpty())) {
704       // log replay mode requires that a new hbase:meta RS is assigned first
705       status.setStatus("replaying log for Meta Region");
706       this.fileSystemManager.splitMetaLog(previouslyFailedMetaRSs);
707     }
708 
709     // Make sure a hbase:meta location is set. We need to enable SSH here since
710     // if the meta region server dies at this time, we need it to be re-assigned
711     // by SSH so that system tables can be assigned.
712     // No need to wait for meta when assigned == 0, i.e. when meta was just verified.
713     enableServerShutdownHandler(assigned != 0);
714 
715     LOG.info("hbase:meta assigned=" + assigned + ", location="
716       + metaTableLocator.getMetaRegionLocation(this.getZooKeeper()));
717     status.setStatus("META assigned.");
718   }
719 
720   void initNamespace() throws IOException {
721     //create namespace manager
722     tableNamespaceManager = new TableNamespaceManager(this);
723     tableNamespaceManager.start();
724   }
725 
726   boolean isCatalogJanitorEnabled() {
727     return catalogJanitorChore != null ?
728       catalogJanitorChore.getEnabled() : false;
729   }
730 
731   private void splitMetaLogBeforeAssignment(ServerName currentMetaServer) throws IOException {
732     if (RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode()) {
733       // In log replay mode, we mark hbase:meta region as recovering in ZK
734       Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
735       regions.add(HRegionInfo.FIRST_META_REGIONINFO);
736       this.fileSystemManager.prepareLogReplay(currentMetaServer, regions);
737     } else {
738       // In recovered.edits mode: create recovered edits file for hbase:meta server
739       this.fileSystemManager.splitMetaLog(currentMetaServer);
740     }
741   }
742 
743   private void enableServerShutdownHandler(
744       final boolean waitForMeta) throws IOException, InterruptedException {
745     // If ServerShutdownHandler is disabled, we enable it and expire those dead
746     // but not yet expired servers. This is required so that if meta is being assigned to
747     // a server which dies after assignMeta starts the assignment,
748     // SSH can re-assign it. Otherwise, we will be
749     // stuck here waiting forever if waitForMeta is specified.
750     if (!serverShutdownHandlerEnabled) {
751       serverShutdownHandlerEnabled = true;
752       this.serverManager.processQueuedDeadServers();
753     }
754 
755     if (waitForMeta) {
756       metaTableLocator.waitMetaRegionLocation(this.getZooKeeper());
757       // Above check waits for general meta availability but this does not
758       // guarantee that the transition has completed
759       this.assignmentManager.waitForAssignment(HRegionInfo.FIRST_META_REGIONINFO);
760     }
761   }
762 
763   private void enableMeta(TableName metaTableName) {
764     if (!this.assignmentManager.getTableStateManager().isTableState(metaTableName,
765         ZooKeeperProtos.Table.State.ENABLED)) {
766       this.assignmentManager.setEnabledTable(metaTableName);
767     }
768   }
769 
770   /**
771    * Returns the set of region server names recorded under the hbase:meta recovering-region ZK node
772    * @return Set of meta server names which were recorded in ZK
773    * @throws KeeperException
774    */
775   private Set<ServerName> getPreviouselyFailedMetaServersFromZK() throws KeeperException {
776     Set<ServerName> result = new HashSet<ServerName>();
777     String metaRecoveringZNode = ZKUtil.joinZNode(zooKeeper.recoveringRegionsZNode,
778       HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
779     List<String> regionFailedServers = ZKUtil.listChildrenNoWatch(zooKeeper, metaRecoveringZNode);
780     if (regionFailedServers == null) return result;
781 
782     for(String failedServer : regionFailedServers) {
783       ServerName server = ServerName.parseServerName(failedServer);
784       result.add(server);
785     }
786     return result;
787   }
788 
789   @Override
790   public TableDescriptors getTableDescriptors() {
791     return this.tableDescriptors;
792   }
793 
794   @Override
795   public ServerManager getServerManager() {
796     return this.serverManager;
797   }
798 
799   @Override
800   public MasterFileSystem getMasterFileSystem() {
801     return this.fileSystemManager;
802   }
803 
804   /*
805    * Start up all services. If any of these threads gets an unhandled exception
806    * then they just die with a logged message.  This should be fine because
807    * in general, we do not expect the master to get such unhandled exceptions
808    * as OOMEs; it should be lightly loaded. See what HRegionServer does if you
809    * need to install an unexpected exception handler.
810    */
811   private void startServiceThreads() throws IOException{
812    // Start the executor service pools
813    this.service.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
814       conf.getInt("hbase.master.executor.openregion.threads", 5));
815    this.service.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
816       conf.getInt("hbase.master.executor.closeregion.threads", 5));
817    this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
818       conf.getInt("hbase.master.executor.serverops.threads", 5));
819    this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
820       conf.getInt("hbase.master.executor.serverops.threads", 5));
821    this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
822       conf.getInt("hbase.master.executor.logreplayops.threads", 10));
823 
824    // We depend on there being only one instance of this executor running
825    // at a time.  To do concurrency, would need fencing of enable/disable of
826    // tables.
827    // Any time you change this maxThreads to > 1, please see the comment at
828    // AccessController#postCreateTableHandler
829    this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
830 
831    // Start log cleaner thread
832    int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
833    this.logCleaner =
834       new LogCleaner(cleanerInterval,
835          this, conf, getMasterFileSystem().getFileSystem(),
836          getMasterFileSystem().getOldLogDir());
837    Threads.setDaemonThreadRunning(logCleaner.getThread(), getName() + ".oldLogCleaner");
838 
839    //start the hfile archive cleaner thread
840     Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
841     this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
842         .getFileSystem(), archiveDir);
843     Threads.setDaemonThreadRunning(hfileCleaner.getThread(),
844       getName() + ".archivedHFileCleaner");
845 
846     serviceStarted = true;
847     if (LOG.isTraceEnabled()) {
848       LOG.trace("Started service threads");
849     }
850   }
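
  /*
   * The pool sizes and cleaner interval above are all tunable; a sketch of overriding them
   * (the values shown are illustrative; the in-code defaults above are 5/5/5/5/10 threads and
   * a 60 second cleaner interval):
   *
   *   conf.setInt("hbase.master.executor.openregion.threads", 10);
   *   conf.setInt("hbase.master.executor.serverops.threads", 8);
   *   conf.setInt("hbase.master.cleaner.interval", 120 * 1000);  // milliseconds
   */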
851 
852   protected void stopServiceThreads() {
853     if (masterJettyServer != null) {
854       LOG.info("Stopping master jetty server");
855       try {
856         masterJettyServer.stop();
857       } catch (Exception e) {
858         LOG.error("Failed to stop master jetty server", e);
859       }
860     }
861     super.stopServiceThreads();
862     stopChores();
863     // Wait for all the remaining region servers to report in IFF we were
864     // running a cluster shutdown AND we were NOT aborting.
865     if (!isAborted() && this.serverManager != null &&
866         this.serverManager.isClusterShutdown()) {
867       this.serverManager.letRegionServersShutdown();
868     }
869     if (LOG.isDebugEnabled()) {
870       LOG.debug("Stopping service threads");
871     }
872     // Clean up and close up shop
873     if (this.logCleaner != null) this.logCleaner.interrupt();
874     if (this.hfileCleaner != null) this.hfileCleaner.interrupt();
875     if (this.activeMasterManager != null) this.activeMasterManager.stop();
876     if (this.serverManager != null) this.serverManager.stop();
877     if (this.assignmentManager != null) this.assignmentManager.stop();
878     if (this.fileSystemManager != null) this.fileSystemManager.stop();
879     if (this.mpmHost != null) this.mpmHost.stop("server shutting down.");
880   }
881 
882   private void stopChores() {
883     if (this.balancerChore != null) {
884       this.balancerChore.interrupt();
885     }
886     if (this.clusterStatusChore != null) {
887       this.clusterStatusChore.interrupt();
888     }
889     if (this.catalogJanitorChore != null) {
890       this.catalogJanitorChore.interrupt();
891     }
892     if (this.clusterStatusPublisherChore != null){
893       clusterStatusPublisherChore.interrupt();
894     }
895     if (this.namespaceJanitorChore != null){
896       namespaceJanitorChore.interrupt();
897     }
898   }
899 
900   /**
901    * @return The remote side's InetAddress
902    * @throws UnknownHostException
903    */
904   InetAddress getRemoteInetAddress(final int port,
905       final long serverStartCode) throws UnknownHostException {
906     // Do it out here in its own little method so we can fake an address when
907     // mocking up in tests.
908     InetAddress ia = RpcServer.getRemoteIp();
909 
910     // The call could be from the local regionserver,
911     // in which case, there is no remote address.
912     if (ia == null && serverStartCode == startcode) {
913       InetSocketAddress isa = rpcServices.getSocketAddress();
914       if (isa != null && isa.getPort() == port) {
915         ia = isa.getAddress();
916       }
917     }
918     return ia;
919   }
920 
921   /**
922    * @return Maximum time we should run balancer for
923    */
924   private int getBalancerCutoffTime() {
925     int balancerCutoffTime =
926       getConfiguration().getInt("hbase.balancer.max.balancing", -1);
927     if (balancerCutoffTime == -1) {
928       // No time period set so create one
929       int balancerPeriod =
930         getConfiguration().getInt("hbase.balancer.period", 300000);
931       balancerCutoffTime = balancerPeriod;
932       // If nonsense period, set it to balancerPeriod
933       if (balancerCutoffTime <= 0) balancerCutoffTime = balancerPeriod;
934     }
935     return balancerCutoffTime;
936   }
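
  /*
   * Worked example of the cutoff above: with "hbase.balancer.max.balancing" unset (-1), the
   * cutoff falls back to "hbase.balancer.period", i.e. 300000 ms (5 minutes) by default, so a
   * single balance() run below stops issuing RegionPlans once the projected time for the next
   * plan would exceed that budget.
   */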
937 
938   public boolean balance() throws IOException {
939     // if master not initialized, don't run balancer.
940     if (!this.initialized) {
941       LOG.debug("Master has not been initialized, don't run balancer.");
942       return false;
943     }
944     // Do this call outside of synchronized block.
945     int maximumBalanceTime = getBalancerCutoffTime();
946     synchronized (this.balancer) {
947       // If the balancer is switched off, don't run it.
948       if (!this.loadBalancerTracker.isBalancerOn()) return false;
949       // Only allow one balance run at a time.
950       if (this.assignmentManager.getRegionStates().isRegionsInTransition()) {
951         Map<String, RegionState> regionsInTransition =
952           this.assignmentManager.getRegionStates().getRegionsInTransition();
953         LOG.debug("Not running balancer because " + regionsInTransition.size() +
954           " region(s) in transition: " + org.apache.commons.lang.StringUtils.
955             abbreviate(regionsInTransition.toString(), 256));
956         return false;
957       }
958       if (this.serverManager.areDeadServersInProgress()) {
959         LOG.debug("Not running balancer because processing dead regionserver(s): " +
960           this.serverManager.getDeadServers());
961         return false;
962       }
963 
964       if (this.cpHost != null) {
965         try {
966           if (this.cpHost.preBalance()) {
967             LOG.debug("Coprocessor bypassing balancer request");
968             return false;
969           }
970         } catch (IOException ioe) {
971           LOG.error("Error invoking master coprocessor preBalance()", ioe);
972           return false;
973         }
974       }
975 
976       Map<TableName, Map<ServerName, List<HRegionInfo>>> assignmentsByTable =
977         this.assignmentManager.getRegionStates().getAssignmentsByTable();
978 
979       List<RegionPlan> plans = new ArrayList<RegionPlan>();
980       //Give the balancer the current cluster state.
981       this.balancer.setClusterStatus(getClusterStatus());
982       for (Map<ServerName, List<HRegionInfo>> assignments : assignmentsByTable.values()) {
983         List<RegionPlan> partialPlans = this.balancer.balanceCluster(assignments);
984         if (partialPlans != null) plans.addAll(partialPlans);
985       }
986       long cutoffTime = System.currentTimeMillis() + maximumBalanceTime;
987       int rpCount = 0;  // number of RegionPlans balanced so far
988       long totalRegPlanExecTime = 0;
989       if (plans != null && !plans.isEmpty()) {
990         for (RegionPlan plan: plans) {
991           LOG.info("balance " + plan);
992           long balStartTime = System.currentTimeMillis();
993           //TODO: bulk assign
994           this.assignmentManager.balance(plan);
995           totalRegPlanExecTime += System.currentTimeMillis()-balStartTime;
996           rpCount++;
997           if (rpCount < plans.size() &&
998               // if performing next balance exceeds cutoff time, exit the loop
999               (System.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) {
1000             //TODO: After balance, there should not be a cutoff time (keeping it as a safety net for now)
1001             LOG.debug("No more balancing till next balance run; maximumBalanceTime=" +
1002               maximumBalanceTime);
1003             break;
1004           }
1005         }
1006       }
1007       if (this.cpHost != null) {
1008         try {
1009           this.cpHost.postBalance(rpCount < plans.size() ? plans.subList(0, rpCount) : plans);
1010         } catch (IOException ioe) {
1011           // balancing already succeeded so don't change the result
1012           LOG.error("Error invoking master coprocessor postBalance()", ioe);
1013         }
1014       }
1015     }
1016     // If LoadBalancer did not generate any plans, it means the cluster is already balanced.
1017     // Return true indicating a success.
1018     return true;
1019   }
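
  /*
   * A minimal sketch of driving a balance run against this master, e.g. from test code
   * (assumes the master has finished initialization; balanceSwitch() is the async switch
   * defined earlier in this class):
   *
   *   master.balanceSwitch(true);          // make sure the balancer is switched on
   *   boolean ran = master.balance();      // false only if the run was skipped (RIT, dead servers, ...)
   */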
1020 
1021   /**
1022    * @return Client info for use as prefix on an audit log string; who did an action
1023    */
1024   String getClientIdAuditPrefix() {
1025     return "Client=" + RequestContext.getRequestUserName() + "/" +
1026       RequestContext.get().getRemoteAddress();
1027   }
1028 
1029   /**
1030    * Switch for the background CatalogJanitor thread.
1031    * Used for testing.  The thread will continue to run.  It will just be a noop
1032    * if disabled.
1033    * @param b If false, the catalog janitor won't do anything.
1034    */
1035   public void setCatalogJanitorEnabled(final boolean b) {
1036     this.catalogJanitorChore.setEnabled(b);
1037   }
1038 
1039   @Override
1040   public void dispatchMergingRegions(final HRegionInfo region_a,
1041       final HRegionInfo region_b, final boolean forcible) throws IOException {
1042     checkInitialized();
1043     this.service.submit(new DispatchMergingRegionHandler(this,
1044         this.catalogJanitorChore, region_a, region_b, forcible));
1045   }
1046 
1047   void move(final byte[] encodedRegionName,
1048       final byte[] destServerName) throws HBaseIOException {
1049     RegionState regionState = assignmentManager.getRegionStates().
1050       getRegionState(Bytes.toString(encodedRegionName));
1051     if (regionState == null) {
1052       throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
1053     }
1054 
1055     HRegionInfo hri = regionState.getRegion();
1056     ServerName dest;
1057     if (destServerName == null || destServerName.length == 0) {
1058       LOG.info("Passed destination servername is null/empty so " +
1059         "choosing a server at random");
1060       final List<ServerName> destServers = this.serverManager.createDestinationServersList(
1061         regionState.getServerName());
1062       dest = balancer.randomAssignment(hri, destServers);
1063     } else {
1064       dest = ServerName.valueOf(Bytes.toString(destServerName));
1065       if (dest.equals(regionState.getServerName())) {
1066         LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
1067           + " because region already assigned to the same server " + dest + ".");
1068         return;
1069       }
1070     }
1071 
1072     // Now we can do the move
1073     RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);
1074 
1075     try {
1076       checkInitialized();
1077       if (this.cpHost != null) {
1078         if (this.cpHost.preMove(hri, rp.getSource(), rp.getDestination())) {
1079           return;
1080         }
1081       }
1082       LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
1083       this.assignmentManager.balance(rp);
1084       if (this.cpHost != null) {
1085         this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
1086       }
1087     } catch (IOException ioe) {
1088       if (ioe instanceof HBaseIOException) {
1089         throw (HBaseIOException)ioe;
1090       }
1091       throw new HBaseIOException(ioe);
1092     }
1093   }
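
  /*
   * move() is normally reached through the RPC layer; a hedged client-side sketch using the
   * HBaseAdmin API (the region and server names are placeholders):
   *
   *   HBaseAdmin admin = new HBaseAdmin(conf);
   *   admin.move(Bytes.toBytes(hri.getEncodedName()),
   *     Bytes.toBytes("rs1.example.com,16020,1400000000000"));
   *   admin.close();
   */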
1094 
1095   @Override
1096   public void createTable(HTableDescriptor hTableDescriptor,
1097       byte [][] splitKeys) throws IOException {
1098     if (isStopped()) {
1099       throw new MasterNotRunningException();
1100     }
1101 
1102     String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
1103     getNamespaceDescriptor(namespace); // ensure namespace exists
1104 
1105     HRegionInfo[] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
1106     checkInitialized();
1107     sanityCheckTableDescriptor(hTableDescriptor);
1108     if (cpHost != null) {
1109       cpHost.preCreateTable(hTableDescriptor, newRegions);
1110     }
1111     LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
1112     this.service.submit(new CreateTableHandler(this,
1113       this.fileSystemManager, hTableDescriptor, conf,
1114       newRegions, this).prepare());
1115     if (cpHost != null) {
1116       cpHost.postCreateTable(hTableDescriptor, newRegions);
1117     }
1118 
1119   }
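
  /*
   * A hedged client-side sketch of what ends up invoking createTable() above, via HBaseAdmin
   * (table name, family name and split key are placeholders):
   *
   *   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
   *   htd.addFamily(new HColumnDescriptor("cf"));
   *   byte[][] splitKeys = { Bytes.toBytes("m") };   // two regions: [null, "m") and ["m", null)
   *   HBaseAdmin admin = new HBaseAdmin(conf);
   *   admin.createTable(htd, splitKeys);
   *   admin.close();
   */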
1120 
1121   /**
1122    * Checks whether the table conforms to some sane limits, and configured
1123    * values (compression, etc) work. Throws an exception if something is wrong.
1124    * @throws IOException
1125    */
1126   private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
1127     final String CONF_KEY = "hbase.table.sanity.checks";
1128     if (!conf.getBoolean(CONF_KEY, true)) {
1129       return;
1130     }
1131     String tableVal = htd.getConfigurationValue(CONF_KEY);
1132     if (tableVal != null && !Boolean.valueOf(tableVal)) {
1133       return;
1134     }
1135 
1136     // check max file size
1137     long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
1138     long maxFileSize = htd.getMaxFileSize();
1139     if (maxFileSize < 0) {
1140       maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
1141     }
1142     if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
1143       throw new DoNotRetryIOException("MAX_FILESIZE for table descriptor or "
1144         + "\"hbase.hregion.max.filesize\" (" + maxFileSize
1145         + ") is too small, which might cause over splitting into unmanageable "
1146         + "number of regions. Set " + CONF_KEY + " to false at conf or table descriptor "
1147           + "if you want to bypass sanity checks");
1148     }
1149 
1150     // check flush size
1151     long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
1152     long flushSize = htd.getMemStoreFlushSize();
1153     if (flushSize < 0) {
1154       flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
1155     }
1156     if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
1157       throw new DoNotRetryIOException("MEMSTORE_FLUSHSIZE for table descriptor or "
1158           + "\"hbase.hregion.memstore.flush.size\" ("+flushSize+") is too small, which might cause"
1159           + " very frequent flushing. Set " + CONF_KEY + " to false at conf or table descriptor "
1160           + "if you want to bypass sanity checks");
1161     }
1162 
1163     // check split policy class can be loaded
1164     try {
1165       RegionSplitPolicy.getSplitPolicyClass(htd, conf);
1166     } catch (Exception ex) {
1167       throw new DoNotRetryIOException(ex);
1168     }
1169 
1170     // check compression can be loaded
1171     checkCompression(htd);
1172 
1173     // check that we have at least 1 CF
1174     if (htd.getColumnFamilies().length == 0) {
1175       throw new DoNotRetryIOException("Table should have at least one column family. "
1176           + "Set " + CONF_KEY + " to false at conf or table descriptor if you want to bypass sanity checks");
1177     }
1178 
1179     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1180       if (hcd.getTimeToLive() <= 0) {
1181         throw new DoNotRetryIOException("TTL for column family " + hcd.getNameAsString()
1182           + "  must be positive. Set " + CONF_KEY + " to false at conf or table descriptor "
1183           + "if you want to bypass sanity checks");
1184       }
1185 
1186       // check blockSize
1187       if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
1188         throw new DoNotRetryIOException("Block size for column family " + hcd.getNameAsString()
1189           + "  must be between 1K and 16MB. Set " + CONF_KEY + " to false at conf or table descriptor "
1190           + "if you want to bypass sanity checks");
1191       }
1192 
1193       // check versions
1194       if (hcd.getMinVersions() < 0) {
1195         throw new DoNotRetryIOException("Min versions for column family " + hcd.getNameAsString()
1196           + "  must be non-negative. Set " + CONF_KEY + " to false at conf or table descriptor "
1197           + "if you want to bypass sanity checks");
1198       }
1199       // max versions already being checked
1200 
1201       // check replication scope
1202       if (hcd.getScope() < 0) {
1203         throw new DoNotRetryIOException("Replication scope for column family "
1204           + hcd.getNameAsString() + "  must be non-negative. Set " + CONF_KEY + " to false at conf "
1205           + "or table descriptor if you want to bypass sanity checks");
1206       }
1207 
1208       // TODO: should we check coprocessors and encryption ?
1209     }
1210   }
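
  /*
   * The sanity checks above can be bypassed globally or per table; a sketch, assuming the
   * HTableDescriptor.setConfiguration(...) setter that pairs with the getConfigurationValue(...)
   * call used above:
   *
   *   // globally, in hbase-site.xml / Configuration:
   *   conf.setBoolean("hbase.table.sanity.checks", false);
   *
   *   // or per table, on the descriptor:
   *   htd.setConfiguration("hbase.table.sanity.checks", "false");
   */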
1211 
1212   private void startActiveMasterManager() throws KeeperException {
1213     String backupZNode = ZKUtil.joinZNode(
1214       zooKeeper.backupMasterAddressesZNode, serverName.toString());
1215     /*
1216     * Add a ZNode for ourselves in the backup master directory since we
1217     * may not become the active master. If we don't, we want the actual active
1218     * master to know we are backup masters, so that it won't assign
1219     * regions to us if so configured.
1220     *
1221     * If we become the active master later, ActiveMasterManager will delete
1222     * this node explicitly.  If we crash before then, ZooKeeper will delete
1223     * this node for us since it is ephemeral.
1224     */
1225     LOG.info("Adding ZNode for " + backupZNode + " in backup master directory");
1226     MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode, serverName);
1227 
1228     activeMasterManager = new ActiveMasterManager(zooKeeper, serverName, this);
1229     // Start a thread to try to become the active master, so we won't block here
1230     Threads.setDaemonThreadRunning(new Thread(new Runnable() {
1231       public void run() {
1232         int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT,
1233           HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
1234         // If we're a backup master, stall until a primary writes its address
1235         if (conf.getBoolean(HConstants.MASTER_TYPE_BACKUP,
1236             HConstants.DEFAULT_MASTER_TYPE_BACKUP)) {
1237           LOG.debug("HMaster started in backup mode. "
1238             + "Stalling until master znode is written.");
1239           // This will only be a minute or so while the cluster starts up,
1240           // so don't worry about setting watches on the parent znode
1241           while (!activeMasterManager.hasActiveMaster()) {
1242             LOG.debug("Waiting for master address ZNode to be written "
1243               + "(Also watching cluster state node)");
1244             Threads.sleep(timeout);
1245           }
1246         }
1247         MonitoredTask status = TaskMonitor.get().createStatus("Master startup");
1248         status.setDescription("Master startup");
1249         try {
1250           if (activeMasterManager.blockUntilBecomingActiveMaster(timeout, status)) {
1251             finishActiveMasterInitialization(status);
1252           }
1253         } catch (Throwable t) {
1254           status.setStatus("Failed to become active: " + t.getMessage());
1255           LOG.fatal("Failed to become active master", t);
1256           // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility
1257           if (t instanceof NoClassDefFoundError &&
1258               t.getMessage().contains("org/apache/hadoop/hdfs/protocol/FSConstants$SafeModeAction")) {
1259             // improved error message for this special case
1260             abort("HBase is having a problem with its Hadoop jars.  You may need to "
1261               + "recompile HBase against Hadoop version "
1262               +  org.apache.hadoop.util.VersionInfo.getVersion()
1263               + " or change your hadoop jars to start properly", t);
1264           } else {
1265             abort("Unhandled exception. Starting shutdown.", t);
1266           }
1267         } finally {
1268           status.cleanup();
1269         }
1270       }
1271     }, "ActiveMasterManager"));
1272   }
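  // Editor's sketch (illustrative, not part of HMaster): the backup znode path built above,
  // assuming the default zookeeper.znode.parent of "/hbase" and the default backup-masters
  // child node.
  //
  //   String backupZNode = ZKUtil.joinZNode("/hbase/backup-masters",
  //       "host1.example.com,16000,1400000000000");
  //   // -> "/hbase/backup-masters/host1.example.com,16000,1400000000000"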
1273 
1274   private void checkCompression(final HTableDescriptor htd)
1275   throws IOException {
1276     if (!this.masterCheckCompression) return;
1277     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1278       checkCompression(hcd);
1279     }
1280   }
1281 
1282   private void checkCompression(final HColumnDescriptor hcd)
1283   throws IOException {
1284     if (!this.masterCheckCompression) return;
1285     CompressionTest.testCompression(hcd.getCompression());
1286     CompressionTest.testCompression(hcd.getCompactionCompression());
1287   }
1288 
1289   private HRegionInfo[] getHRegionInfos(HTableDescriptor hTableDescriptor,
1290     byte[][] splitKeys) {
1291     long regionId = System.currentTimeMillis();
1292     HRegionInfo[] hRegionInfos = null;
1293     if (splitKeys == null || splitKeys.length == 0) {
1294       hRegionInfos = new HRegionInfo[]{new HRegionInfo(hTableDescriptor.getTableName(), null, null,
1295                 false, regionId)};
1296     } else {
1297       int numRegions = splitKeys.length + 1;
1298       hRegionInfos = new HRegionInfo[numRegions];
1299       byte[] startKey = null;
1300       byte[] endKey = null;
1301       for (int i = 0; i < numRegions; i++) {
1302         endKey = (i == splitKeys.length) ? null : splitKeys[i];
1303         hRegionInfos[i] =
1304              new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey,
1305                  false, regionId);
1306         startKey = endKey;
1307       }
1308     }
1309     return hRegionInfos;
1310   }
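  // Editor's example (illustrative, not part of HMaster): N split keys produce N + 1 regions
  // whose boundaries chain through the keys, with null marking the open ends of the table.
  // Here `desc` stands for any HTableDescriptor.
  //
  //   byte[][] splits = { Bytes.toBytes("b"), Bytes.toBytes("m"), Bytes.toBytes("t") };
  //   HRegionInfo[] regions = getHRegionInfos(desc, splits);
  //   // regions.length == 4: [null,"b"), ["b","m"), ["m","t"), ["t",null)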
1311 
1312   private static boolean isCatalogTable(final TableName tableName) {
1313     return tableName.equals(TableName.META_TABLE_NAME);
1314   }
1315 
1316   @Override
1317   public void deleteTable(final TableName tableName) throws IOException {
1318     checkInitialized();
1319     if (cpHost != null) {
1320       cpHost.preDeleteTable(tableName);
1321     }
1322     LOG.info(getClientIdAuditPrefix() + " delete " + tableName);
1323     this.service.submit(new DeleteTableHandler(tableName, this, this).prepare());
1324     if (cpHost != null) {
1325       cpHost.postDeleteTable(tableName);
1326     }
1327   }
1328 
1329   @Override
1330   public void truncateTable(TableName tableName, boolean preserveSplits) throws IOException {
1331     checkInitialized();
1332     if (cpHost != null) {
1333       cpHost.preTruncateTable(tableName);
1334     }
1335     LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);
1336     TruncateTableHandler handler = new TruncateTableHandler(tableName, this, this, preserveSplits);
1337     handler.prepare();
1338     handler.process();
1339     if (cpHost != null) {
1340       cpHost.postTruncateTable(tableName);
1341     }
1342   }
1343 
1344   @Override
1345   public void addColumn(final TableName tableName, final HColumnDescriptor column)
1346       throws IOException {
1347     checkInitialized();
1348     if (cpHost != null) {
1349       if (cpHost.preAddColumn(tableName, column)) {
1350         return;
1351       }
1352     }
1353     //TODO: we should process this (and some others) in an executor
1354     new TableAddFamilyHandler(tableName, column, this, this).prepare().process();
1355     if (cpHost != null) {
1356       cpHost.postAddColumn(tableName, column);
1357     }
1358   }
1359 
1360   @Override
1361   public void modifyColumn(TableName tableName, HColumnDescriptor descriptor)
1362       throws IOException {
1363     checkInitialized();
1364     checkCompression(descriptor);
1365     if (cpHost != null) {
1366       if (cpHost.preModifyColumn(tableName, descriptor)) {
1367         return;
1368       }
1369     }
1370     LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
1371     new TableModifyFamilyHandler(tableName, descriptor, this, this)
1372       .prepare().process();
1373     if (cpHost != null) {
1374       cpHost.postModifyColumn(tableName, descriptor);
1375     }
1376   }
1377 
1378   @Override
1379   public void deleteColumn(final TableName tableName, final byte[] columnName)
1380       throws IOException {
1381     checkInitialized();
1382     if (cpHost != null) {
1383       if (cpHost.preDeleteColumn(tableName, columnName)) {
1384         return;
1385       }
1386     }
1387     LOG.info(getClientIdAuditPrefix() + " delete " + Bytes.toString(columnName));
1388     new TableDeleteFamilyHandler(tableName, columnName, this, this).prepare().process();
1389     if (cpHost != null) {
1390       cpHost.postDeleteColumn(tableName, columnName);
1391     }
1392   }
1393 
1394   @Override
1395   public void enableTable(final TableName tableName) throws IOException {
1396     checkInitialized();
1397     if (cpHost != null) {
1398       cpHost.preEnableTable(tableName);
1399     }
1400     LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
1401     this.service.submit(new EnableTableHandler(this, tableName,
1402       assignmentManager, tableLockManager, false).prepare());
1403     if (cpHost != null) {
1404       cpHost.postEnableTable(tableName);
1405     }
1406   }
1407 
1408   @Override
1409   public void disableTable(final TableName tableName) throws IOException {
1410     checkInitialized();
1411     if (cpHost != null) {
1412       cpHost.preDisableTable(tableName);
1413     }
1414     LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
1415     this.service.submit(new DisableTableHandler(this, tableName,
1416       assignmentManager, tableLockManager, false).prepare());
1417     if (cpHost != null) {
1418       cpHost.postDisableTable(tableName);
1419     }
1420   }
1421 
1422   /**
1423    * Return the region and current deployment for the region containing
1424    * the given row. If the region cannot be found, returns null. If it
1425    * is found, but not currently deployed, the second element of the pair
1426    * may be null.
1427    */
1428   Pair<HRegionInfo, ServerName> getTableRegionForRow(
1429       final TableName tableName, final byte [] rowKey)
1430   throws IOException {
1431     final AtomicReference<Pair<HRegionInfo, ServerName>> result =
1432       new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
1433 
1434     MetaScannerVisitor visitor =
1435       new MetaScannerVisitorBase() {
1436         @Override
1437         public boolean processRow(Result data) throws IOException {
1438           if (data == null || data.size() <= 0) {
1439             return true;
1440           }
1441           Pair<HRegionInfo, ServerName> pair = HRegionInfo.getHRegionInfoAndServerName(data);
1442           if (pair == null) {
1443             return false;
1444           }
1445           if (!pair.getFirst().getTable().equals(tableName)) {
1446             return false;
1447           }
1448           result.set(pair);
1449           return true;
1450         }
1451     };
1452 
1453     MetaScanner.metaScan(conf, visitor, tableName, rowKey, 1);
1454     return result.get();
1455   }
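  // Editor's sketch (illustrative, not part of HMaster): locating the region and the server
  // currently hosting a given row; the second element of the pair may be null if the region
  // is not deployed. `master` stands for an HMaster handle.
  //
  //   Pair<HRegionInfo, ServerName> p =
  //       master.getTableRegionForRow(TableName.valueOf("t1"), Bytes.toBytes("row-42"));
  //   if (p != null && p.getSecond() != null) {
  //     LOG.info("row-42 is served by " + p.getSecond());
  //   }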
1456 
1457   @Override
1458   public void modifyTable(final TableName tableName, final HTableDescriptor descriptor)
1459       throws IOException {
1460     checkInitialized();
1461     sanityCheckTableDescriptor(descriptor);
1462     if (cpHost != null) {
1463       cpHost.preModifyTable(tableName, descriptor);
1464     }
1465     LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
1466     new ModifyTableHandler(tableName, descriptor, this, this).prepare().process();
1467     if (cpHost != null) {
1468       cpHost.postModifyTable(tableName, descriptor);
1469     }
1470   }
1471 
1472   @Override
1473   public void checkTableModifiable(final TableName tableName)
1474       throws IOException, TableNotFoundException, TableNotDisabledException {
1475     if (isCatalogTable(tableName)) {
1476       throw new IOException("Can't modify catalog tables");
1477     }
1478     if (!MetaTableAccessor.tableExists(getShortCircuitConnection(), tableName)) {
1479       throw new TableNotFoundException(tableName);
1480     }
1481     if (!getAssignmentManager().getTableStateManager().
1482         isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) {
1483       throw new TableNotDisabledException(tableName);
1484     }
1485   }
1486 
1487   /**
1488    * @return cluster status
1489    */
1490   public ClusterStatus getClusterStatus() throws InterruptedIOException {
1491     // Build Set of backup masters from ZK nodes
1492     List<String> backupMasterStrings;
1493     try {
1494       backupMasterStrings = ZKUtil.listChildrenNoWatch(this.zooKeeper,
1495         this.zooKeeper.backupMasterAddressesZNode);
1496     } catch (KeeperException e) {
1497       LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
1498       backupMasterStrings = new ArrayList<String>(0);
1499     }
1500     List<ServerName> backupMasters = new ArrayList<ServerName>(
1501                                           backupMasterStrings.size());
1502     for (String s: backupMasterStrings) {
1503       try {
1504         byte [] bytes;
1505         try {
1506           bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
1507               this.zooKeeper.backupMasterAddressesZNode, s));
1508         } catch (InterruptedException e) {
1509           throw new InterruptedIOException();
1510         }
1511         if (bytes != null) {
1512           ServerName sn;
1513           try {
1514             sn = ServerName.parseFrom(bytes);
1515           } catch (DeserializationException e) {
1516             LOG.warn("Failed to parse, skipping registration of backup server", e);
1517             continue;
1518           }
1519           backupMasters.add(sn);
1520         }
1521       } catch (KeeperException e) {
1522         LOG.warn(this.zooKeeper.prefix("Unable to get information about " +
1523                  "backup servers"), e);
1524       }
1525     }
1526     Collections.sort(backupMasters, new Comparator<ServerName>() {
1527       @Override
1528       public int compare(ServerName s1, ServerName s2) {
1529         return s1.getServerName().compareTo(s2.getServerName());
1530       }});
1531 
1532     String clusterId = fileSystemManager != null ?
1533       fileSystemManager.getClusterId().toString() : null;
1534     Map<String, RegionState> regionsInTransition = assignmentManager != null ?
1535       assignmentManager.getRegionStates().getRegionsInTransition() : null;
1536     String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null;
1537     boolean balancerOn = loadBalancerTracker != null ?
1538       loadBalancerTracker.isBalancerOn() : false;
1539     Map<ServerName, ServerLoad> onlineServers = null;
1540     Set<ServerName> deadServers = null;
1541     if (serverManager != null) {
1542       deadServers = serverManager.getDeadServers().copyServerNames();
1543       onlineServers = serverManager.getOnlineServers();
1544     }
1545     return new ClusterStatus(VersionInfo.getVersion(), clusterId,
1546       onlineServers, deadServers, serverName, backupMasters,
1547       regionsInTransition, coprocessors, balancerOn);
1548   }
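  // Editor's sketch (illustrative, not part of HMaster): reading a few fields from the
  // ClusterStatus assembled above; accessor names assume the ClusterStatus API of this release.
  //
  //   ClusterStatus cs = master.getClusterStatus();
  //   LOG.info("clusterId=" + cs.getClusterId()
  //       + ", liveServers=" + cs.getServersSize()
  //       + ", deadServers=" + cs.getDeadServers()
  //       + ", backupMasters=" + cs.getBackupMastersSize()
  //       + ", balancerOn=" + cs.isBalancerOn());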
1549 
1550   /**
1551    * The set of loaded coprocessors is stored in a static set. Since it's
1552    * statically allocated, it does not require that HMaster's cpHost be
1553    * initialized prior to accessing it.
1554    * @return a String representation of the set of names of the loaded
1555    * coprocessors.
1556    */
1557   public static String getLoadedCoprocessors() {
1558     return CoprocessorHost.getLoadedCoprocessors().toString();
1559   }
1560 
1561   /**
1562    * @return timestamp in millis when HMaster was started.
1563    */
1564   public long getMasterStartTime() {
1565     return startcode;
1566   }
1567 
1568   /**
1569    * @return timestamp in millis when HMaster became the active master.
1570    */
1571   public long getMasterActiveTime() {
1572     return masterActiveTime;
1573   }
1574 
1575   public int getRegionServerInfoPort(final ServerName sn) {
1576     RegionServerInfo info = this.regionServerTracker.getRegionServerInfo(sn);
1577     if (info == null || info.getInfoPort() == 0) {
1578       return conf.getInt(HConstants.REGIONSERVER_INFO_PORT,
1579         HConstants.DEFAULT_REGIONSERVER_INFOPORT);
1580     }
1581     return info.getInfoPort();
1582   }
1583 
1584   /**
1585    * @return array of coprocessor simple class names.
1586    */
1587   public String[] getMasterCoprocessors() {
1588     Set<String> masterCoprocessors = getMasterCoprocessorHost().getCoprocessors();
1589     return masterCoprocessors.toArray(new String[masterCoprocessors.size()]);
1590   }
1591 
1592   @Override
1593   public void abort(final String msg, final Throwable t) {
1594     if (cpHost != null) {
1595       // HBASE-4014: dump a list of loaded coprocessors.
1596       LOG.fatal("Master server abort: loaded coprocessors are: " +
1597           getLoadedCoprocessors());
1598     }
1599     if (t != null) LOG.fatal(msg, t);
1600     stop(msg);
1601   }
1602 
1603   @Override
1604   public ZooKeeperWatcher getZooKeeper() {
1605     return zooKeeper;
1606   }
1607 
1608   @Override
1609   public MasterCoprocessorHost getMasterCoprocessorHost() {
1610     return cpHost;
1611   }
1612 
1613   @Override
1614   public ServerName getServerName() {
1615     return this.serverName;
1616   }
1617 
1618   @Override
1619   public AssignmentManager getAssignmentManager() {
1620     return this.assignmentManager;
1621   }
1622 
1623   public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
1624     return rsFatals;
1625   }
1626 
1627   public void shutdown() {
1628     if (cpHost != null) {
1629       try {
1630         cpHost.preShutdown();
1631       } catch (IOException ioe) {
1632         LOG.error("Error calling master coprocessor preShutdown()", ioe);
1633       }
1634     }
1635 
1636     if (this.serverManager != null) {
1637       this.serverManager.shutdownCluster();
1638     }
1639     if (this.clusterStatusTracker != null){
1640       try {
1641         this.clusterStatusTracker.setClusterDown();
1642       } catch (KeeperException e) {
1643         LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
1644       }
1645     }
1646   }
1647 
1648   public void stopMaster() {
1649     if (cpHost != null) {
1650       try {
1651         cpHost.preStopMaster();
1652       } catch (IOException ioe) {
1653         LOG.error("Error calling master coprocessor preStopMaster()", ioe);
1654       }
1655     }
1656     stop("Stopped by " + Thread.currentThread().getName());
1657   }
1658 
1659   void checkServiceStarted() throws ServerNotRunningYetException {
1660     if (!serviceStarted) {
1661       throw new ServerNotRunningYetException("Server is not running yet");
1662     }
1663   }
1664 
1665   void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException {
1666     checkServiceStarted();
1667     if (!this.initialized) {
1668       throw new PleaseHoldException("Master is initializing");
1669     }
1670   }
1671 
1672   void checkNamespaceManagerReady() throws IOException {
1673     checkInitialized();
1674     if (tableNamespaceManager == null ||
1675         !tableNamespaceManager.isTableAvailableAndInitialized()) {
1676       throw new IOException("Table Namespace Manager not ready yet, try again later");
1677     }
1678   }
1679   /**
1680    * Report whether this master is currently the active master or not.
1681    * If not active master, we are parked on ZK waiting to become active.
1682    *
1683    * This method is used for testing.
1684    *
1685    * @return true if active master, false if not.
1686    */
1687   public boolean isActiveMaster() {
1688     return isActiveMaster;
1689   }
1690 
1691   /**
1692    * Report whether this master has completed with its initialization and is
1693    * ready.  If ready, the master is also the active master.  A standby master
1694    * is never ready.
1695    *
1696    * This method is used for testing.
1697    *
1698    * @return true if master is ready to go, false if not.
1699    */
1700   @Override
1701   public boolean isInitialized() {
1702     return initialized;
1703   }
1704 
1705   /**
1706    * ServerShutdownHandlerEnabled is kept false until assignMeta completes,
1707    * to prevent ServerShutdownHandler from being processed prematurely.
1708    * @return true if assignMeta has completed.
1709    */
1710   @Override
1711   public boolean isServerShutdownHandlerEnabled() {
1712     return this.serverShutdownHandlerEnabled;
1713   }
1714 
1715   /**
1716    * Report whether this master has started initialization and is about to do meta region assignment.
1717    * @return true if the master is in initialization and about to assign hbase:meta regions
1718    */
1719   public boolean isInitializationStartsMetaRegionAssignment() {
1720     return this.initializationBeforeMetaAssignment;
1721   }
1722 
1723   public void assignRegion(HRegionInfo hri) {
1724     assignmentManager.assign(hri);
1725   }
1726 
1727   /**
1728    * Compute the average load across all region servers.
1729    * Currently, this uses a very naive computation - just uses the number of
1730    * regions being served, ignoring stats about number of requests.
1731    * @return the average load
1732    */
1733   public double getAverageLoad() {
1734     if (this.assignmentManager == null) {
1735       return 0;
1736     }
1737 
1738     RegionStates regionStates = this.assignmentManager.getRegionStates();
1739     if (regionStates == null) {
1740       return 0;
1741     }
1742     return regionStates.getAverageLoad();
1743   }
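  // Editor's worked example (illustrative): with three region servers hosting 10, 20 and 30
  // regions, getAverageLoad() reports (10 + 20 + 30) / 3 = 20.0, regardless of request rates.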
1744 
1745   @Override
1746   public boolean registerService(Service instance) {
1747     /*
1748      * No stacking of instances is allowed for a single service name
1749      */
1750     Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType();
1751     if (coprocessorServiceHandlers.containsKey(serviceDesc.getFullName())) {
1752       LOG.error("Coprocessor service "+serviceDesc.getFullName()+
1753           " already registered, rejecting request from "+instance
1754       );
1755       return false;
1756     }
1757 
1758     coprocessorServiceHandlers.put(serviceDesc.getFullName(), instance);
1759     if (LOG.isDebugEnabled()) {
1760       LOG.debug("Registered master coprocessor service: service="+serviceDesc.getFullName());
1761     }
1762     return true;
1763   }
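  // Editor's sketch (illustrative; the service and endpoint names are assumptions, not part of
  // HMaster): registering a protobuf-generated coprocessor Service with the master. A second
  // registration under the same fully-qualified service name is rejected above.
  //
  //   Service svc = MyMasterProtos.MyService.newReflectiveService(new MyServiceEndpoint());
  //   boolean first = master.registerService(svc);   // true: registered
  //   boolean again = master.registerService(svc);   // false: name already taken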
1764 
1765   /**
1766    * Utility for constructing an instance of the passed HMaster class.
1767    * @param masterClass the HMaster subclass to instantiate
1768    * @param conf configuration to pass to the new master's constructor
1769    * @return HMaster instance.
1770    */
1771   public static HMaster constructMaster(Class<? extends HMaster> masterClass,
1772       final Configuration conf, final CoordinatedStateManager cp)  {
1773     try {
1774       Constructor<? extends HMaster> c =
1775         masterClass.getConstructor(Configuration.class, CoordinatedStateManager.class);
1776       return c.newInstance(conf, cp);
1777     } catch (InvocationTargetException ite) {
1778       Throwable target = ite.getTargetException() != null?
1779         ite.getTargetException(): ite;
1780       if (target.getCause() != null) target = target.getCause();
1781       throw new RuntimeException("Failed construction of Master: " +
1782         masterClass.toString(), target);
1783     } catch (Exception e) {
1784       throw new RuntimeException("Failed construction of Master: " +
1785         masterClass.toString() + ((e.getCause() != null) ?
1786           " " + e.getCause().getMessage() : ""), e);
1787     }
1788   }
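  // Editor's sketch (illustrative, not part of HMaster): a direct call mirroring what
  // HMasterCommandLine does, assuming the CoordinatedStateManagerFactory helper available in
  // this code base.
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   CoordinatedStateManager csm = CoordinatedStateManagerFactory.getCoordinatedStateManager(conf);
  //   HMaster master = HMaster.constructMaster(HMaster.class, conf, csm);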
1789 
1790   /**
1791    * @see org.apache.hadoop.hbase.master.HMasterCommandLine
1792    */
1793   public static void main(String [] args) {
1794     VersionInfo.logVersion();
1795     new HMasterCommandLine(HMaster.class).doMain(args);
1796   }
1797 
1798   public HFileCleaner getHFileCleaner() {
1799     return this.hfileCleaner;
1800   }
1801 
1802   /**
1803    * Exposed for TESTING!
1804    * @return the underlying snapshot manager
1805    */
1806   public SnapshotManager getSnapshotManagerForTesting() {
1807     return this.snapshotManager;
1808   }
1809 
1810   @Override
1811   public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
1812     TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
1813     checkNamespaceManagerReady();
1814     if (cpHost != null) {
1815       if (cpHost.preCreateNamespace(descriptor)) {
1816         return;
1817       }
1818     }
1819     LOG.info(getClientIdAuditPrefix() + " creating " + descriptor);
1820     tableNamespaceManager.create(descriptor);
1821     if (cpHost != null) {
1822       cpHost.postCreateNamespace(descriptor);
1823     }
1824   }
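  // Editor's sketch (illustrative, not part of HMaster): building a descriptor for the call above.
  //
  //   NamespaceDescriptor ns = NamespaceDescriptor.create("analytics").build();
  //   master.createNamespace(ns);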
1825 
1826   @Override
1827   public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
1828     TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
1829     checkNamespaceManagerReady();
1830     if (cpHost != null) {
1831       if (cpHost.preModifyNamespace(descriptor)) {
1832         return;
1833       }
1834     }
1835     LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
1836     tableNamespaceManager.update(descriptor);
1837     if (cpHost != null) {
1838       cpHost.postModifyNamespace(descriptor);
1839     }
1840   }
1841 
1842   @Override
1843   public void deleteNamespace(String name) throws IOException {
1844     checkNamespaceManagerReady();
1845     if (cpHost != null) {
1846       if (cpHost.preDeleteNamespace(name)) {
1847         return;
1848       }
1849     }
1850     LOG.info(getClientIdAuditPrefix() + " delete " + name);
1851     tableNamespaceManager.remove(name);
1852     if (cpHost != null) {
1853       cpHost.postDeleteNamespace(name);
1854     }
1855   }
1856 
1857   @Override
1858   public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
1859     checkNamespaceManagerReady();
1860     NamespaceDescriptor nsd = tableNamespaceManager.get(name);
1861     if (nsd == null) {
1862       throw new NamespaceNotFoundException(name);
1863     }
1864     return nsd;
1865   }
1866 
1867   @Override
1868   public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
1869     checkNamespaceManagerReady();
1870     return Lists.newArrayList(tableNamespaceManager.list());
1871   }
1872 
1873   @Override
1874   public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
1875     getNamespaceDescriptor(name); // check that namespace exists
1876     return Lists.newArrayList(tableDescriptors.getByNamespace(name).values());
1877   }
1878 
1879   @Override
1880   public List<TableName> listTableNamesByNamespace(String name) throws IOException {
1881     List<TableName> tableNames = Lists.newArrayList();
1882     getNamespaceDescriptor(name); // check that namespace exists
1883     for (HTableDescriptor descriptor: tableDescriptors.getByNamespace(name).values()) {
1884       tableNames.add(descriptor.getTableName());
1885     }
1886     return tableNames;
1887   }
1888 }