View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.master;
20  
21  import java.io.IOException;
22  import java.io.InterruptedIOException;
23  import java.lang.reflect.Constructor;
24  import java.lang.reflect.InvocationTargetException;
25  import java.net.InetAddress;
26  import java.net.InetSocketAddress;
27  import java.net.UnknownHostException;
28  import java.util.ArrayList;
29  import java.util.Arrays;
30  import java.util.Collections;
31  import java.util.Comparator;
32  import java.util.HashSet;
33  import java.util.List;
34  import java.util.Map;
35  import java.util.Set;
36  import java.util.concurrent.atomic.AtomicReference;
37  
38  import javax.servlet.ServletException;
39  import javax.servlet.http.HttpServlet;
40  import javax.servlet.http.HttpServletRequest;
41  import javax.servlet.http.HttpServletResponse;
42  
43  import org.apache.commons.logging.Log;
44  import org.apache.commons.logging.LogFactory;
45  import org.apache.hadoop.classification.InterfaceAudience;
46  import org.apache.hadoop.conf.Configuration;
47  import org.apache.hadoop.fs.Path;
48  import org.apache.hadoop.hbase.ClusterStatus;
49  import org.apache.hadoop.hbase.CoordinatedStateException;
50  import org.apache.hadoop.hbase.CoordinatedStateManager;
51  import org.apache.hadoop.hbase.DoNotRetryIOException;
52  import org.apache.hadoop.hbase.HBaseIOException;
53  import org.apache.hadoop.hbase.HColumnDescriptor;
54  import org.apache.hadoop.hbase.HConstants;
55  import org.apache.hadoop.hbase.HRegionInfo;
56  import org.apache.hadoop.hbase.HTableDescriptor;
57  import org.apache.hadoop.hbase.MasterNotRunningException;
58  import org.apache.hadoop.hbase.MetaTableAccessor;
59  import org.apache.hadoop.hbase.NamespaceDescriptor;
60  import org.apache.hadoop.hbase.NamespaceNotFoundException;
61  import org.apache.hadoop.hbase.PleaseHoldException;
62  import org.apache.hadoop.hbase.Server;
63  import org.apache.hadoop.hbase.ServerLoad;
64  import org.apache.hadoop.hbase.ServerName;
65  import org.apache.hadoop.hbase.TableDescriptors;
66  import org.apache.hadoop.hbase.TableName;
67  import org.apache.hadoop.hbase.TableNotDisabledException;
68  import org.apache.hadoop.hbase.TableNotFoundException;
69  import org.apache.hadoop.hbase.UnknownRegionException;
70  import org.apache.hadoop.hbase.client.MetaScanner;
71  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
72  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
73  import org.apache.hadoop.hbase.client.Result;
74  import org.apache.hadoop.hbase.client.TableState;
75  import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
76  import org.apache.hadoop.hbase.exceptions.DeserializationException;
77  import org.apache.hadoop.hbase.executor.ExecutorType;
78  import org.apache.hadoop.hbase.ipc.RequestContext;
79  import org.apache.hadoop.hbase.ipc.RpcServer;
80  import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
81  import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
82  import org.apache.hadoop.hbase.master.balancer.BalancerChore;
83  import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
84  import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
85  import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
86  import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
87  import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
88  import org.apache.hadoop.hbase.master.handler.DeleteTableHandler;
89  import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
90  import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
91  import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
92  import org.apache.hadoop.hbase.master.handler.ModifyTableHandler;
93  import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
94  import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
95  import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
96  import org.apache.hadoop.hbase.master.handler.TruncateTableHandler;
97  import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
98  import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
99  import org.apache.hadoop.hbase.monitoring.MonitoredTask;
100 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
101 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
102 import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
103 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
104 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
105 import org.apache.hadoop.hbase.regionserver.HRegionServer;
106 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
107 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
108 import org.apache.hadoop.hbase.replication.regionserver.Replication;
109 import org.apache.hadoop.hbase.security.UserProvider;
110 import org.apache.hadoop.hbase.util.Bytes;
111 import org.apache.hadoop.hbase.util.CompressionTest;
112 import org.apache.hadoop.hbase.util.FSUtils;
113 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
114 import org.apache.hadoop.hbase.util.Pair;
115 import org.apache.hadoop.hbase.util.Threads;
116 import org.apache.hadoop.hbase.util.VersionInfo;
117 import org.apache.hadoop.hbase.util.ZKDataMigrator;
118 import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
119 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
120 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
121 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
122 import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
123 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
124 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
125 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
126 import org.apache.zookeeper.KeeperException;
127 import org.apache.zookeeper.Watcher;
128 import org.mortbay.jetty.Connector;
129 import org.mortbay.jetty.nio.SelectChannelConnector;
130 import org.mortbay.jetty.servlet.Context;
131 
132 import com.google.common.annotations.VisibleForTesting;
133 import com.google.common.collect.Lists;
134 import com.google.common.collect.Maps;
135 import com.google.protobuf.Descriptors;
136 import com.google.protobuf.Service;
137 
138 /**
139  * HMaster is the "master server" for HBase. An HBase cluster has one active
140  * master.  If many masters are started, all compete.  Whichever wins goes on to
141  * run the cluster.  All others park themselves in their constructor until
142  * master or cluster shutdown or until the active master loses its lease in
143  * zookeeper.  Thereafter, all running masters jostle to take over the master role.
144  *
145  * <p>The Master can be asked to shut down the cluster. See {@link #shutdown()}.  In
146  * this case it will tell all regionservers to go down and then wait on them
147  * all reporting in that they are down.  This master will then shut itself down.
148  *
149  * <p>You can also shutdown just this master.  Call {@link #stopMaster()}.
150  *
151  * @see Watcher
152  */
153 @InterfaceAudience.Private
154 @SuppressWarnings("deprecation")
155 public class HMaster extends HRegionServer implements MasterServices, Server {
156   private static final Log LOG = LogFactory.getLog(HMaster.class.getName());
157 
158   // MASTER is name of the webapp and the attribute name used stuffing this
159   //instance into web context.
160   public static final String MASTER = "master";
161 
162   // Manager and zk listener for master election
163   private ActiveMasterManager activeMasterManager;
164   // Region server tracker
165   RegionServerTracker regionServerTracker;
166   // Draining region server tracker
167   private DrainingServerTracker drainingServerTracker;
168   // Tracker for load balancer state
169   LoadBalancerTracker loadBalancerTracker;
170 
171   /** Namespace stuff */
172   private TableNamespaceManager tableNamespaceManager;
173 
174   // Metrics for the HMaster
175   final MetricsMaster metricsMaster;
176   // file system manager for the master FS operations
177   private MasterFileSystem fileSystemManager;
178 
179   // server manager to deal with region server info
180   volatile ServerManager serverManager;
181 
182   // manager of assignment nodes in zookeeper
183   AssignmentManager assignmentManager;
184 
185   // buffer for "fatal error" notices from region servers
186   // in the cluster. This is only used for assisting
187   // operations/debugging.
188   MemoryBoundedLogMessageBuffer rsFatals;
189 
190   // flag set after we become the active master (used for testing)
191   private volatile boolean isActiveMaster = false;
192 
193   // flag set after we complete initialization once active,
194   // it is not private since it's used in unit tests
195   volatile boolean initialized = false;
196 
197   // flag set after master services are started,
198   // initialization may have not completed yet.
199   volatile boolean serviceStarted = false;
200 
201   // flag set after we complete assignMeta.
202   private volatile boolean serverShutdownHandlerEnabled = false;
203 
204   LoadBalancer balancer;
205   private BalancerChore balancerChore;
206   private ClusterStatusChore clusterStatusChore;
207   private ClusterStatusPublisher clusterStatusPublisherChore = null;
208 
209   CatalogJanitor catalogJanitorChore;
210   private LogCleaner logCleaner;
211   private HFileCleaner hfileCleaner;
212 
213   MasterCoprocessorHost cpHost;
214 
215   // Time stamps for when a hmaster became active
216   private long masterActiveTime;
217 
218   //should we check the compression codec type at master side, default true, HBASE-6370
219   private final boolean masterCheckCompression;
220 
221   Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();
222 
223   // monitor for snapshot of hbase tables
224   SnapshotManager snapshotManager;
225   // monitor for distributed procedures
226   MasterProcedureManagerHost mpmHost;
227 
228   // handle table states
229   private TableStateManager tableStateManager;
230 
231   /** flag used in test cases in order to simulate RS failures during master initialization */
232   private volatile boolean initializationBeforeMetaAssignment = false;
233 
234   /** jetty server for master to redirect requests to regionserver infoServer */
235   private org.mortbay.jetty.Server masterJettyServer;
236 
237   public static class RedirectServlet extends HttpServlet {
238     private static final long serialVersionUID = 2894774810058302472L;
239     private static int regionServerInfoPort;
240 
241     @Override
242     public void doGet(HttpServletRequest request,
243         HttpServletResponse response) throws ServletException, IOException {
244       String redirectUrl = request.getScheme() + "://"
245         + request.getServerName() + ":" + regionServerInfoPort
246         + request.getRequestURI();
247       response.sendRedirect(redirectUrl);
248     }
249   }
250 
  /**
   * Initializes the HMaster. The steps are as follows:
   * <p>
   * <ol>
   * <li>Initialize the local HRegionServer
   * <li>Start the ActiveMasterManager.
   * </ol>
   * <p>
   * Remaining steps of initialization occur in
   * {@link #finishActiveMasterInitialization(MonitoredTask)} after
   * the master becomes the active one.
   *
   * @param conf configuration for this master and its embedded region server
   * @param csm coordinated state manager, passed through to the superclass
   * @throws KeeperException
   * @throws IOException
   */
  public HMaster(final Configuration conf, CoordinatedStateManager csm)
      throws IOException, KeeperException {
    super(conf, csm);
    // Bounded buffer so accumulated RS fatal-error reports cannot grow without limit.
    this.rsFatals = new MemoryBoundedLogMessageBuffer(
      conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));

    LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
        ", hbase.cluster.distributed=" + this.conf.getBoolean("hbase.cluster.distributed", false));

    // Let replication add its master-side configuration before anything reads conf.
    Replication.decorateMasterConfiguration(this.conf);

    // Hack! Maps DFSClient => Master for logs.  HDFS made this
    // config param for task trackers, but we can piggyback off of it.
    if (this.conf.get("mapreduce.task.attempt.id") == null) {
      this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
    }

    //should we check the compression codec type at master side, default true, HBASE-6370
    this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);

    this.metricsMaster = new MetricsMaster( new MetricsMasterWrapperImpl(this));

    // Do we publish the status?
    boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
        HConstants.STATUS_PUBLISHED_DEFAULT);
    Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
        conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
            ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
            ClusterStatusPublisher.Publisher.class);

    if (shouldPublish) {
      if (publisherClass == null) {
        LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
            ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
            " is not set - not publishing status");
      } else {
        clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
        Threads.setDaemonThreadRunning(clusterStatusPublisherChore.getThread());
      }
    }
    // Compete for active mastership, then bring up the redirecting web UI.
    startActiveMasterManager();
    putUpJettyServer();
  }
309 
310   private void putUpJettyServer() throws IOException {
311     if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
312       return;
313     }
314     int infoPort = conf.getInt("hbase.master.info.port.orig",
315       HConstants.DEFAULT_MASTER_INFOPORT);
316     // -1 is for disabling info server, so no redirecting
317     if (infoPort < 0 || infoServer == null) {
318       return;
319     }
320 
321     RedirectServlet.regionServerInfoPort = infoServer.getPort();
322     masterJettyServer = new org.mortbay.jetty.Server();
323     Connector connector = new SelectChannelConnector();
324     connector.setHost(conf.get("hbase.master.info.bindAddress", "0.0.0.0"));
325     connector.setPort(infoPort);
326     masterJettyServer.addConnector(connector);
327     masterJettyServer.setStopAtShutdown(true);
328     Context context = new Context(masterJettyServer, "/", Context.NO_SESSIONS);
329     context.addServlet(RedirectServlet.class, "/*");
330     try {
331       masterJettyServer.start();
332     } catch (Exception e) {
333       throw new IOException("Failed to start redirecting jetty server", e);
334     }
335   }
336 
337   /**
338    * For compatibility, if failed with regionserver credentials, try the master one
339    */
340   protected void login(UserProvider user, String host) throws IOException {
341     try {
342       super.login(user, host);
343     } catch (IOException ie) {
344       user.login("hbase.master.keytab.file",
345         "hbase.master.kerberos.principal", host);
346     }
347   }
348 
349   @VisibleForTesting
350   public MasterRpcServices getMasterRpcServices() {
351     return (MasterRpcServices)rpcServices;
352   }
353 
354   public boolean balanceSwitch(final boolean b) throws IOException {
355     return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
356   }
357 
358   protected String getProcessName() {
359     return MASTER;
360   }
361 
362   protected boolean canCreateBaseZNode() {
363     return true;
364   }
365 
366   protected boolean canUpdateTableDescriptor() {
367     return true;
368   }
369 
370   protected RSRpcServices createRpcServices() throws IOException {
371     return new MasterRpcServices(this);
372   }
373 
374   protected void configureInfoServer() {
375     infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class);
376     infoServer.setAttribute(MASTER, this);
377     super.configureInfoServer();
378   }
379 
380   protected Class<? extends HttpServlet> getDumpServlet() {
381     return MasterDumpServlet.class;
382   }
383 
384   /**
385    * Emit the HMaster metrics, such as region in transition metrics.
386    * Surrounding in a try block just to be sure metrics doesn't abort HMaster.
387    */
388   protected void doMetrics() {
389     try {
390       if (assignmentManager != null) {
391         assignmentManager.updateRegionsInTransitionMetrics();
392       }
393     } catch (Throwable e) {
394       LOG.error("Couldn't update metrics: " + e.getMessage());
395     }
396   }
397 
398   MetricsMaster getMasterMetrics() {
399     return metricsMaster;
400   }
401 
  /**
   * Initialize all ZK based system trackers: load balancer tracker, assignment
   * manager, region server tracker and draining server tracker; then flip the
   * cluster-up flag, stand up the snapshot/procedure managers, and migrate any
   * table state left in ZooKeeper by older versions into the table state manager.
   * Note the start order: trackers must be running before the cluster is marked
   * up, since fresh region servers block on that flag during startup.
   * @throws IOException
   * @throws InterruptedException
   * @throws KeeperException
   * @throws CoordinatedStateException
   */
  void initializeZKBasedSystemTrackers() throws IOException,
      InterruptedException, KeeperException, CoordinatedStateException {
    this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
    this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
    this.loadBalancerTracker.start();
    this.assignmentManager = new AssignmentManager(this, serverManager,
      this.balancer, this.service, this.metricsMaster,
      this.tableLockManager, tableStateManager);

    this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
        this.serverManager);
    this.regionServerTracker.start();

    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
      this.serverManager);
    this.drainingServerTracker.start();

    // Set the cluster as up.  If new RSs, they'll be waiting on this before
    // going ahead with their startup.
    boolean wasUp = this.clusterStatusTracker.isClusterUp();
    if (!wasUp) this.clusterStatusTracker.setClusterUp();

    LOG.info("Server active/primary master=" + this.serverName +
        ", sessionid=0x" +
        Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
        ", setting cluster-up flag (Was=" + wasUp + ")");

    // create/initialize the snapshot manager and other procedure managers
    this.snapshotManager = new SnapshotManager();
    this.mpmHost = new MasterProcedureManagerHost();
    this.mpmHost.register(this.snapshotManager);
    this.mpmHost.register(new MasterFlushTableProcedureManager());
    this.mpmHost.loadProcedures(conf);
    this.mpmHost.initialize(this, this.metricsMaster);

    // migrating existent table state from zk
    for (Map.Entry<TableName, TableState.State> entry : ZKDataMigrator
        .queryForTableStates(getZooKeeper()).entrySet()) {
      LOG.info("Converting state from zk to new states:" + entry);
      tableStateManager.setTableState(entry.getKey(), entry.getValue());
    }
    // Old-style table state znodes are no longer needed once migrated.
    ZKUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().tableZNode);
  }
452 
  /**
   * Finish initialization of HMaster after becoming the primary master.
   *
   * <ol>
   * <li>Initialize master components - file system manager, server manager,
   *     assignment manager, region server tracker, etc</li>
   * <li>Start necessary service threads - balancer, catalog janitor,
   *     executor services, etc</li>
   * <li>Set cluster as UP in ZooKeeper</li>
   * <li>Wait for RegionServers to check-in</li>
   * <li>Split logs and perform data recovery, if necessary</li>
   * <li>Ensure assignment of meta/namespace regions</li>
   * <li>Handle either fresh cluster start or master failover</li>
   * </ol>
   *
   * The statement order below is significant: trackers and service threads
   * must be up before region servers are waited on, and hbase:meta must be
   * assigned before user-region assignment or the chores start.
   *
   * @param status task monitor updated as each phase begins/completes
   * @throws IOException
   * @throws InterruptedException
   * @throws KeeperException
   * @throws CoordinatedStateException
   */
  private void finishActiveMasterInitialization(MonitoredTask status)
      throws IOException, InterruptedException, KeeperException, CoordinatedStateException {

    isActiveMaster = true;

    /*
     * We are active master now... go initialize components we need to run.
     * Note, there may be dross in zk from previous runs; it'll get addressed
     * below after we determine if cluster startup or failover.
     */

    status.setStatus("Initializing Master file system");

    this.masterActiveTime = System.currentTimeMillis();
    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
    this.fileSystemManager = new MasterFileSystem(this, this);

    // publish cluster ID
    status.setStatus("Publishing Cluster ID in ZooKeeper");
    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
    this.serverManager = createServerManager(this, this);

    // Lazily create the short-circuit connection/meta locator exactly once.
    synchronized (this) {
      if (shortCircuitConnection == null) {
        shortCircuitConnection = createShortCircuitConnection();
        metaTableLocator = new MetaTableLocator();
      }
    }

    // Invalidate all write locks held previously
    this.tableLockManager.reapWriteLocks();

    this.tableStateManager = new TableStateManager(this);
    this.tableStateManager.start();

    status.setStatus("Initializing ZK system trackers");
    initializeZKBasedSystemTrackers();

    // initialize master side coprocessors before we start handling requests
    status.setStatus("Initializing master coprocessors");
    this.cpHost = new MasterCoprocessorHost(this, this.conf);

    // start up all service threads.
    status.setStatus("Initializing master service threads");
    startServiceThreads();

    // Wake up this server to check in
    sleeper.skipSleepCycle();

    // Wait for region servers to report in
    this.serverManager.waitForRegionServers(status);
    // Check zk for region servers that are up but didn't register
    for (ServerName sn: this.regionServerTracker.getOnlineServers()) {
      // The isServerOnline check is opportunistic, correctness is handled inside
      if (!this.serverManager.isServerOnline(sn)
          && serverManager.checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
        LOG.info("Registered server found up in zk but who has not yet reported in: " + sn);
      }
    }

    // get a list for previously failed RS which need log splitting work
    // we recover hbase:meta region servers inside master initialization and
    // handle other failed servers in SSH in order to start up master node ASAP
    Set<ServerName> previouslyFailedServers = this.fileSystemManager
        .getFailedServersFromLogFolders();

    // remove stale recovering regions from previous run
    this.fileSystemManager.removeStaleRecoveringRegionsFromZK(previouslyFailedServers);

    // log splitting for hbase:meta server
    ServerName oldMetaServerLocation = metaTableLocator.getMetaRegionLocation(this.getZooKeeper());
    if (oldMetaServerLocation != null && previouslyFailedServers.contains(oldMetaServerLocation)) {
      splitMetaLogBeforeAssignment(oldMetaServerLocation);
      // Note: we can't remove oldMetaServerLocation from previousFailedServers list because it
      // may also host user regions
    }
    Set<ServerName> previouslyFailedMetaRSs = getPreviouselyFailedMetaServersFromZK();
    // need to use union of previouslyFailedMetaRSs recorded in ZK and previouslyFailedServers
    // instead of previouslyFailedMetaRSs alone to address the following two situations:
    // 1) the chained failure situation(recovery failed multiple times in a row).
    // 2) master get killed right before it could delete the recovering hbase:meta from ZK while the
    // same server still has non-meta wals to be replayed so that
    // removeStaleRecoveringRegionsFromZK can't delete the stale hbase:meta region
    // Passing more servers into splitMetaLog is all right. If a server doesn't have hbase:meta wal,
    // there is no op for the server.
    previouslyFailedMetaRSs.addAll(previouslyFailedServers);

    this.initializationBeforeMetaAssignment = true;

    // Wait for regionserver to finish initialization.
    synchronized (online) {
      while (!isStopped() && !isOnline()) {
        online.wait(100);
      }
    }

    //initialize load balancer
    this.balancer.setClusterStatus(getClusterStatus());
    this.balancer.setMasterServices(this);
    this.balancer.initialize();

    // Check if master is shutting down because of some issue
    // in initializing the regionserver or the balancer.
    if(isStopped()) return;

    // Make sure meta assigned before proceeding.
    status.setStatus("Assigning Meta Region");
    assignMeta(status, previouslyFailedMetaRSs);
    // check if master is shutting down because above assignMeta could return even hbase:meta isn't
    // assigned when master is shutting down
    if(isStopped()) return;

    status.setStatus("Submitting log splitting work for previously failed region servers");
    // Master has recovered hbase:meta region server and we put
    // other failed region servers in a queue to be handled later by SSH
    for (ServerName tmpServer : previouslyFailedServers) {
      this.serverManager.processDeadServer(tmpServer, true);
    }

    // Fix up assignment manager status
    status.setStatus("Starting assignment manager");
    this.assignmentManager.joinCluster();

    //set cluster status again after user regions are assigned
    this.balancer.setClusterStatus(getClusterStatus());

    // Start balancer and meta catalog janitor after meta and regions have
    // been assigned.
    status.setStatus("Starting balancer and catalog janitor");
    this.clusterStatusChore = new ClusterStatusChore(this, balancer);
    Threads.setDaemonThreadRunning(clusterStatusChore.getThread());
    this.balancerChore = new BalancerChore(this);
    Threads.setDaemonThreadRunning(balancerChore.getThread());
    this.catalogJanitorChore = new CatalogJanitor(this, this);
    Threads.setDaemonThreadRunning(catalogJanitorChore.getThread());

    status.setStatus("Starting namespace manager");
    initNamespace();

    if (this.cpHost != null) {
      try {
        this.cpHost.preMasterInitialization();
      } catch (IOException e) {
        LOG.error("Coprocessor preMasterInitialization() hook failed", e);
      }
    }

    status.markComplete("Initialization successful");
    LOG.info("Master has completed initialization");
    initialized = true;
    // clear the dead servers with same host name and port of online server because we are not
    // removing dead server with same hostname and port of rs which is trying to check in before
    // master initialization. See HBASE-5916.
    this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();

    if (this.cpHost != null) {
      // don't let cp initialization errors kill the master
      try {
        this.cpHost.postStartMaster();
      } catch (IOException ioe) {
        LOG.error("Coprocessor postStartMaster() hook failed", ioe);
      }
    }
  }
637 
638   /**
639    * Create a {@link ServerManager} instance.
640    * @param master
641    * @param services
642    * @return An instance of {@link ServerManager}
643    * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
644    * @throws IOException
645    */
646   ServerManager createServerManager(final Server master,
647       final MasterServices services)
648   throws IOException {
649     // We put this out here in a method so can do a Mockito.spy and stub it out
650     // w/ a mocked up ServerManager.
651     return new ServerManager(master, services);
652   }
653 
  /**
   * Check <code>hbase:meta</code> is assigned. If not, assign it.
   * Reads meta's last known state from ZooKeeper, verifies the location, and
   * either waits out an in-flight transition, or splits the old host's meta
   * WAL and re-assigns. Finally enables the ServerShutdownHandler so a meta
   * host that dies right after assignment can still be recovered by SSH.
   * @param status MonitoredTask updated as the assignment progresses
   * @param previouslyFailedMetaRSs servers whose meta WALs may need recovery;
   *   the old meta host is added to this set if its log is split here
   * @throws InterruptedException
   * @throws IOException
   * @throws KeeperException
   */
  void assignMeta(MonitoredTask status, Set<ServerName> previouslyFailedMetaRSs)
      throws InterruptedException, IOException, KeeperException {
    // Work on meta region
    int assigned = 0;
    long timeout = this.conf.getLong("hbase.catalog.verification.timeout", 1000);
    status.setStatus("Assigning hbase:meta region");

    // Get current meta state from zk.
    RegionState metaState = MetaTableLocator.getMetaRegionState(getZooKeeper());

    RegionStates regionStates = assignmentManager.getRegionStates();
    regionStates.createRegionState(HRegionInfo.FIRST_META_REGIONINFO,
      metaState.getState(), metaState.getServerName(), null);

    // Re-assign only when meta isn't open, or its recorded location fails verification.
    if (!metaState.isOpened() || !metaTableLocator.verifyMetaRegionLocation(
        this.getShortCircuitConnection(), this.getZooKeeper(), timeout)) {
      ServerName currentMetaServer = metaState.getServerName();
      if (serverManager.isServerOnline(currentMetaServer)) {
        LOG.info("Meta was in transition on " + currentMetaServer);
        assignmentManager.processRegionsInTransition(Arrays.asList(metaState));
      } else {
        if (currentMetaServer != null) {
          // Old host is dead: recover its meta WAL before giving meta away.
          splitMetaLogBeforeAssignment(currentMetaServer);
          regionStates.logSplit(HRegionInfo.FIRST_META_REGIONINFO);
          previouslyFailedMetaRSs.add(currentMetaServer);
        }
        LOG.info("Re-assigning hbase:meta, it was on " + currentMetaServer);
        assignmentManager.assignMeta();
      }
      assigned++;
    }

    enableMeta(TableName.META_TABLE_NAME);

    if ((RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode())
        && (!previouslyFailedMetaRSs.isEmpty())) {
      // replay WAL edits mode need new hbase:meta RS is assigned firstly
      status.setStatus("replaying log for Meta Region");
      this.fileSystemManager.splitMetaLog(previouslyFailedMetaRSs);
    }

    // Make sure a hbase:meta location is set. We need to enable SSH here since
    // if the meta region server is died at this time, we need it to be re-assigned
    // by SSH so that system tables can be assigned.
    // No need to wait for meta is assigned = 0 when meta is just verified.
    enableServerShutdownHandler(assigned != 0);

    LOG.info("hbase:meta assigned=" + assigned + ", location="
      + metaTableLocator.getMetaRegionLocation(this.getZooKeeper()));
    status.setStatus("META assigned.");
  }
713 
714   void initNamespace() throws IOException {
715     //create namespace manager
716     tableNamespaceManager = new TableNamespaceManager(this);
717     tableNamespaceManager.start();
718   }
719 
720   boolean isCatalogJanitorEnabled() {
721     return catalogJanitorChore != null ?
722       catalogJanitorChore.getEnabled() : false;
723   }
724 
725   private void splitMetaLogBeforeAssignment(ServerName currentMetaServer) throws IOException {
726     if (RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode()) {
727       // In log replay mode, we mark hbase:meta region as recovering in ZK
728       Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
729       regions.add(HRegionInfo.FIRST_META_REGIONINFO);
730       this.fileSystemManager.prepareLogReplay(currentMetaServer, regions);
731     } else {
732       // In recovered.edits mode: create recovered edits file for hbase:meta server
733       this.fileSystemManager.splitMetaLog(currentMetaServer);
734     }
735   }
736 
737   private void enableServerShutdownHandler(
738       final boolean waitForMeta) throws IOException, InterruptedException {
739     // If ServerShutdownHandler is disabled, we enable it and expire those dead
740     // but not expired servers. This is required so that if meta is assigning to
741     // a server which dies after assignMeta starts assignment,
742     // SSH can re-assign it. Otherwise, we will be
743     // stuck here waiting forever if waitForMeta is specified.
744     if (!serverShutdownHandlerEnabled) {
745       serverShutdownHandlerEnabled = true;
746       this.serverManager.processQueuedDeadServers();
747     }
748 
749     if (waitForMeta) {
750       metaTableLocator.waitMetaRegionLocation(this.getZooKeeper());
751     }
752   }
753 
754   private void enableMeta(TableName metaTableName) {
755     if (!this.tableStateManager.isTableState(metaTableName,
756             TableState.State.ENABLED)) {
757       this.assignmentManager.setEnabledTable(metaTableName);
758     }
759   }
760 
761   /**
762    * This function returns a set of region server names under hbase:meta recovering region ZK node
763    * @return Set of meta server names which were recorded in ZK
764    * @throws KeeperException
765    */
766   private Set<ServerName> getPreviouselyFailedMetaServersFromZK() throws KeeperException {
767     Set<ServerName> result = new HashSet<ServerName>();
768     String metaRecoveringZNode = ZKUtil.joinZNode(zooKeeper.recoveringRegionsZNode,
769       HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
770     List<String> regionFailedServers = ZKUtil.listChildrenNoWatch(zooKeeper, metaRecoveringZNode);
771     if (regionFailedServers == null) return result;
772 
773     for(String failedServer : regionFailedServers) {
774       ServerName server = ServerName.parseServerName(failedServer);
775       result.add(server);
776     }
777     return result;
778   }
779 
780   @Override
781   public TableDescriptors getTableDescriptors() {
782     return this.tableDescriptors;
783   }
784 
785   @Override
786   public ServerManager getServerManager() {
787     return this.serverManager;
788   }
789 
790   @Override
791   public MasterFileSystem getMasterFileSystem() {
792     return this.fileSystemManager;
793   }
794 
795   @Override
796   public TableStateManager getTableStateManager() {
797     return tableStateManager;
798   }
799 
800   /*
801    * Start up all services. If any of these threads gets an unhandled exception
802    * then they just die with a logged message.  This should be fine because
803    * in general, we do not expect the master to get such unhandled exceptions
804    *  as OOMEs; it should be lightly loaded. See what HRegionServer does if
805    *  need to install an unexpected exception handler.
806    */
807   private void startServiceThreads() throws IOException{
808    // Start the executor service pools
809    this.service.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
810       conf.getInt("hbase.master.executor.openregion.threads", 5));
811    this.service.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
812       conf.getInt("hbase.master.executor.closeregion.threads", 5));
813    this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
814       conf.getInt("hbase.master.executor.serverops.threads", 5));
815    this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
816       conf.getInt("hbase.master.executor.serverops.threads", 5));
817    this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
818       conf.getInt("hbase.master.executor.logreplayops.threads", 10));
819 
820    // We depend on there being only one instance of this executor running
821    // at a time.  To do concurrency, would need fencing of enable/disable of
822    // tables.
823    // Any time changing this maxThreads to > 1, pls see the comment at
824    // AccessController#postCreateTableHandler
825    this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
826 
827    // Start log cleaner thread
828    int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
829    this.logCleaner =
830       new LogCleaner(cleanerInterval,
831          this, conf, getMasterFileSystem().getFileSystem(),
832          getMasterFileSystem().getOldLogDir());
833          Threads.setDaemonThreadRunning(logCleaner.getThread(), getName() + ".oldLogCleaner");
834 
835    //start the hfile archive cleaner thread
836     Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
837     this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
838         .getFileSystem(), archiveDir);
839     Threads.setDaemonThreadRunning(hfileCleaner.getThread(),
840       getName() + ".archivedHFileCleaner");
841 
842     serviceStarted = true;
843     if (LOG.isTraceEnabled()) {
844       LOG.trace("Started service threads");
845     }
846   }
847 
848   protected void stopServiceThreads() {
849     if (masterJettyServer != null) {
850       LOG.info("Stopping master jetty server");
851       try {
852         masterJettyServer.stop();
853       } catch (Exception e) {
854         LOG.error("Failed to stop master jetty server", e);
855       }
856     }
857     super.stopServiceThreads();
858     stopChores();
859     // Wait for all the remaining region servers to report in IFF we were
860     // running a cluster shutdown AND we were NOT aborting.
861     if (!isAborted() && this.serverManager != null &&
862         this.serverManager.isClusterShutdown()) {
863       this.serverManager.letRegionServersShutdown();
864     }
865     if (LOG.isDebugEnabled()) {
866       LOG.debug("Stopping service threads");
867     }
868     // Clean up and close up shop
869     if (this.logCleaner!= null) this.logCleaner.interrupt();
870     if (this.hfileCleaner != null) this.hfileCleaner.interrupt();
871     if (this.activeMasterManager != null) this.activeMasterManager.stop();
872     if (this.serverManager != null) this.serverManager.stop();
873     if (this.assignmentManager != null) this.assignmentManager.stop();
874     if (this.fileSystemManager != null) this.fileSystemManager.stop();
875     if (this.mpmHost != null) this.mpmHost.stop("server shutting down.");
876   }
877 
878   private void stopChores() {
879     if (this.balancerChore != null) {
880       this.balancerChore.interrupt();
881     }
882     if (this.clusterStatusChore != null) {
883       this.clusterStatusChore.interrupt();
884     }
885     if (this.catalogJanitorChore != null) {
886       this.catalogJanitorChore.interrupt();
887     }
888     if (this.clusterStatusPublisherChore != null){
889       clusterStatusPublisherChore.interrupt();
890     }
891   }
892 
893   /**
894    * @return Get remote side's InetAddress
895    * @throws UnknownHostException
896    */
897   InetAddress getRemoteInetAddress(final int port,
898       final long serverStartCode) throws UnknownHostException {
899     // Do it out here in its own little method so can fake an address when
900     // mocking up in tests.
901     InetAddress ia = RpcServer.getRemoteIp();
902 
903     // The call could be from the local regionserver,
904     // in which case, there is no remote address.
905     if (ia == null && serverStartCode == startcode) {
906       InetSocketAddress isa = rpcServices.getSocketAddress();
907       if (isa != null && isa.getPort() == port) {
908         ia = isa.getAddress();
909       }
910     }
911     return ia;
912   }
913 
914   /**
915    * @return Maximum time we should run balancer for
916    */
917   private int getBalancerCutoffTime() {
918     int balancerCutoffTime =
919       getConfiguration().getInt("hbase.balancer.max.balancing", -1);
920     if (balancerCutoffTime == -1) {
921       // No time period set so create one
922       int balancerPeriod =
923         getConfiguration().getInt("hbase.balancer.period", 300000);
924       balancerCutoffTime = balancerPeriod;
925       // If nonsense period, set it to balancerPeriod
926       if (balancerCutoffTime <= 0) balancerCutoffTime = balancerPeriod;
927     }
928     return balancerCutoffTime;
929   }
930 
  /**
   * Runs one pass of the load balancer: computes region move plans per table
   * and executes them until done or until the cutoff time is exceeded.
   * Synchronizes on {@code this.balancer} so only one run happens at a time.
   * @return false if the run was skipped (master not initialized, balancer
   *   switched off, regions in transition, dead servers in progress, or a
   *   coprocessor vetoed it); true once plans were executed (or none needed)
   * @throws IOException propagated from getClusterStatus()
   */
  public boolean balance() throws IOException {
    // if master not initialized, don't run balancer.
    if (!this.initialized) {
      LOG.debug("Master has not been initialized, don't run balancer.");
      return false;
    }
    // Read the cutoff outside the synchronized block; it only touches conf.
    int maximumBalanceTime = getBalancerCutoffTime();
    synchronized (this.balancer) {
      // If balance not true, don't run balancer.
      if (!this.loadBalancerTracker.isBalancerOn()) return false;
      // Only allow one balance run at at time. Skip while any region is in
      // transition: moving more regions now would fight the ongoing moves.
      if (this.assignmentManager.getRegionStates().isRegionsInTransition()) {
        Map<String, RegionState> regionsInTransition =
          this.assignmentManager.getRegionStates().getRegionsInTransition();
        LOG.debug("Not running balancer because " + regionsInTransition.size() +
          " region(s) in transition: " + org.apache.commons.lang.StringUtils.
            abbreviate(regionsInTransition.toString(), 256));
        return false;
      }
      // Also skip while dead-server processing is still re-assigning regions.
      if (this.serverManager.areDeadServersInProgress()) {
        LOG.debug("Not running balancer because processing dead regionserver(s): " +
          this.serverManager.getDeadServers());
        return false;
      }

      // Coprocessor preBalance() returning true vetoes the whole run.
      if (this.cpHost != null) {
        try {
          if (this.cpHost.preBalance()) {
            LOG.debug("Coprocessor bypassing balancer request");
            return false;
          }
        } catch (IOException ioe) {
          LOG.error("Error invoking master coprocessor preBalance()", ioe);
          return false;
        }
      }

      // Plans are computed per table, then executed as one flat list.
      Map<TableName, Map<ServerName, List<HRegionInfo>>> assignmentsByTable =
        this.assignmentManager.getRegionStates().getAssignmentsByTable();

      List<RegionPlan> plans = new ArrayList<RegionPlan>();
      //Give the balancer the current cluster state.
      this.balancer.setClusterStatus(getClusterStatus());
      for (Map<ServerName, List<HRegionInfo>> assignments : assignmentsByTable.values()) {
        List<RegionPlan> partialPlans = this.balancer.balanceCluster(assignments);
        if (partialPlans != null) plans.addAll(partialPlans);
      }
      long cutoffTime = System.currentTimeMillis() + maximumBalanceTime;
      int rpCount = 0;  // number of RegionPlans balanced so far
      long totalRegPlanExecTime = 0;
      if (plans != null && !plans.isEmpty()) {
        for (RegionPlan plan: plans) {
          LOG.info("balance " + plan);
          long balStartTime = System.currentTimeMillis();
          //TODO: bulk assign
          this.assignmentManager.balance(plan);
          totalRegPlanExecTime += System.currentTimeMillis()-balStartTime;
          rpCount++;
          // Stop early if executing the NEXT plan (estimated at the average
          // per-plan time so far) would push us past the cutoff.
          if (rpCount < plans.size() &&
              // if performing next balance exceeds cutoff time, exit the loop
              (System.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) {
            //TODO: After balance, there should not be a cutoff time (keeping it as a security net for now)
            LOG.debug("No more balancing till next balance run; maximumBalanceTime=" +
              maximumBalanceTime);
            break;
          }
        }
      }
      // postBalance() sees only the plans actually executed.
      if (this.cpHost != null) {
        try {
          this.cpHost.postBalance(rpCount < plans.size() ? plans.subList(0, rpCount) : plans);
        } catch (IOException ioe) {
          // balancing already succeeded so don't change the result
          LOG.error("Error invoking master coprocessor postBalance()", ioe);
        }
      }
    }
    // If LoadBalancer did not generate any plans, it means the cluster is already balanced.
    // Return true indicating a success.
    return true;
  }
1013 
1014   /**
1015    * @return Client info for use as prefix on an audit log string; who did an action
1016    */
1017   String getClientIdAuditPrefix() {
1018     return "Client=" + RequestContext.getRequestUserName() + "/" +
1019       RequestContext.get().getRemoteAddress();
1020   }
1021 
1022   /**
1023    * Switch for the background CatalogJanitor thread.
1024    * Used for testing.  The thread will continue to run.  It will just be a noop
1025    * if disabled.
1026    * @param b If false, the catalog janitor won't do anything.
1027    */
1028   public void setCatalogJanitorEnabled(final boolean b) {
1029     this.catalogJanitorChore.setEnabled(b);
1030   }
1031 
1032   @Override
1033   public void dispatchMergingRegions(final HRegionInfo region_a,
1034       final HRegionInfo region_b, final boolean forcible) throws IOException {
1035     checkInitialized();
1036     this.service.submit(new DispatchMergingRegionHandler(this,
1037         this.catalogJanitorChore, region_a, region_b, forcible));
1038   }
1039 
  /**
   * Moves the region with the given encoded name to a destination server.
   * With a null/empty destination, the balancer picks a random server
   * (excluding the region's current host). A no-op if the region is already
   * on the requested server. Coprocessor preMove() may veto the move.
   * @param encodedRegionName encoded name of the region to move
   * @param destServerName target server name bytes, or null/empty for random
   * @throws HBaseIOException if the region is unknown, the master is not
   *   initialized, or the underlying balance/assignment fails
   */
  void move(final byte[] encodedRegionName,
      final byte[] destServerName) throws HBaseIOException {
    RegionState regionState = assignmentManager.getRegionStates().
      getRegionState(Bytes.toString(encodedRegionName));
    if (regionState == null) {
      throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
    }

    HRegionInfo hri = regionState.getRegion();
    ServerName dest;
    if (destServerName == null || destServerName.length == 0) {
      LOG.info("Passed destination servername is null/empty so " +
        "choosing a server at random");
      // Candidate list excludes the region's current host.
      final List<ServerName> destServers = this.serverManager.createDestinationServersList(
        regionState.getServerName());
      dest = balancer.randomAssignment(hri, destServers);
    } else {
      dest = ServerName.valueOf(Bytes.toString(destServerName));
      // Explicit destination equal to the current host: nothing to do.
      if (dest.equals(regionState.getServerName())) {
        LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
          + " because region already assigned to the same server " + dest + ".");
        return;
      }
    }

    // Now we can do the move
    RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);

    try {
      checkInitialized();
      // preMove() returning true vetoes the move.
      if (this.cpHost != null) {
        if (this.cpHost.preMove(hri, rp.getSource(), rp.getDestination())) {
          return;
        }
      }
      LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
      this.assignmentManager.balance(rp);
      if (this.cpHost != null) {
        this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
      }
    } catch (IOException ioe) {
      // Re-throw HBaseIOException as-is; wrap anything else to match the
      // declared throws clause.
      if (ioe instanceof HBaseIOException) {
        throw (HBaseIOException)ioe;
      }
      throw new HBaseIOException(ioe);
    }
  }
1087 
1088   @Override
1089   public void createTable(HTableDescriptor hTableDescriptor,
1090       byte [][] splitKeys) throws IOException {
1091     if (isStopped()) {
1092       throw new MasterNotRunningException();
1093     }
1094 
1095     String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
1096     getNamespaceDescriptor(namespace); // ensure namespace exists
1097 
1098     HRegionInfo[] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
1099     checkInitialized();
1100     sanityCheckTableDescriptor(hTableDescriptor);
1101     if (cpHost != null) {
1102       cpHost.preCreateTable(hTableDescriptor, newRegions);
1103     }
1104     LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
1105     this.service.submit(new CreateTableHandler(this,
1106       this.fileSystemManager, hTableDescriptor, conf,
1107       newRegions, this).prepare());
1108     if (cpHost != null) {
1109       cpHost.postCreateTable(hTableDescriptor, newRegions);
1110     }
1111 
1112   }
1113 
1114   /**
1115    * Checks whether the table conforms to some sane limits, and configured
1116    * values (compression, etc) work. Throws an exception if something is wrong.
1117    * @throws IOException
1118    */
1119   private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
1120     final String CONF_KEY = "hbase.table.sanity.checks";
1121     if (!conf.getBoolean(CONF_KEY, true)) {
1122       return;
1123     }
1124     String tableVal = htd.getConfigurationValue(CONF_KEY);
1125     if (tableVal != null && !Boolean.valueOf(tableVal)) {
1126       return;
1127     }
1128 
1129     // check max file size
1130     long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
1131     long maxFileSize = htd.getMaxFileSize();
1132     if (maxFileSize < 0) {
1133       maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
1134     }
1135     if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
1136       throw new DoNotRetryIOException("MAX_FILESIZE for table descriptor or "
1137         + "\"hbase.hregion.max.filesize\" (" + maxFileSize
1138         + ") is too small, which might cause over splitting into unmanageable "
1139         + "number of regions. Set " + CONF_KEY + " to false at conf or table descriptor "
1140           + "if you want to bypass sanity checks");
1141     }
1142 
1143     // check flush size
1144     long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
1145     long flushSize = htd.getMemStoreFlushSize();
1146     if (flushSize < 0) {
1147       flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
1148     }
1149     if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
1150       throw new DoNotRetryIOException("MEMSTORE_FLUSHSIZE for table descriptor or "
1151           + "\"hbase.hregion.memstore.flush.size\" ("+flushSize+") is too small, which might cause"
1152           + " very frequent flushing. Set " + CONF_KEY + " to false at conf or table descriptor "
1153           + "if you want to bypass sanity checks");
1154     }
1155 
1156     // check split policy class can be loaded
1157     try {
1158       RegionSplitPolicy.getSplitPolicyClass(htd, conf);
1159     } catch (Exception ex) {
1160       throw new DoNotRetryIOException(ex);
1161     }
1162 
1163     // check compression can be loaded
1164     checkCompression(htd);
1165 
1166     // check that we have at least 1 CF
1167     if (htd.getColumnFamilies().length == 0) {
1168       throw new DoNotRetryIOException("Table should have at least one column family "
1169           + "Set "+CONF_KEY+" at conf or table descriptor if you want to bypass sanity checks");
1170     }
1171 
1172     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1173       if (hcd.getTimeToLive() <= 0) {
1174         throw new DoNotRetryIOException("TTL for column family " + hcd.getNameAsString()
1175           + "  must be positive. Set " + CONF_KEY + " to false at conf or table descriptor "
1176           + "if you want to bypass sanity checks");
1177       }
1178 
1179       // check blockSize
1180       if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
1181         throw new DoNotRetryIOException("Block size for column family " + hcd.getNameAsString()
1182           + "  must be between 1K and 16MB Set "+CONF_KEY+" to false at conf or table descriptor "
1183           + "if you want to bypass sanity checks");
1184       }
1185 
1186       // check versions
1187       if (hcd.getMinVersions() < 0) {
1188         throw new DoNotRetryIOException("Min versions for column family " + hcd.getNameAsString()
1189           + "  must be positive. Set " + CONF_KEY + " to false at conf or table descriptor "
1190           + "if you want to bypass sanity checks");
1191       }
1192       // max versions already being checked
1193 
1194       // check replication scope
1195       if (hcd.getScope() < 0) {
1196         throw new DoNotRetryIOException("Replication scope for column family "
1197           + hcd.getNameAsString() + "  must be positive. Set " + CONF_KEY + " to false at conf "
1198           + "or table descriptor if you want to bypass sanity checks");
1199       }
1200 
1201       // TODO: should we check coprocessors and encryption ?
1202     }
1203   }
1204 
  /**
   * Registers this master as a backup master in ZK and spawns the
   * "ActiveMasterManager" daemon thread that competes to become the active
   * master; on winning, the thread runs finishActiveMasterInitialization().
   * @throws KeeperException on failure to write the backup-master znode
   */
  private void startActiveMasterManager() throws KeeperException {
    String backupZNode = ZKUtil.joinZNode(
      zooKeeper.backupMasterAddressesZNode, serverName.toString());
    /*
    * Add a ZNode for ourselves in the backup master directory since we
    * may not become the active master. If so, we want the actual active
    * master to know we are backup masters, so that it won't assign
    * regions to us if so configured.
    *
    * If we become the active master later, ActiveMasterManager will delete
    * this node explicitly.  If we crash before then, ZooKeeper will delete
    * this node for us since it is ephemeral.
    */
    LOG.info("Adding ZNode for " + backupZNode + " in backup master directory");
    MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode, serverName);

    activeMasterManager = new ActiveMasterManager(zooKeeper, serverName, this);
    // Start a thread to try to become the active master, so we won't block here
    Threads.setDaemonThreadRunning(new Thread(new Runnable() {
      public void run() {
        int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT,
          HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
        // If we're a backup master, stall until a primary to writes his address
        if (conf.getBoolean(HConstants.MASTER_TYPE_BACKUP,
            HConstants.DEFAULT_MASTER_TYPE_BACKUP)) {
          LOG.debug("HMaster started in backup mode. "
            + "Stalling until master znode is written.");
          // This will only be a minute or so while the cluster starts up,
          // so don't worry about setting watches on the parent znode
          while (!activeMasterManager.hasActiveMaster()) {
            LOG.debug("Waiting for master address ZNode to be written "
              + "(Also watching cluster state node)");
            Threads.sleep(timeout);
          }
        }
        MonitoredTask status = TaskMonitor.get().createStatus("Master startup");
        status.setDescription("Master startup");
        try {
          // Blocks until this master wins the election (or gives up).
          if (activeMasterManager.blockUntilBecomingActiveMaster(timeout, status)) {
            finishActiveMasterInitialization(status);
          }
        } catch (Throwable t) {
          status.setStatus("Failed to become active: " + t.getMessage());
          LOG.fatal("Failed to become active master", t);
          // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility
          if (t instanceof NoClassDefFoundError &&
              t.getMessage().contains("org/apache/hadoop/hdfs/protocol/FSConstants$SafeModeAction")) {
            // improved error message for this special case
            abort("HBase is having a problem with its Hadoop jars.  You may need to "
              + "recompile HBase against Hadoop version "
              +  org.apache.hadoop.util.VersionInfo.getVersion()
              + " or change your hadoop jars to start properly", t);
          } else {
            abort("Unhandled exception. Starting shutdown.", t);
          }
        } finally {
          status.cleanup();
        }
      }
    }, "ActiveMasterManager"));
  }
1266 
1267   private void checkCompression(final HTableDescriptor htd)
1268   throws IOException {
1269     if (!this.masterCheckCompression) return;
1270     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1271       checkCompression(hcd);
1272     }
1273   }
1274 
1275   private void checkCompression(final HColumnDescriptor hcd)
1276   throws IOException {
1277     if (!this.masterCheckCompression) return;
1278     CompressionTest.testCompression(hcd.getCompression());
1279     CompressionTest.testCompression(hcd.getCompactionCompression());
1280   }
1281 
1282   private HRegionInfo[] getHRegionInfos(HTableDescriptor hTableDescriptor,
1283     byte[][] splitKeys) {
1284     long regionId = System.currentTimeMillis();
1285     HRegionInfo[] hRegionInfos = null;
1286     if (splitKeys == null || splitKeys.length == 0) {
1287       hRegionInfos = new HRegionInfo[]{new HRegionInfo(hTableDescriptor.getTableName(), null, null,
1288                 false, regionId)};
1289     } else {
1290       int numRegions = splitKeys.length + 1;
1291       hRegionInfos = new HRegionInfo[numRegions];
1292       byte[] startKey = null;
1293       byte[] endKey = null;
1294       for (int i = 0; i < numRegions; i++) {
1295         endKey = (i == splitKeys.length) ? null : splitKeys[i];
1296         hRegionInfos[i] =
1297              new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey,
1298                  false, regionId);
1299         startKey = endKey;
1300       }
1301     }
1302     return hRegionInfos;
1303   }
1304 
1305   private static boolean isCatalogTable(final TableName tableName) {
1306     return tableName.equals(TableName.META_TABLE_NAME);
1307   }
1308 
1309   @Override
1310   public void deleteTable(final TableName tableName) throws IOException {
1311     checkInitialized();
1312     if (cpHost != null) {
1313       cpHost.preDeleteTable(tableName);
1314     }
1315     LOG.info(getClientIdAuditPrefix() + " delete " + tableName);
1316     this.service.submit(new DeleteTableHandler(tableName, this, this).prepare());
1317     if (cpHost != null) {
1318       cpHost.postDeleteTable(tableName);
1319     }
1320   }
1321 
1322   @Override
1323   public void truncateTable(TableName tableName, boolean preserveSplits) throws IOException {
1324     checkInitialized();
1325     if (cpHost != null) {
1326       cpHost.preTruncateTable(tableName);
1327     }
1328     LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);
1329     TruncateTableHandler handler = new TruncateTableHandler(tableName, this, this, preserveSplits);
1330     handler.prepare();
1331     handler.process();
1332     if (cpHost != null) {
1333       cpHost.postTruncateTable(tableName);
1334     }
1335   }
1336 
1337   @Override
1338   public void addColumn(final TableName tableName, final HColumnDescriptor column)
1339       throws IOException {
1340     checkInitialized();
1341     if (cpHost != null) {
1342       if (cpHost.preAddColumn(tableName, column)) {
1343         return;
1344       }
1345     }
1346     //TODO: we should process this (and some others) in an executor
1347     new TableAddFamilyHandler(tableName, column, this, this).prepare().process();
1348     if (cpHost != null) {
1349       cpHost.postAddColumn(tableName, column);
1350     }
1351   }
1352 
1353   @Override
1354   public void modifyColumn(TableName tableName, HColumnDescriptor descriptor)
1355       throws IOException {
1356     checkInitialized();
1357     checkCompression(descriptor);
1358     if (cpHost != null) {
1359       if (cpHost.preModifyColumn(tableName, descriptor)) {
1360         return;
1361       }
1362     }
1363     LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
1364     new TableModifyFamilyHandler(tableName, descriptor, this, this)
1365       .prepare().process();
1366     if (cpHost != null) {
1367       cpHost.postModifyColumn(tableName, descriptor);
1368     }
1369   }
1370 
1371   @Override
1372   public void deleteColumn(final TableName tableName, final byte[] columnName)
1373       throws IOException {
1374     checkInitialized();
1375     if (cpHost != null) {
1376       if (cpHost.preDeleteColumn(tableName, columnName)) {
1377         return;
1378       }
1379     }
1380     LOG.info(getClientIdAuditPrefix() + " delete " + Bytes.toString(columnName));
1381     new TableDeleteFamilyHandler(tableName, columnName, this, this).prepare().process();
1382     if (cpHost != null) {
1383       cpHost.postDeleteColumn(tableName, columnName);
1384     }
1385   }
1386 
1387   @Override
1388   public void enableTable(final TableName tableName) throws IOException {
1389     checkInitialized();
1390     if (cpHost != null) {
1391       cpHost.preEnableTable(tableName);
1392     }
1393     LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
1394     this.service.submit(new EnableTableHandler(this, tableName,
1395       assignmentManager, tableLockManager, false).prepare());
1396     if (cpHost != null) {
1397       cpHost.postEnableTable(tableName);
1398    }
1399   }
1400 
1401   @Override
1402   public void disableTable(final TableName tableName) throws IOException {
1403     checkInitialized();
1404     if (cpHost != null) {
1405       cpHost.preDisableTable(tableName);
1406     }
1407     LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
1408     this.service.submit(new DisableTableHandler(this, tableName,
1409       assignmentManager, tableLockManager, false).prepare());
1410     if (cpHost != null) {
1411       cpHost.postDisableTable(tableName);
1412     }
1413   }
1414 
1415   /**
1416    * Return the region and current deployment for the region containing
1417    * the given row. If the region cannot be found, returns null. If it
1418    * is found, but not currently deployed, the second element of the pair
1419    * may be null.
1420    */
1421   Pair<HRegionInfo, ServerName> getTableRegionForRow(
1422       final TableName tableName, final byte [] rowKey)
1423   throws IOException {
1424     final AtomicReference<Pair<HRegionInfo, ServerName>> result =
1425       new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
1426 
1427     MetaScannerVisitor visitor =
1428       new MetaScannerVisitorBase() {
1429         @Override
1430         public boolean processRow(Result data) throws IOException {
1431           if (data == null || data.size() <= 0) {
1432             return true;
1433           }
1434           Pair<HRegionInfo, ServerName> pair = HRegionInfo.getHRegionInfoAndServerName(data);
1435           if (pair == null) {
1436             return false;
1437           }
1438           if (!pair.getFirst().getTable().equals(tableName)) {
1439             return false;
1440           }
1441           result.set(pair);
1442           return true;
1443         }
1444     };
1445 
1446     MetaScanner.metaScan(conf, visitor, tableName, rowKey, 1);
1447     return result.get();
1448   }
1449 
1450   @Override
1451   public void modifyTable(final TableName tableName, final HTableDescriptor descriptor)
1452       throws IOException {
1453     checkInitialized();
1454     sanityCheckTableDescriptor(descriptor);
1455     if (cpHost != null) {
1456       cpHost.preModifyTable(tableName, descriptor);
1457     }
1458     LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
1459     new ModifyTableHandler(tableName, descriptor, this, this).prepare().process();
1460     if (cpHost != null) {
1461       cpHost.postModifyTable(tableName, descriptor);
1462     }
1463   }
1464 
1465   @Override
1466   public void checkTableModifiable(final TableName tableName)
1467       throws IOException, TableNotFoundException, TableNotDisabledException {
1468     if (isCatalogTable(tableName)) {
1469       throw new IOException("Can't modify catalog tables");
1470     }
1471     if (!MetaTableAccessor.tableExists(getShortCircuitConnection(), tableName)) {
1472       throw new TableNotFoundException(tableName);
1473     }
1474     if (!getAssignmentManager().getTableStateManager().
1475         isTableState(tableName, TableState.State.DISABLED)) {
1476       throw new TableNotDisabledException(tableName);
1477     }
1478   }
1479 
1480   /**
1481    * @return cluster status
1482    */
1483   public ClusterStatus getClusterStatus() throws InterruptedIOException {
1484     // Build Set of backup masters from ZK nodes
1485     List<String> backupMasterStrings;
1486     try {
1487       backupMasterStrings = ZKUtil.listChildrenNoWatch(this.zooKeeper,
1488         this.zooKeeper.backupMasterAddressesZNode);
1489     } catch (KeeperException e) {
1490       LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
1491       backupMasterStrings = new ArrayList<String>(0);
1492     }
1493     List<ServerName> backupMasters = new ArrayList<ServerName>(
1494                                           backupMasterStrings.size());
1495     for (String s: backupMasterStrings) {
1496       try {
1497         byte [] bytes;
1498         try {
1499           bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
1500               this.zooKeeper.backupMasterAddressesZNode, s));
1501         } catch (InterruptedException e) {
1502           throw new InterruptedIOException();
1503         }
1504         if (bytes != null) {
1505           ServerName sn;
1506           try {
1507             sn = ServerName.parseFrom(bytes);
1508           } catch (DeserializationException e) {
1509             LOG.warn("Failed parse, skipping registering backup server", e);
1510             continue;
1511           }
1512           backupMasters.add(sn);
1513         }
1514       } catch (KeeperException e) {
1515         LOG.warn(this.zooKeeper.prefix("Unable to get information about " +
1516                  "backup servers"), e);
1517       }
1518     }
1519     Collections.sort(backupMasters, new Comparator<ServerName>() {
1520       @Override
1521       public int compare(ServerName s1, ServerName s2) {
1522         return s1.getServerName().compareTo(s2.getServerName());
1523       }});
1524 
1525     String clusterId = fileSystemManager != null ?
1526       fileSystemManager.getClusterId().toString() : null;
1527     Map<String, RegionState> regionsInTransition = assignmentManager != null ?
1528       assignmentManager.getRegionStates().getRegionsInTransition() : null;
1529     String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null;
1530     boolean balancerOn = loadBalancerTracker != null ?
1531       loadBalancerTracker.isBalancerOn() : false;
1532     Map<ServerName, ServerLoad> onlineServers = null;
1533     Set<ServerName> deadServers = null;
1534     if (serverManager != null) {
1535       deadServers = serverManager.getDeadServers().copyServerNames();
1536       onlineServers = serverManager.getOnlineServers();
1537     }
1538     return new ClusterStatus(VersionInfo.getVersion(), clusterId,
1539       onlineServers, deadServers, serverName, backupMasters,
1540       regionsInTransition, coprocessors, balancerOn);
1541   }
1542 
1543   /**
1544    * The set of loaded coprocessors is stored in a static set. Since it's
1545    * statically allocated, it does not require that HMaster's cpHost be
1546    * initialized prior to accessing it.
1547    * @return a String representation of the set of names of the loaded
1548    * coprocessors.
1549    */
1550   public static String getLoadedCoprocessors() {
1551     return CoprocessorHost.getLoadedCoprocessors().toString();
1552   }
1553 
1554   /**
1555    * @return timestamp in millis when HMaster was started.
1556    */
1557   public long getMasterStartTime() {
1558     return startcode;
1559   }
1560 
1561   /**
1562    * @return timestamp in millis when HMaster became the active master.
1563    */
1564   public long getMasterActiveTime() {
1565     return masterActiveTime;
1566   }
1567 
1568   public int getRegionServerInfoPort(final ServerName sn) {
1569     RegionServerInfo info = this.regionServerTracker.getRegionServerInfo(sn);
1570     if (info == null || info.getInfoPort() == 0) {
1571       return conf.getInt(HConstants.REGIONSERVER_INFO_PORT,
1572         HConstants.DEFAULT_REGIONSERVER_INFOPORT);
1573     }
1574     return info.getInfoPort();
1575   }
1576 
1577   /**
1578    * @return array of coprocessor SimpleNames.
1579    */
1580   public String[] getMasterCoprocessors() {
1581     Set<String> masterCoprocessors = getMasterCoprocessorHost().getCoprocessors();
1582     return masterCoprocessors.toArray(new String[masterCoprocessors.size()]);
1583   }
1584 
1585   @Override
1586   public void abort(final String msg, final Throwable t) {
1587     if (cpHost != null) {
1588       // HBASE-4014: dump a list of loaded coprocessors.
1589       LOG.fatal("Master server abort: loaded coprocessors are: " +
1590           getLoadedCoprocessors());
1591     }
1592     if (t != null) LOG.fatal(msg, t);
1593     stop(msg);
1594   }
1595 
1596   @Override
1597   public ZooKeeperWatcher getZooKeeper() {
1598     return zooKeeper;
1599   }
1600 
1601   @Override
1602   public MasterCoprocessorHost getMasterCoprocessorHost() {
1603     return cpHost;
1604   }
1605 
1606   @Override
1607   public ServerName getServerName() {
1608     return this.serverName;
1609   }
1610 
1611   @Override
1612   public AssignmentManager getAssignmentManager() {
1613     return this.assignmentManager;
1614   }
1615 
1616   public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
1617     return rsFatals;
1618   }
1619 
1620   public void shutdown() {
1621     if (cpHost != null) {
1622       try {
1623         cpHost.preShutdown();
1624       } catch (IOException ioe) {
1625         LOG.error("Error call master coprocessor preShutdown()", ioe);
1626       }
1627     }
1628 
1629     if (this.serverManager != null) {
1630       this.serverManager.shutdownCluster();
1631     }
1632     if (this.clusterStatusTracker != null){
1633       try {
1634         this.clusterStatusTracker.setClusterDown();
1635       } catch (KeeperException e) {
1636         LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
1637       }
1638     }
1639   }
1640 
1641   public void stopMaster() {
1642     if (cpHost != null) {
1643       try {
1644         cpHost.preStopMaster();
1645       } catch (IOException ioe) {
1646         LOG.error("Error call master coprocessor preStopMaster()", ioe);
1647       }
1648     }
1649     stop("Stopped by " + Thread.currentThread().getName());
1650   }
1651 
1652   void checkServiceStarted() throws ServerNotRunningYetException {
1653     if (!serviceStarted) {
1654       throw new ServerNotRunningYetException("Server is not running yet");
1655     }
1656   }
1657 
1658   void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException {
1659     checkServiceStarted();
1660     if (!this.initialized) {
1661       throw new PleaseHoldException("Master is initializing");
1662     }
1663   }
1664 
1665   void checkNamespaceManagerReady() throws IOException {
1666     checkInitialized();
1667     if (tableNamespaceManager == null ||
1668         !tableNamespaceManager.isTableAvailableAndInitialized()) {
1669       throw new IOException("Table Namespace Manager not ready yet, try again later");
1670     }
1671   }
1672   /**
1673    * Report whether this master is currently the active master or not.
1674    * If not active master, we are parked on ZK waiting to become active.
1675    *
1676    * This method is used for testing.
1677    *
1678    * @return true if active master, false if not.
1679    */
1680   public boolean isActiveMaster() {
1681     return isActiveMaster;
1682   }
1683 
1684   /**
1685    * Report whether this master has completed with its initialization and is
1686    * ready.  If ready, the master is also the active master.  A standby master
1687    * is never ready.
1688    *
1689    * This method is used for testing.
1690    *
1691    * @return true if master is ready to go, false if not.
1692    */
1693   @Override
1694   public boolean isInitialized() {
1695     return initialized;
1696   }
1697 
1698   /**
1699    * ServerShutdownHandlerEnabled is set false before completing
1700    * assignMeta to prevent processing of ServerShutdownHandler.
1701    * @return true if assignMeta has completed;
1702    */
1703   @Override
1704   public boolean isServerShutdownHandlerEnabled() {
1705     return this.serverShutdownHandlerEnabled;
1706   }
1707 
1708   /**
1709    * Report whether this master has started initialization and is about to do meta region assignment
1710    * @return true if master is in initialization & about to assign hbase:meta regions
1711    */
1712   public boolean isInitializationStartsMetaRegionAssignment() {
1713     return this.initializationBeforeMetaAssignment;
1714   }
1715 
1716   public void assignRegion(HRegionInfo hri) {
1717     assignmentManager.assign(hri);
1718   }
1719 
1720   /**
1721    * Compute the average load across all region servers.
1722    * Currently, this uses a very naive computation - just uses the number of
1723    * regions being served, ignoring stats about number of requests.
1724    * @return the average load
1725    */
1726   public double getAverageLoad() {
1727     if (this.assignmentManager == null) {
1728       return 0;
1729     }
1730 
1731     RegionStates regionStates = this.assignmentManager.getRegionStates();
1732     if (regionStates == null) {
1733       return 0;
1734     }
1735     return regionStates.getAverageLoad();
1736   }
1737 
1738   @Override
1739   public boolean registerService(Service instance) {
1740     /*
1741      * No stacking of instances is allowed for a single service name
1742      */
1743     Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType();
1744     if (coprocessorServiceHandlers.containsKey(serviceDesc.getFullName())) {
1745       LOG.error("Coprocessor service "+serviceDesc.getFullName()+
1746           " already registered, rejecting request from "+instance
1747       );
1748       return false;
1749     }
1750 
1751     coprocessorServiceHandlers.put(serviceDesc.getFullName(), instance);
1752     if (LOG.isDebugEnabled()) {
1753       LOG.debug("Registered master coprocessor service: service="+serviceDesc.getFullName());
1754     }
1755     return true;
1756   }
1757 
1758   /**
1759    * Utility for constructing an instance of the passed HMaster class.
1760    * @param masterClass
1761    * @param conf
1762    * @return HMaster instance.
1763    */
1764   public static HMaster constructMaster(Class<? extends HMaster> masterClass,
1765       final Configuration conf, final CoordinatedStateManager cp)  {
1766     try {
1767       Constructor<? extends HMaster> c =
1768         masterClass.getConstructor(Configuration.class, CoordinatedStateManager.class);
1769       return c.newInstance(conf, cp);
1770     } catch (InvocationTargetException ite) {
1771       Throwable target = ite.getTargetException() != null?
1772         ite.getTargetException(): ite;
1773       if (target.getCause() != null) target = target.getCause();
1774       throw new RuntimeException("Failed construction of Master: " +
1775         masterClass.toString(), target);
1776     } catch (Exception e) {
1777       throw new RuntimeException("Failed construction of Master: " +
1778         masterClass.toString() + ((e.getCause() != null)?
1779           e.getCause().getMessage(): ""), e);
1780     }
1781   }
1782 
1783   /**
1784    * @see org.apache.hadoop.hbase.master.HMasterCommandLine
1785    */
1786   public static void main(String [] args) {
1787     VersionInfo.logVersion();
1788     new HMasterCommandLine(HMaster.class).doMain(args);
1789   }
1790 
1791   public HFileCleaner getHFileCleaner() {
1792     return this.hfileCleaner;
1793   }
1794 
1795   /**
1796    * Exposed for TESTING!
1797    * @return the underlying snapshot manager
1798    */
1799   public SnapshotManager getSnapshotManagerForTesting() {
1800     return this.snapshotManager;
1801   }
1802 
1803   @Override
1804   public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
1805     TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
1806     checkNamespaceManagerReady();
1807     if (cpHost != null) {
1808       if (cpHost.preCreateNamespace(descriptor)) {
1809         return;
1810       }
1811     }
1812     LOG.info(getClientIdAuditPrefix() + " creating " + descriptor);
1813     tableNamespaceManager.create(descriptor);
1814     if (cpHost != null) {
1815       cpHost.postCreateNamespace(descriptor);
1816     }
1817   }
1818 
1819   @Override
1820   public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
1821     TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
1822     checkNamespaceManagerReady();
1823     if (cpHost != null) {
1824       if (cpHost.preModifyNamespace(descriptor)) {
1825         return;
1826       }
1827     }
1828     LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
1829     tableNamespaceManager.update(descriptor);
1830     if (cpHost != null) {
1831       cpHost.postModifyNamespace(descriptor);
1832     }
1833   }
1834 
1835   @Override
1836   public void deleteNamespace(String name) throws IOException {
1837     checkNamespaceManagerReady();
1838     if (cpHost != null) {
1839       if (cpHost.preDeleteNamespace(name)) {
1840         return;
1841       }
1842     }
1843     LOG.info(getClientIdAuditPrefix() + " delete " + name);
1844     tableNamespaceManager.remove(name);
1845     if (cpHost != null) {
1846       cpHost.postDeleteNamespace(name);
1847     }
1848   }
1849 
1850   @Override
1851   public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
1852     checkNamespaceManagerReady();
1853     NamespaceDescriptor nsd = tableNamespaceManager.get(name);
1854     if (nsd == null) {
1855       throw new NamespaceNotFoundException(name);
1856     }
1857     return nsd;
1858   }
1859 
1860   @Override
1861   public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
1862     checkNamespaceManagerReady();
1863     return Lists.newArrayList(tableNamespaceManager.list());
1864   }
1865 
1866   @Override
1867   public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
1868     getNamespaceDescriptor(name); // check that namespace exists
1869     return Lists.newArrayList(tableDescriptors.getByNamespace(name).values());
1870   }
1871 
1872   @Override
1873   public List<TableName> listTableNamesByNamespace(String name) throws IOException {
1874     List<TableName> tableNames = Lists.newArrayList();
1875     getNamespaceDescriptor(name); // check that namespace exists
1876     for (HTableDescriptor descriptor: tableDescriptors.getByNamespace(name).values()) {
1877       tableNames.add(descriptor.getTableName());
1878     }
1879     return tableNames;
1880   }
1881 }