View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.master;
20  
21  import javax.servlet.ServletException;
22  import javax.servlet.http.HttpServlet;
23  import javax.servlet.http.HttpServletRequest;
24  import javax.servlet.http.HttpServletResponse;
25  import java.io.IOException;
26  import java.io.InterruptedIOException;
27  import java.lang.reflect.Constructor;
28  import java.lang.reflect.InvocationTargetException;
29  import java.net.InetAddress;
30  import java.net.InetSocketAddress;
31  import java.net.UnknownHostException;
32  import java.util.ArrayList;
33  import java.util.Arrays;
34  import java.util.Collection;
35  import java.util.Collections;
36  import java.util.Comparator;
37  import java.util.HashSet;
38  import java.util.Iterator;
39  import java.util.List;
40  import java.util.Map;
41  import java.util.Set;
42  import java.util.concurrent.TimeUnit;
43  import java.util.concurrent.atomic.AtomicReference;
44  import java.util.regex.Pattern;
45  
46  import com.google.common.annotations.VisibleForTesting;
47  import com.google.common.collect.Maps;
48  import com.google.protobuf.Descriptors;
49  import com.google.protobuf.Service;
50  import org.apache.commons.logging.Log;
51  import org.apache.commons.logging.LogFactory;
52  import org.apache.hadoop.conf.Configuration;
53  import org.apache.hadoop.fs.Path;
54  import org.apache.hadoop.hbase.ClusterStatus;
55  import org.apache.hadoop.hbase.CoordinatedStateException;
56  import org.apache.hadoop.hbase.CoordinatedStateManager;
57  import org.apache.hadoop.hbase.DoNotRetryIOException;
58  import org.apache.hadoop.hbase.HBaseIOException;
59  import org.apache.hadoop.hbase.HBaseInterfaceAudience;
60  import org.apache.hadoop.hbase.HColumnDescriptor;
61  import org.apache.hadoop.hbase.HConstants;
62  import org.apache.hadoop.hbase.HRegionInfo;
63  import org.apache.hadoop.hbase.HTableDescriptor;
64  import org.apache.hadoop.hbase.MasterNotRunningException;
65  import org.apache.hadoop.hbase.MetaTableAccessor;
66  import org.apache.hadoop.hbase.NamespaceDescriptor;
67  import org.apache.hadoop.hbase.NamespaceNotFoundException;
68  import org.apache.hadoop.hbase.PleaseHoldException;
69  import org.apache.hadoop.hbase.Server;
70  import org.apache.hadoop.hbase.ServerLoad;
71  import org.apache.hadoop.hbase.ServerName;
72  import org.apache.hadoop.hbase.TableDescriptors;
73  import org.apache.hadoop.hbase.TableName;
74  import org.apache.hadoop.hbase.TableNotDisabledException;
75  import org.apache.hadoop.hbase.TableNotFoundException;
76  import org.apache.hadoop.hbase.UnknownRegionException;
77  import org.apache.hadoop.hbase.classification.InterfaceAudience;
78  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
79  import org.apache.hadoop.hbase.client.Result;
80  import org.apache.hadoop.hbase.client.TableState;
81  import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
82  import org.apache.hadoop.hbase.exceptions.DeserializationException;
83  import org.apache.hadoop.hbase.executor.ExecutorType;
84  import org.apache.hadoop.hbase.ipc.RequestContext;
85  import org.apache.hadoop.hbase.ipc.RpcServer;
86  import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
87  import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
88  import org.apache.hadoop.hbase.master.balancer.BalancerChore;
89  import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
90  import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
91  import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
92  import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
93  import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
94  import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
95  import org.apache.hadoop.hbase.master.handler.DeleteTableHandler;
96  import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
97  import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
98  import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
99  import org.apache.hadoop.hbase.master.handler.ModifyTableHandler;
100 import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
101 import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
102 import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
103 import org.apache.hadoop.hbase.master.handler.TruncateTableHandler;
104 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
105 import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
106 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
107 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
108 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
109 import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
110 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
111 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
112 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
113 import org.apache.hadoop.hbase.quotas.RegionStateListener;
114 import org.apache.hadoop.hbase.regionserver.HRegionServer;
115 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
116 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
117 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
118 import org.apache.hadoop.hbase.replication.regionserver.Replication;
119 import org.apache.hadoop.hbase.security.UserProvider;
120 import org.apache.hadoop.hbase.util.Addressing;
121 import org.apache.hadoop.hbase.util.Bytes;
122 import org.apache.hadoop.hbase.util.CompressionTest;
123 import org.apache.hadoop.hbase.util.EncryptionTest;
124 import org.apache.hadoop.hbase.util.FSUtils;
125 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
126 import org.apache.hadoop.hbase.util.HasThread;
127 import org.apache.hadoop.hbase.util.Pair;
128 import org.apache.hadoop.hbase.util.Threads;
129 import org.apache.hadoop.hbase.util.VersionInfo;
130 import org.apache.hadoop.hbase.util.ZKDataMigrator;
131 import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
132 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
133 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
134 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
135 import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
136 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
137 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
138 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
139 import org.apache.zookeeper.KeeperException;
140 import org.mortbay.jetty.Connector;
141 import org.mortbay.jetty.nio.SelectChannelConnector;
142 import org.mortbay.jetty.servlet.Context;
143 
144 /**
145  * HMaster is the "master server" for HBase. An HBase cluster has one active
146  * master.  If many masters are started, all compete.  Whichever wins goes on to
147  * run the cluster.  All others park themselves in their constructor until
148  * master or cluster shutdown or until the active master loses its lease in
 * zookeeper.  Thereafter, all running masters jostle to take over the master role.
150  *
 * <p>The Master can be asked to shut down the cluster. See {@link #shutdown()}.  In
152  * this case it will tell all regionservers to go down and then wait on them
153  * all reporting in that they are down.  This master will then shut itself down.
154  *
155  * <p>You can also shutdown just this master.  Call {@link #stopMaster()}.
156  *
157  * @see org.apache.zookeeper.Watcher
158  */
159 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
160 @SuppressWarnings("deprecation")
161 public class HMaster extends HRegionServer implements MasterServices, Server {
162   private static final Log LOG = LogFactory.getLog(HMaster.class.getName());
163 
164   /**
165    * Protection against zombie master. Started once Master accepts active responsibility and
166    * starts taking over responsibilities. Allows a finite time window before giving up ownership.
167    */
168   private static class InitializationMonitor extends HasThread {
169     /** The amount of time in milliseconds to sleep before checking initialization status. */
170     public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout";
171     public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES);
172 
173     /**
174      * When timeout expired and initialization has not complete, call {@link System#exit(int)} when
175      * true, do nothing otherwise.
176      */
177     public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout";
178     public static final boolean HALT_DEFAULT = false;
179 
180     private final HMaster master;
181     private final long timeout;
182     private final boolean haltOnTimeout;
183 
184     /** Creates a Thread that monitors the {@link #isInitialized()} state. */
185     InitializationMonitor(HMaster master) {
186       super("MasterInitializationMonitor");
187       this.master = master;
188       this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT);
189       this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT);
190       this.setDaemon(true);
191     }
192 
193     @Override
194     public void run() {
195       try {
196         while (!master.isStopped() && master.isActiveMaster()) {
197           Thread.sleep(timeout);
198           if (master.isInitialized()) {
199             LOG.debug("Initialization completed within allotted tolerance. Monitor exiting.");
200           } else {
201             LOG.error("Master failed to complete initialization after " + timeout + "ms. Please"
202                 + " consider submitting a bug report including a thread dump of this process.");
203             if (haltOnTimeout) {
204               LOG.error("Zombie Master exiting. Thread dump to stdout");
205               Threads.printThreadInfo(System.out, "Zombie HMaster");
206               System.exit(-1);
207             }
208           }
209         }
210       } catch (InterruptedException ie) {
211         LOG.trace("InitMonitor thread interrupted. Existing.");
212       }
213     }
214   }
215 
  // MASTER is the name of the webapp and the attribute name used for stuffing this
  // instance into the web context.
  public static final String MASTER = "master";

  // Manager and zk listener for master election
  private final ActiveMasterManager activeMasterManager;
  // Region server tracker
  RegionServerTracker regionServerTracker;
  // Draining region server tracker
  private DrainingServerTracker drainingServerTracker;
  // Tracker for load balancer state
  LoadBalancerTracker loadBalancerTracker;

  /** Namespace stuff */
  private TableNamespaceManager tableNamespaceManager;

  // Metrics for the HMaster
  final MetricsMaster metricsMaster;
  // file system manager for the master FS operations
  private MasterFileSystem fileSystemManager;

  // server manager to deal with region server info
  volatile ServerManager serverManager;

  // manager of assignment nodes in zookeeper
  AssignmentManager assignmentManager;

  // buffer for "fatal error" notices from region servers
  // in the cluster. This is only used for assisting
  // operations/debugging.
  MemoryBoundedLogMessageBuffer rsFatals;

  // flag set after we become the active master (used for testing)
  private volatile boolean isActiveMaster = false;

  // flag set after we complete initialization once active;
  // it is not private since it's used in unit tests
  volatile boolean initialized = false;

  // flag set after master services are started;
  // initialization may not have completed yet.
  volatile boolean serviceStarted = false;

  // flag set after we complete assignMeta.
  private volatile boolean serverShutdownHandlerEnabled = false;

  // Balancer plus the chores that drive it and publish cluster status.
  LoadBalancer balancer;
  private BalancerChore balancerChore;
  private ClusterStatusChore clusterStatusChore;
  private ClusterStatusPublisher clusterStatusPublisherChore = null;

  // Background janitor/cleaner chores owned by the master.
  CatalogJanitor catalogJanitorChore;
  private LogCleaner logCleaner;
  private HFileCleaner hfileCleaner;

  // Master-side coprocessor host.
  MasterCoprocessorHost cpHost;

  // Whether to load all table descriptors up front (see constructor).
  private final boolean preLoadTableDescriptors;

  // Time stamp for when this hmaster became active
  private long masterActiveTime;

  // should we check the compression codec type at master side, default true, HBASE-6370
  private final boolean masterCheckCompression;

  // should we check encryption settings at master side, default true
  private final boolean masterCheckEncryption;

  // Registered coprocessor endpoint services, keyed by service name.
  Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();

  // monitor for snapshots of hbase tables
  SnapshotManager snapshotManager;
  // monitor for distributed procedures
  MasterProcedureManagerHost mpmHost;

  // it is assigned after 'initialized' guard set to true, so should be volatile
  private volatile MasterQuotaManager quotaManager;

  // handle table states
  private TableStateManager tableStateManager;

  /** flag used in test cases in order to simulate RS failures during master initialization */
  private volatile boolean initializationBeforeMetaAssignment = false;

  /** jetty server for master to redirect requests to regionserver infoServer */
  private org.mortbay.jetty.Server masterJettyServer;
302 
303   public static class RedirectServlet extends HttpServlet {
304     private static final long serialVersionUID = 2894774810058302472L;
305     private static int regionServerInfoPort;
306 
307     @Override
308     public void doGet(HttpServletRequest request,
309         HttpServletResponse response) throws ServletException, IOException {
310       String redirectUrl = request.getScheme() + "://"
311         + request.getServerName() + ":" + regionServerInfoPort
312         + request.getRequestURI();
313       response.sendRedirect(redirectUrl);
314     }
315   }
316 
317   /**
318    * Initializes the HMaster. The steps are as follows:
319    * <p>
320    * <ol>
321    * <li>Initialize the local HRegionServer
322    * <li>Start the ActiveMasterManager.
323    * </ol>
324    * <p>
325    * Remaining steps of initialization occur in
326    * #finishActiveMasterInitialization(MonitoredTask) after
327    * the master becomes the active one.
328    *
329    * @throws KeeperException
330    * @throws IOException
331    */
  public HMaster(final Configuration conf, CoordinatedStateManager csm)
      throws IOException, KeeperException {
    // Initialize the shared HRegionServer machinery first; master builds on top of it.
    super(conf, csm);
    this.rsFatals = new MemoryBoundedLogMessageBuffer(
      conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));

    LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
        ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));

    // Disable usage of meta replicas in the master
    this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);

    // Let replication decorate the master's configuration before anything reads it.
    Replication.decorateMasterConfiguration(this.conf);

    // Hack! Maps DFSClient => Master for logs.  HDFS made this
    // config param for task trackers, but we can piggyback off of it.
    if (this.conf.get("mapreduce.task.attempt.id") == null) {
      this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
    }

    // should we check the compression codec type at master side, default true, HBASE-6370
    this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);

    // should we check encryption settings at master side, default true
    this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);

    this.metricsMaster = new MetricsMaster( new MetricsMasterWrapperImpl(this));

    // preload table descriptors at startup (warms the HTD cache)
    this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);

    // Do we publish the status? If so, schedule the publisher chore; warn and skip
    // publishing when enabled but no publisher class is configured.
    boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
        HConstants.STATUS_PUBLISHED_DEFAULT);
    Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
        conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
            ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
            ClusterStatusPublisher.Publisher.class);

    if (shouldPublish) {
      if (publisherClass == null) {
        LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
            ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
            " is not set - not publishing status");
      } else {
        clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
        getChoreService().scheduleChore(clusterStatusPublisherChore);
      }
    }
    // Enter master election; remaining initialization happens once we become active.
    activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
    int infoPort = putUpJettyServer();
    startActiveMasterManager(infoPort);
  }
386 
387   // return the actual infoPort, -1 means disable info server.
  /**
   * Stands up a small Jetty server whose only job is to redirect browser requests
   * to this master's co-hosted regionserver info server.
   *
   * @return the actual bound info port, or -1 when redirecting is disabled or
   *         the info server is not running.
   * @throws IOException if the configured bind address is not local or Jetty fails to start
   */
  private int putUpJettyServer() throws IOException {
    if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
      return -1;
    }
    int infoPort = conf.getInt("hbase.master.info.port.orig",
      HConstants.DEFAULT_MASTER_INFOPORT);
    // -1 is for disabling info server, so no redirecting
    if (infoPort < 0 || infoServer == null) {
      return -1;
    }
    String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
      String msg =
          "Failed to start redirecting jetty server. Address " + addr
              + " does not belong to this host. Correct configuration parameter: "
              + "hbase.master.info.bindAddress";
      LOG.error(msg);
      throw new IOException(msg);
    }

    // Record the real info-server port before the servlet can receive traffic.
    RedirectServlet.regionServerInfoPort = infoServer.getPort();
    masterJettyServer = new org.mortbay.jetty.Server();
    Connector connector = new SelectChannelConnector();
    connector.setHost(addr);
    connector.setPort(infoPort);
    masterJettyServer.addConnector(connector);
    masterJettyServer.setStopAtShutdown(true);
    // Route every path on this server through the redirect servlet.
    Context context = new Context(masterJettyServer, "/", Context.NO_SESSIONS);
    context.addServlet(RedirectServlet.class, "/*");
    try {
      masterJettyServer.start();
    } catch (Exception e) {
      throw new IOException("Failed to start redirecting jetty server", e);
    }
    // Return the port Jetty actually bound (may differ if infoPort was 0).
    return connector.getLocalPort();
  }
424 
  @Override
  protected TableDescriptors getFsTableDescriptors() throws IOException {
    // Pure delegation; presumably overridden only to pin/expose the superclass
    // behavior here — NOTE(review): the superclass method is not visible from this file.
    return super.getFsTableDescriptors();
  }
429 
  /**
   * For compatibility, if failed with regionserver credentials, try the master one
   */
  @Override
  protected void login(UserProvider user, String host) throws IOException {
    try {
      super.login(user, host);
    } catch (IOException ie) {
      // Intentionally swallow the regionserver-credential failure and retry
      // with the master's keytab/principal; a failure here propagates normally.
      user.login("hbase.master.keytab.file",
        "hbase.master.kerberos.principal", host);
    }
  }
442 
443   /**
444    * If configured to put regions on active master,
445    * wait till a backup master becomes active.
446    * Otherwise, loop till the server is stopped or aborted.
447    */
448   @Override
449   protected void waitForMasterActive(){
450     boolean tablesOnMaster = BaseLoadBalancer.tablesOnMaster(conf);
451     while (!(tablesOnMaster && isActiveMaster)
452         && !isStopped() && !isAborted()) {
453       sleeper.sleep();
454     }
455   }
456 
  /** @return this master's RPC services, narrowed to the master-specific subtype. */
  @VisibleForTesting
  public MasterRpcServices getMasterRpcServices() {
    return (MasterRpcServices)rpcServices;
  }
461 
462   public boolean balanceSwitch(final boolean b) throws IOException {
463     return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
464   }
465 
  /** @return the process name used for logging/webapp naming ("master"). */
  @Override
  protected String getProcessName() {
    return MASTER;
  }
470 
  // The master (unlike a plain regionserver) is allowed to create the base znode.
  @Override
  protected boolean canCreateBaseZNode() {
    return true;
  }
475 
  // The master is allowed to update table descriptors; regionservers are not.
  @Override
  protected boolean canUpdateTableDescriptor() {
    return true;
  }
480 
  // Substitute the master-specific RPC services for the regionserver default.
  @Override
  protected RSRpcServices createRpcServices() throws IOException {
    return new MasterRpcServices(this);
  }
485 
  @Override
  protected void configureInfoServer() {
    // Master status page plus a handle to this instance for the webapp.
    infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class);
    infoServer.setAttribute(MASTER, this);
    // Only expose the regionserver info pages when this master also carries regions.
    if (BaseLoadBalancer.tablesOnMaster(conf)) {
      super.configureInfoServer();
    }
  }
494 
  // Use the master's debug-dump servlet instead of the regionserver one.
  @Override
  protected Class<? extends HttpServlet> getDumpServlet() {
    return MasterDumpServlet.class;
  }
499 
500   /**
501    * Emit the HMaster metrics, such as region in transition metrics.
502    * Surrounding in a try block just to be sure metrics doesn't abort HMaster.
503    */
504   @Override
505   protected void doMetrics() {
506     try {
507       if (assignmentManager != null) {
508         assignmentManager.updateRegionsInTransitionMetrics();
509       }
510     } catch (Throwable e) {
511       LOG.error("Couldn't update metrics: " + e.getMessage());
512     }
513   }
514 
  /** @return the master's metrics sink. */
  MetricsMaster getMasterMetrics() {
    return metricsMaster;
  }
518 
519   /**
520    * Initialize all ZK based system trackers.
521    * @throws IOException
522    * @throws InterruptedException
523    * @throws KeeperException
524    * @throws CoordinatedStateException
525    */
  void initializeZKBasedSystemTrackers() throws IOException,
      InterruptedException, KeeperException, CoordinatedStateException {
    // Balancer and its ZK-backed on/off tracker must exist before the
    // assignment manager, which takes the balancer as a collaborator.
    this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
    this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
    this.loadBalancerTracker.start();
    this.assignmentManager = new AssignmentManager(this, serverManager,
      this.balancer, this.service, this.metricsMaster,
      this.tableLockManager, tableStateManager);

    // Track regionserver liveness znodes.
    this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
        this.serverManager);
    this.regionServerTracker.start();

    // Track servers marked as draining (to be excluded from assignment).
    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
      this.serverManager);
    this.drainingServerTracker.start();

    // Set the cluster as up.  If new RSs, they'll be waiting on this before
    // going ahead with their startup.
    boolean wasUp = this.clusterStatusTracker.isClusterUp();
    if (!wasUp) this.clusterStatusTracker.setClusterUp();

    LOG.info("Server active/primary master=" + this.serverName +
        ", sessionid=0x" +
        Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
        ", setting cluster-up flag (Was=" + wasUp + ")");

    // create/initialize the snapshot manager and other procedure managers
    this.snapshotManager = new SnapshotManager();
    this.mpmHost = new MasterProcedureManagerHost();
    this.mpmHost.register(this.snapshotManager);
    this.mpmHost.register(new MasterFlushTableProcedureManager());
    this.mpmHost.loadProcedures(conf);
    this.mpmHost.initialize(this, this.metricsMaster);

  }
562 
563   /**
564    * Finish initialization of HMaster after becoming the primary master.
565    *
566    * <ol>
567    * <li>Initialize master components - file system manager, server manager,
568    *     assignment manager, region server tracker, etc</li>
   * <li>Start necessary service threads - balancer, catalog janitor,
570    *     executor services, etc</li>
571    * <li>Set cluster as UP in ZooKeeper</li>
572    * <li>Wait for RegionServers to check-in</li>
573    * <li>Split logs and perform data recovery, if necessary</li>
   * <li>Ensure assignment of meta/namespace regions</li>
575    * <li>Handle either fresh cluster start or master failover</li>
576    * </ol>
577    *
578    * @throws IOException
579    * @throws InterruptedException
580    * @throws KeeperException
581    * @throws CoordinatedStateException
582    */
583   private void finishActiveMasterInitialization(MonitoredTask status)
584       throws IOException, InterruptedException, KeeperException, CoordinatedStateException {
585 
586     isActiveMaster = true;
587     Thread zombieDetector = new Thread(new InitializationMonitor(this));
588     zombieDetector.start();
589 
590     /*
591      * We are active master now... go initialize components we need to run.
592      * Note, there may be dross in zk from previous runs; it'll get addressed
593      * below after we determine if cluster startup or failover.
594      */
595 
596     status.setStatus("Initializing Master file system");
597 
598     this.masterActiveTime = System.currentTimeMillis();
599     // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
600     this.fileSystemManager = new MasterFileSystem(this, this);
601 
602     // enable table descriptors cache
603     this.tableDescriptors.setCacheOn();
604     // set the META's descriptor to the correct replication
605     this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
606         conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
607     // warm-up HTDs cache on master initialization
608     if (preLoadTableDescriptors) {
609       status.setStatus("Pre-loading table descriptors");
610       this.tableDescriptors.getAll();
611     }
612 
613     // publish cluster ID
614     status.setStatus("Publishing Cluster ID in ZooKeeper");
615     ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
616     this.serverManager = createServerManager(this, this);
617 
618     setupClusterConnection();
619 
620     // Invalidate all write locks held previously
621     this.tableLockManager.reapWriteLocks();
622     this.tableStateManager = new TableStateManager(this);
623 
624     status.setStatus("Initializing ZK system trackers");
625     initializeZKBasedSystemTrackers();
626 
627     // initialize master side coprocessors before we start handling requests
628     status.setStatus("Initializing master coprocessors");
629     this.cpHost = new MasterCoprocessorHost(this, this.conf);
630 
631     // start up all service threads.
632     status.setStatus("Initializing master service threads");
633     startServiceThreads();
634 
635     // Wake up this server to check in
636     sleeper.skipSleepCycle();
637 
638     // Wait for region servers to report in
639     this.serverManager.waitForRegionServers(status);
640     // Check zk for region servers that are up but didn't register
641     for (ServerName sn: this.regionServerTracker.getOnlineServers()) {
642       // The isServerOnline check is opportunistic, correctness is handled inside
643       if (!this.serverManager.isServerOnline(sn)
644           && serverManager.checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
645         LOG.info("Registered server found up in zk but who has not yet reported in: " + sn);
646       }
647     }
648 
649     // get a list for previously failed RS which need log splitting work
650     // we recover hbase:meta region servers inside master initialization and
651     // handle other failed servers in SSH in order to start up master node ASAP
652     Set<ServerName> previouslyFailedServers = this.fileSystemManager
653         .getFailedServersFromLogFolders();
654 
655     // remove stale recovering regions from previous run
656     this.fileSystemManager.removeStaleRecoveringRegionsFromZK(previouslyFailedServers);
657 
658     // log splitting for hbase:meta server
659     ServerName oldMetaServerLocation = metaTableLocator.getMetaRegionLocation(this.getZooKeeper());
660     if (oldMetaServerLocation != null && previouslyFailedServers.contains(oldMetaServerLocation)) {
661       splitMetaLogBeforeAssignment(oldMetaServerLocation);
662       // Note: we can't remove oldMetaServerLocation from previousFailedServers list because it
663       // may also host user regions
664     }
665     Set<ServerName> previouslyFailedMetaRSs = getPreviouselyFailedMetaServersFromZK();
666     // need to use union of previouslyFailedMetaRSs recorded in ZK and previouslyFailedServers
667     // instead of previouslyFailedMetaRSs alone to address the following two situations:
668     // 1) the chained failure situation(recovery failed multiple times in a row).
669     // 2) master get killed right before it could delete the recovering hbase:meta from ZK while the
670     // same server still has non-meta wals to be replayed so that
671     // removeStaleRecoveringRegionsFromZK can't delete the stale hbase:meta region
672     // Passing more servers into splitMetaLog is all right. If a server doesn't have hbase:meta wal,
673     // there is no op for the server.
674     previouslyFailedMetaRSs.addAll(previouslyFailedServers);
675 
676     this.initializationBeforeMetaAssignment = true;
677 
678     // Wait for regionserver to finish initialization.
679     if (BaseLoadBalancer.tablesOnMaster(conf)) {
680       waitForServerOnline();
681     }
682 
683     //initialize load balancer
684     this.balancer.setClusterStatus(getClusterStatus());
685     this.balancer.setMasterServices(this);
686     this.balancer.initialize();
687 
688     // Check if master is shutting down because of some issue
689     // in initializing the regionserver or the balancer.
690     if(isStopped()) return;
691 
692     // Make sure meta assigned before proceeding.
693     status.setStatus("Assigning Meta Region");
694     assignMeta(status, previouslyFailedMetaRSs, HRegionInfo.DEFAULT_REPLICA_ID);
695     // check if master is shutting down because above assignMeta could return even hbase:meta isn't
696     // assigned when master is shutting down
697     if(isStopped()) return;
698 
699     // migrating existent table state from zk, so splitters
700     // and recovery process treat states properly.
701     for (Map.Entry<TableName, TableState.State> entry : ZKDataMigrator
702         .queryForTableStates(getZooKeeper()).entrySet()) {
703       LOG.info("Converting state from zk to new states:" + entry);
704       tableStateManager.setTableState(entry.getKey(), entry.getValue());
705     }
706     ZKUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().tableZNode);
707 
708     status.setStatus("Submitting log splitting work for previously failed region servers");
709     // Master has recovered hbase:meta region server and we put
710     // other failed region servers in a queue to be handled later by SSH
711     for (ServerName tmpServer : previouslyFailedServers) {
712       this.serverManager.processDeadServer(tmpServer, true);
713     }
714 
715     // Fix up assignment manager status
716     status.setStatus("Starting assignment manager");
717     this.assignmentManager.joinCluster();
718 
719     //set cluster status again after user regions are assigned
720     this.balancer.setClusterStatus(getClusterStatus());
721 
722     // Start balancer and meta catalog janitor after meta and regions have
723     // been assigned.
724     status.setStatus("Starting balancer and catalog janitor");
725     this.clusterStatusChore = new ClusterStatusChore(this, balancer);
726     getChoreService().scheduleChore(clusterStatusChore);
727     this.balancerChore = new BalancerChore(this);
728     getChoreService().scheduleChore(balancerChore);
729     this.catalogJanitorChore = new CatalogJanitor(this, this);
730     getChoreService().scheduleChore(catalogJanitorChore);
731 
732     status.setStatus("Starting namespace manager");
733     initNamespace();
734 
735     if (this.cpHost != null) {
736       try {
737         this.cpHost.preMasterInitialization();
738       } catch (IOException e) {
739         LOG.error("Coprocessor preMasterInitialization() hook failed", e);
740       }
741     }
742 
743     status.markComplete("Initialization successful");
744     LOG.info("Master has completed initialization");
745     configurationManager.registerObserver(this.balancer);
746     initialized = true;
747     // assign the meta replicas
748     Set<ServerName> EMPTY_SET = new HashSet<ServerName>();
749     int numReplicas = conf.getInt(HConstants.META_REPLICAS_NUM,
750            HConstants.DEFAULT_META_REPLICA_NUM);
751     for (int i = 1; i < numReplicas; i++) {
752       assignMeta(status, EMPTY_SET, i);
753     }
754     unassignExcessMetaReplica(zooKeeper, numReplicas);
755 
756     status.setStatus("Starting quota manager");
757     initQuotaManager();
758 
759     // clear the dead servers with same host name and port of online server because we are not
760     // removing dead server with same hostname and port of rs which is trying to check in before
761     // master initialization. See HBASE-5916.
762     this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();
763 
764     if (this.cpHost != null) {
765       // don't let cp initialization errors kill the master
766       try {
767         this.cpHost.postStartMaster();
768       } catch (IOException ioe) {
769         LOG.error("Coprocessor postStartMaster() hook failed", ioe);
770       }
771     }
772 
773     zombieDetector.interrupt();
774   }
775 
776   /**
777    * Create a {@link ServerManager} instance.
778    * @param master
779    * @param services
780    * @return An instance of {@link ServerManager}
781    * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
782    * @throws IOException
783    */
784   ServerManager createServerManager(final Server master,
785       final MasterServices services)
786   throws IOException {
787     // We put this out here in a method so can do a Mockito.spy and stub it out
788     // w/ a mocked up ServerManager.
789     return new ServerManager(master, services);
790   }
791 
792   private void unassignExcessMetaReplica(ZooKeeperWatcher zkw, int numMetaReplicasConfigured) {
793     // unassign the unneeded replicas (for e.g., if the previous master was configured
794     // with a replication of 3 and now it is 2, we need to unassign the 1 unneeded replica)
795     try {
796       List<String> metaReplicaZnodes = zooKeeper.getMetaReplicaNodes();
797       for (String metaReplicaZnode : metaReplicaZnodes) {
798         int replicaId = zooKeeper.getMetaReplicaIdFromZnode(metaReplicaZnode);
799         if (replicaId >= numMetaReplicasConfigured) {
800           RegionState r = MetaTableLocator.getMetaRegionState(zkw, replicaId);
801           LOG.info("Closing excess replica of meta region " + r.getRegion());
802           // send a close and wait for a max of 30 seconds
803           ServerManager.closeRegionSilentlyAndWait(getConnection(), r.getServerName(),
804               r.getRegion(), 30000);
805           ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(replicaId));
806         }
807       }
808     } catch (Exception ex) {
809       // ignore the exception since we don't want the master to be wedged due to potential
810       // issues in the cleanup of the extra regions. We can do that cleanup via hbck or manually
811       LOG.warn("Ignoring exception " + ex);
812     }
813   }
814 
815   /**
816    * Check <code>hbase:meta</code> is assigned. If not, assign it.
817    * @param status MonitoredTask
818    * @param previouslyFailedMetaRSs
819    * @param replicaId
820    * @throws InterruptedException
821    * @throws IOException
822    * @throws KeeperException
823    */
824   void assignMeta(MonitoredTask status, Set<ServerName> previouslyFailedMetaRSs, int replicaId)
825       throws InterruptedException, IOException, KeeperException {
826     // Work on meta region
827     int assigned = 0;
828     long timeout = this.conf.getLong("hbase.catalog.verification.timeout", 1000);
829     if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
830       status.setStatus("Assigning hbase:meta region");
831     } else {
832       status.setStatus("Assigning hbase:meta region, replicaId " + replicaId);
833     }
834 
835     // Get current meta state from zk.
836     RegionState metaState = MetaTableLocator.getMetaRegionState(getZooKeeper(), replicaId);
837     HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(HRegionInfo.FIRST_META_REGIONINFO,
838         replicaId);
839     RegionStates regionStates = assignmentManager.getRegionStates();
840     regionStates.createRegionState(hri, metaState.getState(),
841         metaState.getServerName(), null);
842 
843     if (!metaState.isOpened() || !metaTableLocator.verifyMetaRegionLocation(
844         this.getConnection(), this.getZooKeeper(), timeout, replicaId)) {
845       ServerName currentMetaServer = metaState.getServerName();
846       if (serverManager.isServerOnline(currentMetaServer)) {
847         if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
848           LOG.info("Meta was in transition on " + currentMetaServer);
849         } else {
850           LOG.info("Meta with replicaId " + replicaId + " was in transition on " +
851                     currentMetaServer);
852         }
853         assignmentManager.processRegionsInTransition(Arrays.asList(metaState));
854       } else {
855         if (currentMetaServer != null) {
856           if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
857             splitMetaLogBeforeAssignment(currentMetaServer);
858             regionStates.logSplit(HRegionInfo.FIRST_META_REGIONINFO);
859             previouslyFailedMetaRSs.add(currentMetaServer);
860           }
861         }
862         LOG.info("Re-assigning hbase:meta with replicaId, " + replicaId +
863             " it was on " + currentMetaServer);
864         assignmentManager.assignMeta(hri);
865       }
866       assigned++;
867     }
868 
869     if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID)
870       getTableStateManager().setTableState(TableName.META_TABLE_NAME, TableState.State.ENABLED);
871     // TODO: should we prevent from using state manager before meta was initialized?
872     // tableStateManager.start();
873 
874     if ((RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode())
875         && (!previouslyFailedMetaRSs.isEmpty())) {
876       // replay WAL edits mode need new hbase:meta RS is assigned firstly
877       status.setStatus("replaying log for Meta Region");
878       this.fileSystemManager.splitMetaLog(previouslyFailedMetaRSs);
879     }
880 
881     this.assignmentManager.setEnabledTable(TableName.META_TABLE_NAME);
882     tableStateManager.start();
883 
884     // Make sure a hbase:meta location is set. We need to enable SSH here since
885     // if the meta region server is died at this time, we need it to be re-assigned
886     // by SSH so that system tables can be assigned.
887     // No need to wait for meta is assigned = 0 when meta is just verified.
888     if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) enableServerShutdownHandler(assigned != 0);
889     LOG.info("hbase:meta with replicaId " + replicaId + " assigned=" + assigned + ", location="
890       + metaTableLocator.getMetaRegionLocation(this.getZooKeeper(), replicaId));
891     status.setStatus("META assigned.");
892   }
893 
894   void initNamespace() throws IOException {
895     //create namespace manager
896     tableNamespaceManager = new TableNamespaceManager(this);
897     tableNamespaceManager.start();
898   }
899 
900   void initQuotaManager() throws IOException {
901     MasterQuotaManager quotaManager = new MasterQuotaManager(this);
902     this.assignmentManager.setRegionStateListener((RegionStateListener)quotaManager);
903     quotaManager.start();
904     this.quotaManager = quotaManager;
905   }
906 
907   boolean isCatalogJanitorEnabled() {
908     return catalogJanitorChore != null ?
909       catalogJanitorChore.getEnabled() : false;
910   }
911 
912   private void splitMetaLogBeforeAssignment(ServerName currentMetaServer) throws IOException {
913     if (RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode()) {
914       // In log replay mode, we mark hbase:meta region as recovering in ZK
915       Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
916       regions.add(HRegionInfo.FIRST_META_REGIONINFO);
917       this.fileSystemManager.prepareLogReplay(currentMetaServer, regions);
918     } else {
919       // In recovered.edits mode: create recovered edits file for hbase:meta server
920       this.fileSystemManager.splitMetaLog(currentMetaServer);
921     }
922   }
923 
924   private void enableServerShutdownHandler(
925       final boolean waitForMeta) throws IOException, InterruptedException {
926     // If ServerShutdownHandler is disabled, we enable it and expire those dead
927     // but not expired servers. This is required so that if meta is assigning to
928     // a server which dies after assignMeta starts assignment,
929     // SSH can re-assign it. Otherwise, we will be
930     // stuck here waiting forever if waitForMeta is specified.
931     if (!serverShutdownHandlerEnabled) {
932       serverShutdownHandlerEnabled = true;
933       this.serverManager.processQueuedDeadServers();
934     }
935 
936     if (waitForMeta) {
937       metaTableLocator.waitMetaRegionLocation(this.getZooKeeper());
938     }
939   }
940 
941   /**
942    * This function returns a set of region server names under hbase:meta recovering region ZK node
943    * @return Set of meta server names which were recorded in ZK
944    * @throws KeeperException
945    */
946   private Set<ServerName> getPreviouselyFailedMetaServersFromZK() throws KeeperException {
947     Set<ServerName> result = new HashSet<ServerName>();
948     String metaRecoveringZNode = ZKUtil.joinZNode(zooKeeper.recoveringRegionsZNode,
949       HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
950     List<String> regionFailedServers = ZKUtil.listChildrenNoWatch(zooKeeper, metaRecoveringZNode);
951     if (regionFailedServers == null) return result;
952 
953     for(String failedServer : regionFailedServers) {
954       ServerName server = ServerName.parseServerName(failedServer);
955       result.add(server);
956     }
957     return result;
958   }
959 
  /** @return the master's table descriptor accessor */
  @Override
  public TableDescriptors getTableDescriptors() {
    return this.tableDescriptors;
  }
964 
  /** @return the manager tracking online/dead region servers */
  @Override
  public ServerManager getServerManager() {
    return this.serverManager;
  }
969 
  /** @return the master's filesystem abstraction (WAL dirs, log splitting, etc.) */
  @Override
  public MasterFileSystem getMasterFileSystem() {
    return this.fileSystemManager;
  }
974 
  /** @return the manager for table enabled/disabled state */
  @Override
  public TableStateManager getTableStateManager() {
    return tableStateManager;
  }
979 
980   /*
981    * Start up all services. If any of these threads gets an unhandled exception
982    * then they just die with a logged message.  This should be fine because
983    * in general, we do not expect the master to get such unhandled exceptions
984    *  as OOMEs; it should be lightly loaded. See what HRegionServer does if
985    *  need to install an unexpected exception handler.
986    */
987   private void startServiceThreads() throws IOException{
988    // Start the executor service pools
989    this.service.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
990       conf.getInt("hbase.master.executor.openregion.threads", 5));
991    this.service.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
992       conf.getInt("hbase.master.executor.closeregion.threads", 5));
993    this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
994       conf.getInt("hbase.master.executor.serverops.threads", 5));
995    this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
996       conf.getInt("hbase.master.executor.serverops.threads", 5));
997    this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
998       conf.getInt("hbase.master.executor.logreplayops.threads", 10));
999 
1000    // We depend on there being only one instance of this executor running
1001    // at a time.  To do concurrency, would need fencing of enable/disable of
1002    // tables.
1003    // Any time changing this maxThreads to > 1, pls see the comment at
1004    // AccessController#postCreateTableHandler
1005    this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
1006 
1007    // Start log cleaner thread
1008    int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
1009    this.logCleaner =
1010       new LogCleaner(cleanerInterval,
1011          this, conf, getMasterFileSystem().getFileSystem(),
1012          getMasterFileSystem().getOldLogDir());
1013     getChoreService().scheduleChore(logCleaner);
1014 
1015    //start the hfile archive cleaner thread
1016     Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
1017     this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
1018         .getFileSystem(), archiveDir);
1019     getChoreService().scheduleChore(hfileCleaner);
1020     serviceStarted = true;
1021     if (LOG.isTraceEnabled()) {
1022       LOG.trace("Started service threads");
1023     }
1024   }
1025 
  /**
   * Stop the master's service threads: jetty UI server, chores, cleaners,
   * managers. Order is deliberate — the jetty server and chores come down
   * first, then (on a clean cluster shutdown) region servers are allowed to
   * report out before the managers are stopped.
   */
  @Override
  protected void stopServiceThreads() {
    if (masterJettyServer != null) {
      LOG.info("Stopping master jetty server");
      try {
        masterJettyServer.stop();
      } catch (Exception e) {
        LOG.error("Failed to stop master jetty server", e);
      }
    }
    super.stopServiceThreads();
    stopChores();
    // Wait for all the remaining region servers to report in IFF we were
    // running a cluster shutdown AND we were NOT aborting.
    if (!isAborted() && this.serverManager != null &&
        this.serverManager.isClusterShutdown()) {
      this.serverManager.letRegionServersShutdown();
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Stopping service threads");
    }
    // Clean up and close up shop. Each may be null if the master never
    // finished initialization.
    if (this.logCleaner != null) this.logCleaner.cancel(true);
    if (this.hfileCleaner != null) this.hfileCleaner.cancel(true);
    if (this.quotaManager != null) this.quotaManager.stop();
    if (this.activeMasterManager != null) this.activeMasterManager.stop();
    if (this.serverManager != null) this.serverManager.stop();
    if (this.assignmentManager != null) this.assignmentManager.stop();
    if (this.fileSystemManager != null) this.fileSystemManager.stop();
    if (this.mpmHost != null) this.mpmHost.stop("server shutting down.");
  }
1057 
1058   private void stopChores() {
1059     if (this.balancerChore != null) {
1060       this.balancerChore.cancel(true);
1061     }
1062     if (this.clusterStatusChore != null) {
1063       this.clusterStatusChore.cancel(true);
1064     }
1065     if (this.catalogJanitorChore != null) {
1066       this.catalogJanitorChore.cancel(true);
1067     }
1068     if (this.clusterStatusPublisherChore != null){
1069       clusterStatusPublisherChore.cancel(true);
1070     }
1071   }
1072 
1073   /**
1074    * @return Get remote side's InetAddress
1075    * @throws UnknownHostException
1076    */
1077   InetAddress getRemoteInetAddress(final int port,
1078       final long serverStartCode) throws UnknownHostException {
1079     // Do it out here in its own little method so can fake an address when
1080     // mocking up in tests.
1081     InetAddress ia = RpcServer.getRemoteIp();
1082 
1083     // The call could be from the local regionserver,
1084     // in which case, there is no remote address.
1085     if (ia == null && serverStartCode == startcode) {
1086       InetSocketAddress isa = rpcServices.getSocketAddress();
1087       if (isa != null && isa.getPort() == port) {
1088         ia = isa.getAddress();
1089       }
1090     }
1091     return ia;
1092   }
1093 
1094   /**
1095    * @return Maximum time we should run balancer for
1096    */
1097   private int getBalancerCutoffTime() {
1098     int balancerCutoffTime = getConfiguration().getInt("hbase.balancer.max.balancing", -1);
1099     if (balancerCutoffTime == -1) {
1100       // if cutoff time isn't set, defaulting it to period time
1101       int balancerPeriod = getConfiguration().getInt("hbase.balancer.period", 300000);
1102       balancerCutoffTime = balancerPeriod;
1103     }
1104     return balancerCutoffTime;
1105   }
1106 
  /**
   * Run one round of the load balancer.
   * <p>
   * Skipped (returning false) when the master is not initialized, the
   * balancer switch is off, regions are in transition, dead servers are still
   * being processed, or a coprocessor vetoes the run. Plans are executed until
   * done or until the projected time for the next plan would exceed the
   * configured cutoff.
   * @return true when a balance round ran to completion (including the
   *         no-plans-needed case); false when the round was skipped
   * @throws IOException if a region move fails
   */
  public boolean balance() throws IOException {
    // if master not initialized, don't run balancer.
    if (!this.initialized) {
      LOG.debug("Master has not been initialized, don't run balancer.");
      return false;
    }
    // Do this call outside of synchronized block.
    int maximumBalanceTime = getBalancerCutoffTime();
    // Only allow one balance run at a time; serialize on the balancer itself.
    synchronized (this.balancer) {
      // If balance not true, don't run balancer.
      if (!this.loadBalancerTracker.isBalancerOn()) return false;
      // Only allow one balance run at at time.
      if (this.assignmentManager.getRegionStates().isRegionsInTransition()) {
        Map<String, RegionState> regionsInTransition =
          this.assignmentManager.getRegionStates().getRegionsInTransition();
        LOG.debug("Not running balancer because " + regionsInTransition.size() +
          " region(s) in transition: " + org.apache.commons.lang.StringUtils.
            abbreviate(regionsInTransition.toString(), 256));
        return false;
      }
      if (this.serverManager.areDeadServersInProgress()) {
        LOG.debug("Not running balancer because processing dead regionserver(s): " +
          this.serverManager.getDeadServers());
        return false;
      }

      // A preBalance() returning true means the coprocessor bypasses the run.
      if (this.cpHost != null) {
        try {
          if (this.cpHost.preBalance()) {
            LOG.debug("Coprocessor bypassing balancer request");
            return false;
          }
        } catch (IOException ioe) {
          LOG.error("Error invoking master coprocessor preBalance()", ioe);
          return false;
        }
      }

      Map<TableName, Map<ServerName, List<HRegionInfo>>> assignmentsByTable =
        this.assignmentManager.getRegionStates().getAssignmentsByTable();

      List<RegionPlan> plans = new ArrayList<RegionPlan>();
      //Give the balancer the current cluster state.
      this.balancer.setClusterStatus(getClusterStatus());
      // Balance each table's assignments independently; collect all plans.
      for (Map<ServerName, List<HRegionInfo>> assignments : assignmentsByTable.values()) {
        List<RegionPlan> partialPlans = this.balancer.balanceCluster(assignments);
        if (partialPlans != null) plans.addAll(partialPlans);
      }
      long cutoffTime = System.currentTimeMillis() + maximumBalanceTime;
      int rpCount = 0;  // number of RegionPlans balanced so far
      long totalRegPlanExecTime = 0;
      if (plans != null && !plans.isEmpty()) {
        for (RegionPlan plan: plans) {
          LOG.info("balance " + plan);
          long balStartTime = System.currentTimeMillis();
          //TODO: bulk assign
          this.assignmentManager.balance(plan);
          totalRegPlanExecTime += System.currentTimeMillis()-balStartTime;
          rpCount++;
          // Stop early when the average plan time projects past the cutoff.
          if (rpCount < plans.size() &&
              // if performing next balance exceeds cutoff time, exit the loop
              (System.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) {
            //TODO: After balance, there should not be a cutoff time (keeping it as
            // a security net for now)
            LOG.debug("No more balancing till next balance run; maximumBalanceTime=" +
              maximumBalanceTime);
            break;
          }
        }
      }
      if (this.cpHost != null) {
        try {
          // Pass only the plans that actually ran when we stopped early.
          this.cpHost.postBalance(rpCount < plans.size() ? plans.subList(0, rpCount) : plans);
        } catch (IOException ioe) {
          // balancing already succeeded so don't change the result
          LOG.error("Error invoking master coprocessor postBalance()", ioe);
        }
      }
    }
    // If LoadBalancer did not generate any plans, it means the cluster is already balanced.
    // Return true indicating a success.
    return true;
  }
1190 
1191   /**
1192    * @return Client info for use as prefix on an audit log string; who did an action
1193    */
1194   String getClientIdAuditPrefix() {
1195     return "Client=" + RequestContext.getRequestUserName() + "/" +
1196       RequestContext.get().getRemoteAddress();
1197   }
1198 
1199   /**
1200    * Switch for the background CatalogJanitor thread.
1201    * Used for testing.  The thread will continue to run.  It will just be a noop
1202    * if disabled.
1203    * @param b If false, the catalog janitor won't do anything.
1204    */
1205   public void setCatalogJanitorEnabled(final boolean b) {
1206     this.catalogJanitorChore.setEnabled(b);
1207   }
1208 
1209   @Override
1210   public void dispatchMergingRegions(final HRegionInfo region_a,
1211       final HRegionInfo region_b, final boolean forcible) throws IOException {
1212     checkInitialized();
1213     this.service.submit(new DispatchMergingRegionHandler(this,
1214         this.catalogJanitorChore, region_a, region_b, forcible));
1215   }
1216 
  /**
   * Move a region to another region server.
   * <p>
   * With a null/empty destination a random eligible server is chosen. The
   * move is silently skipped when the region is already on the destination,
   * or when the destination is this master and the balancer says user regions
   * should not live on the master.
   * @param encodedRegionName encoded name of the region to move
   * @param destServerName destination server name bytes; null/empty for random
   * @throws HBaseIOException if the region is unknown or the move fails
   */
  void move(final byte[] encodedRegionName,
      final byte[] destServerName) throws HBaseIOException {
    RegionState regionState = assignmentManager.getRegionStates().
      getRegionState(Bytes.toString(encodedRegionName));
    if (regionState == null) {
      throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
    }

    HRegionInfo hri = regionState.getRegion();
    ServerName dest;
    if (destServerName == null || destServerName.length == 0) {
      LOG.info("Passed destination servername is null/empty so " +
        "choosing a server at random");
      // Exclude the current host from the candidate list.
      final List<ServerName> destServers = this.serverManager.createDestinationServersList(
        regionState.getServerName());
      dest = balancer.randomAssignment(hri, destServers);
      if (dest == null) {
        LOG.debug("Unable to determine a plan to assign " + hri);
        return;
      }
    } else {
      dest = ServerName.valueOf(Bytes.toString(destServerName));
      if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer
          && !((BaseLoadBalancer)balancer).shouldBeOnMaster(hri)) {
        // To avoid unnecessary region moving later by balancer. Don't put user
        // regions on master. Regions on master could be put on other region
        // server intentionally by test however.
        LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
          + " to avoid unnecessary region moving later by load balancer,"
          + " because it should not be on master");
        return;
      }
    }

    if (dest.equals(regionState.getServerName())) {
      LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
        + " because region already assigned to the same server " + dest + ".");
      return;
    }

    // Now we can do the move
    RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);

    try {
      checkInitialized();
      // Coprocessor preMove() returning true vetoes the move.
      if (this.cpHost != null) {
        if (this.cpHost.preMove(hri, rp.getSource(), rp.getDestination())) {
          return;
        }
      }
      LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
      this.assignmentManager.balance(rp);
      if (this.cpHost != null) {
        this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
      }
    } catch (IOException ioe) {
      // Preserve HBaseIOException subtype; wrap anything else.
      if (ioe instanceof HBaseIOException) {
        throw (HBaseIOException)ioe;
      }
      throw new HBaseIOException(ioe);
    }
  }
1279 
  /**
   * Create a new table.
   * <p>
   * Ensures the namespace exists, sanity-checks the descriptor, verifies
   * quotas, then submits a CreateTableHandler to the executor service.
   * Coprocessor pre/post hooks bracket the submission.
   * @param hTableDescriptor descriptor of the table to create
   * @param splitKeys pre-split keys, or null for a single region
   * @throws IOException on validation, quota, or submission failure
   */
  @Override
  public void createTable(HTableDescriptor hTableDescriptor,
      byte [][] splitKeys) throws IOException {
    if (isStopped()) {
      throw new MasterNotRunningException();
    }

    String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
    ensureNamespaceExists(namespace);

    HRegionInfo[] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
    checkInitialized();
    sanityCheckTableDescriptor(hTableDescriptor);
    // Reserve quota for the regions about to be created.
    this.quotaManager.checkNamespaceTableAndRegionQuota(hTableDescriptor.getTableName(),
      newRegions.length);
    if (cpHost != null) {
      cpHost.preCreateTable(hTableDescriptor, newRegions);
    }
    LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
    // prepare() validates the handler before it is queued for execution.
    this.service.submit(new CreateTableHandler(this,
      this.fileSystemManager, hTableDescriptor, conf,
      newRegions, this).prepare());
    if (cpHost != null) {
      cpHost.postCreateTable(hTableDescriptor, newRegions);
    }

  }
1307 
1308   /**
1309    * Checks whether the table conforms to some sane limits, and configured
1310    * values (compression, etc) work. Throws an exception if something is wrong.
1311    * @throws IOException
1312    */
1313   private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
1314     final String CONF_KEY = "hbase.table.sanity.checks";
1315     if (!conf.getBoolean(CONF_KEY, true)) {
1316       return;
1317     }
1318     String tableVal = htd.getConfigurationValue(CONF_KEY);
1319     if (tableVal != null && !Boolean.valueOf(tableVal)) {
1320       return;
1321     }
1322 
1323     // check max file size
1324     long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
1325     long maxFileSize = htd.getMaxFileSize();
1326     if (maxFileSize < 0) {
1327       maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
1328     }
1329     if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
1330       throw new DoNotRetryIOException("MAX_FILESIZE for table descriptor or "
1331         + "\"hbase.hregion.max.filesize\" (" + maxFileSize
1332         + ") is too small, which might cause over splitting into unmanageable "
1333         + "number of regions. Set " + CONF_KEY + " to false at conf or table descriptor "
1334           + "if you want to bypass sanity checks");
1335     }
1336 
1337     // check flush size
1338     long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
1339     long flushSize = htd.getMemStoreFlushSize();
1340     if (flushSize < 0) {
1341       flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
1342     }
1343     if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
1344       throw new DoNotRetryIOException("MEMSTORE_FLUSHSIZE for table descriptor or "
1345           + "\"hbase.hregion.memstore.flush.size\" ("+flushSize+") is too small, which might cause"
1346           + " very frequent flushing. Set " + CONF_KEY + " to false at conf or table descriptor "
1347           + "if you want to bypass sanity checks");
1348     }
1349 
1350     // check that coprocessors and other specified plugin classes can be loaded
1351     try {
1352       checkClassLoading(conf, htd);
1353     } catch (Exception ex) {
1354       throw new DoNotRetryIOException(ex);
1355     }
1356 
1357     // check compression can be loaded
1358     try {
1359       checkCompression(htd);
1360     } catch (IOException e) {
1361       throw new DoNotRetryIOException(e.getMessage(), e);
1362     }
1363 
1364     // check encryption can be loaded
1365     try {
1366       checkEncryption(conf, htd);
1367     } catch (IOException e) {
1368       throw new DoNotRetryIOException(e.getMessage(), e);
1369     }
1370 
1371     // check that we have at least 1 CF
1372     if (htd.getColumnFamilies().length == 0) {
1373       throw new DoNotRetryIOException("Table should have at least one column family "
1374           + "Set "+CONF_KEY+" at conf or table descriptor if you want to bypass sanity checks");
1375     }
1376 
1377     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1378       if (hcd.getTimeToLive() <= 0) {
1379         throw new DoNotRetryIOException("TTL for column family " + hcd.getNameAsString()
1380           + "  must be positive. Set " + CONF_KEY + " to false at conf or table descriptor "
1381           + "if you want to bypass sanity checks");
1382       }
1383 
1384       // check blockSize
1385       if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
1386         throw new DoNotRetryIOException("Block size for column family " + hcd.getNameAsString()
1387           + "  must be between 1K and 16MB Set "+CONF_KEY+" to false at conf or table descriptor "
1388           + "if you want to bypass sanity checks");
1389       }
1390 
1391       // check versions
1392       if (hcd.getMinVersions() < 0) {
1393         throw new DoNotRetryIOException("Min versions for column family " + hcd.getNameAsString()
1394           + "  must be positive. Set " + CONF_KEY + " to false at conf or table descriptor "
1395           + "if you want to bypass sanity checks");
1396       }
1397       // max versions already being checked
1398 
1399       // check replication scope
1400       if (hcd.getScope() < 0) {
1401         throw new DoNotRetryIOException("Replication scope for column family "
1402           + hcd.getNameAsString() + "  must be positive. Set " + CONF_KEY + " to false at conf "
1403           + "or table descriptor if you want to bypass sanity checks");
1404       }
1405 
1406       // TODO: should we check coprocessors and encryption ?
1407     }
1408   }
1409 
  /**
   * Registers this master as a backup master in ZooKeeper and spawns a daemon
   * thread that blocks until this process becomes the active master, then runs
   * {@code finishActiveMasterInitialization}.
   *
   * @param infoPort the info-server port to publish in the master znode
   * @throws KeeperException if the backup-master znode cannot be created
   */
  private void startActiveMasterManager(int infoPort) throws KeeperException {
    String backupZNode = ZKUtil.joinZNode(
      zooKeeper.backupMasterAddressesZNode, serverName.toString());
    /*
    * Add a ZNode for ourselves in the backup master directory since we
    * may not become the active master. If so, we want the actual active
    * master to know we are backup masters, so that it won't assign
    * regions to us if so configured.
    *
    * If we become the active master later, ActiveMasterManager will delete
    * this node explicitly.  If we crash before then, ZooKeeper will delete
    * this node for us since it is ephemeral.
    */
    LOG.info("Adding backup master ZNode " + backupZNode);
    if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode,
        serverName, infoPort)) {
      // Non-fatal: we proceed even if the backup znode could not be written.
      LOG.warn("Failed create of " + backupZNode + " by " + serverName);
    }

    activeMasterManager.setInfoPort(infoPort);
    // Start a thread to try to become the active master, so we won't block here
    Threads.setDaemonThreadRunning(new Thread(new Runnable() {
      @Override
      public void run() {
        // ZK session timeout doubles as the poll interval while stalling below.
        int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT,
          HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
        // If we're a backup master, stall until a primary to writes his address
        if (conf.getBoolean(HConstants.MASTER_TYPE_BACKUP,
            HConstants.DEFAULT_MASTER_TYPE_BACKUP)) {
          LOG.debug("HMaster started in backup mode. "
            + "Stalling until master znode is written.");
          // This will only be a minute or so while the cluster starts up,
          // so don't worry about setting watches on the parent znode
          while (!activeMasterManager.hasActiveMaster()) {
            LOG.debug("Waiting for master address ZNode to be written "
              + "(Also watching cluster state node)");
            Threads.sleep(timeout);
          }
        }
        MonitoredTask status = TaskMonitor.get().createStatus("Master startup");
        status.setDescription("Master startup");
        try {
          // Blocks until we win the active-master election (or are told to stop).
          if (activeMasterManager.blockUntilBecomingActiveMaster(timeout, status)) {
            finishActiveMasterInitialization(status);
          }
        } catch (Throwable t) {
          status.setStatus("Failed to become active: " + t.getMessage());
          LOG.fatal("Failed to become active master", t);
          // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility
          if (t instanceof NoClassDefFoundError &&
              t.getMessage()
                  .contains("org/apache/hadoop/hdfs/protocol/FSConstants$SafeModeAction")) {
            // improved error message for this special case
            abort("HBase is having a problem with its Hadoop jars.  You may need to "
              + "recompile HBase against Hadoop version "
              +  org.apache.hadoop.util.VersionInfo.getVersion()
              + " or change your hadoop jars to start properly", t);
          } else {
            abort("Unhandled exception. Starting shutdown.", t);
          }
        } finally {
          status.cleanup();
        }
      }
    }, getServerName().toShortString() + ".activeMasterManager"));
  }
1476 
1477   private void checkCompression(final HTableDescriptor htd)
1478   throws IOException {
1479     if (!this.masterCheckCompression) return;
1480     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1481       checkCompression(hcd);
1482     }
1483   }
1484 
1485   private void checkCompression(final HColumnDescriptor hcd)
1486   throws IOException {
1487     if (!this.masterCheckCompression) return;
1488     CompressionTest.testCompression(hcd.getCompression());
1489     CompressionTest.testCompression(hcd.getCompactionCompression());
1490   }
1491 
1492   private void checkEncryption(final Configuration conf, final HTableDescriptor htd)
1493   throws IOException {
1494     if (!this.masterCheckEncryption) return;
1495     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1496       checkEncryption(conf, hcd);
1497     }
1498   }
1499 
1500   private void checkEncryption(final Configuration conf, final HColumnDescriptor hcd)
1501   throws IOException {
1502     if (!this.masterCheckEncryption) return;
1503     EncryptionTest.testEncryption(conf, hcd.getEncryptionType(), hcd.getEncryptionKey());
1504   }
1505 
1506   private void checkClassLoading(final Configuration conf, final HTableDescriptor htd)
1507   throws IOException {
1508     RegionSplitPolicy.getSplitPolicyClass(htd, conf);
1509     RegionCoprocessorHost.testTableCoprocessorAttrs(conf, htd);
1510   }
1511 
1512   private HRegionInfo[] getHRegionInfos(HTableDescriptor hTableDescriptor,
1513     byte[][] splitKeys) {
1514     long regionId = System.currentTimeMillis();
1515     HRegionInfo[] hRegionInfos = null;
1516     if (splitKeys == null || splitKeys.length == 0) {
1517       hRegionInfos = new HRegionInfo[]{new HRegionInfo(hTableDescriptor.getTableName(), null, null,
1518                 false, regionId)};
1519     } else {
1520       int numRegions = splitKeys.length + 1;
1521       hRegionInfos = new HRegionInfo[numRegions];
1522       byte[] startKey = null;
1523       byte[] endKey = null;
1524       for (int i = 0; i < numRegions; i++) {
1525         endKey = (i == splitKeys.length) ? null : splitKeys[i];
1526         hRegionInfos[i] =
1527              new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey,
1528                  false, regionId);
1529         startKey = endKey;
1530       }
1531     }
1532     return hRegionInfos;
1533   }
1534 
1535   private static boolean isCatalogTable(final TableName tableName) {
1536     return tableName.equals(TableName.META_TABLE_NAME);
1537   }
1538 
1539   @Override
1540   public void deleteTable(final TableName tableName) throws IOException {
1541     checkInitialized();
1542     if (cpHost != null) {
1543       cpHost.preDeleteTable(tableName);
1544     }
1545     LOG.info(getClientIdAuditPrefix() + " delete " + tableName);
1546     this.service.submit(new DeleteTableHandler(tableName, this, this).prepare());
1547     if (cpHost != null) {
1548       cpHost.postDeleteTable(tableName);
1549     }
1550   }
1551 
1552   @Override
1553   public void truncateTable(TableName tableName, boolean preserveSplits) throws IOException {
1554     checkInitialized();
1555     if (cpHost != null) {
1556       cpHost.preTruncateTable(tableName);
1557     }
1558     LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);
1559     TruncateTableHandler handler = new TruncateTableHandler(tableName, this, this, preserveSplits);
1560     handler.prepare();
1561     handler.process();
1562     if (cpHost != null) {
1563       cpHost.postTruncateTable(tableName);
1564     }
1565   }
1566 
1567   @Override
1568   public void addColumn(final TableName tableName, final HColumnDescriptor columnDescriptor)
1569       throws IOException {
1570     checkInitialized();
1571     checkCompression(columnDescriptor);
1572     checkEncryption(conf, columnDescriptor);
1573     if (cpHost != null) {
1574       if (cpHost.preAddColumn(tableName, columnDescriptor)) {
1575         return;
1576       }
1577     }
1578     //TODO: we should process this (and some others) in an executor
1579     new TableAddFamilyHandler(tableName, columnDescriptor, this, this).prepare().process();
1580     if (cpHost != null) {
1581       cpHost.postAddColumn(tableName, columnDescriptor);
1582     }
1583   }
1584 
1585   @Override
1586   public void modifyColumn(TableName tableName, HColumnDescriptor descriptor)
1587       throws IOException {
1588     checkInitialized();
1589     checkCompression(descriptor);
1590     checkEncryption(conf, descriptor);
1591     if (cpHost != null) {
1592       if (cpHost.preModifyColumn(tableName, descriptor)) {
1593         return;
1594       }
1595     }
1596     LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
1597     new TableModifyFamilyHandler(tableName, descriptor, this, this)
1598       .prepare().process();
1599     if (cpHost != null) {
1600       cpHost.postModifyColumn(tableName, descriptor);
1601     }
1602   }
1603 
1604   @Override
1605   public void deleteColumn(final TableName tableName, final byte[] columnName)
1606       throws IOException {
1607     checkInitialized();
1608     if (cpHost != null) {
1609       if (cpHost.preDeleteColumn(tableName, columnName)) {
1610         return;
1611       }
1612     }
1613     LOG.info(getClientIdAuditPrefix() + " delete " + Bytes.toString(columnName));
1614     new TableDeleteFamilyHandler(tableName, columnName, this, this).prepare().process();
1615     if (cpHost != null) {
1616       cpHost.postDeleteColumn(tableName, columnName);
1617     }
1618   }
1619 
1620   @Override
1621   public void enableTable(final TableName tableName) throws IOException {
1622     checkInitialized();
1623     if (cpHost != null) {
1624       cpHost.preEnableTable(tableName);
1625     }
1626     LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
1627     this.service.submit(new EnableTableHandler(this, tableName,
1628       assignmentManager, tableLockManager, false).prepare());
1629     if (cpHost != null) {
1630       cpHost.postEnableTable(tableName);
1631    }
1632   }
1633 
1634   @Override
1635   public void disableTable(final TableName tableName) throws IOException {
1636     checkInitialized();
1637     if (cpHost != null) {
1638       cpHost.preDisableTable(tableName);
1639     }
1640     LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
1641     this.service.submit(new DisableTableHandler(this, tableName,
1642       assignmentManager, tableLockManager, false).prepare());
1643     if (cpHost != null) {
1644       cpHost.postDisableTable(tableName);
1645     }
1646   }
1647 
1648   /**
1649    * Return the region and current deployment for the region containing
1650    * the given row. If the region cannot be found, returns null. If it
1651    * is found, but not currently deployed, the second element of the pair
1652    * may be null.
1653    */
1654   @VisibleForTesting // Used by TestMaster.
1655   Pair<HRegionInfo, ServerName> getTableRegionForRow(
1656       final TableName tableName, final byte [] rowKey)
1657   throws IOException {
1658     final AtomicReference<Pair<HRegionInfo, ServerName>> result =
1659       new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
1660 
1661     MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
1662         @Override
1663         public boolean visit(Result data) throws IOException {
1664           if (data == null || data.size() <= 0) {
1665             return true;
1666           }
1667           Pair<HRegionInfo, ServerName> pair = HRegionInfo.getHRegionInfoAndServerName(data);
1668           if (pair == null) {
1669             return false;
1670           }
1671           if (!pair.getFirst().getTable().equals(tableName)) {
1672             return false;
1673           }
1674           result.set(pair);
1675           return true;
1676         }
1677     };
1678 
1679     MetaTableAccessor.scanMeta(clusterConnection, visitor, tableName, rowKey, 1);
1680     return result.get();
1681   }
1682 
1683   @Override
1684   public void modifyTable(final TableName tableName, final HTableDescriptor descriptor)
1685       throws IOException {
1686     checkInitialized();
1687     sanityCheckTableDescriptor(descriptor);
1688     if (cpHost != null) {
1689       cpHost.preModifyTable(tableName, descriptor);
1690     }
1691     LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
1692     new ModifyTableHandler(tableName, descriptor, this, this).prepare().process();
1693     if (cpHost != null) {
1694       cpHost.postModifyTable(tableName, descriptor);
1695     }
1696   }
1697 
1698   @Override
1699   public void checkTableModifiable(final TableName tableName)
1700       throws IOException, TableNotFoundException, TableNotDisabledException {
1701     if (isCatalogTable(tableName)) {
1702       throw new IOException("Can't modify catalog tables");
1703     }
1704     if (!MetaTableAccessor.tableExists(getConnection(), tableName)) {
1705       throw new TableNotFoundException(tableName);
1706     }
1707     if (!getAssignmentManager().getTableStateManager().
1708         isTableState(tableName, TableState.State.DISABLED)) {
1709       throw new TableNotDisabledException(tableName);
1710     }
1711   }
1712 
1713   /**
1714    * @return cluster status
1715    */
1716   public ClusterStatus getClusterStatus() throws InterruptedIOException {
1717     // Build Set of backup masters from ZK nodes
1718     List<String> backupMasterStrings;
1719     try {
1720       backupMasterStrings = ZKUtil.listChildrenNoWatch(this.zooKeeper,
1721         this.zooKeeper.backupMasterAddressesZNode);
1722     } catch (KeeperException e) {
1723       LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
1724       backupMasterStrings = new ArrayList<String>(0);
1725     }
1726     List<ServerName> backupMasters = new ArrayList<ServerName>(
1727                                           backupMasterStrings.size());
1728     for (String s: backupMasterStrings) {
1729       try {
1730         byte [] bytes;
1731         try {
1732           bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
1733               this.zooKeeper.backupMasterAddressesZNode, s));
1734         } catch (InterruptedException e) {
1735           throw new InterruptedIOException();
1736         }
1737         if (bytes != null) {
1738           ServerName sn;
1739           try {
1740             sn = ServerName.parseFrom(bytes);
1741           } catch (DeserializationException e) {
1742             LOG.warn("Failed parse, skipping registering backup server", e);
1743             continue;
1744           }
1745           backupMasters.add(sn);
1746         }
1747       } catch (KeeperException e) {
1748         LOG.warn(this.zooKeeper.prefix("Unable to get information about " +
1749                  "backup servers"), e);
1750       }
1751     }
1752     Collections.sort(backupMasters, new Comparator<ServerName>() {
1753       @Override
1754       public int compare(ServerName s1, ServerName s2) {
1755         return s1.getServerName().compareTo(s2.getServerName());
1756       }});
1757 
1758     String clusterId = fileSystemManager != null ?
1759       fileSystemManager.getClusterId().toString() : null;
1760     Map<String, RegionState> regionsInTransition = assignmentManager != null ?
1761       assignmentManager.getRegionStates().getRegionsInTransition() : null;
1762     String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null;
1763     boolean balancerOn = loadBalancerTracker != null ?
1764       loadBalancerTracker.isBalancerOn() : false;
1765     Map<ServerName, ServerLoad> onlineServers = null;
1766     Set<ServerName> deadServers = null;
1767     if (serverManager != null) {
1768       deadServers = serverManager.getDeadServers().copyServerNames();
1769       onlineServers = serverManager.getOnlineServers();
1770     }
1771     return new ClusterStatus(VersionInfo.getVersion(), clusterId,
1772       onlineServers, deadServers, serverName, backupMasters,
1773       regionsInTransition, coprocessors, balancerOn);
1774   }
1775 
1776   /**
1777    * The set of loaded coprocessors is stored in a static set. Since it's
1778    * statically allocated, it does not require that HMaster's cpHost be
1779    * initialized prior to accessing it.
1780    * @return a String representation of the set of names of the loaded
1781    * coprocessors.
1782    */
1783   public static String getLoadedCoprocessors() {
1784     return CoprocessorHost.getLoadedCoprocessors().toString();
1785   }
1786 
1787   /**
1788    * @return timestamp in millis when HMaster was started.
1789    */
1790   public long getMasterStartTime() {
1791     return startcode;
1792   }
1793 
1794   /**
1795    * @return timestamp in millis when HMaster became the active master.
1796    */
1797   public long getMasterActiveTime() {
1798     return masterActiveTime;
1799   }
1800 
1801   public int getRegionServerInfoPort(final ServerName sn) {
1802     RegionServerInfo info = this.regionServerTracker.getRegionServerInfo(sn);
1803     if (info == null || info.getInfoPort() == 0) {
1804       return conf.getInt(HConstants.REGIONSERVER_INFO_PORT,
1805         HConstants.DEFAULT_REGIONSERVER_INFOPORT);
1806     }
1807     return info.getInfoPort();
1808   }
1809 
1810   /**
1811    * @return array of coprocessor SimpleNames.
1812    */
1813   public String[] getMasterCoprocessors() {
1814     Set<String> masterCoprocessors = getMasterCoprocessorHost().getCoprocessors();
1815     return masterCoprocessors.toArray(new String[masterCoprocessors.size()]);
1816   }
1817 
1818   @Override
1819   public void abort(final String msg, final Throwable t) {
1820     if (isAborted() || isStopped()) {
1821       return;
1822     }
1823     if (cpHost != null) {
1824       // HBASE-4014: dump a list of loaded coprocessors.
1825       LOG.fatal("Master server abort: loaded coprocessors are: " +
1826           getLoadedCoprocessors());
1827     }
1828     if (t != null) LOG.fatal(msg, t);
1829     stop(msg);
1830   }
1831 
1832   @Override
1833   public ZooKeeperWatcher getZooKeeper() {
1834     return zooKeeper;
1835   }
1836 
1837   @Override
1838   public MasterCoprocessorHost getMasterCoprocessorHost() {
1839     return cpHost;
1840   }
1841 
1842   @Override
1843   public MasterQuotaManager getMasterQuotaManager() {
1844     return quotaManager;
1845   }
1846 
1847   @Override
1848   public ServerName getServerName() {
1849     return this.serverName;
1850   }
1851 
1852   @Override
1853   public AssignmentManager getAssignmentManager() {
1854     return this.assignmentManager;
1855   }
1856 
1857   public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
1858     return rsFatals;
1859   }
1860 
1861   public void shutdown() {
1862     if (cpHost != null) {
1863       try {
1864         cpHost.preShutdown();
1865       } catch (IOException ioe) {
1866         LOG.error("Error call master coprocessor preShutdown()", ioe);
1867       }
1868     }
1869 
1870     if (this.serverManager != null) {
1871       this.serverManager.shutdownCluster();
1872     }
1873     if (this.clusterStatusTracker != null){
1874       try {
1875         this.clusterStatusTracker.setClusterDown();
1876       } catch (KeeperException e) {
1877         LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
1878       }
1879     }
1880   }
1881 
1882   public void stopMaster() {
1883     if (cpHost != null) {
1884       try {
1885         cpHost.preStopMaster();
1886       } catch (IOException ioe) {
1887         LOG.error("Error call master coprocessor preStopMaster()", ioe);
1888       }
1889     }
1890     stop("Stopped by " + Thread.currentThread().getName());
1891   }
1892 
1893   void checkServiceStarted() throws ServerNotRunningYetException {
1894     if (!serviceStarted) {
1895       throw new ServerNotRunningYetException("Server is not running yet");
1896     }
1897   }
1898 
1899   void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException {
1900     checkServiceStarted();
1901     if (!this.initialized) {
1902       throw new PleaseHoldException("Master is initializing");
1903     }
1904   }
1905 
1906   void checkNamespaceManagerReady() throws IOException {
1907     checkInitialized();
1908     if (tableNamespaceManager == null ||
1909         !tableNamespaceManager.isTableAvailableAndInitialized()) {
1910       throw new IOException("Table Namespace Manager not ready yet, try again later");
1911     }
1912   }
1913   /**
1914    * Report whether this master is currently the active master or not.
1915    * If not active master, we are parked on ZK waiting to become active.
1916    *
1917    * This method is used for testing.
1918    *
1919    * @return true if active master, false if not.
1920    */
1921   public boolean isActiveMaster() {
1922     return isActiveMaster;
1923   }
1924 
1925   /**
1926    * Report whether this master has completed with its initialization and is
1927    * ready.  If ready, the master is also the active master.  A standby master
1928    * is never ready.
1929    *
1930    * This method is used for testing.
1931    *
1932    * @return true if master is ready to go, false if not.
1933    */
1934   @Override
1935   public boolean isInitialized() {
1936     return initialized;
1937   }
1938 
1939   /**
1940    * ServerShutdownHandlerEnabled is set false before completing
1941    * assignMeta to prevent processing of ServerShutdownHandler.
1942    * @return true if assignMeta has completed;
1943    */
1944   @Override
1945   public boolean isServerShutdownHandlerEnabled() {
1946     return this.serverShutdownHandlerEnabled;
1947   }
1948 
1949   /**
1950    * Report whether this master has started initialization and is about to do meta region assignment
1951    * @return true if master is in initialization & about to assign hbase:meta regions
1952    */
1953   public boolean isInitializationStartsMetaRegionAssignment() {
1954     return this.initializationBeforeMetaAssignment;
1955   }
1956 
1957   public void assignRegion(HRegionInfo hri) {
1958     assignmentManager.assign(hri);
1959   }
1960 
1961   /**
1962    * Compute the average load across all region servers.
1963    * Currently, this uses a very naive computation - just uses the number of
1964    * regions being served, ignoring stats about number of requests.
1965    * @return the average load
1966    */
1967   public double getAverageLoad() {
1968     if (this.assignmentManager == null) {
1969       return 0;
1970     }
1971 
1972     RegionStates regionStates = this.assignmentManager.getRegionStates();
1973     if (regionStates == null) {
1974       return 0;
1975     }
1976     return regionStates.getAverageLoad();
1977   }
1978 
1979   @Override
1980   public boolean registerService(Service instance) {
1981     /*
1982      * No stacking of instances is allowed for a single service name
1983      */
1984     Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType();
1985     if (coprocessorServiceHandlers.containsKey(serviceDesc.getFullName())) {
1986       LOG.error("Coprocessor service "+serviceDesc.getFullName()+
1987           " already registered, rejecting request from "+instance
1988       );
1989       return false;
1990     }
1991 
1992     coprocessorServiceHandlers.put(serviceDesc.getFullName(), instance);
1993     if (LOG.isDebugEnabled()) {
1994       LOG.debug("Registered master coprocessor service: service="+serviceDesc.getFullName());
1995     }
1996     return true;
1997   }
1998 
1999   /**
2000    * Utility for constructing an instance of the passed HMaster class.
2001    * @param masterClass
2002    * @param conf
2003    * @return HMaster instance.
2004    */
2005   public static HMaster constructMaster(Class<? extends HMaster> masterClass,
2006       final Configuration conf, final CoordinatedStateManager cp)  {
2007     try {
2008       Constructor<? extends HMaster> c =
2009         masterClass.getConstructor(Configuration.class, CoordinatedStateManager.class);
2010       return c.newInstance(conf, cp);
2011     } catch (InvocationTargetException ite) {
2012       Throwable target = ite.getTargetException() != null?
2013         ite.getTargetException(): ite;
2014       if (target.getCause() != null) target = target.getCause();
2015       throw new RuntimeException("Failed construction of Master: " +
2016         masterClass.toString(), target);
2017     } catch (Exception e) {
2018       throw new RuntimeException("Failed construction of Master: " +
2019         masterClass.toString() + ((e.getCause() != null)?
2020           e.getCause().getMessage(): ""), e);
2021     }
2022   }
2023 
2024   /**
2025    * @see org.apache.hadoop.hbase.master.HMasterCommandLine
2026    */
2027   public static void main(String [] args) {
2028     VersionInfo.logVersion();
2029     new HMasterCommandLine(HMaster.class).doMain(args);
2030   }
2031 
2032   public HFileCleaner getHFileCleaner() {
2033     return this.hfileCleaner;
2034   }
2035 
2036   /**
2037    * Exposed for TESTING!
2038    * @return the underlying snapshot manager
2039    */
2040   public SnapshotManager getSnapshotManagerForTesting() {
2041     return this.snapshotManager;
2042   }
2043 
2044   @Override
2045   public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
2046     TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
2047     checkNamespaceManagerReady();
2048     if (cpHost != null) {
2049       if (cpHost.preCreateNamespace(descriptor)) {
2050         return;
2051       }
2052     }
2053     LOG.info(getClientIdAuditPrefix() + " creating " + descriptor);
2054     tableNamespaceManager.create(descriptor);
2055     if (cpHost != null) {
2056       cpHost.postCreateNamespace(descriptor);
2057     }
2058   }
2059 
2060   @Override
2061   public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
2062     TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
2063     checkNamespaceManagerReady();
2064     if (cpHost != null) {
2065       if (cpHost.preModifyNamespace(descriptor)) {
2066         return;
2067       }
2068     }
2069     LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
2070     tableNamespaceManager.update(descriptor);
2071     if (cpHost != null) {
2072       cpHost.postModifyNamespace(descriptor);
2073     }
2074   }
2075 
2076   @Override
2077   public void deleteNamespace(String name) throws IOException {
2078     checkNamespaceManagerReady();
2079     if (cpHost != null) {
2080       if (cpHost.preDeleteNamespace(name)) {
2081         return;
2082       }
2083     }
2084     LOG.info(getClientIdAuditPrefix() + " delete " + name);
2085     tableNamespaceManager.remove(name);
2086     if (cpHost != null) {
2087       cpHost.postDeleteNamespace(name);
2088     }
2089   }
2090 
2091   /**
2092    * Ensure that the specified namespace exists, otherwise throws a NamespaceNotFoundException
2093    *
2094    * @param name the namespace to check
2095    * @throws IOException if the namespace manager is not ready yet.
2096    * @throws NamespaceNotFoundException if the namespace does not exists
2097    */
2098   private void ensureNamespaceExists(final String name)
2099       throws IOException, NamespaceNotFoundException {
2100     checkNamespaceManagerReady();
2101     NamespaceDescriptor nsd = tableNamespaceManager.get(name);
2102     if (nsd == null) {
2103       throw new NamespaceNotFoundException(name);
2104     }
2105   }
2106 
2107   @Override
2108   public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
2109     checkNamespaceManagerReady();
2110 
2111     if (cpHost != null) {
2112       cpHost.preGetNamespaceDescriptor(name);
2113     }
2114 
2115     NamespaceDescriptor nsd = tableNamespaceManager.get(name);
2116     if (nsd == null) {
2117       throw new NamespaceNotFoundException(name);
2118     }
2119 
2120     if (cpHost != null) {
2121       cpHost.postGetNamespaceDescriptor(nsd);
2122     }
2123 
2124     return nsd;
2125   }
2126 
2127   @Override
2128   public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
2129     checkNamespaceManagerReady();
2130 
2131     final List<NamespaceDescriptor> descriptors = new ArrayList<NamespaceDescriptor>();
2132     boolean bypass = false;
2133     if (cpHost != null) {
2134       bypass = cpHost.preListNamespaceDescriptors(descriptors);
2135     }
2136 
2137     if (!bypass) {
2138       descriptors.addAll(tableNamespaceManager.list());
2139 
2140       if (cpHost != null) {
2141         cpHost.postListNamespaceDescriptors(descriptors);
2142       }
2143     }
2144     return descriptors;
2145   }
2146 
2147   @Override
2148   public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
2149     ensureNamespaceExists(name);
2150     return listTableDescriptors(name, null, null, true);
2151   }
2152 
2153   @Override
2154   public List<TableName> listTableNamesByNamespace(String name) throws IOException {
2155     ensureNamespaceExists(name);
2156     return listTableNames(name, null, true);
2157   }
2158 
2159   /**
2160    * Returns the list of table descriptors that match the specified request
2161    *
2162    * @param namespace the namespace to query, or null if querying for all
2163    * @param regex The regular expression to match against, or null if querying for all
2164    * @param tableNameList the list of table names, or null if querying for all
2165    * @param includeSysTables False to match only against userspace tables
2166    * @return the list of table descriptors
2167    */
2168   public List<HTableDescriptor> listTableDescriptors(final String namespace, final String regex,
2169       final List<TableName> tableNameList, final boolean includeSysTables)
2170       throws IOException {
2171     final List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();
2172 
2173     boolean bypass = false;
2174     if (cpHost != null) {
2175       bypass = cpHost.preGetTableDescriptors(tableNameList, descriptors, regex);
2176     }
2177 
2178     if (!bypass) {
2179       if (tableNameList == null || tableNameList.size() == 0) {
2180         // request for all TableDescriptors
2181         Collection<HTableDescriptor> htds;
2182         if (namespace != null && namespace.length() > 0) {
2183           htds = tableDescriptors.getByNamespace(namespace).values();
2184         } else {
2185           htds = tableDescriptors.getAll().values();
2186         }
2187 
2188         for (HTableDescriptor desc: htds) {
2189           if (tableStateManager.isTablePresent(desc.getTableName())
2190               && (includeSysTables || !desc.getTableName().isSystemTable())) {
2191             descriptors.add(desc);
2192           }
2193         }
2194       } else {
2195         for (TableName s: tableNameList) {
2196           if (tableStateManager.isTablePresent(s)) {
2197             HTableDescriptor desc = tableDescriptors.get(s);
2198             if (desc != null) {
2199               descriptors.add(desc);
2200             }
2201           }
2202         }
2203       }
2204 
2205       // Retains only those matched by regular expression.
2206       if (regex != null) {
2207         filterTablesByRegex(descriptors, Pattern.compile(regex));
2208       }
2209 
2210       if (cpHost != null) {
2211         cpHost.postGetTableDescriptors(tableNameList, descriptors, regex);
2212       }
2213     }
2214     return descriptors;
2215   }
2216 
2217   /**
2218    * Returns the list of table names that match the specified request
2219    * @param regex The regular expression to match against, or null if querying for all
2220    * @param namespace the namespace to query, or null if querying for all
2221    * @param includeSysTables False to match only against userspace tables
2222    * @return the list of table names
2223    */
2224   public List<TableName> listTableNames(final String namespace, final String regex,
2225       final boolean includeSysTables) throws IOException {
2226     final List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();
2227 
2228     boolean bypass = false;
2229     if (cpHost != null) {
2230       bypass = cpHost.preGetTableNames(descriptors, regex);
2231     }
2232 
2233     if (!bypass) {
2234       // get all descriptors
2235       Collection<HTableDescriptor> htds;
2236       if (namespace != null && namespace.length() > 0) {
2237         htds = tableDescriptors.getByNamespace(namespace).values();
2238       } else {
2239         htds = tableDescriptors.getAll().values();
2240       }
2241 
2242       for (HTableDescriptor htd: htds) {
2243         if (includeSysTables || !htd.getTableName().isSystemTable()) {
2244           descriptors.add(htd);
2245         }
2246       }
2247 
2248       // Retains only those matched by regular expression.
2249       if (regex != null) {
2250         filterTablesByRegex(descriptors, Pattern.compile(regex));
2251       }
2252 
2253       if (cpHost != null) {
2254         cpHost.postGetTableNames(descriptors, regex);
2255       }
2256     }
2257 
2258     List<TableName> result = new ArrayList<TableName>(descriptors.size());
2259     for (HTableDescriptor htd: descriptors) {
2260       result.add(htd.getTableName());
2261     }
2262     return result;
2263   }
2264 
2265 
2266   /**
2267    * Removes the table descriptors that don't match the pattern.
2268    * @param descriptors list of table descriptors to filter
2269    * @param pattern the regex to use
2270    */
2271   private static void filterTablesByRegex(final Collection<HTableDescriptor> descriptors,
2272       final Pattern pattern) {
2273     final String defaultNS = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR;
2274     Iterator<HTableDescriptor> itr = descriptors.iterator();
2275     while (itr.hasNext()) {
2276       HTableDescriptor htd = itr.next();
2277       String tableName = htd.getTableName().getNameAsString();
2278       boolean matched = pattern.matcher(tableName).matches();
2279       if (!matched && htd.getTableName().getNamespaceAsString().equals(defaultNS)) {
2280         matched = pattern.matcher(defaultNS + TableName.NAMESPACE_DELIM + tableName).matches();
2281       }
2282       if (!matched) {
2283         itr.remove();
2284       }
2285     }
2286   }
2287 
  /**
   * Returns the timestamp of the last major compaction for the given table,
   * as reported by the current cluster status.
   * @param table the table to look up
   * @throws IOException if the cluster status cannot be obtained
   */
  @Override
  public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
    return getClusterStatus().getLastMajorCompactionTsForTable(table);
  }
2292 
  /**
   * Returns the timestamp of the last major compaction for the given region,
   * as reported by the current cluster status.
   * @param regionName the encoded region name to look up
   * @throws IOException if the cluster status cannot be obtained
   */
  @Override
  public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
    return getClusterStatus().getLastMajorCompactionTsForRegion(regionName);
  }
2297 }