/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Pattern;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
import org.apache.hadoop.hbase.master.balancer.BalancerChore;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
import org.apache.hadoop.hbase.master.procedure.AddColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.quotas.RegionStateListener;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CompressionTest;
import org.apache.hadoop.hbase.util.EncryptionTest;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.util.ZKDataMigrator;
import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.servlet.Context;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Service;

/**
 * HMaster is the "master server" for HBase. An HBase cluster has one active
 * master.  If many masters are started, all compete.  Whichever wins goes on to
 * run the cluster.  All others park themselves in their constructor until
 * master or cluster shutdown or until the active master loses its lease in
 * zookeeper.  Thereafter, all running masters jostle to take over the master role.
 *
 * <p>The Master can be asked to shut down the cluster. See {@link #shutdown()}.  In
 * this case it will tell all regionservers to go down and then wait on them
 * all reporting in that they are down.  This master will then shut itself down.
 *
 * <p>You can also shut down just this master.  Call {@link #stopMaster()}.
 *
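 * <p>A minimal usage sketch (hedged; assumes you already hold a reference to a
 * running master, for example from a test mini-cluster):
 * <pre>
 * HMaster master = ...;  // obtained elsewhere, e.g. from a test harness
 * master.shutdown();     // ask the whole cluster to go down, or
 * master.stopMaster();   // stop just this master
 * </pre>
 *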
 * @see org.apache.zookeeper.Watcher
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@SuppressWarnings("deprecation")
public class HMaster extends HRegionServer implements MasterServices, Server {
  private static final Log LOG = LogFactory.getLog(HMaster.class.getName());

  /**
   * Protection against zombie master. Started once Master accepts active responsibility and
   * starts taking over responsibilities. Allows a finite time window before giving up ownership.
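   * <p>A hedged tuning sketch; the keys are the constants declared just below and
   * the values here are illustrative only:
   * <pre>
   * Configuration conf = HBaseConfiguration.create();
   * conf.setLong(TIMEOUT_KEY, TimeUnit.MINUTES.toMillis(30));
   * conf.setBoolean(HALT_KEY, true);
   * </pre>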
   */
  private static class InitializationMonitor extends HasThread {
    /** The amount of time in milliseconds to sleep before checking initialization status. */
    public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout";
    public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES);

    /**
     * If the timeout expires before initialization completes, call {@link System#exit(int)} when
     * true; do nothing otherwise.
     */
    public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout";
    public static final boolean HALT_DEFAULT = false;

    private final HMaster master;
    private final long timeout;
    private final boolean haltOnTimeout;

    /** Creates a Thread that monitors the {@link #isInitialized()} state. */
    InitializationMonitor(HMaster master) {
      super("MasterInitializationMonitor");
      this.master = master;
      this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT);
      this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT);
      this.setDaemon(true);
    }

    @Override
    public void run() {
      try {
        while (!master.isStopped() && master.isActiveMaster()) {
          Thread.sleep(timeout);
          if (master.isInitialized()) {
            LOG.debug("Initialization completed within allotted tolerance. Monitor exiting.");
          } else {
            LOG.error("Master failed to complete initialization after " + timeout + "ms. Please"
                + " consider submitting a bug report including a thread dump of this process.");
            if (haltOnTimeout) {
              LOG.error("Zombie Master exiting. Thread dump to stdout");
              Threads.printThreadInfo(System.out, "Zombie HMaster");
              System.exit(-1);
            }
          }
        }
      } catch (InterruptedException ie) {
        LOG.trace("InitMonitor thread interrupted. Exiting.");
      }
    }
  }

  // MASTER is the name of the webapp and the attribute name used to stuff this
  // instance into the web context.
  public static final String MASTER = "master";

  // Manager and zk listener for master election
  private final ActiveMasterManager activeMasterManager;
  // Region server tracker
  RegionServerTracker regionServerTracker;
  // Draining region server tracker
  private DrainingServerTracker drainingServerTracker;
  // Tracker for load balancer state
  LoadBalancerTracker loadBalancerTracker;

  /** Namespace stuff */
  private TableNamespaceManager tableNamespaceManager;

  // Metrics for the HMaster
  final MetricsMaster metricsMaster;
  // file system manager for the master FS operations
  private MasterFileSystem fileSystemManager;

  // server manager to deal with region server info
  volatile ServerManager serverManager;

  // manager of assignment nodes in zookeeper
  AssignmentManager assignmentManager;

  // buffer for "fatal error" notices from region servers
  // in the cluster. This is only used for assisting
  // operations/debugging.
  MemoryBoundedLogMessageBuffer rsFatals;

  // flag set after we become the active master (used for testing)
  private volatile boolean isActiveMaster = false;

  // flag set after we complete initialization once active;
  // it is not private since it's used in unit tests
  volatile boolean initialized = false;

  // flag set after master services are started,
  // initialization may not have completed yet.
  volatile boolean serviceStarted = false;

  // flag set after we complete assignMeta.
  private volatile boolean serverCrashProcessingEnabled = false;

  LoadBalancer balancer;
  RegionNormalizer normalizer;
  private boolean normalizerEnabled = false;
  private BalancerChore balancerChore;
  private RegionNormalizerChore normalizerChore;
  private ClusterStatusChore clusterStatusChore;
  private ClusterStatusPublisher clusterStatusPublisherChore = null;

  CatalogJanitor catalogJanitorChore;
  private LogCleaner logCleaner;
  private HFileCleaner hfileCleaner;

  MasterCoprocessorHost cpHost;

  private final boolean preLoadTableDescriptors;

  // Time stamp of when this hmaster became the active master
  private long masterActiveTime;

  // should we check the compression codec type at master side, default true, HBASE-6370
  private final boolean masterCheckCompression;

  // should we check encryption settings at master side, default true
  private final boolean masterCheckEncryption;

  Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();

  // monitor for snapshot of hbase tables
  SnapshotManager snapshotManager;
  // monitor for distributed procedures
  MasterProcedureManagerHost mpmHost;

  // it is assigned after the 'initialized' guard is set to true, so it should be volatile
  private volatile MasterQuotaManager quotaManager;

  private ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
  private WALProcedureStore procedureStore;

  // handle table states
  private TableStateManager tableStateManager;

  /** flag used in test cases in order to simulate RS failures during master initialization */
  private volatile boolean initializationBeforeMetaAssignment = false;

  /** jetty server for master to redirect requests to regionserver infoServer */
  private org.mortbay.jetty.Server masterJettyServer;

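  /**
   * Redirects requests that arrive on the master's configured info port to the
   * info port of the region server running in this same process. A hedged
   * example with illustrative host and default port numbers:
   * <pre>
   * $ curl -i http://master-host:16010/master-status
   * HTTP/1.1 302 Found
   * Location: http://master-host:16030/master-status
   * </pre>
   */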
  public static class RedirectServlet extends HttpServlet {
    private static final long serialVersionUID = 2894774810058302472L;
    private static int regionServerInfoPort;

    @Override
    public void doGet(HttpServletRequest request,
        HttpServletResponse response) throws ServletException, IOException {
      String redirectUrl = request.getScheme() + "://"
        + request.getServerName() + ":" + regionServerInfoPort
        + request.getRequestURI();
      response.sendRedirect(redirectUrl);
    }
  }

  /**
   * Initializes the HMaster. The steps are as follows:
   * <p>
   * <ol>
   * <li>Initialize the local HRegionServer</li>
   * <li>Start the ActiveMasterManager.</li>
   * </ol>
   * <p>
   * Remaining steps of initialization occur in
   * {@link #finishActiveMasterInitialization(MonitoredTask)} after
   * the master becomes the active one.
   *
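   * <p>A hedged construction sketch; {@code CoordinatedStateManagerFactory} is an
   * assumption here, named after the factory the command-line startup path uses:
   * <pre>
   * Configuration conf = HBaseConfiguration.create();
   * CoordinatedStateManager csm =
   *     CoordinatedStateManagerFactory.getCoordinatedStateManager(conf);
   * HMaster master = new HMaster(conf, csm);
   * </pre>
   *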
   * @throws KeeperException
   * @throws IOException
   */
  public HMaster(final Configuration conf, CoordinatedStateManager csm)
      throws IOException, KeeperException {
    super(conf, csm);
    this.rsFatals = new MemoryBoundedLogMessageBuffer(
      conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));

    LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
        ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));

    // Disable usage of meta replicas in the master
    this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);

    Replication.decorateMasterConfiguration(this.conf);

    // Hack! Maps DFSClient => Master for logs.  HDFS made this
    // config param for task trackers, but we can piggyback off of it.
    if (this.conf.get("mapreduce.task.attempt.id") == null) {
      this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
    }

    // should we check the compression codec type at master side, default true, HBASE-6370
    this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);

    // should we check encryption settings at master side, default true
    this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);

    this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));

    // preload table descriptors at startup
    this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);

    // Do we publish the status?
    boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
        HConstants.STATUS_PUBLISHED_DEFAULT);
    Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
        conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
            ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
            ClusterStatusPublisher.Publisher.class);

    if (shouldPublish) {
      if (publisherClass == null) {
        LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
            ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
            " is not set - not publishing status");
      } else {
        clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
        getChoreService().scheduleChore(clusterStatusPublisherChore);
      }
    }

    // Some unit tests don't need a cluster, so no zookeeper at all
    if (!conf.getBoolean("hbase.testing.nocluster", false)) {
      activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
      int infoPort = putUpJettyServer();
      startActiveMasterManager(infoPort);
    } else {
      activeMasterManager = null;
    }
  }

  // Returns the actual infoPort; -1 means the info server is disabled.
  private int putUpJettyServer() throws IOException {
    if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
      return -1;
    }
    int infoPort = conf.getInt("hbase.master.info.port.orig",
      HConstants.DEFAULT_MASTER_INFOPORT);
    // -1 is for disabling info server, so no redirecting
    if (infoPort < 0 || infoServer == null) {
      return -1;
    }
    String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
      String msg =
          "Failed to start redirecting jetty server. Address " + addr
              + " does not belong to this host. Correct configuration parameter: "
              + "hbase.master.info.bindAddress";
      LOG.error(msg);
      throw new IOException(msg);
    }

    RedirectServlet.regionServerInfoPort = infoServer.getPort();
    if (RedirectServlet.regionServerInfoPort == infoPort) {
      return infoPort;
    }
    masterJettyServer = new org.mortbay.jetty.Server();
    Connector connector = new SelectChannelConnector();
    connector.setHost(addr);
    connector.setPort(infoPort);
    masterJettyServer.addConnector(connector);
    masterJettyServer.setStopAtShutdown(true);
    Context context = new Context(masterJettyServer, "/", Context.NO_SESSIONS);
    context.addServlet(RedirectServlet.class, "/*");
    try {
      masterJettyServer.start();
    } catch (Exception e) {
      throw new IOException("Failed to start redirecting jetty server", e);
    }
    return connector.getLocalPort();
  }

  @Override
  protected TableDescriptors getFsTableDescriptors() throws IOException {
    return super.getFsTableDescriptors();
  }

  /**
   * For compatibility, if logging in with the regionserver credentials fails, try the master ones.
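   * <p>A hedged configuration sketch; the two keys are the ones used below, the
   * values are illustrative only:
   * <pre>
   * hbase.master.keytab.file        = /etc/security/keytabs/hbase.keytab
   * hbase.master.kerberos.principal = hbase/_HOST@EXAMPLE.COM
   * </pre>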
   */
  @Override
  protected void login(UserProvider user, String host) throws IOException {
    try {
      super.login(user, host);
    } catch (IOException ie) {
      user.login("hbase.master.keytab.file",
        "hbase.master.kerberos.principal", host);
    }
  }

  /**
   * If configured to put regions on the active master,
   * wait till a backup master becomes active.
   * Otherwise, loop till the server is stopped or aborted.
   */
  @Override
  protected void waitForMasterActive() {
    boolean tablesOnMaster = BaseLoadBalancer.tablesOnMaster(conf);
    while (!(tablesOnMaster && isActiveMaster)
        && !isStopped() && !isAborted()) {
      sleeper.sleep();
    }
  }

  @VisibleForTesting
  public MasterRpcServices getMasterRpcServices() {
    return (MasterRpcServices)rpcServices;
  }

  public boolean balanceSwitch(final boolean b) throws IOException {
    return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
  }

  @Override
  protected String getProcessName() {
    return MASTER;
  }

  @Override
  protected boolean canCreateBaseZNode() {
    return true;
  }

  @Override
  protected boolean canUpdateTableDescriptor() {
    return true;
  }

  @Override
  protected RSRpcServices createRpcServices() throws IOException {
    return new MasterRpcServices(this);
  }

  @Override
  protected void configureInfoServer() {
    infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class);
    infoServer.setAttribute(MASTER, this);
    if (BaseLoadBalancer.tablesOnMaster(conf)) {
      super.configureInfoServer();
    }
  }

  @Override
  protected Class<? extends HttpServlet> getDumpServlet() {
    return MasterDumpServlet.class;
  }

  /**
   * Emit the HMaster metrics, such as the region-in-transition metrics.
   * Wrapped in a try block just to be sure a metrics failure doesn't abort the HMaster.
   */
  @Override
  protected void doMetrics() {
    try {
      if (assignmentManager != null) {
        assignmentManager.updateRegionsInTransitionMetrics();
      }
    } catch (Throwable e) {
      LOG.error("Couldn't update metrics: " + e.getMessage());
    }
  }

  MetricsMaster getMasterMetrics() {
    return metricsMaster;
  }

  /**
   * Initialize all ZK based system trackers.
   * @throws IOException
   * @throws InterruptedException
   * @throws KeeperException
   * @throws CoordinatedStateException
   */
  void initializeZKBasedSystemTrackers() throws IOException,
      InterruptedException, KeeperException, CoordinatedStateException {
    this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
    this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);
    this.normalizer.setMasterServices(this);
    this.normalizerEnabled = conf.getBoolean(HConstants.HBASE_NORMALIZER_ENABLED, false);
    this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
    this.loadBalancerTracker.start();
    this.assignmentManager = new AssignmentManager(this, serverManager,
      this.balancer, this.service, this.metricsMaster,
      this.tableLockManager, tableStateManager);

    this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
        this.serverManager);
    this.regionServerTracker.start();

    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
      this.serverManager);
    this.drainingServerTracker.start();

    // Set the cluster as up.  If new RSs, they'll be waiting on this before
    // going ahead with their startup.
    boolean wasUp = this.clusterStatusTracker.isClusterUp();
    if (!wasUp) this.clusterStatusTracker.setClusterUp();

    LOG.info("Server active/primary master=" + this.serverName +
        ", sessionid=0x" +
        Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
        ", setting cluster-up flag (Was=" + wasUp + ")");

    // create/initialize the snapshot manager and other procedure managers
    this.snapshotManager = new SnapshotManager();
    this.mpmHost = new MasterProcedureManagerHost();
    this.mpmHost.register(this.snapshotManager);
    this.mpmHost.register(new MasterFlushTableProcedureManager());
    this.mpmHost.loadProcedures(conf);
    this.mpmHost.initialize(this, this.metricsMaster);
  }

  /**
   * Finish initialization of HMaster after becoming the primary master.
   *
   * <ol>
   * <li>Initialize master components - file system manager, server manager,
   *     assignment manager, region server tracker, etc</li>
   * <li>Start necessary service threads - balancer, catalog janitor,
   *     executor services, etc</li>
   * <li>Set cluster as UP in ZooKeeper</li>
   * <li>Wait for RegionServers to check-in</li>
   * <li>Split logs and perform data recovery, if necessary</li>
   * <li>Ensure assignment of meta/namespace regions</li>
   * <li>Handle either fresh cluster start or master failover</li>
   * </ol>
   *
   * @throws IOException
   * @throws InterruptedException
   * @throws KeeperException
   * @throws CoordinatedStateException
   */
  private void finishActiveMasterInitialization(MonitoredTask status)
      throws IOException, InterruptedException, KeeperException, CoordinatedStateException {

    isActiveMaster = true;
    Thread zombieDetector = new Thread(new InitializationMonitor(this));
    zombieDetector.start();

    /*
     * We are the active master now... go initialize components we need to run.
     * Note, there may be dross in zk from previous runs; it'll get addressed
     * below after we determine if cluster startup or failover.
     */

    status.setStatus("Initializing Master file system");

    this.masterActiveTime = System.currentTimeMillis();
    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
    this.fileSystemManager = new MasterFileSystem(this, this);

    // enable table descriptors cache
    this.tableDescriptors.setCacheOn();
    // set the META's descriptor to the correct replication
    this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
        conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
    // warm-up HTDs cache on master initialization
    if (preLoadTableDescriptors) {
      status.setStatus("Pre-loading table descriptors");
      this.tableDescriptors.getAll();
    }

    // publish cluster ID
    status.setStatus("Publishing Cluster ID in ZooKeeper");
    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
    this.serverManager = createServerManager(this, this);

    setupClusterConnection();

    // Invalidate all write locks held previously
    this.tableLockManager.reapWriteLocks();
    this.tableStateManager = new TableStateManager(this);

    status.setStatus("Initializing ZK system trackers");
    initializeZKBasedSystemTrackers();

    // initialize master side coprocessors before we start handling requests
    status.setStatus("Initializing master coprocessors");
    this.cpHost = new MasterCoprocessorHost(this, this.conf);

    // start up all service threads.
    status.setStatus("Initializing master service threads");
    startServiceThreads();

    // Wake up this server to check in
    sleeper.skipSleepCycle();

    // Wait for region servers to report in
    this.serverManager.waitForRegionServers(status);
    // Check zk for region servers that are up but didn't register
    for (ServerName sn: this.regionServerTracker.getOnlineServers()) {
      // The isServerOnline check is opportunistic, correctness is handled inside
      if (!this.serverManager.isServerOnline(sn)
          && serverManager.checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
        LOG.info("Registered server found up in zk but which has not yet reported in: " + sn);
      }
    }

    // get a list of previously failed RSs which need log splitting work
    // we recover hbase:meta region servers inside master initialization and
    // handle other failed servers in SSH in order to start up the master node ASAP
    Set<ServerName> previouslyFailedServers =
      this.fileSystemManager.getFailedServersFromLogFolders();

    // log splitting for hbase:meta server
    ServerName oldMetaServerLocation = metaTableLocator.getMetaRegionLocation(this.getZooKeeper());
    if (oldMetaServerLocation != null && previouslyFailedServers.contains(oldMetaServerLocation)) {
      splitMetaLogBeforeAssignment(oldMetaServerLocation);
      // Note: we can't remove oldMetaServerLocation from the previouslyFailedServers list because
      // it may also host user regions
    }
    Set<ServerName> previouslyFailedMetaRSs = getPreviouselyFailedMetaServersFromZK();
    // need to use the union of previouslyFailedMetaRSs recorded in ZK and previouslyFailedServers
    // instead of previouslyFailedMetaRSs alone to address the following two situations:
    // 1) the chained failure situation (recovery failed multiple times in a row).
    // 2) master gets killed right before it could delete the recovering hbase:meta from ZK while
    // the same server still has non-meta wals to be replayed so that
    // removeStaleRecoveringRegionsFromZK can't delete the stale hbase:meta region
    // Passing more servers into splitMetaLog is all right. If a server doesn't have a hbase:meta
    // wal, it is a no-op for that server.
    previouslyFailedMetaRSs.addAll(previouslyFailedServers);

    this.initializationBeforeMetaAssignment = true;

    // Wait for the regionserver to finish initialization.
    if (BaseLoadBalancer.tablesOnMaster(conf)) {
      waitForServerOnline();
    }

    // initialize load balancer
    this.balancer.setClusterStatus(getClusterStatus());
    this.balancer.setMasterServices(this);
    this.balancer.initialize();

    // Check if master is shutting down because of some issue
    // in initializing the regionserver or the balancer.
    if (isStopped()) return;

    // Make sure meta is assigned before proceeding.
    status.setStatus("Assigning Meta Region");
    assignMeta(status, previouslyFailedMetaRSs, HRegionInfo.DEFAULT_REPLICA_ID);
    // check if master is shutting down because the above assignMeta could return even if
    // hbase:meta isn't assigned when master is shutting down
    if (isStopped()) return;

    // migrate existing table state from zk, so splitters
    // and the recovery process treat states properly.
    for (Map.Entry<TableName, TableState.State> entry : ZKDataMigrator
        .queryForTableStates(getZooKeeper()).entrySet()) {
      LOG.info("Converting state from zk to new states:" + entry);
      tableStateManager.setTableState(entry.getKey(), entry.getValue());
    }
    ZKUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().tableZNode);

    status.setStatus("Submitting log splitting work for previously failed region servers");
    // Master has recovered the hbase:meta region server and we put
    // other failed region servers in a queue to be handled later by SSH
    for (ServerName tmpServer : previouslyFailedServers) {
      this.serverManager.processDeadServer(tmpServer, true);
    }

    // Fix up assignment manager status
    status.setStatus("Starting assignment manager");
    this.assignmentManager.joinCluster();

    // set cluster status again after user regions are assigned
    this.balancer.setClusterStatus(getClusterStatus());

    // Start balancer and meta catalog janitor after meta and regions have been assigned.
    status.setStatus("Starting balancer and catalog janitor");
    this.clusterStatusChore = new ClusterStatusChore(this, balancer);
    getChoreService().scheduleChore(clusterStatusChore);
    this.balancerChore = new BalancerChore(this);
    getChoreService().scheduleChore(balancerChore);
    this.normalizerChore = new RegionNormalizerChore(this);
    getChoreService().scheduleChore(normalizerChore);
    this.catalogJanitorChore = new CatalogJanitor(this, this);
    getChoreService().scheduleChore(catalogJanitorChore);

    status.setStatus("Starting namespace manager");
    initNamespace();

    if (this.cpHost != null) {
      try {
        this.cpHost.preMasterInitialization();
      } catch (IOException e) {
        LOG.error("Coprocessor preMasterInitialization() hook failed", e);
      }
    }

    status.markComplete("Initialization successful");
    LOG.info("Master has completed initialization");
    configurationManager.registerObserver(this.balancer);
    // Set master as 'initialized'.
    initialized = true;
    // assign the meta replicas
    Set<ServerName> EMPTY_SET = new HashSet<ServerName>();
    int numReplicas = conf.getInt(HConstants.META_REPLICAS_NUM,
           HConstants.DEFAULT_META_REPLICA_NUM);
    for (int i = 1; i < numReplicas; i++) {
      assignMeta(status, EMPTY_SET, i);
    }
    unassignExcessMetaReplica(zooKeeper, numReplicas);

    status.setStatus("Starting quota manager");
    initQuotaManager();

    // clear the dead servers with the same host name and port as an online server because we are
    // not removing a dead server with the same hostname and port as an rs which is trying to
    // check in before master initialization. See HBASE-5916.
    this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();

    // Check and set the znode ACLs if needed in case we are overtaking a non-secure configuration
    status.setStatus("Checking ZNode ACLs");
    zooKeeper.checkAndSetZNodeAcls();

    status.setStatus("Calling postStartMaster coprocessors");
    if (this.cpHost != null) {
      // don't let cp initialization errors kill the master
      try {
        this.cpHost.postStartMaster();
      } catch (IOException ioe) {
        LOG.error("Coprocessor postStartMaster() hook failed", ioe);
      }
    }

    zombieDetector.interrupt();
  }

  /**
   * Create a {@link ServerManager} instance.
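   * <p>A hedged test sketch of why creation is factored into its own method; the
   * {@code mockedServerManager} name is illustrative and Mockito is assumed to be
   * on the test classpath:
   * <pre>
   * HMaster spied = Mockito.spy(master);
   * Mockito.doReturn(mockedServerManager).when(spied)
   *     .createServerManager(Mockito.any(Server.class), Mockito.any(MasterServices.class));
   * </pre>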
   * @param master
   * @param services
   * @return An instance of {@link ServerManager}
   * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
   * @throws IOException
   */
  ServerManager createServerManager(final Server master,
      final MasterServices services)
  throws IOException {
    // We put this out here in a method so we can do a Mockito.spy and stub it out
    // w/ a mocked up ServerManager.
    return new ServerManager(master, services);
  }

  private void unassignExcessMetaReplica(ZooKeeperWatcher zkw, int numMetaReplicasConfigured) {
    // unassign the unneeded replicas (e.g., if the previous master was configured
    // with a replication of 3 and now it is 2, we need to unassign the 1 unneeded replica)
    try {
      List<String> metaReplicaZnodes = zooKeeper.getMetaReplicaNodes();
      for (String metaReplicaZnode : metaReplicaZnodes) {
        int replicaId = zooKeeper.getMetaReplicaIdFromZnode(metaReplicaZnode);
        if (replicaId >= numMetaReplicasConfigured) {
          RegionState r = MetaTableLocator.getMetaRegionState(zkw, replicaId);
          LOG.info("Closing excess replica of meta region " + r.getRegion());
          // send a close and wait for a max of 30 seconds
          ServerManager.closeRegionSilentlyAndWait(getConnection(), r.getServerName(),
              r.getRegion(), 30000);
          ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(replicaId));
        }
      }
    } catch (Exception ex) {
      // ignore the exception since we don't want the master to be wedged due to potential
      // issues in the cleanup of the extra regions. We can do that cleanup via hbck or manually
      LOG.warn("Ignoring exception " + ex);
    }
  }

  /**
   * Check that <code>hbase:meta</code> is assigned. If not, assign it.
   * @param status MonitoredTask
   * @param previouslyFailedMetaRSs
   * @param replicaId
   * @throws InterruptedException
   * @throws IOException
   * @throws KeeperException
   */
  void assignMeta(MonitoredTask status, Set<ServerName> previouslyFailedMetaRSs, int replicaId)
      throws InterruptedException, IOException, KeeperException {
    // Work on meta region
    int assigned = 0;
    long timeout = this.conf.getLong("hbase.catalog.verification.timeout", 1000);
    if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
      status.setStatus("Assigning hbase:meta region");
    } else {
      status.setStatus("Assigning hbase:meta region, replicaId " + replicaId);
    }

    // Get current meta state from zk.
    RegionState metaState = MetaTableLocator.getMetaRegionState(getZooKeeper(), replicaId);
    HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(HRegionInfo.FIRST_META_REGIONINFO,
        replicaId);
    RegionStates regionStates = assignmentManager.getRegionStates();
    regionStates.createRegionState(hri, metaState.getState(),
        metaState.getServerName(), null);

    if (!metaState.isOpened() || !metaTableLocator.verifyMetaRegionLocation(
        this.getConnection(), this.getZooKeeper(), timeout, replicaId)) {
      ServerName currentMetaServer = metaState.getServerName();
      if (serverManager.isServerOnline(currentMetaServer)) {
        if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
          LOG.info("Meta was in transition on " + currentMetaServer);
        } else {
          LOG.info("Meta with replicaId " + replicaId + " was in transition on " +
                    currentMetaServer);
        }
        assignmentManager.processRegionsInTransition(Arrays.asList(metaState));
      } else {
        if (currentMetaServer != null) {
          if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
            splitMetaLogBeforeAssignment(currentMetaServer);
            regionStates.logSplit(HRegionInfo.FIRST_META_REGIONINFO);
            previouslyFailedMetaRSs.add(currentMetaServer);
          }
        }
        LOG.info("Re-assigning hbase:meta with replicaId " + replicaId +
            ", it was on " + currentMetaServer);
        assignmentManager.assignMeta(hri);
      }
      assigned++;
    }

    if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID)
      getTableStateManager().setTableState(TableName.META_TABLE_NAME, TableState.State.ENABLED);
    // TODO: should we prevent using the state manager before meta was initialized?
    // tableStateManager.start();

    if ((RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode())
        && (!previouslyFailedMetaRSs.isEmpty())) {
      // log replay mode needs a new hbase:meta RS to be assigned first
      status.setStatus("replaying log for Meta Region");
      this.fileSystemManager.splitMetaLog(previouslyFailedMetaRSs);
    }

    this.assignmentManager.setEnabledTable(TableName.META_TABLE_NAME);
    tableStateManager.start();

    // Make sure a hbase:meta location is set. We need to enable SSH here since
    // if the meta region server dies at this time, we need it to be re-assigned
    // by SSH so that system tables can be assigned.
    // No need to wait on meta when assigned == 0, i.e. meta was just verified.
    if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) enableCrashedServerProcessing(assigned != 0);
    LOG.info("hbase:meta with replicaId " + replicaId + " assigned=" + assigned + ", location="
      + metaTableLocator.getMetaRegionLocation(this.getZooKeeper(), replicaId));
    status.setStatus("META assigned.");
  }

  void initNamespace() throws IOException {
    // create the namespace manager
    tableNamespaceManager = new TableNamespaceManager(this);
    tableNamespaceManager.start();
  }

  void initQuotaManager() throws IOException {
    MasterQuotaManager quotaManager = new MasterQuotaManager(this);
    this.assignmentManager.setRegionStateListener((RegionStateListener) quotaManager);
    quotaManager.start();
    this.quotaManager = quotaManager;
  }

  boolean isCatalogJanitorEnabled() {
    return catalogJanitorChore != null ?
      catalogJanitorChore.getEnabled() : false;
  }

  private void splitMetaLogBeforeAssignment(ServerName currentMetaServer) throws IOException {
    if (RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode()) {
      // In log replay mode, we mark the hbase:meta region as recovering in ZK
      Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
      regions.add(HRegionInfo.FIRST_META_REGIONINFO);
      this.fileSystemManager.prepareLogReplay(currentMetaServer, regions);
    } else {
      // In recovered.edits mode: create recovered edits file for the hbase:meta server
      this.fileSystemManager.splitMetaLog(currentMetaServer);
    }
  }

  private void enableCrashedServerProcessing(final boolean waitForMeta)
  throws IOException, InterruptedException {
    // If crashed server processing is disabled, we enable it and expire those dead but not expired
    // servers. This is required so that if meta is assigning to a server which dies after
    // assignMeta starts assignment, ServerCrashProcedure can re-assign it. Otherwise, we will be
    // stuck here waiting forever if waitForMeta is specified.
    if (!serverCrashProcessingEnabled) {
      serverCrashProcessingEnabled = true;
      this.serverManager.processQueuedDeadServers();
    }

    if (waitForMeta) {
      metaTableLocator.waitMetaRegionLocation(this.getZooKeeper());
    }
  }

  /**
   * Returns the set of region server names recorded under the hbase:meta
   * recovering-region ZK node.
   * @return Set of meta server names which were recorded in ZK
   * @throws KeeperException
   */
  private Set<ServerName> getPreviouselyFailedMetaServersFromZK() throws KeeperException {
    Set<ServerName> result = new HashSet<ServerName>();
    String metaRecoveringZNode = ZKUtil.joinZNode(zooKeeper.recoveringRegionsZNode,
      HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
    List<String> regionFailedServers = ZKUtil.listChildrenNoWatch(zooKeeper, metaRecoveringZNode);
    if (regionFailedServers == null) return result;

    for (String failedServer : regionFailedServers) {
      ServerName server = ServerName.parseServerName(failedServer);
      result.add(server);
    }
    return result;
  }

  @Override
  public TableDescriptors getTableDescriptors() {
    return this.tableDescriptors;
  }

  @Override
  public ServerManager getServerManager() {
    return this.serverManager;
  }

  @Override
  public MasterFileSystem getMasterFileSystem() {
    return this.fileSystemManager;
  }

  @Override
  public TableStateManager getTableStateManager() {
    return tableStateManager;
  }

  /*
   * Start up all services. If any of these threads gets an unhandled exception
   * then it just dies with a logged message.  This should be fine because
   * in general, we do not expect the master to get such unhandled exceptions
   * as OOMEs; it should be lightly loaded. See what HRegionServer does if
   * you need to install an unhandled exception handler.
   */
  private void startServiceThreads() throws IOException {
    // Start the executor service pools
    this.service.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
    this.service.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
    this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));
    this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.meta.serverops.threads", 5));
    this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
      conf.getInt("hbase.master.executor.logreplayops.threads", 10));

    // We depend on there being only one instance of this executor running
    // at a time.  To do concurrency, would need fencing of enable/disable of
    // tables.
    // Any time changing this maxThreads to > 1, pls see the comment at
    // AccessController#postCreateTableHandler
    this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
    startProcedureExecutor();

    // Start the log cleaner thread
    int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
    this.logCleaner =
      new LogCleaner(cleanerInterval,
        this, conf, getMasterFileSystem().getFileSystem(),
        getMasterFileSystem().getOldLogDir());
    getChoreService().scheduleChore(logCleaner);

    // start the hfile archive cleaner thread
    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
        .getFileSystem(), archiveDir);
    getChoreService().scheduleChore(hfileCleaner);
    serviceStarted = true;
    if (LOG.isTraceEnabled()) {
      LOG.trace("Started service threads");
    }
  }

  @Override
  protected void sendShutdownInterrupt() {
    super.sendShutdownInterrupt();
    stopProcedureExecutor();
  }

  @Override
  protected void stopServiceThreads() {
    if (masterJettyServer != null) {
      LOG.info("Stopping master jetty server");
      try {
        masterJettyServer.stop();
      } catch (Exception e) {
        LOG.error("Failed to stop master jetty server", e);
      }
    }
    super.stopServiceThreads();
    stopChores();

    // Wait for all the remaining region servers to report in IFF we were
    // running a cluster shutdown AND we were NOT aborting.
    if (!isAborted() && this.serverManager != null &&
        this.serverManager.isClusterShutdown()) {
      this.serverManager.letRegionServersShutdown();
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Stopping service threads");
    }
    // Clean up and close up shop
    if (this.logCleaner != null) this.logCleaner.cancel(true);
    if (this.hfileCleaner != null) this.hfileCleaner.cancel(true);
    if (this.quotaManager != null) this.quotaManager.stop();
    if (this.activeMasterManager != null) this.activeMasterManager.stop();
    if (this.serverManager != null) this.serverManager.stop();
    if (this.assignmentManager != null) this.assignmentManager.stop();
    if (this.fileSystemManager != null) this.fileSystemManager.stop();
    if (this.mpmHost != null) this.mpmHost.stop("server shutting down.");
  }

  private void startProcedureExecutor() throws IOException {
    final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
    final Path logDir = new Path(fileSystemManager.getRootDir(),
        MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);

    procedureStore = new WALProcedureStore(conf, fileSystemManager.getFileSystem(), logDir,
        new MasterProcedureEnv.WALStoreLeaseRecovery(this));
    procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
    procedureExecutor = new ProcedureExecutor(conf, procEnv, procedureStore,
        procEnv.getProcedureQueue());

    final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
        Math.max(Runtime.getRuntime().availableProcessors(),
          MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
    final boolean abortOnCorruption = conf.getBoolean(
        MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
        MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
    procedureStore.start(numThreads);
    procedureExecutor.start(numThreads, abortOnCorruption);
  }

  private void stopProcedureExecutor() {
    if (procedureExecutor != null) {
      procedureExecutor.stop();
    }

    if (procedureStore != null) {
      procedureStore.stop(isAborted());
    }
  }

  private void stopChores() {
    if (this.balancerChore != null) {
      this.balancerChore.cancel(true);
    }
    if (this.normalizerChore != null) {
      this.normalizerChore.cancel(true);
    }
    if (this.clusterStatusChore != null) {
      this.clusterStatusChore.cancel(true);
    }
    if (this.catalogJanitorChore != null) {
      this.catalogJanitorChore.cancel(true);
    }
    if (this.clusterStatusPublisherChore != null) {
      clusterStatusPublisherChore.cancel(true);
    }
  }

  /**
   * @return Get remote side's InetAddress
   * @throws UnknownHostException
   */
  InetAddress getRemoteInetAddress(final int port,
      final long serverStartCode) throws UnknownHostException {
    // Do it out here in its own little method so we can fake an address when
    // mocking up in tests.
    InetAddress ia = RpcServer.getRemoteIp();

    // The call could be from the local regionserver,
    // in which case, there is no remote address.
    if (ia == null && serverStartCode == startcode) {
      InetSocketAddress isa = rpcServices.getSocketAddress();
      if (isa != null && isa.getPort() == port) {
        ia = isa.getAddress();
      }
    }
    return ia;
  }

  /**
   * @return Maximum time we should run the balancer for
   */
  private int getBalancerCutoffTime() {
    int balancerCutoffTime = getConfiguration().getInt("hbase.balancer.max.balancing", -1);
    if (balancerCutoffTime == -1) {
      // if the cutoff time isn't set, default it to the balancer period
      int balancerPeriod = getConfiguration().getInt("hbase.balancer.period", 300000);
      balancerCutoffTime = balancerPeriod;
    }
    return balancerCutoffTime;
  }
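
  // A hedged tuning sketch for the cutoff above (values in ms, illustrative only):
  //   hbase.balancer.max.balancing = 60000    <- cap each balancer run at one minute
  //   hbase.balancer.period        = 300000   <- the default period, used when no cap is set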
1180 
1181   public boolean balance() throws IOException {
1182     // if master not initialized, don't run balancer.
1183     if (!this.initialized) {
1184       LOG.debug("Master has not been initialized, don't run balancer.");
1185       return false;
1186     }
1187     // Do this call outside of synchronized block.
1188     int maximumBalanceTime = getBalancerCutoffTime();
1189     synchronized (this.balancer) {
1190       // If balance not true, don't run balancer.
1191       if (!this.loadBalancerTracker.isBalancerOn()) return false;
1192       // Only allow one balance run at at time.
1193       if (this.assignmentManager.getRegionStates().isRegionsInTransition()) {
1194         Map<String, RegionState> regionsInTransition =
1195           this.assignmentManager.getRegionStates().getRegionsInTransition();
1196         LOG.debug("Not running balancer because " + regionsInTransition.size() +
1197           " region(s) in transition: " + org.apache.commons.lang.StringUtils.
1198             abbreviate(regionsInTransition.toString(), 256));
1199         return false;
1200       }
1201       if (this.serverManager.areDeadServersInProgress()) {
1202         LOG.debug("Not running balancer because processing dead regionserver(s): " +
1203           this.serverManager.getDeadServers());
1204         return false;
1205       }
1206 
1207       if (this.cpHost != null) {
1208         try {
1209           if (this.cpHost.preBalance()) {
1210             LOG.debug("Coprocessor bypassing balancer request");
1211             return false;
1212           }
1213         } catch (IOException ioe) {
1214           LOG.error("Error invoking master coprocessor preBalance()", ioe);
1215           return false;
1216         }
1217       }
1218 
1219       Map<TableName, Map<ServerName, List<HRegionInfo>>> assignmentsByTable =
1220         this.assignmentManager.getRegionStates().getAssignmentsByTable();
1221 
1222       List<RegionPlan> plans = new ArrayList<RegionPlan>();
1223       //Give the balancer the current cluster state.
1224       this.balancer.setClusterStatus(getClusterStatus());
1225       for (Map<ServerName, List<HRegionInfo>> assignments : assignmentsByTable.values()) {
1226         List<RegionPlan> partialPlans = this.balancer.balanceCluster(assignments);
1227         if (partialPlans != null) plans.addAll(partialPlans);
1228       }
1229       long cutoffTime = System.currentTimeMillis() + maximumBalanceTime;
1230       int rpCount = 0;  // number of RegionPlans balanced so far
1231       long totalRegPlanExecTime = 0;
1232       if (plans != null && !plans.isEmpty()) {
1233         for (RegionPlan plan: plans) {
1234           LOG.info("balance " + plan);
1235           long balStartTime = System.currentTimeMillis();
1236           //TODO: bulk assign
1237           this.assignmentManager.balance(plan);
1238           totalRegPlanExecTime += System.currentTimeMillis()-balStartTime;
1239           rpCount++;
1240           if (rpCount < plans.size() &&
1241               // if performing next balance exceeds cutoff time, exit the loop
1242               (System.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) {
1243             // TODO: After balance, there should not be a cutoff time (keeping it as
1244             // a safety net for now)
1245             LOG.debug("No more balancing till next balance run; maximumBalanceTime=" +
1246               maximumBalanceTime);
1247             break;
1248           }
1249         }
1250       }
1251       if (this.cpHost != null) {
1252         try {
1253           this.cpHost.postBalance(rpCount < plans.size() ? plans.subList(0, rpCount) : plans);
1254         } catch (IOException ioe) {
1255           // balancing already succeeded so don't change the result
1256           LOG.error("Error invoking master coprocessor postBalance()", ioe);
1257         }
1258       }
1259     }
1260     // If LoadBalancer did not generate any plans, it means the cluster is already balanced.
1261     // Return true indicating a success.
1262     return true;
1263   }
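
       // Illustrative only: clients normally reach balance() through the public Admin
       // API rather than calling it directly. A minimal sketch, assuming an HBase 1.x
       // client (org.apache.hadoop.hbase.client.ConnectionFactory/Connection/Admin):
       //
       //   Configuration conf = HBaseConfiguration.create();
       //   try (Connection conn = ConnectionFactory.createConnection(conf)) {
       //     Admin admin = conn.getAdmin();
       //     boolean ran = admin.balancer();  // triggers HMaster#balance() over RPC
       //   }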
1264 
1265   /**
1266    * Perform normalization of cluster (invoked by {@link RegionNormalizerChore}).
1267    *
1268    * @return true if normalization step was performed successfully, false otherwise
1269    *   (specifically, if HMaster hasn't been initialized properly or normalization
1270    *   is globally disabled)
1271    * @throws IOException
1272    */
1273   public boolean normalizeRegions() throws IOException {
1274     if (!this.initialized) {
1275       LOG.debug("Master has not been initialized, don't run region normalizer.");
1276       return false;
1277     }
1278 
1279     if (!this.normalizerEnabled) {
1280       LOG.debug("Region normalization is disabled, don't run region normalizer.");
1281       return false;
1282     }
1283 
1284     synchronized (this.normalizer) {
1285       // Don't run the normalizer concurrently
1286       List<TableName> allEnabledTables = new ArrayList<>(
1287         this.tableStateManager.getTablesInStates(TableState.State.ENABLED));
1288 
1289       Collections.shuffle(allEnabledTables);
1290 
1291       for (TableName table : allEnabledTables) {
1292         if (quotaManager.getNamespaceQuotaManager() != null &&
1293             quotaManager.getNamespaceQuotaManager().getState(table.getNamespaceAsString()) != null){
1294           LOG.debug("Skipping normalizing " + table + " since its namespace has quota");
1295           continue;
1296         }
1297         if (table.isSystemTable() || !getTableDescriptors().getDescriptor(table).
1298             getHTableDescriptor().isNormalizationEnabled()) {
1299           LOG.debug("Skipping normalization for table: " + table + ", as it's either a system"
1300             + " table or doesn't have auto normalization turned on");
1301           continue;
1302         }
1303         this.normalizer.computePlanForTable(table).execute(clusterConnection.getAdmin());
1304       }
1305     }
1306     // If the normalizer did not generate any plans, the cluster is already normalized.
1307     // Return true indicating a success.
1308     return true;
1309   }
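
       // Illustrative only: normalization runs for a table when the global switch is
       // on and the table opts in. A minimal client-side sketch (assumes an HBase
       // 1.2+ Admin handle and an existing HTableDescriptor htd; the global key is
       // the one read into this.normalizerEnabled, assumed "hbase.normalizer.enabled"):
       //
       //   htd.setNormalizationEnabled(true);           // per-table opt-in
       //   admin.modifyTable(htd.getTableName(), htd);  // push the updated descriptor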
1310 
1311   /**
1312    * @return Client info for use as prefix on an audit log string; who did an action
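        * (for example {@code "Client=alice//10.0.0.5"}; values here are illustrative)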
1313    */
1314   String getClientIdAuditPrefix() {
1315     return "Client=" + RpcServer.getRequestUserName() + "/" + RpcServer.getRemoteAddress();
1316   }
1317 
1318   /**
1319    * Switch for the background CatalogJanitor thread.
1320    * Used for testing.  The thread will continue to run.  It will just be a noop
1321    * if disabled.
1322    * @param b If false, the catalog janitor won't do anything.
1323    */
1324   public void setCatalogJanitorEnabled(final boolean b) {
1325     this.catalogJanitorChore.setEnabled(b);
1326   }
1327 
1328   @Override
1329   public void dispatchMergingRegions(final HRegionInfo region_a,
1330       final HRegionInfo region_b, final boolean forcible) throws IOException {
1331     checkInitialized();
1332     this.service.submit(new DispatchMergingRegionHandler(this,
1333       this.catalogJanitorChore, region_a, region_b, forcible));
1334   }
1335 
1336   void move(final byte[] encodedRegionName,
1337       final byte[] destServerName) throws HBaseIOException {
1338     RegionState regionState = assignmentManager.getRegionStates().
1339       getRegionState(Bytes.toString(encodedRegionName));
1340     if (regionState == null) {
1341       throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
1342     }
1343 
1344     HRegionInfo hri = regionState.getRegion();
1345     ServerName dest;
1346     if (destServerName == null || destServerName.length == 0) {
1347       LOG.info("Passed destination servername is null/empty so " +
1348         "choosing a server at random");
1349       final List<ServerName> destServers = this.serverManager.createDestinationServersList(
1350         regionState.getServerName());
1351       dest = balancer.randomAssignment(hri, destServers);
1352       if (dest == null) {
1353         LOG.debug("Unable to determine a plan to assign " + hri);
1354         return;
1355       }
1356     } else {
1357       dest = ServerName.valueOf(Bytes.toString(destServerName));
1358       if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer
1359           && !((BaseLoadBalancer)balancer).shouldBeOnMaster(hri)) {
1360         // Don't put user regions on the master, to avoid the balancer having to
1361         // move them off again later. Tests may still intentionally put regions
1362         // on the master, though.
1363         LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
1364           + " to avoid unnecessary region moving later by load balancer,"
1365           + " because it should not be on master");
1366         return;
1367       }
1368     }
1369 
1370     if (dest.equals(regionState.getServerName())) {
1371       LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
1372         + " because region already assigned to the same server " + dest + ".");
1373       return;
1374     }
1375 
1376     // Now we can do the move
1377     RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);
1378 
1379     try {
1380       checkInitialized();
1381       if (this.cpHost != null) {
1382         if (this.cpHost.preMove(hri, rp.getSource(), rp.getDestination())) {
1383           return;
1384         }
1385       }
1386       // Warm up the region on the destination before initiating the move. This call
1387       // is synchronous and takes some time, so do it before the source region gets
1388       // closed.
1389       serverManager.sendRegionWarmup(rp.getDestination(), hri);
1390 
1391       LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
1392       this.assignmentManager.balance(rp);
1393       if (this.cpHost != null) {
1394         this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
1395       }
1396     } catch (IOException ioe) {
1397       if (ioe instanceof HBaseIOException) {
1398         throw (HBaseIOException)ioe;
1399       }
1400       throw new HBaseIOException(ioe);
1401     }
1402   }
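
       // Illustrative only: the usual entry point for this is Admin#move. A minimal
       // sketch (assumes an Admin handle; the destination format is
       // "hostname,port,startcode"):
       //
       //   byte[] encoded = hri.getEncodedNameAsBytes();
       //   admin.move(encoded, Bytes.toBytes("rs1.example.com,16020,1449000000000"));
       //   admin.move(encoded, null);  // null/empty destination picks a random server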
1403 
1404   @Override
1405   public long createTable(HTableDescriptor hTableDescriptor,
1406       byte [][] splitKeys) throws IOException {
1407     if (isStopped()) {
1408       throw new MasterNotRunningException();
1409     }
1410 
1411     String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
1412     ensureNamespaceExists(namespace);
1413 
1414     HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, splitKeys);
1415     checkInitialized();
1416     sanityCheckTableDescriptor(hTableDescriptor);
1417 
1418     if (cpHost != null) {
1419       cpHost.preCreateTable(hTableDescriptor, newRegions);
1420     }
1421     LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
1422 
1423     // TODO: We can handle/merge duplicate requests, and differentiate the case of
1424     //       TableExistsException by saying if the schema is the same or not.
1425     ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
1426     long procId = this.procedureExecutor.submitProcedure(
1427       new CreateTableProcedure(procedureExecutor.getEnvironment(),
1428         hTableDescriptor, newRegions, latch));
1429     latch.await();
1430 
1431     if (cpHost != null) {
1432       cpHost.postCreateTable(hTableDescriptor, newRegions);
1433     }
1434 
1435     return procId;
1436   }
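
       // Illustrative only: a pre-split table create through the client API, which
       // arrives here via RPC. A minimal sketch (assumes an Admin handle):
       //
       //   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
       //   htd.addFamily(new HColumnDescriptor("cf"));
       //   byte[][] splits = { Bytes.toBytes("row5000"), Bytes.toBytes("row9000") };
       //   admin.createTable(htd, splits);  // two split points, three regions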
1437 
1438   /**
1439    * Checks whether the table conforms to some sane limits, and configured
1440    * values (compression, etc) work. Throws an exception if something is wrong.
1441    * @throws IOException
1442    */
1443   private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
1444     final String CONF_KEY = "hbase.table.sanity.checks";
1445     boolean logWarn = false;
1446     if (!conf.getBoolean(CONF_KEY, true)) {
1447       logWarn = true;
1448     }
1449     String tableVal = htd.getConfigurationValue(CONF_KEY);
1450     if (tableVal != null && !Boolean.valueOf(tableVal)) {
1451       logWarn = true;
1452     }
1453 
1454     // check max file size
1455     long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
1456     long maxFileSize = htd.getMaxFileSize();
1457     if (maxFileSize < 0) {
1458       maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
1459     }
1460     if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
1461       String message = "MAX_FILESIZE for table descriptor or "
1462           + "\"hbase.hregion.max.filesize\" (" + maxFileSize
1463           + ") is too small, which might cause over splitting into unmanageable "
1464           + "number of regions.";
1465       warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
1466     }
1467 
1468     // check flush size
1469     long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
1470     long flushSize = htd.getMemStoreFlushSize();
1471     if (flushSize < 0) {
1472       flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
1473     }
1474     if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
1475       String message = "MEMSTORE_FLUSHSIZE for table descriptor or "
1476           + "\"hbase.hregion.memstore.flush.size\" ("+flushSize+") is too small, which might cause"
1477           + " very frequent flushing.";
1478       warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
1479     }
1480 
1481     // check that coprocessors and other specified plugin classes can be loaded
1482     try {
1483       checkClassLoading(conf, htd);
1484     } catch (Exception ex) {
1485       warnOrThrowExceptionForFailure(logWarn, CONF_KEY, ex.getMessage(), null);
1486     }
1487 
1488     // check compression can be loaded
1489     try {
1490       checkCompression(htd);
1491     } catch (IOException e) {
1492       warnOrThrowExceptionForFailure(logWarn, CONF_KEY, e.getMessage(), e);
1493     }
1494 
1495     // check encryption can be loaded
1496     try {
1497       checkEncryption(conf, htd);
1498     } catch (IOException e) {
1499       warnOrThrowExceptionForFailure(logWarn, CONF_KEY, e.getMessage(), e);
1500     }
1501 
1502     // check that we have at least 1 CF
1503     if (htd.getColumnFamilies().length == 0) {
1504       String message = "Table should have at least one column family.";
1505       warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
1506     }
1507 
1508     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1509       if (hcd.getTimeToLive() <= 0) {
1510         String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
1511         warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
1512       }
1513 
1514       // check blockSize
1515       if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
1516         String message = "Block size for column family " + hcd.getNameAsString()
1517             + " must be between 1K and 16MB.";
1518         warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
1519       }
1520 
1521       // check versions
1522       if (hcd.getMinVersions() < 0) {
1523         String message = "Min versions for column family " + hcd.getNameAsString()
1524           + " must not be negative.";
1525         warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
1526       }
1527       // max versions already being checked
1528 
1529       // HBASE-13776 Setting illegal versions for HColumnDescriptor
1530       //  does not throw IllegalArgumentException
1531       // check minVersions <= maxVersions
1532       if (hcd.getMinVersions() > hcd.getMaxVersions()) {
1533         String message = "Min versions for column family " + hcd.getNameAsString()
1534             + " must be less than or equal to the Max versions.";
1535         warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
1536       }
1537 
1538       // check replication scope
1539       if (hcd.getScope() < 0) {
1540         String message = "Replication scope for column family "
1541           + hcd.getNameAsString() + " must be non-negative.";
1542         warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
1543       }
1544 
1545       // TODO: should we check coprocessors and encryption?
1546     }
1547   }
1548 
1549   // HBASE-13350 - Helper method to log warning on sanity check failures if checks disabled.
1550   private static void warnOrThrowExceptionForFailure(boolean logWarn, String confKey,
1551       String message, Exception cause) throws IOException {
1552     if (!logWarn) {
1553       throw new DoNotRetryIOException(message + " Set " + confKey +
1554           " to false in the configuration or the table descriptor if you want to bypass sanity checks", cause);
1555     }
1556     LOG.warn(message);
1557   }
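
       // Illustrative only: how the sanity-check switch is typically toggled, either
       // cluster-wide or per table (a sketch; keys as used above):
       //
       //   conf.setBoolean("hbase.table.sanity.checks", false);         // cluster-wide
       //   htd.setConfiguration("hbase.table.sanity.checks", "false");  // per table
       //
       // With the switch off, the failures above are logged as warnings instead of
       // being thrown as DoNotRetryIOException.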
1558 
1559   private void startActiveMasterManager(int infoPort) throws KeeperException {
1560     String backupZNode = ZKUtil.joinZNode(
1561       zooKeeper.backupMasterAddressesZNode, serverName.toString());
1562     /*
1563     * Add a ZNode for ourselves in the backup master directory since we
1564     * may not become the active master. If we don't, we want the actual active
1565     * master to know we are a backup master, so that it won't assign
1566     * regions to us if so configured.
1567     *
1568     * If we become the active master later, ActiveMasterManager will delete
1569     * this node explicitly.  If we crash before then, ZooKeeper will delete
1570     * this node for us since it is ephemeral.
1571     */
1572     LOG.info("Adding backup master ZNode " + backupZNode);
1573     if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode,
1574         serverName, infoPort)) {
1575       LOG.warn("Failed create of " + backupZNode + " by " + serverName);
1576     }
1577 
1578     activeMasterManager.setInfoPort(infoPort);
1579     // Start a thread to try to become the active master, so we won't block here
1580     Threads.setDaemonThreadRunning(new Thread(new Runnable() {
1581       @Override
1582       public void run() {
1583         int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT,
1584           HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
1585         // If we're a backup master, stall until a primary writes its address
1586         if (conf.getBoolean(HConstants.MASTER_TYPE_BACKUP,
1587           HConstants.DEFAULT_MASTER_TYPE_BACKUP)) {
1588           LOG.debug("HMaster started in backup mode. "
1589             + "Stalling until master znode is written.");
1590           // This will only be a minute or so while the cluster starts up,
1591           // so don't worry about setting watches on the parent znode
1592           while (!activeMasterManager.hasActiveMaster()) {
1593             LOG.debug("Waiting for master address ZNode to be written "
1594               + "(Also watching cluster state node)");
1595             Threads.sleep(timeout);
1596           }
1597         }
1598         MonitoredTask status = TaskMonitor.get().createStatus("Master startup");
1599         status.setDescription("Master startup");
1600         try {
1601           if (activeMasterManager.blockUntilBecomingActiveMaster(timeout, status)) {
1602             finishActiveMasterInitialization(status);
1603           }
1604         } catch (Throwable t) {
1605           status.setStatus("Failed to become active: " + t.getMessage());
1606           LOG.fatal("Failed to become active master", t);
1607           // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility
1608           if (t instanceof NoClassDefFoundError &&
1609             t.getMessage()
1610               .contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction")) {
1611             // improved error message for this special case
1612             abort("HBase is having a problem with its Hadoop jars.  You may need to "
1613               + "recompile HBase against Hadoop version "
1614               + org.apache.hadoop.util.VersionInfo.getVersion()
1615               + " or change your hadoop jars to start properly", t);
1616           } else {
1617             abort("Unhandled exception. Starting shutdown.", t);
1618           }
1619         } finally {
1620           status.cleanup();
1621         }
1622       }
1623     }, getServerName().toShortString() + ".activeMasterManager"));
1624   }
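
       // Illustrative only: the backup-master stall above is driven by configuration.
       // A minimal sketch of starting a master in backup mode (assumed invocation):
       //
       //   conf.setBoolean(HConstants.MASTER_TYPE_BACKUP, true);  // "hbase.master.backup"
       //
       // or, equivalently, on the command line: hbase master start --backup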
1625 
1626   private void checkCompression(final HTableDescriptor htd)
1627   throws IOException {
1628     if (!this.masterCheckCompression) return;
1629     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1630       checkCompression(hcd);
1631     }
1632   }
1633 
1634   private void checkCompression(final HColumnDescriptor hcd)
1635   throws IOException {
1636     if (!this.masterCheckCompression) return;
1637     CompressionTest.testCompression(hcd.getCompressionType());
1638     CompressionTest.testCompression(hcd.getCompactionCompressionType());
1639   }
1640 
1641   private void checkEncryption(final Configuration conf, final HTableDescriptor htd)
1642   throws IOException {
1643     if (!this.masterCheckEncryption) return;
1644     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1645       checkEncryption(conf, hcd);
1646     }
1647   }
1648 
1649   private void checkEncryption(final Configuration conf, final HColumnDescriptor hcd)
1650   throws IOException {
1651     if (!this.masterCheckEncryption) return;
1652     EncryptionTest.testEncryption(conf, hcd.getEncryptionType(), hcd.getEncryptionKey());
1653   }
1654 
1655   private void checkClassLoading(final Configuration conf, final HTableDescriptor htd)
1656   throws IOException {
1657     RegionSplitPolicy.getSplitPolicyClass(htd, conf);
1658     RegionCoprocessorHost.testTableCoprocessorAttrs(conf, htd);
1659   }
1660 
1661   private static boolean isCatalogTable(final TableName tableName) {
1662     return tableName.equals(TableName.META_TABLE_NAME);
1663   }
1664 
1665   @Override
1666   public long deleteTable(final TableName tableName) throws IOException {
1667     checkInitialized();
1668     if (cpHost != null) {
1669       cpHost.preDeleteTable(tableName);
1670     }
1671     LOG.info(getClientIdAuditPrefix() + " delete " + tableName);
1672 
1673     // TODO: We can handle/merge duplicate request
1674     ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
1675     long procId = this.procedureExecutor.submitProcedure(
1676         new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, latch));
1677     latch.await();
1678 
1679     if (cpHost != null) {
1680       cpHost.postDeleteTable(tableName);
1681     }
1682 
1683     return procId;
1684   }
1685 
1686   @Override
1687   public long truncateTable(TableName tableName, boolean preserveSplits) throws IOException {
1688     checkInitialized();
1689     if (cpHost != null) {
1690       cpHost.preTruncateTable(tableName);
1691     }
1692     LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);
1693 
1694     long procId = this.procedureExecutor.submitProcedure(
1695         new TruncateTableProcedure(procedureExecutor.getEnvironment(), tableName, preserveSplits));
1696     ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
1697 
1698     if (cpHost != null) {
1699       cpHost.postTruncateTable(tableName);
1700     }
1701     return procId;
1702   }
1703 
1704   @Override
1705   public void addColumn(final TableName tableName, final HColumnDescriptor columnDescriptor)
1706       throws IOException {
1707     checkInitialized();
1708     checkCompression(columnDescriptor);
1709     checkEncryption(conf, columnDescriptor);
1710     if (cpHost != null) {
1711       if (cpHost.preAddColumn(tableName, columnDescriptor)) {
1712         return;
1713       }
1714     }
1715     // Execute the operation synchronously - wait for the operation to complete before continuing.
1716     long procId =
1717         this.procedureExecutor.submitProcedure(new AddColumnFamilyProcedure(procedureExecutor
1718             .getEnvironment(), tableName, columnDescriptor));
1719     ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
1720     if (cpHost != null) {
1721       cpHost.postAddColumn(tableName, columnDescriptor);
1722     }
1723   }
1724 
1725   @Override
1726   public void modifyColumn(TableName tableName, HColumnDescriptor descriptor)
1727       throws IOException {
1728     checkInitialized();
1729     checkCompression(descriptor);
1730     checkEncryption(conf, descriptor);
1731     if (cpHost != null) {
1732       if (cpHost.preModifyColumn(tableName, descriptor)) {
1733         return;
1734       }
1735     }
1736     LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
1737 
1738     // Execute the operation synchronously - wait for the operation to complete before continuing.
1739     long procId =
1740         this.procedureExecutor.submitProcedure(new ModifyColumnFamilyProcedure(procedureExecutor
1741             .getEnvironment(), tableName, descriptor));
1742     ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
1743 
1744     if (cpHost != null) {
1745       cpHost.postModifyColumn(tableName, descriptor);
1746     }
1747   }
1748 
1749   @Override
1750   public void deleteColumn(final TableName tableName, final byte[] columnName)
1751       throws IOException {
1752     checkInitialized();
1753     if (cpHost != null) {
1754       if (cpHost.preDeleteColumn(tableName, columnName)) {
1755         return;
1756       }
1757     }
1758     LOG.info(getClientIdAuditPrefix() + " delete " + Bytes.toString(columnName));
1759 
1760     // Execute the operation synchronously - wait for the operation to complete before continuing.
1761     long procId =
1762         this.procedureExecutor.submitProcedure(new DeleteColumnFamilyProcedure(procedureExecutor
1763             .getEnvironment(), tableName, columnName));
1764     ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
1765 
1766     if (cpHost != null) {
1767       cpHost.postDeleteColumn(tableName, columnName);
1768     }
1769   }
1770 
1771   @Override
1772   public long enableTable(final TableName tableName) throws IOException {
1773     checkInitialized();
1774     if (cpHost != null) {
1775       cpHost.preEnableTable(tableName);
1776     }
1777     LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
1778 
1779     // Execute the operation asynchronously - client will check the progress of the operation
1780     final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch();
1781     long procId =
1782         this.procedureExecutor.submitProcedure(new EnableTableProcedure(procedureExecutor
1783             .getEnvironment(), tableName, false, prepareLatch));
1784     // Before returning to client, we want to make sure that the table is prepared to be
1785     // enabled (the table is locked and the table state is set).
1786     //
1787     // Note: if the procedure throws exception, we will catch it and rethrow.
1788     prepareLatch.await();
1789 
1790     if (cpHost != null) {
1791       cpHost.postEnableTable(tableName);
1792     }
1793 
1794     return procId;
1795   }
1796 
1797   @Override
1798   public long disableTable(final TableName tableName) throws IOException {
1799     checkInitialized();
1800     if (cpHost != null) {
1801       cpHost.preDisableTable(tableName);
1802     }
1803     LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
1804 
1805     // Execute the operation asynchronously - client will check the progress of the operation
1806     final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch();
1808     long procId =
1809         this.procedureExecutor.submitProcedure(new DisableTableProcedure(procedureExecutor
1810             .getEnvironment(), tableName, false, prepareLatch));
1811     // Before returning to client, we want to make sure that the table is prepared to be
1812     // disabled (the table is locked and the table state is set).
1813     //
1814     // Note: if the procedure throws exception, we will catch it and rethrow.
1815     prepareLatch.await();
1816 
1817     if (cpHost != null) {
1818       cpHost.postDisableTable(tableName);
1819     }
1820 
1821     return procId;
1822   }
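
       // Illustrative only: the client-side view of the enable/disable pair above.
       // Admin blocks until the state change completes (assumes an Admin handle):
       //
       //   admin.disableTable(TableName.valueOf("t1"));
       //   // ... alter schema, run bulk maintenance, etc. ...
       //   admin.enableTable(TableName.valueOf("t1"));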
1823 
1824   /**
1825    * Return the region and current deployment for the region containing
1826    * the given row. If the region cannot be found, returns null. If it
1827    * is found, but not currently deployed, the second element of the pair
1828    * may be null.
1829    */
1830   @VisibleForTesting // Used by TestMaster.
1831   Pair<HRegionInfo, ServerName> getTableRegionForRow(
1832       final TableName tableName, final byte [] rowKey)
1833   throws IOException {
1834     final AtomicReference<Pair<HRegionInfo, ServerName>> result =
1835       new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
1836 
1837     MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
1838         @Override
1839         public boolean visit(Result data) throws IOException {
1840           if (data == null || data.size() <= 0) {
1841             return true;
1842           }
1843           Pair<HRegionInfo, ServerName> pair = HRegionInfo.getHRegionInfoAndServerName(data);
1844           if (pair == null) {
1845             return false;
1846           }
1847           if (!pair.getFirst().getTable().equals(tableName)) {
1848             return false;
1849           }
1850           result.set(pair);
1851           return true;
1852         }
1853     };
1854 
1855     MetaTableAccessor.scanMeta(clusterConnection, visitor, tableName, rowKey, 1);
1856     return result.get();
1857   }
1858 
1859   @Override
1860   public long modifyTable(final TableName tableName, final HTableDescriptor descriptor)
1861       throws IOException {
1862     checkInitialized();
1863     sanityCheckTableDescriptor(descriptor);
1864     if (cpHost != null) {
1865       cpHost.preModifyTable(tableName, descriptor);
1866     }
1867 
1868     LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
1869 
1870     // Execute the operation synchronously - wait for the operation to complete before continuing.
1871     long procId = this.procedureExecutor.submitProcedure(
1872         new ModifyTableProcedure(procedureExecutor.getEnvironment(), descriptor));
1873 
1874     ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
1875 
1876     if (cpHost != null) {
1877       cpHost.postModifyTable(tableName, descriptor);
1878     }
1879 
1880     return procId;
1881   }
1882 
1883   @Override
1884   public void checkTableModifiable(final TableName tableName)
1885       throws IOException, TableNotFoundException, TableNotDisabledException {
1886     if (isCatalogTable(tableName)) {
1887       throw new IOException("Can't modify catalog tables");
1888     }
1889     if (!MetaTableAccessor.tableExists(getConnection(), tableName)) {
1890       throw new TableNotFoundException(tableName);
1891     }
1892     if (!getAssignmentManager().getTableStateManager().
1893         isTableState(tableName, TableState.State.DISABLED)) {
1894       throw new TableNotDisabledException(tableName);
1895     }
1896   }
1897 
1898   /**
1899    * @return cluster status
1900    */
1901   public ClusterStatus getClusterStatus() throws InterruptedIOException {
1902     // Build Set of backup masters from ZK nodes
1903     List<String> backupMasterStrings;
1904     try {
1905       backupMasterStrings = ZKUtil.listChildrenNoWatch(this.zooKeeper,
1906         this.zooKeeper.backupMasterAddressesZNode);
1907     } catch (KeeperException e) {
1908       LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
1909       backupMasterStrings = null;
1910     }
1911 
1912     List<ServerName> backupMasters = null;
1913     if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) {
1914       backupMasters = new ArrayList<ServerName>(backupMasterStrings.size());
1915       for (String s: backupMasterStrings) {
1916         try {
1917           byte [] bytes;
1918           try {
1919             bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
1920                 this.zooKeeper.backupMasterAddressesZNode, s));
1921           } catch (InterruptedException e) {
1922             throw new InterruptedIOException();
1923           }
1924           if (bytes != null) {
1925             ServerName sn;
1926             try {
1927               sn = ServerName.parseFrom(bytes);
1928             } catch (DeserializationException e) {
1929               LOG.warn("Failed parse, skipping registering backup server", e);
1930               continue;
1931             }
1932             backupMasters.add(sn);
1933           }
1934         } catch (KeeperException e) {
1935           LOG.warn(this.zooKeeper.prefix("Unable to get information about " +
1936                    "backup servers"), e);
1937         }
1938       }
1939       Collections.sort(backupMasters, new Comparator<ServerName>() {
1940         @Override
1941         public int compare(ServerName s1, ServerName s2) {
1942           return s1.getServerName().compareTo(s2.getServerName());
1943         }});
1944     }
1945 
1946     String clusterId = fileSystemManager != null ?
1947       fileSystemManager.getClusterId().toString() : null;
1948     Map<String, RegionState> regionsInTransition = assignmentManager != null ?
1949       assignmentManager.getRegionStates().getRegionsInTransition() : null;
1950     String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null;
1951     boolean balancerOn = loadBalancerTracker != null ?
1952       loadBalancerTracker.isBalancerOn() : false;
1953     Map<ServerName, ServerLoad> onlineServers = null;
1954     Set<ServerName> deadServers = null;
1955     if (serverManager != null) {
1956       deadServers = serverManager.getDeadServers().copyServerNames();
1957       onlineServers = serverManager.getOnlineServers();
1958     }
1959     return new ClusterStatus(VersionInfo.getVersion(), clusterId,
1960       onlineServers, deadServers, serverName, backupMasters,
1961       regionsInTransition, coprocessors, balancerOn);
1962   }
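
       // Illustrative only: consuming the status assembled above from a client
       // (a sketch, assuming an HBase 1.x Admin handle):
       //
       //   ClusterStatus status = admin.getClusterStatus();
       //   System.out.println("live servers: " + status.getServersSize());
       //   System.out.println("dead servers: " + status.getDeadServers());
       //   System.out.println("average load: " + status.getAverageLoad());
       //   System.out.println("balancer on:  " + status.isBalancerOn());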
1963 
1964   /**
1965    * The set of loaded coprocessors is stored in a static set. Since it's
1966    * statically allocated, it does not require that HMaster's cpHost be
1967    * initialized prior to accessing it.
1968    * @return a String representation of the set of names of the loaded
1969    * coprocessors.
1970    */
1971   public static String getLoadedCoprocessors() {
1972     return CoprocessorHost.getLoadedCoprocessors().toString();
1973   }
1974 
1975   /**
1976    * @return timestamp in millis when HMaster was started.
1977    */
1978   public long getMasterStartTime() {
1979     return startcode;
1980   }
1981 
1982   /**
1983    * @return timestamp in millis when HMaster became the active master.
1984    */
1985   public long getMasterActiveTime() {
1986     return masterActiveTime;
1987   }
1988 
1989   public int getRegionServerInfoPort(final ServerName sn) {
1990     RegionServerInfo info = this.regionServerTracker.getRegionServerInfo(sn);
1991     if (info == null || info.getInfoPort() == 0) {
1992       return conf.getInt(HConstants.REGIONSERVER_INFO_PORT,
1993         HConstants.DEFAULT_REGIONSERVER_INFOPORT);
1994     }
1995     return info.getInfoPort();
1996   }
1997 
1998   /**
1999    * @return array of coprocessor SimpleNames.
2000    */
2001   public String[] getMasterCoprocessors() {
2002     Set<String> masterCoprocessors = getMasterCoprocessorHost().getCoprocessors();
2003     return masterCoprocessors.toArray(new String[masterCoprocessors.size()]);
2004   }
2005 
2006   @Override
2007   public void abort(final String msg, final Throwable t) {
2008     if (isAborted() || isStopped()) {
2009       return;
2010     }
2011     if (cpHost != null) {
2012       // HBASE-4014: dump a list of loaded coprocessors.
2013       LOG.fatal("Master server abort: loaded coprocessors are: " +
2014           getLoadedCoprocessors());
2015     }
2016     if (t != null) LOG.fatal(msg, t);
2017     stop(msg);
2018   }
2019 
2020   @Override
2021   public ZooKeeperWatcher getZooKeeper() {
2022     return zooKeeper;
2023   }
2024 
2025   @Override
2026   public MasterCoprocessorHost getMasterCoprocessorHost() {
2027     return cpHost;
2028   }
2029 
2030   @Override
2031   public MasterQuotaManager getMasterQuotaManager() {
2032     return quotaManager;
2033   }
2034 
2035   @Override
2036   public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
2037     return procedureExecutor;
2038   }
2039 
2040   @Override
2041   public ServerName getServerName() {
2042     return this.serverName;
2043   }
2044 
2045   @Override
2046   public AssignmentManager getAssignmentManager() {
2047     return this.assignmentManager;
2048   }
2049 
2050   public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
2051     return rsFatals;
2052   }
2053 
2054   public void shutdown() {
2055     if (cpHost != null) {
2056       try {
2057         cpHost.preShutdown();
2058       } catch (IOException ioe) {
2059         LOG.error("Error calling master coprocessor preShutdown()", ioe);
2060       }
2061     }
2062 
2063     if (this.serverManager != null) {
2064       this.serverManager.shutdownCluster();
2065     }
2066     if (this.clusterStatusTracker != null){
2067       try {
2068         this.clusterStatusTracker.setClusterDown();
2069       } catch (KeeperException e) {
2070         LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
2071       }
2072     }
2073   }
2074 
2075   public void stopMaster() {
2076     if (cpHost != null) {
2077       try {
2078         cpHost.preStopMaster();
2079       } catch (IOException ioe) {
2080         LOG.error("Error calling master coprocessor preStopMaster()", ioe);
2081       }
2082     }
2083     stop("Stopped by " + Thread.currentThread().getName());
2084   }
2085 
2086   void checkServiceStarted() throws ServerNotRunningYetException {
2087     if (!serviceStarted) {
2088       throw new ServerNotRunningYetException("Server is not running yet");
2089     }
2090   }
2091 
2092   void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException {
2093     checkServiceStarted();
2094     if (!this.initialized) {
2095       throw new PleaseHoldException("Master is initializing");
2096     }
2097   }
2098 
2099   void checkNamespaceManagerReady() throws IOException {
2100     checkInitialized();
2101     if (tableNamespaceManager == null ||
2102         !tableNamespaceManager.isTableAvailableAndInitialized()) {
2103       throw new IOException("Table Namespace Manager not ready yet, try again later");
2104     }
2105   }
2106   /**
2107    * Report whether this master is currently the active master or not.
2108    * If not active master, we are parked on ZK waiting to become active.
2109    *
2110    * This method is used for testing.
2111    *
2112    * @return true if active master, false if not.
2113    */
2114   public boolean isActiveMaster() {
2115     return isActiveMaster;
2116   }
2117 
2118   /**
2119    * Report whether this master has completed with its initialization and is
2120    * ready.  If ready, the master is also the active master.  A standby master
2121    * is never ready.
2122    *
2123    * This method is used for testing.
2124    *
2125    * @return true if master is ready to go, false if not.
2126    */
2127   @Override
2128   public boolean isInitialized() {
2129     return initialized;
2130   }
2131 
2132   /**
2133    * ServerCrashProcessingEnabled is set false before completing assignMeta to prevent processing
2134    * of crashed servers.
2135    * @return true if assignMeta has completed.
2136    */
2137   @Override
2138   public boolean isServerCrashProcessingEnabled() {
2139     return this.serverCrashProcessingEnabled;
2140   }
2141 
2142   @VisibleForTesting
2143   public void setServerCrashProcessingEnabled(final boolean b) {
2144     this.serverCrashProcessingEnabled = b;
2145   }
2146 
2147   /**
2148    * Report whether this master has started initialization and is about to do meta region assignment
2149    * @return true if master is in initialization &amp; about to assign hbase:meta regions
2150    */
2151   public boolean isInitializationStartsMetaRegionAssignment() {
2152     return this.initializationBeforeMetaAssignment;
2153   }
2154 
2155   public void assignRegion(HRegionInfo hri) {
2156     assignmentManager.assign(hri);
2157   }
2158 
2159   /**
2160    * Compute the average load across all region servers.
2161    * Currently, this uses a very naive computation - just uses the number of
2162    * regions being served, ignoring stats about number of requests.
2163    * @return the average load
2164    */
2165   public double getAverageLoad() {
2166     if (this.assignmentManager == null) {
2167       return 0;
2168     }
2169 
2170     RegionStates regionStates = this.assignmentManager.getRegionStates();
2171     if (regionStates == null) {
2172       return 0;
2173     }
2174     return regionStates.getAverageLoad();
2175   }
2176 
2177   @Override
2178   public boolean registerService(Service instance) {
2179     /*
2180      * No stacking of instances is allowed for a single service name
2181      */
2182     Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType();
2183     if (coprocessorServiceHandlers.containsKey(serviceDesc.getFullName())) {
2184       LOG.error("Coprocessor service "+serviceDesc.getFullName()+
2185           " already registered, rejecting request from "+instance
2186       );
2187       return false;
2188     }
2189 
2190     coprocessorServiceHandlers.put(serviceDesc.getFullName(), instance);
2191     if (LOG.isDebugEnabled()) {
2192       LOG.debug("Registered master coprocessor service: service="+serviceDesc.getFullName());
2193     }
2194     return true;
2195   }
2196 
2197   /**
2198    * Utility for constructing an instance of the passed HMaster class.
2199    * @param masterClass
2200    * @param conf
2201    * @return HMaster instance.
2202    */
2203   public static HMaster constructMaster(Class<? extends HMaster> masterClass,
2204       final Configuration conf, final CoordinatedStateManager cp)  {
2205     try {
2206       Constructor<? extends HMaster> c =
2207         masterClass.getConstructor(Configuration.class, CoordinatedStateManager.class);
2208       return c.newInstance(conf, cp);
2209     } catch (InvocationTargetException ite) {
2210       Throwable target = ite.getTargetException() != null?
2211         ite.getTargetException(): ite;
2212       if (target.getCause() != null) target = target.getCause();
2213       throw new RuntimeException("Failed construction of Master: " +
2214         masterClass.toString(), target);
2215     } catch (Exception e) {
2216       throw new RuntimeException("Failed construction of Master: " +
2217         masterClass.toString() + ((e.getCause() != null)?
2218           e.getCause().getMessage(): ""), e);
2219     }
2220   }
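
       // Illustrative only: how the reflective constructor above is typically used
       // (a sketch; assumes CoordinatedStateManagerFactory from the same release):
       //
       //   Configuration conf = HBaseConfiguration.create();
       //   CoordinatedStateManager csm =
       //       CoordinatedStateManagerFactory.getCoordinatedStateManager(conf);
       //   HMaster master = HMaster.constructMaster(HMaster.class, conf, csm);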
2221 
2222   /**
2223    * @see org.apache.hadoop.hbase.master.HMasterCommandLine
2224    */
2225   public static void main(String [] args) {
2226     VersionInfo.logVersion();
2227     new HMasterCommandLine(HMaster.class).doMain(args);
2228   }
2229 
2230   public HFileCleaner getHFileCleaner() {
2231     return this.hfileCleaner;
2232   }
2233 
2234   /**
2235    * Exposed for TESTING!
2236    * @return the underlying snapshot manager
2237    */
2238   public SnapshotManager getSnapshotManagerForTesting() {
2239     return this.snapshotManager;
2240   }
2241 
2242   @Override
2243   public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
2244     TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
2245     checkNamespaceManagerReady();
2246     if (cpHost != null) {
2247       if (cpHost.preCreateNamespace(descriptor)) {
2248         return;
2249       }
2250     }
2251     LOG.info(getClientIdAuditPrefix() + " creating " + descriptor);
2252     tableNamespaceManager.create(descriptor);
2253     if (cpHost != null) {
2254       cpHost.postCreateNamespace(descriptor);
2255     }
2256   }
2257 
2258   @Override
2259   public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
2260     TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
2261     checkNamespaceManagerReady();
2262     if (cpHost != null) {
2263       if (cpHost.preModifyNamespace(descriptor)) {
2264         return;
2265       }
2266     }
2267     LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
2268     tableNamespaceManager.update(descriptor);
2269     if (cpHost != null) {
2270       cpHost.postModifyNamespace(descriptor);
2271     }
2272   }
2273 
2274   @Override
2275   public void deleteNamespace(String name) throws IOException {
2276     checkNamespaceManagerReady();
2277     if (cpHost != null) {
2278       if (cpHost.preDeleteNamespace(name)) {
2279         return;
2280       }
2281     }
2282     LOG.info(getClientIdAuditPrefix() + " delete " + name);
2283     tableNamespaceManager.remove(name);
2284     if (cpHost != null) {
2285       cpHost.postDeleteNamespace(name);
2286     }
2287   }
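
       // Illustrative only: the client-side counterparts of the namespace operations
       // above (a sketch; assumes an Admin handle; the quota key is the one checked
       // by the namespace quota manager, assumed "hbase.namespace.quota.maxtables"):
       //
       //   admin.createNamespace(NamespaceDescriptor.create("ns1").build());
       //   admin.modifyNamespace(NamespaceDescriptor.create("ns1")
       //       .addConfiguration("hbase.namespace.quota.maxtables", "10").build());
       //   admin.deleteNamespace("ns1");  // the namespace must be empty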
2288 
2289   /**
2290    * Ensure that the specified namespace exists, otherwise throws a NamespaceNotFoundException
2291    *
2292    * @param name the namespace to check
2293    * @throws IOException if the namespace manager is not ready yet.
2294    * @throws NamespaceNotFoundException if the namespace does not exist
2295    */
2296   private void ensureNamespaceExists(final String name)
2297       throws IOException, NamespaceNotFoundException {
2298     checkNamespaceManagerReady();
2299     NamespaceDescriptor nsd = tableNamespaceManager.get(name);
2300     if (nsd == null) {
2301       throw new NamespaceNotFoundException(name);
2302     }
2303   }
2304 
2305   @Override
2306   public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
2307     checkNamespaceManagerReady();
2308 
2309     if (cpHost != null) {
2310       cpHost.preGetNamespaceDescriptor(name);
2311     }
2312 
2313     NamespaceDescriptor nsd = tableNamespaceManager.get(name);
2314     if (nsd == null) {
2315       throw new NamespaceNotFoundException(name);
2316     }
2317 
2318     if (cpHost != null) {
2319       cpHost.postGetNamespaceDescriptor(nsd);
2320     }
2321 
2322     return nsd;
2323   }
2324 
2325   @Override
2326   public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
2327     checkNamespaceManagerReady();
2328 
2329     final List<NamespaceDescriptor> descriptors = new ArrayList<NamespaceDescriptor>();
2330     boolean bypass = false;
2331     if (cpHost != null) {
2332       bypass = cpHost.preListNamespaceDescriptors(descriptors);
2333     }
2334 
2335     if (!bypass) {
2336       descriptors.addAll(tableNamespaceManager.list());
2337 
2338       if (cpHost != null) {
2339         cpHost.postListNamespaceDescriptors(descriptors);
2340       }
2341     }
2342     return descriptors;
2343   }
2344 
2345   @Override
2346   public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
2347     ensureNamespaceExists(name);
2348     return listTableDescriptors(name, null, null, true);
2349   }
2350 
2351   @Override
2352   public List<TableName> listTableNamesByNamespace(String name) throws IOException {
2353     ensureNamespaceExists(name);
2354     return listTableNames(name, null, true);
2355   }
2356 
2357   /**
2358    * Returns the list of table descriptors that match the specified request
2359    *
2360    * @param namespace the namespace to query, or null if querying for all
2361    * @param regex The regular expression to match against, or null if querying for all
2362    * @param tableNameList the list of table names, or null if querying for all
2363    * @param includeSysTables False to match only against userspace tables
2364    * @return the list of table descriptors
2365    */
2366   public List<HTableDescriptor> listTableDescriptors(final String namespace, final String regex,
2367       final List<TableName> tableNameList, final boolean includeSysTables)
2368       throws IOException {
2369     final List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();
2370 
2371     boolean bypass = false;
2372     if (cpHost != null) {
2373       bypass = cpHost.preGetTableDescriptors(tableNameList, descriptors, regex);
2374     }
2375 
2376     if (!bypass) {
2377       if (tableNameList == null || tableNameList.size() == 0) {
2378         // request for all TableDescriptors
2379         Collection<HTableDescriptor> htds;
2380         if (namespace != null && namespace.length() > 0) {
2381           htds = tableDescriptors.getByNamespace(namespace).values();
2382         } else {
2383           htds = tableDescriptors.getAll().values();
2384         }
2385 
2386         for (HTableDescriptor desc: htds) {
2387           if (tableStateManager.isTablePresent(desc.getTableName())
2388               && (includeSysTables || !desc.getTableName().isSystemTable())) {
2389             descriptors.add(desc);
2390           }
2391         }
2392       } else {
2393         for (TableName s: tableNameList) {
2394           if (tableStateManager.isTablePresent(s)) {
2395             HTableDescriptor desc = tableDescriptors.get(s);
2396             if (desc != null) {
2397               descriptors.add(desc);
2398             }
2399           }
2400         }
2401       }
2402 
2403       // Retains only those matched by regular expression.
2404       if (regex != null) {
2405         filterTablesByRegex(descriptors, Pattern.compile(regex));
2406       }
2407 
2408       if (cpHost != null) {
2409         cpHost.postGetTableDescriptors(tableNameList, descriptors, regex);
2410       }
2411     }
2412     return descriptors;
2413   }
2414 
2415   /**
2416    * Returns the list of table names that match the specified request
2417    * @param regex The regular expression to match against, or null if querying for all
2418    * @param namespace the namespace to query, or null if querying for all
2419    * @param includeSysTables False to match only against userspace tables
2420    * @return the list of table names
2421    */
2422   public List<TableName> listTableNames(final String namespace, final String regex,
2423       final boolean includeSysTables) throws IOException {
2424     final List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();
2425 
2426     boolean bypass = false;
2427     if (cpHost != null) {
2428       bypass = cpHost.preGetTableNames(descriptors, regex);
2429     }
2430 
2431     if (!bypass) {
2432       // get all descriptors
2433       Collection<HTableDescriptor> htds;
2434       if (namespace != null && namespace.length() > 0) {
2435         htds = tableDescriptors.getByNamespace(namespace).values();
2436       } else {
2437         htds = tableDescriptors.getAll().values();
2438       }
2439 
2440       for (HTableDescriptor htd: htds) {
2441         if (includeSysTables || !htd.getTableName().isSystemTable()) {
2442           descriptors.add(htd);
2443         }
2444       }
2445 
2446       // Retains only those matched by regular expression.
2447       if (regex != null) {
2448         filterTablesByRegex(descriptors, Pattern.compile(regex));
2449       }
2450 
2451       if (cpHost != null) {
2452         cpHost.postGetTableNames(descriptors, regex);
2453       }
2454     }
2455 
2456     List<TableName> result = new ArrayList<TableName>(descriptors.size());
2457     for (HTableDescriptor htd: descriptors) {
2458       result.add(htd.getTableName());
2459     }
2460     return result;
2461   }
2462 
2463 
2464   /**
2465    * Removes the table descriptors that don't match the pattern.
2466    * @param descriptors list of table descriptors to filter
2467    * @param pattern the regex to use
2468    */
2469   private static void filterTablesByRegex(final Collection<HTableDescriptor> descriptors,
2470       final Pattern pattern) {
2471     final String defaultNS = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR;
2472     Iterator<HTableDescriptor> itr = descriptors.iterator();
2473     while (itr.hasNext()) {
2474       HTableDescriptor htd = itr.next();
2475       String tableName = htd.getTableName().getNameAsString();
2476       boolean matched = pattern.matcher(tableName).matches();
2477       if (!matched && htd.getTableName().getNamespaceAsString().equals(defaultNS)) {
2478         matched = pattern.matcher(defaultNS + TableName.NAMESPACE_DELIM + tableName).matches();
2479       }
2480       if (!matched) {
2481         itr.remove();
2482       }
2483     }
2484   }
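
       // Illustrative only: for a table "t1" in the default namespace, both of these
       // patterns keep it (TableName#getNameAsString() omits the "default:" prefix,
       // so the fallback above re-tries with the prefix attached):
       //
       //   filterTablesByRegex(descriptors, Pattern.compile("t1"));          // short name
       //   filterTablesByRegex(descriptors, Pattern.compile("default:t1"));  // qualified
       //
       // Tables in other namespaces already carry their prefix (e.g. "ns1:t1").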
2485 
2486   @Override
2487   public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
2488     return getClusterStatus().getLastMajorCompactionTsForTable(table);
2489   }
2490 
2491   @Override
2492   public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
2493     return getClusterStatus().getLastMajorCompactionTsForRegion(regionName);
2494   }
2495 
2496   /**
2497    * Queries the state of the {@link LoadBalancerTracker}. If the balancer is not initialized,
2498    * false is returned.
2499    *
2500    * @return The state of the load balancer, or false if the load balancer isn't defined.
2501    */
2502   public boolean isBalancerOn() {
2503     if (null == loadBalancerTracker) return false;
2504     return loadBalancerTracker.isBalancerOn();
2505   }
2506 
2507   /**
2508    * Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned.
2509    *
2510    * @return The name of the {@link LoadBalancer} in use.
2511    */
2512   public String getLoadBalancerClassName() {
2513     return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, LoadBalancerFactory
2514         .getDefaultLoadBalancerClass().getName());
2515   }
2516 }