/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Pattern;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaMigrationConvertingToPB;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.RegionStateListener;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.balancer.BalancerChore;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.master.cleaner.ReplicationZKLockCleanerChore;
import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
import org.apache.hadoop.hbase.master.procedure.AddColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler.ProcedureEvent;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CompressionTest;
import org.apache.hadoop.hbase.util.ConfigUtil;
import org.apache.hadoop.hbase.util.EncryptionTest;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.ServletHolder;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Service;

/**
 * HMaster is the "master server" for HBase. An HBase cluster has one active
 * master. If many masters are started, all compete. Whichever wins goes on to
 * run the cluster. All others park themselves in their constructor until
 * master or cluster shutdown or until the active master loses its lease in
 * zookeeper. Thereafter, all running masters jostle to take over master role.
 *
 * <p>The Master can be asked to shut down the cluster. In this case it will
 * tell all regionservers to go down and then wait on them all reporting in
 * that they are down. This master will then shut itself down.
 *
 * <p>You can also shut down just this master by calling stopMaster().
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@SuppressWarnings("deprecation")
public class HMaster extends HRegionServer implements MasterServices, Server {
  private static final Log LOG = LogFactory.getLog(HMaster.class.getName());

  /**
   * Protection against zombie master. Started once Master accepts active
   * responsibility and starts taking over responsibilities. Allows a finite
   * time window before giving up ownership.
   */
  private static class InitializationMonitor extends HasThread {
    /** The amount of time in milliseconds to sleep before checking initialization status. */
    public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout";
    public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES);

    /**
     * When timeout expires and initialization has not completed, call {@link System#exit(int)}
     * when true, do nothing otherwise.
     */
    public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout";
    public static final boolean HALT_DEFAULT = false;

    private final HMaster master;
    private final long timeout;
    private final boolean haltOnTimeout;

    /** Creates a Thread that monitors the {@link #isInitialized()} state. */
    InitializationMonitor(HMaster master) {
      super("MasterInitializationMonitor");
      this.master = master;
      this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT);
      this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT);
      this.setDaemon(true);
    }

    @Override
    public void run() {
      try {
        while (!master.isStopped() && master.isActiveMaster()) {
          Thread.sleep(timeout);
          if (master.isInitialized()) {
            LOG.debug("Initialization completed within allotted tolerance. Monitor exiting.");
          } else {
            LOG.error("Master failed to complete initialization after " + timeout + "ms. Please"
                + " consider submitting a bug report including a thread dump of this process.");
            if (haltOnTimeout) {
              LOG.error("Zombie Master exiting. Thread dump to stdout");
              Threads.printThreadInfo(System.out, "Zombie HMaster");
              System.exit(-1);
            }
          }
        }
      } catch (InterruptedException ie) {
        LOG.trace("InitMonitor thread interrupted. Exiting.");
      }
    }
  }

  // MASTER is name of the webapp and the attribute name used stuffing this
  // instance into web context.
  public static final String MASTER = "master";

  // Manager and zk listener for master election
  private final ActiveMasterManager activeMasterManager;
  // Region server tracker
  RegionServerTracker regionServerTracker;
  // Draining region server tracker
  private DrainingServerTracker drainingServerTracker;
  // Tracker for load balancer state
  LoadBalancerTracker loadBalancerTracker;

  // Tracker for region normalizer state
  private RegionNormalizerTracker regionNormalizerTracker;

  // Manager for the namespace table and namespace descriptors
  private TableNamespaceManager tableNamespaceManager;

  // Metrics for the HMaster
  final MetricsMaster metricsMaster;
  // file system manager for the master FS operations
  private MasterFileSystem fileSystemManager;

  // server manager to deal with region server info
  volatile ServerManager serverManager;

  // manager of assignment nodes in zookeeper
  AssignmentManager assignmentManager;

  // buffer for "fatal error" notices from region servers
  // in the cluster. This is only used for assisting
  // operations/debugging.
  MemoryBoundedLogMessageBuffer rsFatals;

  // flag set after we become the active master (used for testing)
  private volatile boolean isActiveMaster = false;

  // flag set after we complete initialization once active,
  // it is not private since it's used in unit tests
  private final ProcedureEvent initialized = new ProcedureEvent("master initialized");

  // flag set after master services were started,
  // initialization may have not completed yet.
  volatile boolean serviceStarted = false;

  // flag set after we complete assignMeta.
  private final ProcedureEvent serverCrashProcessingEnabled =
      new ProcedureEvent("server crash processing");

  LoadBalancer balancer;
  private RegionNormalizer normalizer;
  private BalancerChore balancerChore;
  private RegionNormalizerChore normalizerChore;
  private ClusterStatusChore clusterStatusChore;
  private ClusterStatusPublisher clusterStatusPublisherChore = null;
  private PeriodicDoMetrics periodicDoMetricsChore = null;

  CatalogJanitor catalogJanitorChore;
  private ReplicationZKLockCleanerChore replicationZKLockCleanerChore;
  private LogCleaner logCleaner;
  private HFileCleaner hfileCleaner;

  MasterCoprocessorHost cpHost;

  private final boolean preLoadTableDescriptors;

  // Time stamp for when HMaster became active
  private long masterActiveTime;

  // whether the master verifies compression settings of new tables
  private final boolean masterCheckCompression;

  // whether the master verifies encryption settings of new tables
  private final boolean masterCheckEncryption;

  Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();

  // monitor for snapshot of hbase tables
  SnapshotManager snapshotManager;
  // monitor for distributed procedures
  MasterProcedureManagerHost mpmHost;

  // it is assigned after 'initialized' guard set to true, so should be volatile
  private volatile MasterQuotaManager quotaManager;

  private ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
  private WALProcedureStore procedureStore;

  // flag set once initialization work prior to meta assignment completes
  private volatile boolean initializationBeforeMetaAssignment = false;

  // jetty server for master to redirect requests to regionserver infoServer
  private org.mortbay.jetty.Server masterJettyServer;

  public static class RedirectServlet extends HttpServlet {
    private static final long serialVersionUID = 2894774810058302473L;
    private final int regionServerInfoPort;
    private final String regionServerHostname;

    /**
     * @param infoServer the info server all requests are redirected to
     * @param hostname may be null; if given, it is used for redirects instead
     *   of the host from the client request
     */
    public RedirectServlet(InfoServer infoServer, String hostname) {
      regionServerInfoPort = infoServer.getPort();
      regionServerHostname = hostname;
    }

    @Override
    public void doGet(HttpServletRequest request,
        HttpServletResponse response) throws ServletException, IOException {
      String redirectHost = regionServerHostname;
      if (redirectHost == null) {
        redirectHost = request.getServerName();
        if (!Addressing.isLocalAddress(InetAddress.getByName(redirectHost))) {
          LOG.warn("Couldn't resolve '" + redirectHost + "' as an address local to this node and '" +
              MASTER_HOSTNAME_KEY + "' is not set; client will get an HTTP 400 response. If " +
              "your HBase deployment relies on client accessible names that the region server process " +
              "can't resolve locally, then you should set the previously mentioned configuration variable " +
              "to an appropriate hostname.");
          // no sending client-provided input back to the client, so the message is a static string
          response.sendError(400, "Request was to a host that I can't resolve for any of the network interfaces on " +
              "this node. If this is due to an intermediary such as an HTTP load balancer or other proxy, your HBase " +
              "administrator can set '" + MASTER_HOSTNAME_KEY + "' to point to the correct hostname.");
          return;
        }
      }
      // The redirect reuses the request's scheme; the scheme actually registered in the
      // info server's http server is not readily accessible from here.
      String redirectUrl = request.getScheme() + "://"
          + redirectHost + ":" + regionServerInfoPort
          + request.getRequestURI();
      response.sendRedirect(redirectUrl);
    }
  }

  private static class PeriodicDoMetrics extends ScheduledChore {
    private final HMaster server;

    public PeriodicDoMetrics(int doMetricsInterval, final HMaster server) {
      super(server.getServerName() + "-DoMetricsChore", server, doMetricsInterval);
      this.server = server;
    }

    @Override
    protected void chore() {
      server.doMetrics();
    }
  }

  /**
   * Initializes the HMaster. The steps are as follows:
   * <p>
   * <ol>
   * <li>Initialize the local HRegionServer
   * <li>Start the ActiveMasterManager.
   * </ol>
   * <p>
   * Remaining steps of initialization occur eventually after the master
   * becomes the active one.
   */
  public HMaster(final Configuration conf, CoordinatedStateManager csm)
      throws IOException, KeeperException, InterruptedException {
    super(conf, csm);
    this.rsFatals = new MemoryBoundedLogMessageBuffer(
        conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));

    LOG.info("hbase.rootdir=" + getRootDir() +
        ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));

    // Disable usage of meta replicas in the master
    this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);

    Replication.decorateMasterConfiguration(this.conf);

    // Hack! Maps DFSClient => Master for logs.  HDFS made this
    // config param for task trackers, but we can piggyback off of it.
    if (this.conf.get("mapreduce.task.attempt.id") == null) {
      this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
    }

    // should we check the compression codec type at master side, default true, HBASE-6370
    this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);

    // should we check encryption settings at master side, default true
    this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);

    this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));

    // preload table descriptor at master side
    this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);

    // Do we publish the status?
    boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
        HConstants.STATUS_PUBLISHED_DEFAULT);
    Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
        conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
            ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
            ClusterStatusPublisher.Publisher.class);

    if (shouldPublish) {
      if (publisherClass == null) {
        LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
            ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
            " is not set - not publishing status");
      } else {
        clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
        getChoreService().scheduleChore(clusterStatusPublisherChore);
      }
    }

    // Some unit tests don't need a cluster, so no zookeeper at all
    if (!conf.getBoolean("hbase.testing.nocluster", false)) {
      activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
      int infoPort = putUpJettyServer();
      startActiveMasterManager(infoPort);
    } else {
      activeMasterManager = null;
    }
  }

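  /**
   * Puts up a jetty server fronting the master web UI so that requests to the
   * configured master UI port get redirected to the info server.
   * @return the port the redirecting jetty server is listening on, or -1 if
   *   redirecting is disabled or not possible
   */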
  private int putUpJettyServer() throws IOException {
    if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
      return -1;
    }
    final int infoPort = conf.getInt("hbase.master.info.port.orig",
        HConstants.DEFAULT_MASTER_INFOPORT);
    // -1 is for disabling info server, so no redirecting
    if (infoPort < 0 || infoServer == null) {
      return -1;
    }
    // If the master UI port is the same as the info server port, there is
    // nothing to redirect; just reuse the info server.
    if (infoPort == infoServer.getPort()) {
      return infoPort;
    }
    final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
      String msg =
          "Failed to start redirecting jetty server. Address " + addr
              + " does not belong to this host. Correct configuration parameter: "
              + "hbase.master.info.bindAddress";
      LOG.error(msg);
      throw new IOException(msg);
    }

    // A second jetty instance is stood up purely to redirect requests on the
    // master UI port over to the info server, which keeps serving on its own
    // original port.
    masterJettyServer = new org.mortbay.jetty.Server();
    Connector connector = new SelectChannelConnector();
    connector.setHost(addr);
    connector.setPort(infoPort);
    masterJettyServer.addConnector(connector);
    masterJettyServer.setStopAtShutdown(true);

    final String redirectHostname = shouldUseThisHostnameInstead() ? useThisHostnameInstead : null;

    final RedirectServlet redirect = new RedirectServlet(infoServer, redirectHostname);
    Context context = new Context(masterJettyServer, "/", Context.NO_SESSIONS);
    context.addServlet(new ServletHolder(redirect), "/*");

    try {
      masterJettyServer.start();
    } catch (Exception e) {
      throw new IOException("Failed to start redirecting jetty server", e);
    }
    return connector.getLocalPort();
  }

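  /**
   * For compatibility, if login with regionserver credentials fails, try the
   * master ones.
   */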
  @Override
  protected void login(UserProvider user, String host) throws IOException {
    try {
      super.login(user, host);
    } catch (IOException ie) {
      user.login("hbase.master.keytab.file",
          "hbase.master.kerberos.principal", host);
    }
  }

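  /**
   * If configured to put regions on the active master,
   * wait till this backup master becomes active.
   * Otherwise, loop till the server is stopped or aborted.
   */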
  @Override
  protected void waitForMasterActive() {
    boolean tablesOnMaster = BaseLoadBalancer.tablesOnMaster(conf);
    while (!(tablesOnMaster && isActiveMaster)
        && !isStopped() && !isAborted()) {
      sleeper.sleep();
    }
  }

  @VisibleForTesting
  public MasterRpcServices getMasterRpcServices() {
    return (MasterRpcServices) rpcServices;
  }

  public boolean balanceSwitch(final boolean b) throws IOException {
    return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
  }

  @Override
  protected String getProcessName() {
    return MASTER;
  }

  @Override
  protected boolean canCreateBaseZNode() {
    return true;
  }

  @Override
  protected boolean canUpdateTableDescriptor() {
    return true;
  }

  @Override
  protected RSRpcServices createRpcServices() throws IOException {
    return new MasterRpcServices(this);
  }

  @Override
  protected void configureInfoServer() {
    infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class);
    infoServer.setAttribute(MASTER, this);
    if (BaseLoadBalancer.tablesOnMaster(conf)) {
      super.configureInfoServer();
    }
  }

  @Override
  protected Class<? extends HttpServlet> getDumpServlet() {
    return MasterDumpServlet.class;
  }

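  /**
   * Emit the HMaster metrics, such as region-in-transition metrics.
   * Surrounded in a try/catch block so a metrics update failure does not
   * propagate to the caller.
   */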
  private void doMetrics() {
    try {
      if (assignmentManager != null) {
        assignmentManager.updateRegionsInTransitionMetrics();
      }
    } catch (Throwable e) {
      LOG.error("Couldn't update metrics: " + e.getMessage());
    }
  }

  MetricsMaster getMasterMetrics() {
    return metricsMaster;
  }

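  /**
   * Initialize all ZK based system trackers: the load balancer and region
   * normalizer (with their on/off trackers), the assignment manager, the
   * region server and draining server trackers, the cluster-up flag, and the
   * snapshot and procedure manager host.
   */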
  void initializeZKBasedSystemTrackers() throws IOException,
      InterruptedException, KeeperException, CoordinatedStateException {
    this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
    this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);
    this.normalizer.setMasterServices(this);
    this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
    this.loadBalancerTracker.start();
    this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this);
    this.regionNormalizerTracker.start();
    this.assignmentManager = new AssignmentManager(this, serverManager,
        this.balancer, this.service, this.metricsMaster,
        this.tableLockManager);
    zooKeeper.registerListenerFirst(assignmentManager);

    this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
        this.serverManager);
    this.regionServerTracker.start();

    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
        this.serverManager);
    this.drainingServerTracker.start();

    // Set the cluster as up.  If new RSs, they'll be waiting on this before
    // going ahead with their startup.
    boolean wasUp = this.clusterStatusTracker.isClusterUp();
    if (!wasUp) this.clusterStatusTracker.setClusterUp();

    LOG.info("Server active/primary master=" + this.serverName +
        ", sessionid=0x" +
        Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
        ", setting cluster-up flag (Was=" + wasUp + ")");

    // create/initialize the snapshot manager and other procedure managers
    this.snapshotManager = new SnapshotManager();
    this.mpmHost = new MasterProcedureManagerHost();
    this.mpmHost.register(this.snapshotManager);
    this.mpmHost.register(new MasterFlushTableProcedureManager());
    this.mpmHost.loadProcedures(conf);
    this.mpmHost.initialize(this, this.metricsMaster);
  }

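  /**
   * Finish initialization of HMaster after becoming the primary master.
   *
   * <ol>
   * <li>Initialize master components - file system manager, server manager,
   *     assignment manager, region server tracker, etc.</li>
   * <li>Start necessary service threads - balancer, catalog janitor,
   *     executor services, etc.</li>
   * <li>Set cluster as UP in ZooKeeper</li>
   * <li>Wait for RegionServers to check in</li>
   * <li>Split logs and perform data recovery, if necessary</li>
   * <li>Ensure assignment of meta regions</li>
   * </ol>
   */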
  private void finishActiveMasterInitialization(MonitoredTask status)
      throws IOException, InterruptedException, KeeperException, CoordinatedStateException {

    isActiveMaster = true;
    Thread zombieDetector = new Thread(new InitializationMonitor(this),
        "ActiveMasterInitializationMonitor-" + System.currentTimeMillis());
    zombieDetector.start();

    /*
     * We are active master now... go initialize components we need to run.
     * Note, there may be dross in zk from previous runs; it'll get addressed
     * below after we determine if cluster startup or failover.
     */
    status.setStatus("Initializing Master file system");

    this.masterActiveTime = System.currentTimeMillis();
    this.fileSystemManager = new MasterFileSystem(this, this);

    // enable table descriptors cache
    this.tableDescriptors.setCacheOn();
    // set the META's descriptor to the correct replication
    this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
        conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
    // warm up the table descriptor cache on master initialization
    if (preLoadTableDescriptors) {
      status.setStatus("Pre-loading table descriptors");
      this.tableDescriptors.getAll();
    }

    // publish cluster ID
    status.setStatus("Publishing Cluster ID in ZooKeeper");
    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
    this.serverManager = createServerManager(this, this);

    setupClusterConnection();

    // Invalidate all write locks held previously
    this.tableLockManager.reapWriteLocks();

    status.setStatus("Initializing ZK system trackers");
    initializeZKBasedSystemTrackers();

    // initialize master side coprocessors before we start handling requests
    status.setStatus("Initializing master coprocessors");
    this.cpHost = new MasterCoprocessorHost(this, this.conf);

    // start up all service threads.
    status.setStatus("Initializing master service threads");
    startServiceThreads();

    // Wake up this server to check in
    sleeper.skipSleepCycle();

    // Wait for region servers to report in
    this.serverManager.waitForRegionServers(status);
    // Check zk for region servers that are up but didn't register
    for (ServerName sn: this.regionServerTracker.getOnlineServers()) {
      // The isServerOnline check is opportunistic; correctness is handled inside
      if (!this.serverManager.isServerOnline(sn)
          && serverManager.checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
        LOG.info("Registered server found up in zk but who has not yet reported in: " + sn);
      }
    }

    // get a list of previously failed RSs which need log splitting work; we
    // recover the hbase:meta region server inside master initialization and
    // handle other failed servers in SSH in order to start up the master ASAP
    Set<ServerName> previouslyFailedServers =
        this.fileSystemManager.getFailedServersFromLogFolders();

    // log splitting for hbase:meta server
    ServerName oldMetaServerLocation = metaTableLocator.getMetaRegionLocation(this.getZooKeeper());
    if (oldMetaServerLocation != null && previouslyFailedServers.contains(oldMetaServerLocation)) {
      splitMetaLogBeforeAssignment(oldMetaServerLocation);
      // Note: we can't remove oldMetaServerLocation from previouslyFailedServers list because it
      // may also host user regions
    }
    Set<ServerName> previouslyFailedMetaRSs = getPreviouslyFailedMetaServersFromZK();
    // need to use the union of previouslyFailedMetaRSs recorded in ZK and previouslyFailedServers
    // instead of previouslyFailedMetaRSs alone to address chained failures (recovery failed
    // multiple times in a row) and the case where the master was killed right before it could
    // delete the recovering hbase:meta node from ZK. Passing extra servers into splitMetaLog is
    // harmless: if a server has no hbase:meta WAL, the split is a no-op for that server.
    previouslyFailedMetaRSs.addAll(previouslyFailedServers);

    this.initializationBeforeMetaAssignment = true;

    // Wait for regionserver to finish initialization.
    if (BaseLoadBalancer.tablesOnMaster(conf)) {
      waitForServerOnline();
    }

    // initialize load balancer
    this.balancer.setClusterStatus(getClusterStatus());
    this.balancer.setMasterServices(this);
    this.balancer.initialize();

    // Check if master is shutting down because of some issue
    // in initializing the regionserver or the balancer.
    if (isStopped()) return;

    // Make sure meta assigned before proceeding.
    status.setStatus("Assigning Meta Region");
    assignMeta(status, previouslyFailedMetaRSs, HRegionInfo.DEFAULT_REPLICA_ID);
    // check if master is shutting down because assignMeta above could return even if
    // hbase:meta isn't assigned when the master is shutting down
    if (isStopped()) return;

    status.setStatus("Submitting log splitting work for previously failed region servers");
    // Master has recovered the hbase:meta region server; we put
    // other failed region servers in a queue to be handled later by SSH
    for (ServerName tmpServer : previouslyFailedServers) {
      this.serverManager.processDeadServer(tmpServer, true);
    }

    // Update meta with new PB serialization if required, i.e. migrate all HRI to PB serialization
    // in meta. This must happen before we assign all user regions or else the assignment will fail.
    if (this.conf.getBoolean("hbase.MetaMigrationConvertingToPB", true)) {
      MetaMigrationConvertingToPB.updateMetaIfNecessary(this);
    }

    // Fix up assignment manager status
    status.setStatus("Starting assignment manager");
    this.assignmentManager.joinCluster();

    // set cluster status again after user regions are assigned
    this.balancer.setClusterStatus(getClusterStatus());

    // Start balancer and meta catalog janitor after meta and regions have been assigned.
    status.setStatus("Starting balancer and catalog janitor");
    this.clusterStatusChore = new ClusterStatusChore(this, balancer);
    getChoreService().scheduleChore(clusterStatusChore);
    this.balancerChore = new BalancerChore(this);
    getChoreService().scheduleChore(balancerChore);
    this.normalizerChore = new RegionNormalizerChore(this);
    getChoreService().scheduleChore(normalizerChore);
    this.catalogJanitorChore = createCatalogJanitor();
    getChoreService().scheduleChore(catalogJanitorChore);

    // Do Metrics periodically
    periodicDoMetricsChore = new PeriodicDoMetrics(msgInterval, this);
    getChoreService().scheduleChore(periodicDoMetricsChore);

    status.setStatus("Starting namespace manager");
    initNamespace();

    if (this.cpHost != null) {
      try {
        this.cpHost.preMasterInitialization();
      } catch (IOException e) {
        LOG.error("Coprocessor preMasterInitialization() hook failed", e);
      }
    }

    status.markComplete("Initialization successful");
    LOG.info("Master has completed initialization");
    configurationManager.registerObserver(this.balancer);

    // Set master as 'initialized'.
    setInitialized(true);

    status.setStatus("Starting quota manager");
    initQuotaManager();

    // assign the meta replicas
    Set<ServerName> EMPTY_SET = new HashSet<ServerName>();
    int numReplicas = conf.getInt(HConstants.META_REPLICAS_NUM,
        HConstants.DEFAULT_META_REPLICA_NUM);
    for (int i = 1; i < numReplicas; i++) {
      assignMeta(status, EMPTY_SET, i);
    }
    unassignExcessMetaReplica(zooKeeper, numReplicas);

    // clear the dead servers with same host name and port of online server because we are not
    // removing dead server with same hostname and port of rs which is trying to check in before
    // master initialization. See HBASE-5916.
    this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();

    // Check and set the znode ACLs if needed in case we are overtaking a non-secure configuration
    status.setStatus("Checking ZNode ACLs");
    zooKeeper.checkAndSetZNodeAcls();

    status.setStatus("Calling postStartMaster coprocessors");
    if (this.cpHost != null) {
      // don't let cp initialization errors kill the master
      try {
        this.cpHost.postStartMaster();
      } catch (IOException ioe) {
        LOG.error("Coprocessor postStartMaster() hook failed", ioe);
      }
    }

    zombieDetector.interrupt();
  }

  @VisibleForTesting
  protected CatalogJanitor createCatalogJanitor() {
    return new CatalogJanitor(this, this);
  }

  private void initQuotaManager() throws IOException {
    quotaManager = new MasterQuotaManager(this);
    this.assignmentManager.setRegionStateListener((RegionStateListener) quotaManager);
    quotaManager.start();
  }

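  /**
   * Create a {@link ServerManager} instance. Kept as a factory method so that
   * tests can override or spy it.
   */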
  ServerManager createServerManager(final Server master,
      final MasterServices services)
      throws IOException {
    // We put this out here in a method so can do a Mockito.spy and stub it out
    // through getting it from a mock MasterServices.
    return new ServerManager(master, services);
  }

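  /**
   * Close and remove from ZK any hbase:meta replicas beyond the currently
   * configured replica count, e.g. after the count was lowered across a
   * master restart.
   */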
  private void unassignExcessMetaReplica(ZooKeeperWatcher zkw, int numMetaReplicasConfigured) {
    // unassign the unneeded replicas (for e.g., if the previous master was configured
    // with a replication of 3 and now it is 2, we need to unassign the 1 unneeded replica)
    try {
      List<String> metaReplicaZnodes = zooKeeper.getMetaReplicaNodes();
      for (String metaReplicaZnode : metaReplicaZnodes) {
        int replicaId = zooKeeper.getMetaReplicaIdFromZnode(metaReplicaZnode);
        if (replicaId >= numMetaReplicasConfigured) {
          RegionState r = MetaTableLocator.getMetaRegionState(zkw, replicaId);
          LOG.info("Closing excess replica of meta region " + r.getRegion());
          // send a close and wait for a max of 30 seconds
          ServerManager.closeRegionSilentlyAndWait(getConnection(), r.getServerName(),
              r.getRegion(), 30000);
          ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(replicaId));
        }
      }
    } catch (Exception ex) {
      // ignore the exception since we don't want the master to be wedged due to potential
      // issues in the cleanup of the extra regions. We can do that cleanup via hbck or manually
      LOG.warn("Ignoring exception " + ex);
    }
  }

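  /**
   * Check <code>hbase:meta</code> is assigned. If not, assign it. Also splits
   * the meta WALs of previously failed meta servers when running in log
   * replay mode.
   */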
  void assignMeta(MonitoredTask status, Set<ServerName> previouslyFailedMetaRSs, int replicaId)
      throws InterruptedException, IOException, KeeperException {
    // Work on meta region
    int assigned = 0;
    long timeout = this.conf.getLong("hbase.catalog.verification.timeout", 1000);
    if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
      status.setStatus("Assigning hbase:meta region");
    } else {
      status.setStatus("Assigning hbase:meta region, replicaId " + replicaId);
    }
    // Get current meta state from zk.
    RegionStates regionStates = assignmentManager.getRegionStates();
    RegionState metaState = MetaTableLocator.getMetaRegionState(getZooKeeper(), replicaId);
    HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(HRegionInfo.FIRST_META_REGIONINFO,
        replicaId);
    ServerName currentMetaServer = metaState.getServerName();
    if (!ConfigUtil.useZKForAssignment(conf)) {
      regionStates.createRegionState(hri, metaState.getState(),
          currentMetaServer, null);
    } else {
      regionStates.createRegionState(hri);
    }
    boolean rit = this.assignmentManager.
        processRegionInTransitionAndBlockUntilAssigned(hri);
    boolean metaRegionLocation = metaTableLocator.verifyMetaRegionLocation(
        this.getConnection(), this.getZooKeeper(), timeout, replicaId);
    if (!metaRegionLocation || !metaState.isOpened()) {
      // Meta location is not verified. It should be in transition, or offline.
      // We will wait for it to be assigned below.
      assigned++;
      if (!ConfigUtil.useZKForAssignment(conf)) {
        assignMetaZkLess(regionStates, metaState, timeout, previouslyFailedMetaRSs);
      } else if (!rit) {
        // Assign meta since not already in transition
        if (currentMetaServer != null) {
          // If the meta server is not known to be dead or online,
          // just split the meta log, and don't expire it since this
          // could be a full cluster restart. Otherwise, we will think
          // this is a failover and lose previous region locations.
          // If it is really a failover case, AM will find out in rebuilding
          // user regions. Otherwise, we are good since all logs are split
          // or known to be replayed before user regions are assigned.
          if (serverManager.isServerOnline(currentMetaServer)) {
            LOG.info("Forcing expire of " + currentMetaServer);
            serverManager.expireServer(currentMetaServer);
          }
          if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
            splitMetaLogBeforeAssignment(currentMetaServer);
            previouslyFailedMetaRSs.add(currentMetaServer);
          }
        }
        assignmentManager.assignMeta(hri);
      }
    } else {
      // Region already assigned. We didn't assign it. Add to in-memory state.
      regionStates.updateRegionState(hri, State.OPEN, currentMetaServer);
      this.assignmentManager.regionOnline(hri, currentMetaServer);
    }

    if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) enableMeta(TableName.META_TABLE_NAME);

    if ((RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode())
        && (!previouslyFailedMetaRSs.isEmpty())) {
      // replay WAL edits mode needs a new hbase:meta RS assigned first
      status.setStatus("replaying log for Meta Region");
      this.fileSystemManager.splitMetaLog(previouslyFailedMetaRSs);
    }

    // Make sure a hbase:meta location is set. We need to enable server crash processing
    // here because if the meta region server has died at this point, we need it to be
    // re-assigned so that system tables can be assigned.
    // No need to wait for meta when assigned = 0, since meta was just verified.
    if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) enableCrashedServerProcessing(assigned != 0);
    LOG.info("hbase:meta with replicaId " + replicaId + " assigned=" + assigned + ", rit=" + rit +
        ", location=" + metaTableLocator.getMetaRegionLocation(this.getZooKeeper(), replicaId));
    status.setStatus("META assigned.");
  }

  private void assignMetaZkLess(RegionStates regionStates, RegionState regionState, long timeout,
      Set<ServerName> previouslyFailedRs) throws IOException, KeeperException {
    ServerName currentServer = regionState.getServerName();
    if (serverManager.isServerOnline(currentServer)) {
      LOG.info("Meta was in transition on " + currentServer);
      assignmentManager.processRegionInTransitionZkLess();
    } else {
      if (currentServer != null) {
        if (regionState.getRegion().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
          splitMetaLogBeforeAssignment(currentServer);
          regionStates.logSplit(HRegionInfo.FIRST_META_REGIONINFO);
          previouslyFailedRs.add(currentServer);
        }
      }
      LOG.info("Re-assigning hbase:meta, it was on " + currentServer);
      regionStates.updateRegionState(regionState.getRegion(), State.OFFLINE);
      assignmentManager.assignMeta(regionState.getRegion());
    }
  }

  void initNamespace() throws IOException {
    // create namespace manager
    tableNamespaceManager = new TableNamespaceManager(this);
    tableNamespaceManager.start();
  }

  boolean isCatalogJanitorEnabled() {
    return catalogJanitorChore != null ?
        catalogJanitorChore.getEnabled() : false;
  }

  private void splitMetaLogBeforeAssignment(ServerName currentMetaServer) throws IOException {
    if (RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode()) {
      // In log replay mode, we mark hbase:meta region as recovering in ZK
      Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
      regions.add(HRegionInfo.FIRST_META_REGIONINFO);
      this.fileSystemManager.prepareLogReplay(currentMetaServer, regions);
    } else {
      // In recovered.edits mode: create recovered edits file for hbase:meta server
      this.fileSystemManager.splitMetaLog(currentMetaServer);
    }
  }

  private void enableCrashedServerProcessing(final boolean waitForMeta)
      throws IOException, InterruptedException {
    // If crashed server processing is disabled, we enable it and expire those dead but not
    // expired servers. This is required so that if meta is assigning to a server which dies
    // after assignMeta starts assignment, the region can be re-assigned. Otherwise, we would
    // be stuck here waiting forever if waitForMeta is specified.
    if (!isServerCrashProcessingEnabled()) {
      setServerCrashProcessingEnabled(true);
      this.serverManager.processQueuedDeadServers();
    }

    if (waitForMeta) {
      metaTableLocator.waitMetaRegionLocation(this.getZooKeeper());
      // Above check waits for general meta availability. However, this does not
      // guarantee that the transition has completed.
      this.assignmentManager.waitForAssignment(HRegionInfo.FIRST_META_REGIONINFO);
    }
  }

  private void enableMeta(TableName metaTableName) {
    if (!this.assignmentManager.getTableStateManager().isTableState(metaTableName,
        ZooKeeperProtos.Table.State.ENABLED)) {
      this.assignmentManager.setEnabledTable(metaTableName);
    }
  }

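  /**
   * Returns the set of region server names recorded under the hbase:meta
   * recovering-regions ZK node, i.e. servers whose meta WALs may still need
   * recovery.
   */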
  private Set<ServerName> getPreviouslyFailedMetaServersFromZK() throws KeeperException {
    Set<ServerName> result = new HashSet<ServerName>();
    String metaRecoveringZNode = ZKUtil.joinZNode(zooKeeper.recoveringRegionsZNode,
        HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
    List<String> regionFailedServers = ZKUtil.listChildrenNoWatch(zooKeeper, metaRecoveringZNode);
    if (regionFailedServers == null) return result;

    for (String failedServer : regionFailedServers) {
      ServerName server = ServerName.parseServerName(failedServer);
      result.add(server);
    }
    return result;
  }

  @Override
  public TableDescriptors getTableDescriptors() {
    return this.tableDescriptors;
  }

  @Override
  public ServerManager getServerManager() {
    return this.serverManager;
  }

  @Override
  public MasterFileSystem getMasterFileSystem() {
    return this.fileSystemManager;
  }

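  /*
   * Start up all services. If any of these threads gets an unhandled exception
   * then they just die with a logged message. This should be fine because in
   * general, we do not expect the master to get such unhandled exceptions as
   * OOMEs; it should be lightly loaded. See what HRegionServer does if there is
   * a need to install an unexpected exception handler.
   */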
  private void startServiceThreads() throws IOException {
    // Start the executor service pools
    this.service.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
        conf.getInt("hbase.master.executor.openregion.threads", 5));
    this.service.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
        conf.getInt("hbase.master.executor.closeregion.threads", 5));
    this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
        conf.getInt("hbase.master.executor.serverops.threads", 5));
    this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
        conf.getInt("hbase.master.executor.serverops.threads", 5));
    this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
        conf.getInt("hbase.master.executor.logreplayops.threads", 10));

    // We depend on there being only one instance of this executor running
    // at a time. To do concurrency, would need fencing of enable/disable of
    // tables.
    this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
    startProcedureExecutor();

    // Start log cleaner thread
    int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
    this.logCleaner =
        new LogCleaner(cleanerInterval,
            this, conf, getMasterFileSystem().getFileSystem(),
            getMasterFileSystem().getOldLogDir());
    getChoreService().scheduleChore(logCleaner);

    // start the hfile archive cleaner thread
    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    Map<String, Object> params = new HashMap<String, Object>();
    params.put(MASTER, this);
    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
        .getFileSystem(), archiveDir, params);
    getChoreService().scheduleChore(hfileCleaner);
    serviceStarted = true;
    if (LOG.isTraceEnabled()) {
      LOG.trace("Started service threads");
    }
    if (!conf.getBoolean(HConstants.ZOOKEEPER_USEMULTI, true)) {
      try {
        replicationZKLockCleanerChore = new ReplicationZKLockCleanerChore(this, this,
            cleanerInterval, this.getZooKeeper(), this.conf);
        getChoreService().scheduleChore(replicationZKLockCleanerChore);
      } catch (Exception e) {
        LOG.error("start replicationZKLockCleanerChore failed", e);
      }
    }
  }

  @Override
  protected void sendShutdownInterrupt() {
    super.sendShutdownInterrupt();
    stopProcedureExecutor();
  }

  @Override
  protected void stopServiceThreads() {
    if (masterJettyServer != null) {
      LOG.info("Stopping master jetty server");
      try {
        masterJettyServer.stop();
      } catch (Exception e) {
        LOG.error("Failed to stop master jetty server", e);
      }
    }
    super.stopServiceThreads();
    stopChores();

    // Wait for all the remaining region servers to report in IFF we were
    // running a cluster shutdown AND we were NOT aborting.
    if (!isAborted() && this.serverManager != null &&
        this.serverManager.isClusterShutdown()) {
      this.serverManager.letRegionServersShutdown();
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Stopping service threads");
    }
    // Clean up and close up shop
    if (this.logCleaner != null) this.logCleaner.cancel(true);
    if (this.hfileCleaner != null) this.hfileCleaner.cancel(true);
    if (this.replicationZKLockCleanerChore != null) this.replicationZKLockCleanerChore.cancel(true);
    if (this.quotaManager != null) this.quotaManager.stop();
    if (this.activeMasterManager != null) this.activeMasterManager.stop();
    if (this.serverManager != null) this.serverManager.stop();
    if (this.assignmentManager != null) this.assignmentManager.stop();
    if (this.fileSystemManager != null) this.fileSystemManager.stop();
    if (this.mpmHost != null) this.mpmHost.stop("server shutting down.");
  }

  private void startProcedureExecutor() throws IOException {
    final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
    final Path logDir = new Path(fileSystemManager.getRootDir(),
        MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);

    procedureStore = new WALProcedureStore(conf, fileSystemManager.getFileSystem(), logDir,
        new MasterProcedureEnv.WALStoreLeaseRecovery(this));
    procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
    procedureExecutor = new ProcedureExecutor(conf, procEnv, procedureStore,
        procEnv.getProcedureQueue());

    final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
        Math.max(Runtime.getRuntime().availableProcessors(),
            MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
    final boolean abortOnCorruption = conf.getBoolean(
        MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
        MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
    procedureStore.start(numThreads);
    procedureExecutor.start(numThreads, abortOnCorruption);
  }

  private void stopProcedureExecutor() {
    if (procedureExecutor != null) {
      procedureExecutor.stop();
    }

    if (procedureStore != null) {
      procedureStore.stop(isAborted());
    }
  }

  private void stopChores() {
    if (this.balancerChore != null) {
      this.balancerChore.cancel(true);
    }
    if (this.normalizerChore != null) {
      this.normalizerChore.cancel(true);
    }
    if (this.clusterStatusChore != null) {
      this.clusterStatusChore.cancel(true);
    }
    if (this.catalogJanitorChore != null) {
      this.catalogJanitorChore.cancel(true);
    }
    if (this.clusterStatusPublisherChore != null) {
      clusterStatusPublisherChore.cancel(true);
    }
    if (this.periodicDoMetricsChore != null) {
      periodicDoMetricsChore.cancel();
    }
  }

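  /**
   * @return the InetAddress of the remote caller, or the local RPC address if
   *   the call came from this server itself (matched by port and start code)
   */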
  InetAddress getRemoteInetAddress(final int port,
      final long serverStartCode) throws UnknownHostException {
    // Do it out here in its own little method so can fake an address when
    // mocking up in tests.
    InetAddress ia = RpcServer.getRemoteIp();

    // The call could be from the local regionserver,
    // in which case, there is no remote address.
    if (ia == null && serverStartCode == startcode) {
      InetSocketAddress isa = rpcServices.getSocketAddress();
      if (isa != null && isa.getPort() == port) {
        ia = isa.getAddress();
      }
    }
    return ia;
  }

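  /**
   * @return Maximum time we should run the balancer for
   */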
  private int getBalancerCutoffTime() {
    int balancerCutoffTime =
        getConfiguration().getInt("hbase.balancer.max.balancing", -1);
    if (balancerCutoffTime == -1) {
      // if cutoff time isn't set, defaulting it to period time
      int balancerPeriod =
          getConfiguration().getInt("hbase.balancer.period", 300000);
      balancerCutoffTime = balancerPeriod;
      // if nonsense period, default cutoff to period as well
      if (balancerCutoffTime <= 0) balancerCutoffTime = balancerPeriod;
    }
    return balancerCutoffTime;
  }

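  /**
   * Trigger a run of the load balancer. The run is skipped (returning false)
   * if the master is not initialized, the balancer switch is off, regions are
   * in transition, or dead servers are still being processed; a coprocessor
   * may also veto the run.
   */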
  public boolean balance() throws IOException {
    // if master not initialized, don't run balancer.
    if (!isInitialized()) {
      LOG.debug("Master has not been initialized, don't run balancer.");
      return false;
    }
    // Do this call outside of synchronized block.
    int maximumBalanceTime = getBalancerCutoffTime();
    synchronized (this.balancer) {
      // If balance not true, don't run balancer.
      if (!this.loadBalancerTracker.isBalancerOn()) return false;
      // Only allow one balance run at a time.
      if (this.assignmentManager.getRegionStates().isRegionsInTransition()) {
        Map<String, RegionState> regionsInTransition =
            this.assignmentManager.getRegionStates().getRegionsInTransition();
        LOG.debug("Not running balancer because " + regionsInTransition.size() +
            " region(s) in transition: " + org.apache.commons.lang.StringUtils.
            abbreviate(regionsInTransition.toString(), 256));
        return false;
      }
      if (this.serverManager.areDeadServersInProgress()) {
        LOG.debug("Not running balancer because processing dead regionserver(s): " +
            this.serverManager.getDeadServers());
        return false;
      }

      if (this.cpHost != null) {
        try {
          if (this.cpHost.preBalance()) {
            LOG.debug("Coprocessor bypassing balancer request");
            return false;
          }
        } catch (IOException ioe) {
          LOG.error("Error invoking master coprocessor preBalance()", ioe);
          return false;
        }
      }

      Map<TableName, Map<ServerName, List<HRegionInfo>>> assignmentsByTable =
          this.assignmentManager.getRegionStates().getAssignmentsByTable();

      List<RegionPlan> plans = new ArrayList<RegionPlan>();
      // Give the balancer the current cluster state.
      this.balancer.setClusterStatus(getClusterStatus());
      for (Map<ServerName, List<HRegionInfo>> assignments : assignmentsByTable.values()) {
        List<RegionPlan> partialPlans = this.balancer.balanceCluster(assignments);
        if (partialPlans != null) plans.addAll(partialPlans);
      }
      long cutoffTime = System.currentTimeMillis() + maximumBalanceTime;
      int rpCount = 0;  // number of RegionPlans balanced so far
      long totalRegPlanExecTime = 0;
      if (plans != null && !plans.isEmpty()) {
        for (RegionPlan plan: plans) {
          LOG.info("balance " + plan);
          long balStartTime = System.currentTimeMillis();
          // TODO: bulk assign
          this.assignmentManager.balance(plan);
          totalRegPlanExecTime += System.currentTimeMillis() - balStartTime;
          rpCount++;
          if (rpCount < plans.size() &&
              // if performing next balance exceeds cutoff time, exit the loop
              (System.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) {
            // TODO: After balance, there should not be a cutoff time (keeping it as
            // a security net for now)
            LOG.debug("No more balancing till next balance run; maximumBalanceTime=" +
                maximumBalanceTime);
            break;
          }
        }
      }
      if (this.cpHost != null) {
        try {
          this.cpHost.postBalance(rpCount < plans.size() ? plans.subList(0, rpCount) : plans);
        } catch (IOException ioe) {
          // balancing already succeeded so don't change the result
          LOG.error("Error invoking master coprocessor postBalance()", ioe);
        }
      }
    }
    // If LoadBalancer did not generate any plans, it means the cluster is already balanced.
    // Return true indicating a success.
    return true;
  }

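  /**
   * Perform normalization of cluster (invoked by {@link RegionNormalizerChore}).
   *
   * @return true if normalization step was performed successfully, false otherwise
   *   (specifically, if HMaster hasn't been initialized properly or normalization
   *   is globally disabled)
   */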
  public boolean normalizeRegions() throws IOException, CoordinatedStateException {
    if (!isInitialized()) {
      LOG.debug("Master has not been initialized, don't run region normalizer.");
      return false;
    }

    if (!this.regionNormalizerTracker.isNormalizerOn()) {
      LOG.debug("Region normalization is disabled, don't run region normalizer.");
      return false;
    }

    synchronized (this.normalizer) {
      // Don't run the normalizer concurrently
      List<TableName> allEnabledTables = new ArrayList<>(
          this.assignmentManager.getTableStateManager().getTablesInStates(
              ZooKeeperProtos.Table.State.ENABLED));

      Collections.shuffle(allEnabledTables);

      for (TableName table : allEnabledTables) {
        if (quotaManager.getNamespaceQuotaManager() != null &&
            quotaManager.getNamespaceQuotaManager().getState(table.getNamespaceAsString()) != null) {
          LOG.debug("Skipping normalizing " + table + " since its namespace has quota");
          continue;
        }
        if (table.isSystemTable() || (getTableDescriptors().get(table) != null &&
            !getTableDescriptors().get(table).isNormalizationEnabled())) {
          LOG.debug("Skipping normalization for table: " + table + ", as it's either system"
              + " table or doesn't have auto normalization turned on");
          continue;
        }
        List<NormalizationPlan> plans = this.normalizer.computePlanForTable(table);
        if (plans != null) {
          for (NormalizationPlan plan : plans) {
            plan.execute(clusterConnection.getAdmin());
          }
        }
      }
    }
    // If the normalizer did not generate any plans, it means the cluster is already balanced.
    // Return true indicating a success.
    return true;
  }

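  /**
   * @return Client info for use as prefix on an audit log string; who did an action
   */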
  String getClientIdAuditPrefix() {
    return "Client=" + RpcServer.getRequestUserName() + "/" + RpcServer.getRemoteAddress();
  }

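  /**
   * Switch for the background CatalogJanitor thread.
   * Used for testing. The thread will continue to run. It will just be a noop
   * if disabled.
   * @param b If false, the catalog janitor won't do its scan.
   */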
  public void setCatalogJanitorEnabled(final boolean b) {
    this.catalogJanitorChore.setEnabled(b);
  }

  @Override
  public void dispatchMergingRegions(final HRegionInfo region_a,
      final HRegionInfo region_b, final boolean forcible) throws IOException {
    checkInitialized();
    this.service.submit(new DispatchMergingRegionHandler(this,
        this.catalogJanitorChore, region_a, region_b, forcible));
  }

  void move(final byte[] encodedRegionName,
      final byte[] destServerName) throws HBaseIOException {
    RegionState regionState = assignmentManager.getRegionStates().
        getRegionState(Bytes.toString(encodedRegionName));
    if (regionState == null) {
      throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
    }

    HRegionInfo hri = regionState.getRegion();
    ServerName dest;
    if (destServerName == null || destServerName.length == 0) {
      LOG.info("Passed destination servername is null/empty so " +
          "choosing a server at random");
      final List<ServerName> destServers = this.serverManager.createDestinationServersList(
          regionState.getServerName());
      dest = balancer.randomAssignment(hri, destServers);
      if (dest == null) {
        LOG.debug("Unable to determine a plan to assign " + hri);
        return;
      }
    } else {
      dest = ServerName.valueOf(Bytes.toString(destServerName));
      if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer
          && !((BaseLoadBalancer)balancer).shouldBeOnMaster(hri)) {
        // If the balancer thinks this region should not be on the master,
        // moving it here would only be undone by the next balancer run,
        // so reject the move up front.
        LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
            + " to avoid unnecessary region moving later by load balancer,"
            + " because it should not be on master");
        return;
      }
    }

    if (dest.equals(regionState.getServerName())) {
      LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
          + " because region already assigned to the same server " + dest + ".");
      return;
    }

    // Now we can do the move
    RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);

    try {
      checkInitialized();
      if (this.cpHost != null) {
        if (this.cpHost.preMove(hri, rp.getSource(), rp.getDestination())) {
          return;
        }
      }
      // warmup the region on the destination before initiating the move. this call
      // is synchronous and invokes the region on the other RS, priming its block cache.
      serverManager.sendRegionWarmup(rp.getDestination(), hri);

      LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
      this.assignmentManager.balance(rp);
      if (this.cpHost != null) {
        this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
      }
    } catch (IOException ioe) {
      if (ioe instanceof HBaseIOException) {
        throw (HBaseIOException)ioe;
      }
      throw new HBaseIOException(ioe);
    }
  }

  @Override
  public long createTable(
      final HTableDescriptor hTableDescriptor,
      final byte[][] splitKeys,
      final long nonceGroup,
      final long nonce) throws IOException {
    if (isStopped()) {
      throw new MasterNotRunningException();
    }

    String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
    ensureNamespaceExists(namespace);

    final HRegionInfo[] newRegions =
        ModifyRegionUtils.createHRegionInfos(hTableDescriptor, splitKeys);
    checkInitialized();
    sanityCheckTableDescriptor(hTableDescriptor);

    return MasterProcedureUtil.submitProcedure(
        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
      @Override
      protected void run() throws IOException {
        getMaster().getMasterCoprocessorHost().preCreateTable(hTableDescriptor, newRegions);

        LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);

        // TODO: We can handle/merge duplicate requests, and differentiate the case of
        //       TableExistsException by saying if the schema is the same or not.
        ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
        submitProcedure(new CreateTableProcedure(
            procedureExecutor.getEnvironment(), hTableDescriptor, newRegions, latch));
        latch.await();

        getMaster().getMasterCoprocessorHost().postCreateTable(hTableDescriptor, newRegions);
      }

      @Override
      protected String getDescription() {
        return "CreateTableProcedure";
      }
    });
  }

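  /**
   * Checks whether the table conforms to some sane limits, and configured
   * values (compression, etc.) work. Throws an exception if something is wrong.
   */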
  private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
    final String CONF_KEY = "hbase.table.sanity.checks";
    boolean logWarn = false;
    if (!conf.getBoolean(CONF_KEY, true)) {
      logWarn = true;
    }
    String tableVal = htd.getConfigurationValue(CONF_KEY);
    if (tableVal != null && !Boolean.valueOf(tableVal)) {
      logWarn = true;
    }

    // check max file size
    long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
    long maxFileSize = htd.getMaxFileSize();
    if (maxFileSize < 0) {
      maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
    }
    if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
      String message = "MAX_FILESIZE for table descriptor or "
          + "\"hbase.hregion.max.filesize\" (" + maxFileSize
          + ") is too small, which might cause over splitting into unmanageable "
          + "number of regions.";
      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
    }

    // check flush size
    long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
    long flushSize = htd.getMemStoreFlushSize();
    if (flushSize < 0) {
      flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
    }
    if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
      String message = "MEMSTORE_FLUSHSIZE for table descriptor or "
          + "\"hbase.hregion.memstore.flush.size\" (" + flushSize + ") is too small, which might cause"
          + " very frequent flushing.";
      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
    }

    // check that coprocessors and other specified plugin classes can be loaded
    try {
      checkClassLoading(conf, htd);
    } catch (Exception ex) {
      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, ex.getMessage(), null);
    }

    // check compression can be loaded
    try {
      checkCompression(htd);
    } catch (IOException e) {
      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, e.getMessage(), e);
    }

    // check encryption can be loaded
    try {
      checkEncryption(conf, htd);
    } catch (IOException e) {
      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, e.getMessage(), e);
    }
    // Verify compaction policy
    try {
      checkCompactionPolicy(conf, htd);
    } catch (IOException e) {
      warnOrThrowExceptionForFailure(false, CONF_KEY, e.getMessage(), e);
    }
    // check that we have at least 1 CF
    if (htd.getColumnFamilies().length == 0) {
      String message = "Table should have at least one column family.";
      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
    }

    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
      if (hcd.getTimeToLive() <= 0) {
        String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
      }

      // check blockSize
      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
        String message = "Block size for column family " + hcd.getNameAsString()
            + " must be between 1K and 16MB.";
        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
      }

      // check versions
      if (hcd.getMinVersions() < 0) {
        String message = "Min versions for column family " + hcd.getNameAsString()
            + " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
      }
      // max versions already being checked

      // HBASE-13776 Setting illegal versions for HColumnDescriptor
      // does not throw IllegalArgumentException
      // check minVersions <= maxVersions
      if (hcd.getMinVersions() > hcd.getMaxVersions()) {
        String message = "Min versions for column family " + hcd.getNameAsString()
            + " must be less than the Max versions.";
        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
      }

      // check replication scope
      if (hcd.getScope() < 0) {
        String message = "Replication scope for column family "
            + hcd.getNameAsString() + " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
      }

      // check data replication factor, it can be 0(default value) when user has not explicitly
      // set the value, in this case we use default replication factor set in the file system.
      if (hcd.getDFSReplication() < 0) {
        String message = "HFile Replication for column family " + hcd.getNameAsString()
            + " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
      }
    }
  }

  private void checkCompactionPolicy(Configuration conf, HTableDescriptor htd)
      throws IOException {
    // Resolve the effective compaction policy: table-level override first, then site config.
    String className =
        htd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
    if (className == null) {
      className =
          conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
            ExploringCompactionPolicy.class.getName());
    }

    int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
    String sv = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
    if (sv != null) {
      blockingFileCount = Integer.parseInt(sv);
    } else {
      blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
    }

    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
      String compactionPolicy =
          hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
      if (compactionPolicy == null) {
        compactionPolicy = className;
      }
      if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
        continue;
      }

      // FIFO compaction drops whole expired store files, so it has hard requirements.
      String message = null;

      // 1. Check TTL
      if (hcd.getTimeToLive() == HColumnDescriptor.DEFAULT_TTL) {
        message = "Default TTL is not supported for FIFO compaction";
        throw new IOException(message);
      }

      // 2. Check min versions
      if (hcd.getMinVersions() > 0) {
        message = "MIN_VERSION > 0 is not supported for FIFO compaction";
        throw new IOException(message);
      }

      // 3. Check blocking store file count
      String sbfc = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
      if (sbfc != null) {
        blockingFileCount = Integer.parseInt(sbfc);
      }
      if (blockingFileCount < 1000) {
        message =
            "blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount
                + " is below recommended minimum of 1000";
        throw new IOException(message);
      }
    }
  }

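  /**
   * Either logs the supplied sanity-check failure as a warning or rethrows it as a
   * {@link DoNotRetryIOException}, depending on whether the sanity checks have been
   * relaxed via the given configuration key.
   */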
  private static void warnOrThrowExceptionForFailure(boolean logWarn, String confKey,
      String message, Exception cause) throws IOException {
    if (!logWarn) {
      throw new DoNotRetryIOException(message + " Set " + confKey +
          " to false in the configuration or the table descriptor if you want to bypass " +
          "sanity checks", cause);
    }
    LOG.warn(message);
  }

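  /**
   * Registers this master as a backup master in ZooKeeper, then spins up a daemon
   * thread that blocks until this master wins the active-master lease, at which point
   * active-master initialization proceeds.
   * @param infoPort the web UI port to advertise alongside our address
   */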
  private void startActiveMasterManager(int infoPort) throws KeeperException {
    String backupZNode = ZKUtil.joinZNode(
      zooKeeper.backupMasterAddressesZNode, serverName.toString());
    /*
     * Add a ZNode for ourselves in the backup master directory since we
     * may not become the active master. If so, we want the actual active
     * master to know we are backup masters, so that it won't assign
     * regions to us if so configured.
     *
     * If we become the active master later, ActiveMasterManager will delete
     * this node explicitly. If we crash before then, ZooKeeper will delete
     * this node for us since it is ephemeral.
     */
    LOG.info("Adding backup master ZNode " + backupZNode);
    if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode,
        serverName, infoPort)) {
      LOG.warn("Failed create of " + backupZNode + " by " + serverName);
    }

    activeMasterManager.setInfoPort(infoPort);
    // Start a thread to try to become the active master, so we won't block here.
    Threads.setDaemonThreadRunning(new Thread(new Runnable() {
      @Override
      public void run() {
        int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT,
          HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
        // If we're a backup master, stall until the primary writes its address.
        if (conf.getBoolean(HConstants.MASTER_TYPE_BACKUP,
          HConstants.DEFAULT_MASTER_TYPE_BACKUP)) {
          LOG.debug("HMaster started in backup mode. "
            + "Stalling until master znode is written.");
          // This will only be a minute or so while the cluster starts up,
          // where we figure out whether we are a backup master or not.
          while (!activeMasterManager.hasActiveMaster()) {
            LOG.debug("Waiting for master address ZNode to be written "
              + "(Also watching cluster state node)");
            Threads.sleep(timeout);
          }
        }
        MonitoredTask status = TaskMonitor.get().createStatus("Master startup");
        status.setDescription("Master startup");
        try {
          if (activeMasterManager.blockUntilBecomingActiveMaster(timeout, status)) {
            finishActiveMasterInitialization(status);
          }
        } catch (Throwable t) {
          status.setStatus("Failed to become active: " + t.getMessage());
          LOG.fatal("Failed to become active master", t);
          // Likely a Hadoop version mismatch on the classpath.
          if (t instanceof NoClassDefFoundError &&
              t.getMessage()
                  .contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction")) {
            // Improved error message for this special case.
            abort("HBase is having a problem with its Hadoop jars. You may need to "
              + "recompile HBase against Hadoop version "
              + org.apache.hadoop.util.VersionInfo.getVersion()
              + " or change your hadoop jars to start properly", t);
          } else {
            abort("Unhandled exception. Starting shutdown.", t);
          }
        } finally {
          status.cleanup();
        }
      }
    }, getServerName().toShortString() + ".activeMasterManager"));
  }

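  /**
   * Verifies that the compression (and compaction compression) codecs configured on
   * every column family of the table can actually be loaded and used on this master.
   * No-op when master compression checking is disabled.
   */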
  private void checkCompression(final HTableDescriptor htd)
      throws IOException {
    if (!this.masterCheckCompression) return;
    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
      checkCompression(hcd);
    }
  }

  private void checkCompression(final HColumnDescriptor hcd)
      throws IOException {
    if (!this.masterCheckCompression) return;
    CompressionTest.testCompression(hcd.getCompression());
    CompressionTest.testCompression(hcd.getCompactionCompression());
  }

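  /**
   * Verifies that the encryption type and key configured on every column family of the
   * table can be resolved with the cluster's crypto settings. No-op when master
   * encryption checking is disabled.
   */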
  private void checkEncryption(final Configuration conf, final HTableDescriptor htd)
      throws IOException {
    if (!this.masterCheckEncryption) return;
    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
      checkEncryption(conf, hcd);
    }
  }

  private void checkEncryption(final Configuration conf, final HColumnDescriptor hcd)
      throws IOException {
    if (!this.masterCheckEncryption) return;
    EncryptionTest.testEncryption(conf, hcd.getEncryptionType(), hcd.getEncryptionKey());
  }

  private void checkClassLoading(final Configuration conf, final HTableDescriptor htd)
      throws IOException {
    RegionSplitPolicy.getSplitPolicyClass(htd, conf);
    RegionCoprocessorHost.testTableCoprocessorAttrs(conf, htd);
  }

  private static boolean isCatalogTable(final TableName tableName) {
    return tableName.equals(TableName.META_TABLE_NAME);
  }

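  /**
   * Deletes the given table by submitting a {@link DeleteTableProcedure}, invoking the
   * pre/post delete coprocessor hooks around it. Waits only for the procedure's prepare
   * phase, so sanity-check failures surface to the caller while the bulk of the work
   * continues asynchronously.
   *
   * <p>Illustrative call only; the nonce values would come from the client's nonce
   * generator, which is an assumption, not part of this class:
   * <pre>
   *   long procId = master.deleteTable(TableName.valueOf("t1"), nonceGroup, nonce);
   * </pre>
   *
   * @return the id of the procedure performing the delete
   */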
  @Override
  public long deleteTable(
      final TableName tableName,
      final long nonceGroup,
      final long nonce) throws IOException {
    checkInitialized();

    return MasterProcedureUtil.submitProcedure(
        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
      @Override
      protected void run() throws IOException {
        getMaster().getMasterCoprocessorHost().preDeleteTable(tableName);

        LOG.info(getClientIdAuditPrefix() + " delete " + tableName);

        // Block only until the procedure's prepare step completes, so that
        // sanity-check failures are reported back to this caller.
        ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
        submitProcedure(new DeleteTableProcedure(procedureExecutor.getEnvironment(),
            tableName, latch));
        latch.await();

        getMaster().getMasterCoprocessorHost().postDeleteTable(tableName);
      }

      @Override
      protected String getDescription() {
        return "DeleteTableProcedure";
      }
    });
  }

  @Override
  public void truncateTable(
      final TableName tableName,
      final boolean preserveSplits,
      final long nonceGroup,
      final long nonce) throws IOException {
    checkInitialized();

    MasterProcedureUtil.submitProcedure(
        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
      @Override
      protected void run() throws IOException {
        getMaster().getMasterCoprocessorHost().preTruncateTable(tableName);

        LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);

        long procId = submitProcedure(new TruncateTableProcedure(
            procedureExecutor.getEnvironment(), tableName, preserveSplits));
        ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);

        getMaster().getMasterCoprocessorHost().postTruncateTable(tableName);
      }

      @Override
      protected String getDescription() {
        return "TruncateTableProcedure";
      }
    });
  }

  @Override
  public void addColumn(
      final TableName tableName,
      final HColumnDescriptor columnDescriptor,
      final long nonceGroup,
      final long nonce)
      throws IOException {
    checkInitialized();
    checkCompression(columnDescriptor);
    checkEncryption(conf, columnDescriptor);

    MasterProcedureUtil.submitProcedure(
        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
      @Override
      protected void run() throws IOException {
        if (getMaster().getMasterCoprocessorHost().preAddColumn(tableName, columnDescriptor)) {
          return;
        }

        // Execute the operation synchronously - wait for the operation to complete
        // before continuing.
        long procId = submitProcedure(new AddColumnFamilyProcedure(
            procedureExecutor.getEnvironment(), tableName, columnDescriptor));
        ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);

        getMaster().getMasterCoprocessorHost().postAddColumn(tableName, columnDescriptor);
      }

      @Override
      protected String getDescription() {
        return "AddColumnFamilyProcedure";
      }
    });
  }

  @Override
  public void modifyColumn(
      final TableName tableName,
      final HColumnDescriptor descriptor,
      final long nonceGroup,
      final long nonce)
      throws IOException {
    checkInitialized();
    checkCompression(descriptor);
    checkEncryption(conf, descriptor);

    MasterProcedureUtil.submitProcedure(
        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
      @Override
      protected void run() throws IOException {
        if (getMaster().getMasterCoprocessorHost().preModifyColumn(tableName, descriptor)) {
          return;
        }

        LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);

        // Execute the operation synchronously - wait for the operation to complete
        // before continuing.
        long procId = submitProcedure(new ModifyColumnFamilyProcedure(
            procedureExecutor.getEnvironment(), tableName, descriptor));
        ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);

        getMaster().getMasterCoprocessorHost().postModifyColumn(tableName, descriptor);
      }

      @Override
      protected String getDescription() {
        return "ModifyColumnFamilyProcedure";
      }
    });
  }

  @Override
  public void deleteColumn(
      final TableName tableName,
      final byte[] columnName,
      final long nonceGroup,
      final long nonce)
      throws IOException {
    checkInitialized();

    MasterProcedureUtil.submitProcedure(
        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
      @Override
      protected void run() throws IOException {
        if (getMaster().getMasterCoprocessorHost().preDeleteColumn(tableName, columnName)) {
          return;
        }

        LOG.info(getClientIdAuditPrefix() + " delete " + Bytes.toString(columnName));

        // Execute the operation synchronously - wait for the operation to complete
        // before continuing.
        long procId = submitProcedure(new DeleteColumnFamilyProcedure(
            procedureExecutor.getEnvironment(), tableName, columnName));
        ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);

        getMaster().getMasterCoprocessorHost().postDeleteColumn(tableName, columnName);
      }

      @Override
      protected String getDescription() {
        return "DeleteColumnFamilyProcedure";
      }
    });
  }

  @Override
  public long enableTable(final TableName tableName, final long nonceGroup, final long nonce)
      throws IOException {
    checkInitialized();

    return MasterProcedureUtil.submitProcedure(
        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
      @Override
      protected void run() throws IOException {
        getMaster().getMasterCoprocessorHost().preEnableTable(tableName);

        LOG.info(getClientIdAuditPrefix() + " enable " + tableName);

        // Execute the operation asynchronously - the client will check the progress
        // of the operation. Before returning, make sure the table is prepared to be
        // enabled (locked, state set) by waiting on the prepare latch; if the prepare
        // step throws, the exception is rethrown to the caller.
        final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch();
        submitProcedure(new EnableTableProcedure(procedureExecutor.getEnvironment(),
            tableName, false, prepareLatch));
        prepareLatch.await();

        getMaster().getMasterCoprocessorHost().postEnableTable(tableName);
      }

      @Override
      protected String getDescription() {
        return "EnableTableProcedure";
      }
    });
  }

  @Override
  public long disableTable(final TableName tableName, final long nonceGroup, final long nonce)
      throws IOException {
    checkInitialized();

    return MasterProcedureUtil.submitProcedure(
        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
      @Override
      protected void run() throws IOException {
        getMaster().getMasterCoprocessorHost().preDisableTable(tableName);

        LOG.info(getClientIdAuditPrefix() + " disable " + tableName);

        // Execute the operation asynchronously - the client will check the progress
        // of the operation. Before returning, make sure the table is prepared to be
        // disabled (locked, state set) by waiting on the prepare latch; if the prepare
        // step throws, the exception is rethrown to the caller.
        final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch();
        submitProcedure(new DisableTableProcedure(procedureExecutor.getEnvironment(),
            tableName, false, prepareLatch));
        prepareLatch.await();

        getMaster().getMasterCoprocessorHost().postDisableTable(tableName);
      }

      @Override
      protected String getDescription() {
        return "DisableTableProcedure";
      }
    });
  }

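  /**
   * Return the region and current deployment for the region containing the given row.
   * If the region cannot be found, returns null. If it is found, but not currently
   * deployed, the second element of the pair may be null.
   */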
  @VisibleForTesting
  Pair<HRegionInfo, ServerName> getTableRegionForRow(
      final TableName tableName, final byte [] rowKey)
      throws IOException {
    final AtomicReference<Pair<HRegionInfo, ServerName>> result =
        new AtomicReference<Pair<HRegionInfo, ServerName>>(null);

    MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
      @Override
      public boolean processRow(Result data) throws IOException {
        if (data == null || data.size() <= 0) {
          return true;
        }
        Pair<HRegionInfo, ServerName> pair = HRegionInfo.getHRegionInfoAndServerName(data);
        if (pair == null) {
          return false;
        }
        if (!pair.getFirst().getTable().equals(tableName)) {
          return false;
        }
        result.set(pair);
        return true;
      }
    };

    MetaScanner.metaScan(clusterConnection, visitor, tableName, rowKey, 1);
    return result.get();
  }

  @Override
  public void modifyTable(
      final TableName tableName,
      final HTableDescriptor descriptor,
      final long nonceGroup,
      final long nonce)
      throws IOException {
    checkInitialized();
    sanityCheckTableDescriptor(descriptor);

    MasterProcedureUtil.submitProcedure(
        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
      @Override
      protected void run() throws IOException {
        getMaster().getMasterCoprocessorHost().preModifyTable(tableName, descriptor);

        LOG.info(getClientIdAuditPrefix() + " modify " + tableName);

        // Execute the operation synchronously - wait for the operation to complete
        // before continuing.
        long procId = submitProcedure(new ModifyTableProcedure(
            procedureExecutor.getEnvironment(), descriptor));
        ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);

        getMaster().getMasterCoprocessorHost().postModifyTable(tableName, descriptor);
      }

      @Override
      protected String getDescription() {
        return "ModifyTableProcedure";
      }
    });
  }

  @Override
  public void checkTableModifiable(final TableName tableName)
      throws IOException, TableNotFoundException, TableNotDisabledException {
    if (isCatalogTable(tableName)) {
      throw new IOException("Can't modify catalog tables");
    }
    if (!MetaTableAccessor.tableExists(getConnection(), tableName)) {
      throw new TableNotFoundException(tableName);
    }
    if (!getAssignmentManager().getTableStateManager().
        isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) {
      throw new TableNotDisabledException(tableName);
    }
  }

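  /**
   * Builds a snapshot of the current cluster state: online and dead region servers,
   * backup masters read from ZooKeeper, regions in transition, loaded coprocessors,
   * and the balancer switch. Components that are not yet initialized are reported as
   * null (or false for the balancer).
   * @return cluster status
   */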
  public ClusterStatus getClusterStatus() throws InterruptedIOException {
    // Build set of backup masters from ZK nodes
    List<String> backupMasterStrings;
    try {
      backupMasterStrings = ZKUtil.listChildrenNoWatch(this.zooKeeper,
        this.zooKeeper.backupMasterAddressesZNode);
    } catch (KeeperException e) {
      LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
      backupMasterStrings = null;
    }

    List<ServerName> backupMasters = null;
    if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) {
      backupMasters = new ArrayList<ServerName>(backupMasterStrings.size());
      for (String s: backupMasterStrings) {
        try {
          byte [] bytes;
          try {
            bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
              this.zooKeeper.backupMasterAddressesZNode, s));
          } catch (InterruptedException e) {
            throw new InterruptedIOException();
          }
          if (bytes != null) {
            ServerName sn;
            try {
              sn = ServerName.parseFrom(bytes);
            } catch (DeserializationException e) {
              LOG.warn("Failed parse, skipping registering backup server", e);
              continue;
            }
            backupMasters.add(sn);
          }
        } catch (KeeperException e) {
          LOG.warn(this.zooKeeper.prefix("Unable to get information about " +
            "backup servers"), e);
        }
      }
      Collections.sort(backupMasters, new Comparator<ServerName>() {
        @Override
        public int compare(ServerName s1, ServerName s2) {
          return s1.getServerName().compareTo(s2.getServerName());
        }});
    }

    String clusterId = fileSystemManager != null ?
        fileSystemManager.getClusterId().toString() : null;
    Map<String, RegionState> regionsInTransition = assignmentManager != null ?
        assignmentManager.getRegionStates().getRegionsInTransition() : null;
    String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null;
    boolean balancerOn = loadBalancerTracker != null ?
        loadBalancerTracker.isBalancerOn() : false;
    Map<ServerName, ServerLoad> onlineServers = null;
    Set<ServerName> deadServers = null;
    if (serverManager != null) {
      deadServers = serverManager.getDeadServers().copyServerNames();
      onlineServers = serverManager.getOnlineServers();
    }
    return new ClusterStatus(VersionInfo.getVersion(), clusterId,
      onlineServers, deadServers, serverName, backupMasters,
      regionsInTransition, coprocessors, balancerOn);
  }
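  /**
   * The set of loaded coprocessors is stored in a static set. Since it's statically
   * allocated, it does not require that HMaster's cpHost be initialized prior to
   * accessing it.
   * @return a String representation of the set of names of the loaded coprocessors
   */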
  public static String getLoadedCoprocessors() {
    return CoprocessorHost.getLoadedCoprocessors().toString();
  }

  /**
   * @return timestamp in millis when HMaster was started.
   */
  public long getMasterStartTime() {
    return startcode;
  }

  /**
   * @return timestamp in millis when HMaster became the active master.
   */
  public long getMasterActiveTime() {
    return masterActiveTime;
  }

  public int getRegionServerInfoPort(final ServerName sn) {
    RegionServerInfo info = this.regionServerTracker.getRegionServerInfo(sn);
    if (info == null || info.getInfoPort() == 0) {
      return conf.getInt(HConstants.REGIONSERVER_INFO_PORT,
        HConstants.DEFAULT_REGIONSERVER_INFOPORT);
    }
    return info.getInfoPort();
  }

  public String getRegionServerVersion(final ServerName sn) {
    RegionServerInfo info = this.regionServerTracker.getRegionServerInfo(sn);
    if (info != null && info.hasVersionInfo()) {
      return info.getVersionInfo().getVersion();
    }
    return "Unknown";
  }

  /**
   * @return array of coprocessor names loaded by the master
   */
  public String[] getMasterCoprocessors() {
    Set<String> masterCoprocessors = getMasterCoprocessorHost().getCoprocessors();
    return masterCoprocessors.toArray(new String[masterCoprocessors.size()]);
  }

  @Override
  public void abort(final String msg, final Throwable t) {
    if (isAborted() || isStopped()) {
      return;
    }
    if (cpHost != null) {
      // Dump the loaded coprocessors to aid debugging of coprocessor-induced aborts.
      LOG.fatal("Master server abort: loaded coprocessors are: " +
          getLoadedCoprocessors());
    }
    if (t != null) LOG.fatal(msg, t);

    try {
      stopMaster();
    } catch (IOException e) {
      LOG.error("Exception occurred while stopping master", e);
    }
  }

  @Override
  public ZooKeeperWatcher getZooKeeper() {
    return zooKeeper;
  }

  @Override
  public MasterCoprocessorHost getMasterCoprocessorHost() {
    return cpHost;
  }

  @Override
  public MasterQuotaManager getMasterQuotaManager() {
    return quotaManager;
  }

  @Override
  public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
    return procedureExecutor;
  }

  @Override
  public ServerName getServerName() {
    return this.serverName;
  }

  @Override
  public AssignmentManager getAssignmentManager() {
    return this.assignmentManager;
  }

  public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
    return rsFatals;
  }

  public void shutdown() throws IOException {
    if (cpHost != null) {
      cpHost.preShutdown();
    }

    if (this.serverManager != null) {
      this.serverManager.shutdownCluster();
    }
    if (this.clusterStatusTracker != null) {
      try {
        this.clusterStatusTracker.setClusterDown();
      } catch (KeeperException e) {
        LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
      }
    }
  }

  public void stopMaster() throws IOException {
    if (cpHost != null) {
      cpHost.preStopMaster();
    }
    stop("Stopped by " + Thread.currentThread().getName());
  }

  void checkServiceStarted() throws ServerNotRunningYetException {
    if (!serviceStarted) {
      throw new ServerNotRunningYetException("Server is not running yet");
    }
  }

  void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException {
    checkServiceStarted();
    if (!isInitialized()) {
      throw new PleaseHoldException("Master is initializing");
    }
  }

  void checkNamespaceManagerReady() throws IOException {
    checkInitialized();
    if (tableNamespaceManager == null ||
        !tableNamespaceManager.isTableAvailableAndInitialized()) {
      throw new IOException("Table Namespace Manager not ready yet, try again later");
    }
  }

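  /**
   * Report whether this master is currently the active master or not.
   * If not active master, we are parked on ZK waiting to become active.
   *
   * This method is used for testing.
   *
   * @return true if active master, false if not.
   */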
  public boolean isActiveMaster() {
    return isActiveMaster;
  }

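  /**
   * Report whether this master has completed with its initialization and is ready.
   * If ready, the master is also the active master. A standby master is never ready.
   *
   * This method is used for testing.
   *
   * @return true if master is ready to go, false if not.
   */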
  @Override
  public boolean isInitialized() {
    return initialized.isReady();
  }

  @VisibleForTesting
  public void setInitialized(boolean isInitialized) {
    procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
  }

  public ProcedureEvent getInitializedEvent() {
    return initialized;
  }

  /**
   * Server crash processing is held off until meta assignment completes, to prevent
   * processing of crashed servers while the meta region is not yet assigned.
   * @return true if crashed-server processing is enabled
   */
  @Override
  public boolean isServerCrashProcessingEnabled() {
    return serverCrashProcessingEnabled.isReady();
  }

  @VisibleForTesting
  public void setServerCrashProcessingEnabled(final boolean b) {
    procedureExecutor.getEnvironment().setEventReady(serverCrashProcessingEnabled, b);
  }

  public ProcedureEvent getServerCrashProcessingEnabledEvent() {
    return serverCrashProcessingEnabled;
  }

  /**
   * @return true if master initialization has reached the point where meta region
   * assignment is about to start
   */
  public boolean isInitializationStartsMetaRegionAssignment() {
    return this.initializationBeforeMetaAssignment;
  }

  public void assignRegion(HRegionInfo hri) {
    assignmentManager.assign(hri, true);
  }

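  /**
   * Compute the average load across all region servers.
   * Currently, this uses a very naive computation - it just uses the number of
   * regions being served, ignoring stats about number of requests.
   * @return the average load
   */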
  public double getAverageLoad() {
    if (this.assignmentManager == null) {
      return 0;
    }

    RegionStates regionStates = this.assignmentManager.getRegionStates();
    if (regionStates == null) {
      return 0;
    }
    return regionStates.getAverageLoad();
  }

  @Override
  public boolean registerService(Service instance) {
    // No stacking of instances is allowed for a single service name.
    Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType();
    if (coprocessorServiceHandlers.containsKey(serviceDesc.getFullName())) {
      LOG.error("Coprocessor service " + serviceDesc.getFullName()
          + " already registered, rejecting request from " + instance);
      return false;
    }

    coprocessorServiceHandlers.put(serviceDesc.getFullName(), instance);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Registered master coprocessor service: service=" + serviceDesc.getFullName());
    }
    return true;
  }

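  /**
   * Utility for constructing an instance of the passed HMaster class.
   *
   * <p>Minimal usage sketch; assumes the caller has already built a suitable
   * {@link Configuration} and {@link CoordinatedStateManager} (names here are
   * illustrative only):
   * <pre>
   *   HMaster master = HMaster.constructMaster(HMaster.class, conf, csm);
   * </pre>
   *
   * @return HMaster instance
   */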
  public static HMaster constructMaster(Class<? extends HMaster> masterClass,
      final Configuration conf, final CoordinatedStateManager cp) {
    try {
      Constructor<? extends HMaster> c =
          masterClass.getConstructor(Configuration.class, CoordinatedStateManager.class);
      return c.newInstance(conf, cp);
    } catch (Exception e) {
      // Unwrap the constructor's own exception so the real cause is reported.
      Throwable error = e;
      if (e instanceof InvocationTargetException &&
          ((InvocationTargetException)e).getTargetException() != null) {
        error = ((InvocationTargetException)e).getTargetException();
      }
      throw new RuntimeException("Failed construction of Master: " + masterClass.toString(),
          error);
    }
  }

  /**
   * @see org.apache.hadoop.hbase.master.HMasterCommandLine
   */
  public static void main(String [] args) {
    VersionInfo.logVersion();
    new HMasterCommandLine(HMaster.class).doMain(args);
  }

  public HFileCleaner getHFileCleaner() {
    return this.hfileCleaner;
  }

  /**
   * @return the underlying snapshot manager
   */
  public SnapshotManager getSnapshotManager() {
    return this.snapshotManager;
  }

  @Override
  public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
    TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
    checkNamespaceManagerReady();
    if (cpHost != null) {
      if (cpHost.preCreateNamespace(descriptor)) {
        return;
      }
    }
    LOG.info(getClientIdAuditPrefix() + " creating " + descriptor);
    tableNamespaceManager.create(descriptor);
    if (cpHost != null) {
      cpHost.postCreateNamespace(descriptor);
    }
  }

  @Override
  public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
    TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
    checkNamespaceManagerReady();
    if (cpHost != null) {
      if (cpHost.preModifyNamespace(descriptor)) {
        return;
      }
    }
    LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
    tableNamespaceManager.update(descriptor);
    if (cpHost != null) {
      cpHost.postModifyNamespace(descriptor);
    }
  }

  @Override
  public void deleteNamespace(String name) throws IOException {
    checkNamespaceManagerReady();
    if (cpHost != null) {
      if (cpHost.preDeleteNamespace(name)) {
        return;
      }
    }
    LOG.info(getClientIdAuditPrefix() + " delete " + name);
    tableNamespaceManager.remove(name);
    if (cpHost != null) {
      cpHost.postDeleteNamespace(name);
    }
  }

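  /**
   * Ensure that the specified namespace exists, otherwise throws a
   * NamespaceNotFoundException.
   * @param name the namespace to check
   * @throws IOException if the namespace manager is not ready yet
   * @throws NamespaceNotFoundException if the namespace does not exist
   */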
  protected void ensureNamespaceExists(final String name)
      throws IOException, NamespaceNotFoundException {
    checkNamespaceManagerReady();
    NamespaceDescriptor nsd = tableNamespaceManager.get(name);
    if (nsd == null) {
      throw new NamespaceNotFoundException(name);
    }
  }

  @Override
  public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
    checkNamespaceManagerReady();

    if (cpHost != null) {
      cpHost.preGetNamespaceDescriptor(name);
    }

    NamespaceDescriptor nsd = tableNamespaceManager.get(name);
    if (nsd == null) {
      throw new NamespaceNotFoundException(name);
    }

    if (cpHost != null) {
      cpHost.postGetNamespaceDescriptor(nsd);
    }

    return nsd;
  }

  @Override
  public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
    checkNamespaceManagerReady();

    final List<NamespaceDescriptor> descriptors = new ArrayList<NamespaceDescriptor>();
    boolean bypass = false;
    if (cpHost != null) {
      bypass = cpHost.preListNamespaceDescriptors(descriptors);
    }

    if (!bypass) {
      descriptors.addAll(tableNamespaceManager.list());

      if (cpHost != null) {
        cpHost.postListNamespaceDescriptors(descriptors);
      }
    }
    return descriptors;
  }

  @Override
  public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
      throws IOException {
    if (cpHost != null) {
      cpHost.preAbortProcedure(this.procedureExecutor, procId);
    }

    final boolean result = this.procedureExecutor.abort(procId, mayInterruptIfRunning);

    if (cpHost != null) {
      cpHost.postAbortProcedure();
    }

    return result;
  }

  @Override
  public List<ProcedureInfo> listProcedures() throws IOException {
    if (cpHost != null) {
      cpHost.preListProcedures();
    }

    final List<ProcedureInfo> procInfoList = this.procedureExecutor.listProcedures();

    if (cpHost != null) {
      cpHost.postListProcedures(procInfoList);
    }

    return procInfoList;
  }

  @Override
  public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
    ensureNamespaceExists(name);
    return listTableDescriptors(name, null, null, true);
  }

  @Override
  public List<TableName> listTableNamesByNamespace(String name) throws IOException {
    ensureNamespaceExists(name);
    return listTableNames(name, null, true);
  }

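  /**
   * Returns the list of table descriptors that match the specified request.
   * @param namespace the namespace to query, or null if querying for all
   * @param regex The regular expression to match against, or null if querying for all
   * @param tableNameList the list of table names, or null if querying for all
   * @param includeSysTables False to match only against userspace tables
   * @return the list of table descriptors
   */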
  public List<HTableDescriptor> listTableDescriptors(final String namespace, final String regex,
      final List<TableName> tableNameList, final boolean includeSysTables)
      throws IOException {
    final List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();

    boolean bypass = false;
    if (cpHost != null) {
      bypass = cpHost.preGetTableDescriptors(tableNameList, descriptors);
      // Invoke both hook overloads so older coprocessors still see the call.
      bypass |= cpHost.preGetTableDescriptors(tableNameList, descriptors, regex);
    }

    if (!bypass) {
      if (tableNameList == null || tableNameList.size() == 0) {
        // request for all TableDescriptors
        Collection<HTableDescriptor> htds;
        if (namespace != null && namespace.length() > 0) {
          htds = tableDescriptors.getByNamespace(namespace).values();
        } else {
          htds = tableDescriptors.getAll().values();
        }

        for (HTableDescriptor desc: htds) {
          if (includeSysTables || !desc.getTableName().isSystemTable()) {
            descriptors.add(desc);
          }
        }
      } else {
        for (TableName s: tableNameList) {
          HTableDescriptor desc = tableDescriptors.get(s);
          if (desc != null) {
            descriptors.add(desc);
          }
        }
      }

      // Retain only those matched by the regex.
      if (regex != null) {
        filterTablesByRegex(descriptors, Pattern.compile(regex));
      }

      if (cpHost != null) {
        cpHost.postGetTableDescriptors(descriptors);
        // Invoke both hook overloads so older coprocessors still see the call.
        cpHost.postGetTableDescriptors(tableNameList, descriptors, regex);
      }
    }
    return descriptors;
  }

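  /**
   * Returns the list of table names that match the specified request.
   * @param namespace the namespace to query, or null if querying for all
   * @param regex The regular expression to match against, or null if querying for all
   * @param includeSysTables False to match only against userspace tables
   * @return the list of table names
   */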
  public List<TableName> listTableNames(final String namespace, final String regex,
      final boolean includeSysTables) throws IOException {
    final List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();

    boolean bypass = false;
    if (cpHost != null) {
      bypass = cpHost.preGetTableNames(descriptors, regex);
    }

    if (!bypass) {
      // get all descriptors, scoped to the namespace if one was given
      Collection<HTableDescriptor> htds;
      if (namespace != null && namespace.length() > 0) {
        htds = tableDescriptors.getByNamespace(namespace).values();
      } else {
        htds = tableDescriptors.getAll().values();
      }

      for (HTableDescriptor htd: htds) {
        if (includeSysTables || !htd.getTableName().isSystemTable()) {
          descriptors.add(htd);
        }
      }

      // Retain only those matched by the regex.
      if (regex != null) {
        filterTablesByRegex(descriptors, Pattern.compile(regex));
      }

      if (cpHost != null) {
        cpHost.postGetTableNames(descriptors, regex);
      }
    }

    List<TableName> result = new ArrayList<TableName>(descriptors.size());
    for (HTableDescriptor htd: descriptors) {
      result.add(htd.getTableName());
    }
    return result;
  }

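  /**
   * Removes the table descriptors that don't match the pattern. Tables in the default
   * namespace are matched both with and without the namespace prefix.
   * @param descriptors list of table descriptors to filter
   * @param pattern the regex to use
   */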
  private static void filterTablesByRegex(final Collection<HTableDescriptor> descriptors,
      final Pattern pattern) {
    final String defaultNS = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR;
    Iterator<HTableDescriptor> itr = descriptors.iterator();
    while (itr.hasNext()) {
      HTableDescriptor htd = itr.next();
      String tableName = htd.getTableName().getNameAsString();
      boolean matched = pattern.matcher(tableName).matches();
      if (!matched && htd.getTableName().getNamespaceAsString().equals(defaultNS)) {
        matched = pattern.matcher(defaultNS + TableName.NAMESPACE_DELIM + tableName).matches();
      }
      if (!matched) {
        itr.remove();
      }
    }
  }

  @Override
  public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
    return getClusterStatus().getLastMajorCompactionTsForTable(table);
  }

  @Override
  public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
    return getClusterStatus().getLastMajorCompactionTsForRegion(regionName);
  }

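  /**
   * Queries the state of the {@link LoadBalancerTracker}. If the balancer is not
   * initialized, false is returned.
   * @return The state of the load balancer, or false if the load balancer isn't defined.
   */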
  public boolean isBalancerOn() {
    if (null == loadBalancerTracker) return false;
    return loadBalancerTracker.isBalancerOn();
  }

  /**
   * Queries the state of the {@link RegionNormalizerTracker}. If it's not initialized,
   * false is returned.
   */
  public boolean isNormalizerOn() {
    if (null == regionNormalizerTracker) {
      return false;
    }
    return regionNormalizerTracker.isNormalizerOn();
  }

  /**
   * Fetch the configured {@link LoadBalancer} class name. If none is set, a default
   * is returned.
   * @return The name of the {@link LoadBalancer} in use.
   */
  public String getLoadBalancerClassName() {
    return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, LoadBalancerFactory
        .getDefaultLoadBalancerClass().getName());
  }

  /**
   * @return the underlying {@link RegionNormalizerTracker}
   */
  public RegionNormalizerTracker getRegionNormalizerTracker() {
    return regionNormalizerTracker;
  }
}