/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK;
import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS;
import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK;
import static org.apache.hadoop.hbase.master.cleaner.HFileCleaner.CUSTOM_POOL_SIZE;
import static org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY;

import com.google.errorprone.annotations.RestrictedApi;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.StatusCode;
import io.opentelemetry.context.Scope;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import javax.servlet.http.HttpServlet;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HBaseServerBase;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.PleaseRestartMasterException;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ServerTask;
import org.apache.hadoop.hbase.ServerTaskBuilder;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.MasterStoppedException;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
import org.apache.hadoop.hbase.http.HttpServer;
import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
import org.apache.hadoop.hbase.master.balancer.BalancerChore;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerStateStore;
import org.apache.hadoop.hbase.master.balancer.MaintenanceLoadBalancer;
import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
import org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore;
import org.apache.hadoop.hbase.master.hbck.HbckChore;
import org.apache.hadoop.hbase.master.http.MasterDumpServlet;
import org.apache.hadoop.hbase.master.http.MasterRedirectServlet;
import org.apache.hadoop.hbase.master.http.MasterStatusServlet;
import org.apache.hadoop.hbase.master.http.api_v1.ResourceConfigFactory;
import org.apache.hadoop.hbase.master.http.hbck.HbckConfigFactory;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.migrate.RollingUpgradeChore;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerStateStore;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.FlushTableProcedure;
import org.apache.hadoop.hbase.master.procedure.InitMetaProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
import org.apache.hadoop.hbase.master.procedure.ReloadQuotasProcedure;
import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.master.procedure.TruncateRegionProcedure;
import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
import org.apache.hadoop.hbase.master.region.MasterRegion;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
import org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure;
import org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
import org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
import org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
import org.apache.hadoop.hbase.master.replication.MigrateReplicationQueueFromZkToTableProcedure;
import org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
import org.apache.hadoop.hbase.master.replication.ReplicationPeerModificationStateStore;
import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager;
import org.apache.hadoop.hbase.master.replication.TransitPeerSyncReplicationStateProcedure;
import org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
import org.apache.hadoop.hbase.master.slowlog.SlowLogMasterService;
import org.apache.hadoop.hbase.master.snapshot.SnapshotCleanupStateStore;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator;
import org.apache.hadoop.hbase.master.zksyncer.MasterAddressSyncer;
import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer;
import org.apache.hadoop.hbase.mob.MobFileCleanerChore;
import org.apache.hadoop.hbase.mob.MobFileCompactionChore;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskGroup;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
import org.apache.hadoop.hbase.procedure2.LockedResource;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
import org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.quotas.MasterQuotasObserver;
import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
import org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
import org.apache.hadoop.hbase.regionserver.storefiletracker.ModifyColumnFamilyStoreFileTrackerProcedure;
import org.apache.hadoop.hbase.regionserver.storefiletracker.ModifyTableStoreFileTrackerProcedure;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.replication.ReplicationUtils;
import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.replication.ZKReplicationQueueStorageForMigration;
import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
import org.apache.hadoop.hbase.replication.master.ReplicationLogCleanerBarrier;
import org.apache.hadoop.hbase.replication.master.ReplicationSinkTrackerTableCreator;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp.ReplicationSyncUpToolInfo;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint;
import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.SecurityConstants;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.CoprocessorConfigurationUtil;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FutureUtils;
import org.apache.hadoop.hbase.util.HBaseFsck;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.IdLock;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JsonMapper;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;
import org.apache.hadoop.hbase.util.TableDescriptorChecker;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams;
import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
import org.apache.hbase.thirdparty.com.google.gson.JsonParseException;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors;
import org.apache.hbase.thirdparty.com.google.protobuf.Service;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector;
import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder;
import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext;
import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig;
import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer;

import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
/**
 * HMaster is the "master server" for HBase. An HBase cluster has one active master. If many
 * masters are started, all compete. Whichever wins goes on to run the cluster. All others park
 * themselves in their constructor until master or cluster shutdown or until the active master
 * loses its lease in zookeeper. Thereafter, all running masters jostle to take over the master
 * role.
 * <p/>
 * The Master can be asked to shut down the cluster. See {@link #shutdown()}. In this case it will
 * tell all regionservers to go down and then wait on them all reporting in that they are down.
 * This master will then shut itself down.
 * <p/>
 * You can also shut down just this master. Call {@link #stopMaster()}.
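 * <p/>
 * A hedged client-side sketch of both shutdown paths (not part of this class; assumes a reachable
 * cluster and the standard {@code Admin} API):
 *
 * <pre>
 * try (Connection conn = ConnectionFactory.createConnection(conf);
 *   Admin admin = conn.getAdmin()) {
 *   admin.stopMaster(); // stop only the active master; a backup master can take over
 *   // admin.shutdown(); // or ask the master to shut down the whole cluster
 * }
 * </pre>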
 * @see org.apache.zookeeper.Watcher
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class HMaster extends HBaseServerBase<MasterRpcServices> implements MasterServices {

  private static final Logger LOG = LoggerFactory.getLogger(HMaster.class);

  // MASTER is the name of the webapp and the attribute name used to stuff this
  // instance into a web context !! AND OTHER PLACES !!
  public static final String MASTER = "master";

  // Manager and zk listener for master election
  private final ActiveMasterManager activeMasterManager;
  // Region server tracker
  private final RegionServerTracker regionServerTracker;
  // Draining region server tracker
  private DrainingServerTracker drainingServerTracker;
  // Tracker for load balancer state
  LoadBalancerStateStore loadBalancerStateStore;
  // Tracker for meta location, if any client ZK quorum specified
  private MetaLocationSyncer metaLocationSyncer;
  // Tracker for active master location, if any client ZK quorum specified
  @InterfaceAudience.Private
  MasterAddressSyncer masterAddressSyncer;
  // Tracker for auto snapshot cleanup state
  SnapshotCleanupStateStore snapshotCleanupStateStore;

  // Tracker for split and merge state
  private SplitOrMergeStateStore splitOrMergeStateStore;

  private ClusterSchemaService clusterSchemaService;

  public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS =
    "hbase.master.wait.on.service.seconds";
  public static final int DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS = 5 * 60;

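  /**
   * How often the master's cleaner chores (the log and, presumably, HFile cleaners) wake up, in
   * milliseconds; the default below is ten minutes. A hedged override example for hbase-site.xml:
   *
   * <pre>
   * &lt;property&gt;
   *   &lt;name&gt;hbase.master.cleaner.interval&lt;/name&gt;
   *   &lt;value&gt;300000&lt;/value&gt;
   * &lt;/property&gt;
   * </pre>
   */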
  public static final String HBASE_MASTER_CLEANER_INTERVAL = "hbase.master.cleaner.interval";

  public static final int DEFAULT_HBASE_MASTER_CLEANER_INTERVAL = 600 * 1000;

  private String clusterId;

  // Metrics for the HMaster
  final MetricsMaster metricsMaster;
  // file system manager for the master FS operations
  private MasterFileSystem fileSystemManager;
  private MasterWalManager walManager;

  // Manager for procedure-based WAL splitting; null while WAL splitting is still zk-coordinated.
  // SplitWALManager will replace SplitLogManager and MasterWalManager, which means the zk-based
  // WAL splitting code will be useless once we switch to the procedure-based one; our eventual
  // goal is to remove all the zk-based WAL splitting code.
  private SplitWALManager splitWALManager;

  // server manager to deal with region server info
  private volatile ServerManager serverManager;

  // manager of assignment nodes in zookeeper
  private AssignmentManager assignmentManager;

  private RSGroupInfoManager rsGroupInfoManager;

  private final ReplicationLogCleanerBarrier replicationLogCleanerBarrier =
    new ReplicationLogCleanerBarrier();

  // Only allow adding one sync replication peer concurrently
  private final Semaphore syncReplicationPeerLock = new Semaphore(1);

  // manager of replication
  private ReplicationPeerManager replicationPeerManager;

  private SyncReplicationReplayWALManager syncReplicationReplayWALManager;

  // buffer for "fatal error" notices from region servers
  // in the cluster. This is only used for assisting
  // operations/debugging.
  MemoryBoundedLogMessageBuffer rsFatals;

  // flag set after we become the active master (used for testing)
  private volatile boolean activeMaster = false;

  // flag set after we complete initialization once active
  private final ProcedureEvent<?> initialized = new ProcedureEvent<>("master initialized");

  // flag set after master services are started,
  // initialization may not have completed yet.
  volatile boolean serviceStarted = false;

  // Maximum time we should run the balancer for
  private final int maxBalancingTime;
  // Maximum percent of regions in transition when balancing
  private final double maxRitPercent;

  private final LockManager lockManager = new LockManager(this);

  private RSGroupBasedLoadBalancer balancer;
  private BalancerChore balancerChore;
  private static boolean disableBalancerChoreForTest = false;
  private RegionNormalizerManager regionNormalizerManager;
  private ClusterStatusChore clusterStatusChore;
  private ClusterStatusPublisher clusterStatusPublisherChore = null;
  private SnapshotCleanerChore snapshotCleanerChore = null;

  private HbckChore hbckChore;
  CatalogJanitor catalogJanitorChore;
  // Threadpool for scanning the Old logs directory, used by the LogCleaner
  private DirScanPool logCleanerPool;
  private LogCleaner logCleaner;
  // HFile cleaners for the custom hfile archive paths and the default archive path
  // The archive path cleaner is the first element
  private List<HFileCleaner> hfileCleaners = new ArrayList<>();
  // The hfile cleaner paths, including custom paths and the default archive path
  private List<Path> hfileCleanerPaths = new ArrayList<>();
  // The shared hfile cleaner pool for the custom archive paths
  private DirScanPool sharedHFileCleanerPool;
  // The exclusive hfile cleaner pool for scanning the archive directory
  private DirScanPool exclusiveHFileCleanerPool;
  private ReplicationBarrierCleaner replicationBarrierCleaner;
  private MobFileCleanerChore mobFileCleanerChore;
  private MobFileCompactionChore mobFileCompactionChore;
  private RollingUpgradeChore rollingUpgradeChore;
  // used to synchronize the mobCompactionStates
  private final IdLock mobCompactionLock = new IdLock();
  // save the information of mob compactions in tables.
  // the key is the table name, the value is the number of compactions in that table.
  private Map<TableName, AtomicInteger> mobCompactionStates = Maps.newConcurrentMap();

  volatile MasterCoprocessorHost cpHost;

  private final boolean preLoadTableDescriptors;

  // Time stamp for when this master became the active one
  private long masterActiveTime;

  // Time stamp for when HMaster finishes becoming the active master
  private long masterFinishedInitializationTime;

  Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();

  // monitor for snapshots of hbase tables
  SnapshotManager snapshotManager;
  // monitor for distributed procedures
  private MasterProcedureManagerHost mpmHost;

  private RegionsRecoveryChore regionsRecoveryChore = null;

  private RegionsRecoveryConfigManager regionsRecoveryConfigManager = null;
  // it is assigned after the 'initialized' guard is set to true, so should be volatile
  private volatile MasterQuotaManager quotaManager;
  private SpaceQuotaSnapshotNotifier spaceQuotaSnapshotNotifier;
  private QuotaObserverChore quotaObserverChore;
  private SnapshotQuotaObserverChore snapshotQuotaChore;
  private OldWALsDirSizeChore oldWALsDirSizeChore;

  private ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
  private ProcedureStore procedureStore;

  // the master local storage to store procedure data, meta region locations, etc.
  private MasterRegion masterRegion;

  private RegionServerList rsListStorage;

  // handle table states
  private TableStateManager tableStateManager;

  /** jetty server for master to redirect requests to regionserver infoServer */
  private Server masterJettyServer;

  // Determine if we should do normal startup or minimal "single-user" mode with no region
  // servers and no user tables. Useful for repair and recovery of hbase:meta
  private final boolean maintenanceMode;
  static final String MAINTENANCE_MODE = "hbase.master.maintenance_mode";
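  // A hedged note on enabling maintenance mode: per the constructor below, it is honored either
  // as a configuration property or as a JVM system property, e.g.
  //   hbase-site.xml: <property><name>hbase.master.maintenance_mode</name><value>true</value></property>
  //   JVM flag:       -Dhbase.master.maintenance_mode=true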
  // the in-process region server for carrying system regions in maintenanceMode
  private JVMClusterUtil.RegionServerThread maintenanceRegionServer;

  // Cached clusterId on standby masters to serve clusterID requests from clients.
  private final CachedClusterId cachedClusterId;

  public static final String WARMUP_BEFORE_MOVE = "hbase.master.warmup.before.move";
  private static final boolean DEFAULT_WARMUP_BEFORE_MOVE = true;

  /**
   * Use an RSProcedureDispatcher instance to initiate master -> rs remote procedure execution.
   * Use this config to extend RSProcedureDispatcher (mainly for testing purposes).
   */
  public static final String HBASE_MASTER_RSPROC_DISPATCHER_CLASS =
    "hbase.master.rsproc.dispatcher.class";
  private static final String DEFAULT_HBASE_MASTER_RSPROC_DISPATCHER_CLASS =
    RSProcedureDispatcher.class.getName();
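  // A minimal illustrative subclass (hypothetical, for tests) that could be wired in via the
  // config key above; it is assumed to be instantiated reflectively with a single
  // MasterServices-argument constructor:
  //
  //   public class LoggingRSProcDispatcher extends RSProcedureDispatcher {
  //     public LoggingRSProcDispatcher(MasterServices master) {
  //       super(master);
  //     }
  //   }
  //   conf.set(HBASE_MASTER_RSPROC_DISPATCHER_CLASS, LoggingRSProcDispatcher.class.getName());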
  private TaskGroup startupTaskGroup;

  /**
   * Store whether we allow replication peer modification operations.
   */
  private ReplicationPeerModificationStateStore replicationPeerModificationStateStore;

  /**
   * Initializes the HMaster. The steps are as follows:
   * <p>
   * <ol>
   * <li>Initialize the local HRegionServer
   * <li>Start the ActiveMasterManager.
   * </ol>
   * <p>
   * Remaining steps of initialization occur in {@link #finishActiveMasterInitialization()} after
   * the master becomes the active one.
   */
  public HMaster(final Configuration conf) throws IOException {
    super(conf, "Master");
    final Span span = TraceUtil.createSpan("HMaster.cxtor");
    try (Scope ignored = span.makeCurrent()) {
      if (conf.getBoolean(MAINTENANCE_MODE, false)) {
        LOG.info("Detected {}=true via configuration.", MAINTENANCE_MODE);
        maintenanceMode = true;
      } else if (Boolean.getBoolean(MAINTENANCE_MODE)) {
        LOG.info("Detected {}=true via system properties.", MAINTENANCE_MODE);
        maintenanceMode = true;
      } else {
        maintenanceMode = false;
      }
      this.rsFatals = new MemoryBoundedLogMessageBuffer(
        conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));
      LOG.info("hbase.rootdir={}, hbase.cluster.distributed={}",
        CommonFSUtils.getRootDir(this.conf),
        this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));

      // Disable usage of meta replicas in the master
      this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);

      decorateMasterConfiguration(this.conf);

      // Hack! Maps DFSClient => Master for logs. HDFS made this
      // config param for task trackers, but we can piggyback off of it.
      if (this.conf.get("mapreduce.task.attempt.id") == null) {
        this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
      }

      this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));

      // preload table descriptors at startup
      this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);

      this.maxBalancingTime = getMaxBalancingTime();
      this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT,
        HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT);

      // Do we publish the status?
      boolean shouldPublish =
        conf.getBoolean(HConstants.STATUS_PUBLISHED, HConstants.STATUS_PUBLISHED_DEFAULT);
      Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
        conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.Publisher.class);

      if (shouldPublish) {
        if (publisherClass == null) {
          LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but "
            + ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS
            + " is not set - not publishing status");
        } else {
          clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
          LOG.debug("Created {}", this.clusterStatusPublisherChore);
          getChoreService().scheduleChore(clusterStatusPublisherChore);
        }
      }
      this.activeMasterManager = createActiveMasterManager(zooKeeper, serverName, this);
      cachedClusterId = new CachedClusterId(this, conf);
      this.regionServerTracker = new RegionServerTracker(zooKeeper, this);
      this.rpcServices.start(zooKeeper);
      span.setStatus(StatusCode.OK);
    } catch (Throwable t) {
      // Make sure we log the exception. HMaster is often started via reflection and the
      // cause of failed startup is lost.
      TraceUtil.setError(span, t);
      LOG.error("Failed construction of Master", t);
      throw t;
    } finally {
      span.end();
    }
  }

  /**
   * Protected so that custom implementations in tests can override the default
   * ActiveMasterManager implementation.
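   * <p>
   * An illustrative (hypothetical) test override:
   *
   * <pre>
   * &#64;Override
   * protected ActiveMasterManager createActiveMasterManager(ZKWatcher zk, ServerName sn,
   *   org.apache.hadoop.hbase.Server server) throws InterruptedIOException {
   *   return new StubActiveMasterManager(zk, sn, server); // StubActiveMasterManager is made up
   * }
   * </pre>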
   */
  protected ActiveMasterManager createActiveMasterManager(ZKWatcher zk, ServerName sn,
    org.apache.hadoop.hbase.Server server) throws InterruptedIOException {
    return new ActiveMasterManager(zk, sn, server);
  }

  @Override
  protected String getUseThisHostnameInstead(Configuration conf) {
    return conf.get(MASTER_HOSTNAME_KEY);
  }

  private void registerConfigurationObservers() {
    configurationManager.registerObserver(this.rpcServices);
    configurationManager.registerObserver(this);
  }

  // Main run loop. Calls through to the regionserver run loop AFTER becoming the active Master;
  // will block in here until then.
  @Override
  public void run() {
    try {
      installShutdownHook();
      registerConfigurationObservers();
      Threads.setDaemonThreadRunning(new Thread(TraceUtil.tracedRunnable(() -> {
        try {
          int infoPort = putUpJettyServer();
          startActiveMasterManager(infoPort);
        } catch (Throwable t) {
          // Make sure we log the exception.
          String error = "Failed to become Active Master";
          LOG.error(error, t);
          // Abort should have been called already.
          if (!isAborted()) {
            abort(error, t);
          }
        }
      }, "HMaster.becomeActiveMaster")), getName() + ":becomeActiveMaster");
      while (!isStopped() && !isAborted()) {
        sleeper.sleep();
      }
      final Span span = TraceUtil.createSpan("HMaster exiting main loop");
      try (Scope ignored = span.makeCurrent()) {
        stopInfoServer();
        closeClusterConnection();
        stopServiceThreads();
        if (this.rpcServices != null) {
          this.rpcServices.stop();
        }
        closeZooKeeper();
        closeTableDescriptors();
        span.setStatus(StatusCode.OK);
      } finally {
        span.end();
      }
    } finally {
      if (this.clusterSchemaService != null) {
        // If on the way out, then we are no longer the active master.
        this.clusterSchemaService.stopAsync();
        try {
          this.clusterSchemaService
            .awaitTerminated(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,
              DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);
        } catch (TimeoutException te) {
          LOG.warn("Failed shutdown of clusterSchemaService", te);
        }
      }
      this.activeMaster = false;
    }
  }

  // return the actual infoPort, -1 means disable the info server.
  private int putUpJettyServer() throws IOException {
    if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
      return -1;
    }
    final int infoPort =
      conf.getInt("hbase.master.info.port.orig", HConstants.DEFAULT_MASTER_INFOPORT);
    // -1 is for disabling the info server, so no redirecting
    if (infoPort < 0 || infoServer == null) {
      return -1;
    }
    if (infoPort == infoServer.getPort()) {
      // server is already running
      return infoPort;
    }
    final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
      String msg = "Failed to start redirecting jetty server. Address " + addr
        + " does not belong to this host. Correct configuration parameter: "
        + "hbase.master.info.bindAddress";
      LOG.error(msg);
      throw new IOException(msg);
    }

    // TODO I'm pretty sure we could just add another binding to the InfoServer run by
    // the RegionServer and have it run the RedirectServlet instead of standing up
    // a second entire stack here.
    masterJettyServer = new Server();
    final ServerConnector connector = new ServerConnector(masterJettyServer);
    connector.setHost(addr);
    connector.setPort(infoPort);
    masterJettyServer.addConnector(connector);
    masterJettyServer.setStopAtShutdown(true);
    masterJettyServer.setHandler(HttpServer.buildGzipHandler(masterJettyServer.getHandler()));

    final String redirectHostname =
      StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead;

    final MasterRedirectServlet redirect = new MasterRedirectServlet(infoServer, redirectHostname);
    final WebAppContext context =
      new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);
    context.addServlet(new ServletHolder(redirect), "/*");
    context.setServer(masterJettyServer);

    try {
      masterJettyServer.start();
    } catch (Exception e) {
      throw new IOException("Failed to start redirecting jetty server", e);
    }
    return connector.getLocalPort();
  }

  /**
   * For compatibility: if login with the regionserver credentials fails, fall back to the master
   * ones.
   */
  @Override
  protected void login(UserProvider user, String host) throws IOException {
    try {
      user.login(SecurityConstants.REGIONSERVER_KRB_KEYTAB_FILE,
        SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, host);
    } catch (IOException ie) {
      user.login(SecurityConstants.MASTER_KRB_KEYTAB_FILE, SecurityConstants.MASTER_KRB_PRINCIPAL,
        host);
    }
  }

  public MasterRpcServices getMasterRpcServices() {
    return rpcServices;
  }

  @Override
  protected MasterCoprocessorHost getCoprocessorHost() {
    return getMasterCoprocessorHost();
  }

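  /**
   * Turn the region balancer on or off, asynchronously (the switch returns without waiting for
   * the new state to take effect everywhere). Presumably the server-side counterpart of the
   * client-side {@code Admin} balancer-switch call; returns the previous balancer state.
   */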
  public boolean balanceSwitch(final boolean b) throws IOException {
    return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
  }

  @Override
  protected String getProcessName() {
    return MASTER;
  }

  @Override
  protected boolean canCreateBaseZNode() {
    return true;
  }

  @Override
  protected boolean canUpdateTableDescriptor() {
    return true;
  }

  @Override
  protected boolean cacheTableDescriptor() {
    return true;
  }

  protected MasterRpcServices createRpcServices() throws IOException {
    return new MasterRpcServices(this);
  }

  @Override
  protected void configureInfoServer(InfoServer infoServer) {
    infoServer.addUnprivilegedServlet("master-status", "/master-status", MasterStatusServlet.class);
    infoServer.addUnprivilegedServlet("api_v1", "/api/v1/*", buildApiV1Servlet());
    infoServer.addUnprivilegedServlet("hbck", "/hbck/*", buildHbckServlet());

    infoServer.setAttribute(MASTER, this);
  }

  private ServletHolder buildApiV1Servlet() {
    final ResourceConfig config = ResourceConfigFactory.createResourceConfig(conf, this);
    return new ServletHolder(new ServletContainer(config));
  }

  private ServletHolder buildHbckServlet() {
    final ResourceConfig config = HbckConfigFactory.createResourceConfig(conf, this);
    return new ServletHolder(new ServletContainer(config));
  }

  @Override
  protected Class<? extends HttpServlet> getDumpServlet() {
    return MasterDumpServlet.class;
  }

  @Override
  public MetricsMaster getMasterMetrics() {
    return metricsMaster;
  }

  /**
   * Initialize all ZK based system trackers. But do not include {@link RegionServerTracker}; it
   * should have already been initialized along with {@link ServerManager}.
   */
  private void initializeZKBasedSystemTrackers()
    throws IOException, KeeperException, ReplicationException, DeserializationException {
    if (maintenanceMode) {
      // in maintenance mode, always use MaintenanceLoadBalancer.
      conf.unset(LoadBalancer.HBASE_RSGROUP_LOADBALANCER_CLASS);
      conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, MaintenanceLoadBalancer.class,
        LoadBalancer.class);
    }
    this.balancer = new RSGroupBasedLoadBalancer();
    this.loadBalancerStateStore = new LoadBalancerStateStore(masterRegion, zooKeeper);

    this.regionNormalizerManager =
      RegionNormalizerFactory.createNormalizerManager(conf, masterRegion, zooKeeper, this);
    this.configurationManager.registerObserver(regionNormalizerManager);
    this.regionNormalizerManager.start();

    this.splitOrMergeStateStore = new SplitOrMergeStateStore(masterRegion, zooKeeper, conf);

    // This is for backwards compatibility. We no longer need the CP for rs group, but if users
    // want to load it, we need to enable rs group.
    String[] cpClasses = conf.getStrings(MasterCoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
    if (cpClasses != null) {
      for (String cpClass : cpClasses) {
        if (RSGroupAdminEndpoint.class.getName().equals(cpClass)) {
          RSGroupUtil.enableRSGroup(conf);
          break;
        }
      }
    }
    this.rsGroupInfoManager = RSGroupInfoManager.create(this);

    this.replicationPeerManager = ReplicationPeerManager.create(this, clusterId);
    this.configurationManager.registerObserver(replicationPeerManager);
    this.replicationPeerModificationStateStore =
      new ReplicationPeerModificationStateStore(masterRegion);

    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);
    this.drainingServerTracker.start();

    this.snapshotCleanupStateStore = new SnapshotCleanupStateStore(masterRegion, zooKeeper);

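    // A separate client-facing ZK quorum may be configured so that client traffic stays off the
    // server ensemble, e.g. (illustrative values):
    //   hbase.client.zookeeper.quorum = clientzk1,clientzk2,clientzk3
    //   hbase.client.zookeeper.observer.mode = false
    // When those nodes are not observers of the server quorum, the master has to push the meta
    // location and its own address over to them, which is what the block below sets up.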
    String clientQuorumServers = conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM);
    boolean clientZkObserverMode = conf.getBoolean(HConstants.CLIENT_ZOOKEEPER_OBSERVER_MODE,
      HConstants.DEFAULT_CLIENT_ZOOKEEPER_OBSERVER_MODE);
    if (clientQuorumServers != null && !clientZkObserverMode) {
      // we need to take care of the ZK information synchronization
      // if the given client ZK nodes are not observer nodes
      ZKWatcher clientZkWatcher = new ZKWatcher(conf,
        getProcessName() + ":" + rpcServices.getSocketAddress().getPort() + "-clientZK", this,
        false, true);
      this.metaLocationSyncer = new MetaLocationSyncer(zooKeeper, clientZkWatcher, this);
      this.metaLocationSyncer.start();
      this.masterAddressSyncer = new MasterAddressSyncer(zooKeeper, clientZkWatcher, this);
      this.masterAddressSyncer.start();
      // setting the cluster id is a one-time effort
      ZKClusterId.setClusterId(clientZkWatcher, fileSystemManager.getClusterId());
    }

    // Set the cluster as up. If new RSs, they'll be waiting on this before
    // going ahead with their startup.
    boolean wasUp = this.clusterStatusTracker.isClusterUp();
    if (!wasUp) this.clusterStatusTracker.setClusterUp();

    LOG.info("Active/primary master=" + this.serverName + ", sessionid=0x"
      + Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId())
      + ", setting cluster-up flag (Was=" + wasUp + ")");

    // create/initialize the snapshot manager and other procedure managers
    this.snapshotManager = new SnapshotManager();
    this.mpmHost = new MasterProcedureManagerHost();
    this.mpmHost.register(this.snapshotManager);
    this.mpmHost.register(new MasterFlushTableProcedureManager());
    this.mpmHost.loadProcedures(conf);
    this.mpmHost.initialize(this, this.metricsMaster);
  }

  // Will be overridden in tests to inject a customized AssignmentManager
  @InterfaceAudience.Private
  protected AssignmentManager createAssignmentManager(MasterServices master,
    MasterRegion masterRegion) {
    return new AssignmentManager(master, masterRegion);
  }

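  /**
   * One-shot migration of meta region locations from ZooKeeper into the master local region.
   * Idempotent: if the catalog family of the master local region already holds data, the
   * migration is skipped.
   */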
  private void tryMigrateMetaLocationsFromZooKeeper() throws IOException, KeeperException {
    // try to migrate the data from zookeeper
    try (ResultScanner scanner =
      masterRegion.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) {
      if (scanner.next() != null) {
        // notice that all replicas for a region are in the same row, so the migration can be
        // done within a single-row put, which means if we have data in the catalog family then
        // we can be sure that the migration is done.
        LOG.info("The {} family in master local region already has data in it, skip migrating...",
          HConstants.CATALOG_FAMILY_STR);
        return;
      }
    }
    // start migrating
    byte[] row = CatalogFamilyFormat.getMetaKeyForRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
    Put put = new Put(row);
    List<String> metaReplicaNodes = zooKeeper.getMetaReplicaNodes();
    StringBuilder info = new StringBuilder("Migrating meta locations:");
    for (String metaReplicaNode : metaReplicaNodes) {
      int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZNode(metaReplicaNode);
      RegionState state = MetaTableLocator.getMetaRegionState(zooKeeper, replicaId);
      info.append(" ").append(state);
      put.setTimestamp(state.getStamp());
      MetaTableAccessor.addRegionInfo(put, state.getRegion());
      if (state.getServerName() != null) {
        MetaTableAccessor.addLocation(put, state.getServerName(), HConstants.NO_SEQNUM, replicaId);
      }
      put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
        .setFamily(HConstants.CATALOG_FAMILY)
        .setQualifier(RegionStateStore.getStateColumn(replicaId)).setTimestamp(put.getTimestamp())
        .setType(Cell.Type.Put).setValue(Bytes.toBytes(state.getState().name())).build());
    }
    if (!put.isEmpty()) {
      LOG.info(info.toString());
      masterRegion.update(r -> r.put(put));
    } else {
      LOG.info("No meta location available on zookeeper, skip migrating...");
    }
  }

  /**
   * Finish initialization of HMaster after becoming the primary master.
   * <p/>
   * The startup order is a bit complicated but very important, do not change it unless you know
   * what you are doing.
   * <ol>
   * <li>Initialize file system based components - file system manager, wal manager, table
   * descriptors, etc</li>
   * <li>Publish cluster id</li>
   * <li>Here comes the most complicated part - initialize server manager, assignment manager and
   * region server tracker
   * <ol type='i'>
   * <li>Create server manager</li>
   * <li>Create master local region</li>
   * <li>Create procedure executor, load the procedures, but do not start workers. We will start
   * them later, after we finish scheduling SCPs, to avoid scheduling duplicated SCPs for the same
   * server</li>
   * <li>Create assignment manager and start it, load the meta region state, but do not load data
   * from the meta region</li>
   * <li>Start region server tracker, construct the online servers set and find out dead servers
   * and schedule SCPs for them. The online servers will be constructed by scanning zk, and we
   * will also scan the wal directory and load from the master local region to find out possible
   * live region servers; the differences between these two sets are the dead servers</li>
   * </ol>
   * </li>
   * <li>If this is a new deploy, schedule an InitMetaProcedure to initialize meta</li>
   * <li>Start necessary service threads - balancer, catalog janitor, executor services, and also
   * the procedure executor, etc. Notice that the balancer must be created first as the assignment
   * manager may use it when assigning regions.</li>
   * <li>Wait for meta to be initialized if necessary, start the table state manager.</li>
   * <li>Wait for enough region servers to check in</li>
   * <li>Let the assignment manager load data from meta and construct region states</li>
   * <li>Start all other things such as chore services, etc</li>
   * </ol>
   * <p/>
   * Notice that now we will not schedule a special procedure to make meta online (unless it is
   * the first time, where meta has not been created yet); we rely on SCP to bring meta online.
   */
  private void finishActiveMasterInitialization() throws IOException, InterruptedException,
    KeeperException, ReplicationException, DeserializationException {
    /*
     * We are the active master now... go initialize components we need to run.
     */
    startupTaskGroup.addTask("Initializing Master file system");

    this.masterActiveTime = EnvironmentEdgeManager.currentTime();
    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.

    // always initialize the MemStoreLAB as we use a region to store data in master now, see
    // localStore.
    initializeMemStoreChunkCreator(null);
    this.fileSystemManager = new MasterFileSystem(conf);
    this.walManager = new MasterWalManager(this);

    // warm-up HTDs cache on master initialization
    if (preLoadTableDescriptors) {
      startupTaskGroup.addTask("Pre-loading table descriptors");
      this.tableDescriptors.getAll();
    }

    // Publish cluster ID; set it in Master too. The superclass RegionServer does this later but
    // only after it has checked in with the Master. At least a few tests ask Master for clusterId
    // before it has called its run method and before RegionServer has done the reportForDuty.
    ClusterId clusterId = fileSystemManager.getClusterId();
    startupTaskGroup.addTask("Publishing Cluster ID " + clusterId + " in ZooKeeper");
    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
    this.clusterId = clusterId.toString();
    // Precaution. Put in place the old hbck1 lock file to fence out old hbase1s running their
    // hbck1s against an hbase2 cluster; it could do damage. To skip this behavior, set
    // hbase.write.hbck1.lock.file to false.
    if (this.conf.getBoolean("hbase.write.hbck1.lock.file", true)) {
      Pair<Path, FSDataOutputStream> result = null;
      try {
        result = HBaseFsck.checkAndMarkRunningHbck(this.conf,
          HBaseFsck.createLockRetryCounterFactory(this.conf).create());
      } finally {
        if (result != null) {
          Closeables.close(result.getSecond(), true);
        }
      }
    }

    startupTaskGroup.addTask("Initialize ServerManager and schedule SCP for crashed servers");
    // The below two managers must be created before loading procedures, as they will be used
    // during loading.
    // initialize master local region
    masterRegion = MasterRegionFactory.create(this);
    rsListStorage = new MasterRegionServerList(masterRegion, this);

    // Initialize the ServerManager and register it as a configuration observer
    this.serverManager = createServerManager(this, rsListStorage);
    this.configurationManager.registerObserver(this.serverManager);

    this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);
    if (
      !conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)
    ) {
      this.splitWALManager = new SplitWALManager(this);
    }

    tryMigrateMetaLocationsFromZooKeeper();

    createProcedureExecutor();
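    // Group the procedures recovered from the procedure store by class, so the in-flight
    // region-transit procedures and the crashed-server procedures below can each be pulled out
    // with a simple lookup instead of re-scanning the store.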
    Map<Class<?>, List<Procedure<MasterProcedureEnv>>> procsByType = procedureExecutor
      .getActiveProceduresNoCopy().stream().collect(Collectors.groupingBy(p -> p.getClass()));

    // Create Assignment Manager
    this.assignmentManager = createAssignmentManager(this, masterRegion);
    this.assignmentManager.start();
    // TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as
    // completed, it could still be in the procedure list. This is a bit strange but is another
    // story, need to verify the implementation for ProcedureExecutor and ProcedureStore.
    List<TransitRegionStateProcedure> ritList =
      procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()).stream()
        .filter(p -> !p.isFinished()).map(p -> (TransitRegionStateProcedure) p)
        .collect(Collectors.toList());
    this.assignmentManager.setupRIT(ritList);

    // Start RegionServerTracker with a listing of servers found with existing SCPs -- these
    // should be registered in the deadServers set -- and the servernames loaded from the WAL
    // directory and master local region that COULD BE 'alive' (we'll schedule SCPs for each and
    // let SCP figure it out).
    // We also pass dirs that are already 'splitting'... so we can do some checks down in tracker.
    // TODO: Generate the splitting and live Set in one pass instead of two as we currently do.
    this.regionServerTracker.upgrade(
      procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()
        .map(p -> (ServerCrashProcedure) p).map(p -> p.getServerName()).collect(Collectors.toSet()),
      Sets.union(rsListStorage.getAll(), walManager.getLiveServersFromWALDir()),
      walManager.getSplittingServersFromWALDir());
    // This manager must be accessed AFTER hbase:meta is confirmed online.
    this.tableStateManager = new TableStateManager(this);

    startupTaskGroup.addTask("Initializing ZK system trackers");
    initializeZKBasedSystemTrackers();
    startupTaskGroup.addTask("Loading last flushed sequence id of regions");
    try {
      this.serverManager.loadLastFlushedSequenceIds();
    } catch (IOException e) {
      LOG.info("Failed to load last flushed sequence ids of regions from file system", e);
    }
    // Set ourselves as the active Master now that our claim has succeeded up in zk.
    this.activeMaster = true;

    // Start the Zombie master detector after setting master as active, see HBASE-21535
    Thread zombieDetector = new Thread(new MasterInitializationMonitor(this),
      "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime());
    zombieDetector.setDaemon(true);
    zombieDetector.start();

    if (!maintenanceMode) {
      startupTaskGroup.addTask("Initializing master coprocessors");
      setQuotasObserver(conf);
      initializeCoprocessorHost(conf);
    } else {
      // start an in-process region server for carrying system regions
      maintenanceRegionServer =
        JVMClusterUtil.createRegionServerThread(getConfiguration(), HRegionServer.class, 0);
      maintenanceRegionServer.start();
    }

    // Checking if meta needs initializing.
    startupTaskGroup.addTask("Initializing meta table if this is a new deploy");
    InitMetaProcedure initMetaProc = null;
    // Print out the state of hbase:meta on startup; helps debugging.
    if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) {
      Optional<InitMetaProcedure> optProc = procedureExecutor.getProcedures().stream()
        .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny();
      initMetaProc = optProc.orElseGet(() -> {
        // schedule an init meta procedure if meta has not been deployed yet
        InitMetaProcedure temp = new InitMetaProcedure();
        procedureExecutor.submitProcedure(temp);
        return temp;
      });
    }

    // initialize load balancer
    this.balancer.setMasterServices(this);
    this.balancer.initialize();
    this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());

    // try to migrate the replication data
    ZKReplicationQueueStorageForMigration oldReplicationQueueStorage =
      new ZKReplicationQueueStorageForMigration(zooKeeper, conf);
    // check whether there is something to migrate and we haven't scheduled a migration procedure
    // yet
    if (
      oldReplicationQueueStorage.hasData() && procedureExecutor.getProcedures().stream()
        .allMatch(p -> !(p instanceof MigrateReplicationQueueFromZkToTableProcedure))
    ) {
      procedureExecutor.submitProcedure(new MigrateReplicationQueueFromZkToTableProcedure());
    }
    // start up all service threads.
    startupTaskGroup.addTask("Initializing master service threads");
    startServiceThreads();
    // wait for meta to be initialized after we start the procedure executor
    if (initMetaProc != null) {
      initMetaProc.await();
      if (initMetaProc.isFailed() && initMetaProc.hasException()) {
        throw new IOException("Failed to initialize meta table", initMetaProc.getException());
      }
    }
    // Wake up this server to check in
    sleeper.skipSleepCycle();
    // Wait for region servers to report in.
    // With this as part of master initialization, it precludes our being able to start a single
    // server that is both Master and RegionServer. Needs more thought. TODO.
    String statusStr = "Wait for region servers to report in";
    MonitoredTask waitRegionServer = startupTaskGroup.addTask(statusStr);
    LOG.info(Objects.toString(waitRegionServer));
    waitForRegionServers(waitRegionServer);

    // Check if the master is shutting down because of issues initializing the regionservers or
    // the balancer.
    if (isStopped()) {
      return;
    }

    startupTaskGroup.addTask("Starting assignment manager");
    // FIRST HBASE:META READ!!!!
    // The below cannot make progress w/o hbase:meta being online.
    // This is the FIRST attempt at going to hbase:meta. Meta on-lining is going on in the
    // background as procedures run -- in particular SCPs for crashed servers... One should put up
    // hbase:meta if it is down. It may take a while to come online. So, wait here until meta is
    // for sure available. That's what waitForMetaOnline does.
    if (!waitForMetaOnline()) {
      return;
    }

    TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME);
    final ColumnFamilyDescriptor tableFamilyDesc =
      metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY);
    final ColumnFamilyDescriptor replBarrierFamilyDesc =
      metaDescriptor.getColumnFamily(HConstants.REPLICATION_BARRIER_FAMILY);

    this.assignmentManager.joinCluster();
    // The below depends on hbase:meta being online.
    this.assignmentManager.processOfflineRegions();
    // this must be called after the above processOfflineRegions to prevent race
    this.assignmentManager.wakeMetaLoadedEvent();

    // for migrating from a version without HBASE-25099, and also for honoring the configuration
    // first.
    if (conf.get(HConstants.META_REPLICAS_NUM) != null) {
      int replicasNumInConf =
        conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM);
      TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME);
      if (metaDesc.getRegionReplication() != replicasNumInConf) {
        // it is possible that we already have some replicas before upgrading, so we must set the
        // region replication number in the meta TableDescriptor directly first, without creating
        // a ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas.
    // for migrating from a version without HBASE-25099, and also for honoring the configuration
    // first.
    if (conf.get(HConstants.META_REPLICAS_NUM) != null) {
      int replicasNumInConf =
        conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM);
      TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME);
      if (metaDesc.getRegionReplication() != replicasNumInConf) {
        // it is possible that we already have some replicas before upgrading, so we must set the
        // region replication number in meta TableDescriptor directly first, without creating a
        // ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas.
        int existingReplicasCount =
          assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size();
        if (existingReplicasCount > metaDesc.getRegionReplication()) {
          LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)"
            + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount);
          metaDesc = TableDescriptorBuilder.newBuilder(metaDesc)
            .setRegionReplication(existingReplicasCount).build();
          tableDescriptors.update(metaDesc);
        }
        // check again, and issue a ModifyTableProcedure if needed
        if (metaDesc.getRegionReplication() != replicasNumInConf) {
          LOG.info(
            "The {} config is {} while the replica count in TableDescriptor is {}"
              + " for hbase:meta, altering...",
            HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication());
          procedureExecutor.submitProcedure(new ModifyTableProcedure(
            procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc)
              .setRegionReplication(replicasNumInConf).build(),
            null, metaDesc, false, true));
        }
      }
    }
    // Initialize after meta is up as below scans meta
    FavoredNodesManager fnm = getFavoredNodesManager();
    if (fnm != null) {
      fnm.initializeFromMeta();
    }

    // set cluster status again after user regions are assigned
    this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());

    // Start balancer and meta catalog janitor after meta and regions have been assigned.
    startupTaskGroup.addTask("Starting balancer and catalog janitor");
    this.clusterStatusChore = new ClusterStatusChore(this, balancer);
    getChoreService().scheduleChore(clusterStatusChore);
    this.balancerChore = new BalancerChore(this);
    if (!disableBalancerChoreForTest) {
      getChoreService().scheduleChore(balancerChore);
    }
    if (regionNormalizerManager != null) {
      getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore());
    }
    this.catalogJanitorChore = new CatalogJanitor(this);
    getChoreService().scheduleChore(catalogJanitorChore);
    this.hbckChore = new HbckChore(this);
    getChoreService().scheduleChore(hbckChore);
    this.serverManager.startChore();

    // Only for rolling upgrade, where we need to migrate the data in namespace table to meta table.
    if (!waitForNamespaceOnline()) {
      return;
    }
    startupTaskGroup.addTask("Starting cluster schema service");
    try {
      initClusterSchemaService();
    } catch (IllegalStateException e) {
      if (
        e.getCause() != null && e.getCause() instanceof NoSuchColumnFamilyException
          && tableFamilyDesc == null && replBarrierFamilyDesc == null
      ) {
        LOG.info("ClusterSchema service could not be initialized. This is "
          + "expected during HBase 1 to 2 upgrade", e);
      } else {
        throw e;
      }
    }

    if (this.cpHost != null) {
      try {
        this.cpHost.preMasterInitialization();
      } catch (IOException e) {
        LOG.error("Coprocessor preMasterInitialization() hook failed", e);
      }
    }

    LOG.info(String.format("Master has completed initialization %.3fsec",
      (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f));
    this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime();
    configurationManager.registerObserver(this.balancer);
    configurationManager.registerObserver(this.logCleanerPool);
    configurationManager.registerObserver(this.logCleaner);
    configurationManager.registerObserver(this.regionsRecoveryConfigManager);
    configurationManager.registerObserver(this.exclusiveHFileCleanerPool);
    if (this.sharedHFileCleanerPool != null) {
      configurationManager.registerObserver(this.sharedHFileCleanerPool);
    }
    if (this.hfileCleaners != null) {
      for (HFileCleaner cleaner : hfileCleaners) {
        configurationManager.registerObserver(cleaner);
      }
    }
    // Set master as 'initialized'.
    setInitialized(true);
    startupTaskGroup.markComplete("Initialization successful");
    MonitoredTask status =
      TaskMonitor.get().createStatus("Progress after master initialized", false, true);

    if (tableFamilyDesc == null && replBarrierFamilyDesc == null) {
      // create missing CFs in meta table after master is set to 'initialized'.
      createMissingCFsInMetaDuringUpgrade(metaDescriptor);

      // Throwing this Exception to abort active master is painful but this
      // seems the only way to add missing CFs in meta while upgrading from
      // HBase 1 to 2 (where HBase 2 has HBASE-23055 & HBASE-23782 checked-in).
      // So, why do we abort active master after adding missing CFs in meta?
      // When we reach here, we would have already bypassed NoSuchColumnFamilyException
      // in initClusterSchemaService(), meaning ClusterSchemaService is not
      // correctly initialized but we bypassed it. Similarly, we bypassed
      // tableStateManager.start() as well. Hence, we had better abort the
      // current active master because our main task - adding missing CFs
      // in meta table - is done (possible only after master state is set as
      // initialized) at the expense of bypassing a few important tasks as part
      // of the active master init routine. So now we abort the active master so that
      // the next active master init will not face any issues and all mandatory
      // services will be started during the master init phase.
      throw new PleaseRestartMasterException("Aborting active master after missing"
        + " CFs are successfully added in meta. Subsequent active master "
        + "initialization should be uninterrupted");
    }
    if (maintenanceMode) {
      LOG.info("Detected repair mode, skipping final initialization steps.");
      return;
    }

    assignmentManager.checkIfShouldMoveSystemRegionAsync();
    status.setStatus("Starting quota manager");
    initQuotaManager();
    if (QuotaUtil.isQuotaEnabled(conf)) {
      // Create the quota snapshot notifier
      spaceQuotaSnapshotNotifier = createQuotaSnapshotNotifier();
      spaceQuotaSnapshotNotifier.initialize(getConnection());
      this.quotaObserverChore = new QuotaObserverChore(this, getMasterMetrics());
      // Start the chore to read the region FS space reports and act on them
      getChoreService().scheduleChore(quotaObserverChore);

      this.snapshotQuotaChore = new SnapshotQuotaObserverChore(this, getMasterMetrics());
      // Start the chore to read snapshots and add their usage to table/NS quotas
      getChoreService().scheduleChore(snapshotQuotaChore);
    }
    final SlowLogMasterService slowLogMasterService = new SlowLogMasterService(conf, this);
    slowLogMasterService.init();

    WALEventTrackerTableCreator.createIfNeededAndNotExists(conf, this);
    // Create REPLICATION.SINK_TRACKER table if needed.
    ReplicationSinkTrackerTableCreator.createIfNeededAndNotExists(conf, this);

    // Clear dead servers that have the same host name and port as an online server, because we do
    // not remove a dead server matching an rs which is trying to check in before master
    // initialization. See HBASE-5916.
    this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();

    // Check and set the znode ACLs if needed in case we are overtaking a non-secure configuration
    status.setStatus("Checking ZNode ACLs");
    zooKeeper.checkAndSetZNodeAcls();

    status.setStatus("Initializing MOB Cleaner");
    initMobCleaner();

    // delete the stale data for replication sync up tool if necessary
    status.setStatus("Cleanup ReplicationSyncUp status if necessary");
    Path replicationSyncUpInfoFile =
      new Path(new Path(dataRootDir, ReplicationSyncUp.INFO_DIR), ReplicationSyncUp.INFO_FILE);
    if (dataFs.exists(replicationSyncUpInfoFile)) {
      // info file is available, load the timestamp and use it to clean up stale data in replication
      // queue storage.
      byte[] data;
      try (FSDataInputStream in = dataFs.open(replicationSyncUpInfoFile)) {
        data = ByteStreams.toByteArray(in);
      }
      ReplicationSyncUpToolInfo info = null;
      try {
        info = JsonMapper.fromJson(Bytes.toString(data), ReplicationSyncUpToolInfo.class);
      } catch (JsonParseException e) {
        // usually this should be a partial file, which means the ReplicationSyncUp tool did not
        // finish properly, so not a problem. Here we do not clean up the status as we do not know
        // the reason why the tool did not finish properly, so let users clean the status up
        // manually
        LOG.warn("failed to parse replication sync up info file, ignore and continue...", e);
      }
      if (info != null) {
        LOG.info("Remove last sequence ids and hfile references which are written before {}({})",
          info.getStartTimeMs(), DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.systemDefault())
            .format(Instant.ofEpochMilli(info.getStartTimeMs())));
        replicationPeerManager.getQueueStorage()
          .removeLastSequenceIdsAndHFileRefsBefore(info.getStartTimeMs());
        // delete the file after removing the stale data, so next time we do not need to do this
        // again.
        dataFs.delete(replicationSyncUpInfoFile, false);
      }
    }
    status.setStatus("Calling postStartMaster coprocessors");
    if (this.cpHost != null) {
      // don't let cp initialization errors kill the master
      try {
        this.cpHost.postStartMaster();
      } catch (IOException ioe) {
        LOG.error("Coprocessor postStartMaster() hook failed", ioe);
      }
    }

    zombieDetector.interrupt();

    /*
     * After master has started up, let's do balancer post startup initialization. Since this runs
     * in the activeMasterManager thread, it should be fine.
     */
    long start = EnvironmentEdgeManager.currentTime();
    this.balancer.postMasterStartupInitialize();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Balancer post startup initialization complete, took "
        + ((EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds");
    }

    this.rollingUpgradeChore = new RollingUpgradeChore(this);
    getChoreService().scheduleChore(rollingUpgradeChore);

    this.oldWALsDirSizeChore = new OldWALsDirSizeChore(this);
    getChoreService().scheduleChore(this.oldWALsDirSizeChore);

    status.markComplete("Progress after master initialized complete");
  }

  /**
   * Used for testing only to set Mock objects.
   * @param hbckChore hbckChore
   */
  public void setHbckChoreForTesting(HbckChore hbckChore) {
    this.hbckChore = hbckChore;
  }
  /**
   * Used for testing only to set Mock objects.
   * @param catalogJanitorChore catalogJanitorChore
   */
  public void setCatalogJanitorChoreForTesting(CatalogJanitor catalogJanitorChore) {
    this.catalogJanitorChore = catalogJanitorChore;
  }

  private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor)
    throws IOException {
    TableDescriptor newMetaDesc = TableDescriptorBuilder.newBuilder(metaDescriptor)
      .setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf))
      .setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()).build();
    long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, 0, 0, false);
    int tries = 30;
    while (
      !(getMasterProcedureExecutor().isFinished(pid)) && getMasterProcedureExecutor().isRunning()
        && tries > 0
    ) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        throw new IOException("Wait interrupted", e);
      }
      tries--;
    }
    if (tries <= 0) {
      throw new HBaseIOException(
        "Failed to add table and rep_barrier CFs to meta in a given time.");
    } else {
      Procedure<?> result = getMasterProcedureExecutor().getResult(pid);
      if (result != null && result.isFailed()) {
        throw new IOException("Failed to add table and rep_barrier CFs to meta. "
          + MasterProcedureUtil.unwrapRemoteIOException(result));
      }
    }
  }

  /**
   * Check hbase:meta is up and ready for reading. For use during Master startup only.
   * @return True if meta is UP and online and startup can progress. Otherwise, meta is not online
   *         and we will hold here until operator intervention.
   */
  @InterfaceAudience.Private
  public boolean waitForMetaOnline() {
    return isRegionOnline(RegionInfoBuilder.FIRST_META_REGIONINFO);
  }

  /**
   * @return True if region is online and scannable, else false if an error or shutdown (otherwise
   *         we just block in here holding up all forward-progress).
   */
  private boolean isRegionOnline(RegionInfo ri) {
    RetryCounter rc = null;
    while (!isStopped()) {
      RegionState rs = this.assignmentManager.getRegionStates().getRegionState(ri);
      if (rs != null && rs.isOpened()) {
        if (this.getServerManager().isServerOnline(rs.getServerName())) {
          return true;
        }
      }
      // Region is not OPEN.
      Optional<Procedure<MasterProcedureEnv>> optProc = this.procedureExecutor.getProcedures()
        .stream().filter(p -> p instanceof ServerCrashProcedure).findAny();
      // TODO: Add a page to refguide on how to do repair. Have this log message point to it.
      // Page will talk about loss of edits, how to schedule at least the meta WAL recovery, and
      // then how to assign including how to break region lock if one held.
      LOG.warn(
        "{} is NOT online; state={}; ServerCrashProcedures={}. Master startup cannot "
          + "progress, in holding-pattern until region onlined.",
        ri.getRegionNameAsString(), rs, optProc.isPresent());
      // Check once-a-minute.
      if (rc == null) {
        rc = new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60_000).create();
      }
      Threads.sleep(rc.getBackoffTimeAndIncrementAttempts());
    }
    return false;
  }
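  // A note on the retry pacing in isRegionOnline() above: RetryCounterFactory(Integer.MAX_VALUE,
  // 1000, 60_000) yields a backoff that starts around one second and grows toward the 60 second
  // ceiling, which is where the "check once-a-minute" behavior in the loop comes from.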
  /**
   * Check hbase:namespace table is assigned. If not, startup will hang looking for the ns table.
   * <p/>
   * This is for rolling upgrades; later we will migrate the data in the ns table to the ns family
   * of the meta table. And if this is a new cluster, this method will return immediately as there
   * will be no namespace table/region.
   * @return True if namespace table is up/online.
   */
  private boolean waitForNamespaceOnline() throws IOException {
    TableState nsTableState =
      MetaTableAccessor.getTableState(getConnection(), TableName.NAMESPACE_TABLE_NAME);
    if (nsTableState == null || nsTableState.isDisabled()) {
      // this means we have already migrated the data and disabled or deleted the namespace table,
      // or this is a new deploy which does not have a namespace table from the beginning.
      return true;
    }
    List<RegionInfo> ris =
      this.assignmentManager.getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME);
    if (ris.isEmpty()) {
      // maybe this will not happen any more, but anyway, no harm to add a check here...
      return true;
    }
    // Else there are namespace regions up in meta. Ensure they are assigned before we go on.
    for (RegionInfo ri : ris) {
      if (!isRegionOnline(ri)) {
        return false;
      }
    }
    return true;
  }

  /**
   * Adds the {@code MasterQuotasObserver} to the list of configured Master observers to
   * automatically remove quotas for a table when that table is deleted.
   */
  @InterfaceAudience.Private
  public void updateConfigurationForQuotasObserver(Configuration conf) {
    // We're configured to not delete quotas on table deletion, so we don't need to add the obs.
    if (
      !conf.getBoolean(MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE,
        MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT)
    ) {
      return;
    }
    String[] masterCoprocs = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
    final int length = null == masterCoprocs ? 0 : masterCoprocs.length;
    String[] updatedCoprocs = new String[length + 1];
    if (length > 0) {
      System.arraycopy(masterCoprocs, 0, updatedCoprocs, 0, masterCoprocs.length);
    }
    updatedCoprocs[length] = MasterQuotasObserver.class.getName();
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, updatedCoprocs);
  }
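  // As an illustration: if the property named by CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY
  // currently holds a single AccessController entry, the method above appends
  // MasterQuotasObserver so the final value is a two element, comma separated list.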
  private void initMobCleaner() {
    this.mobFileCleanerChore = new MobFileCleanerChore(this);
    getChoreService().scheduleChore(mobFileCleanerChore);
    this.mobFileCompactionChore = new MobFileCompactionChore(this);
    getChoreService().scheduleChore(mobFileCompactionChore);
  }

  /**
   * <p>
   * Create a {@link ServerManager} instance.
   * </p>
   * <p>
   * Will be overridden in tests.
   * </p>
   */
  @InterfaceAudience.Private
  protected ServerManager createServerManager(MasterServices master, RegionServerList storage)
    throws IOException {
    // We put this out here in a method so can do a Mockito.spy and stub it out
    // w/ a mocked up ServerManager.
    setupClusterConnection();
    return new ServerManager(master, storage);
  }

  private void waitForRegionServers(final MonitoredTask status)
    throws IOException, InterruptedException {
    this.serverManager.waitForRegionServers(status);
  }

  // Will be overridden in tests
  @InterfaceAudience.Private
  protected void initClusterSchemaService() throws IOException, InterruptedException {
    this.clusterSchemaService = new ClusterSchemaServiceImpl(this);
    this.clusterSchemaService.startAsync();
    try {
      this.clusterSchemaService
        .awaitRunning(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,
          DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);
    } catch (TimeoutException toe) {
      throw new IOException("Timed out starting ClusterSchemaService", toe);
    }
  }

  private void initQuotaManager() throws IOException {
    MasterQuotaManager quotaManager = new MasterQuotaManager(this);
    quotaManager.start();
    this.quotaManager = quotaManager;
  }

  private SpaceQuotaSnapshotNotifier createQuotaSnapshotNotifier() {
    SpaceQuotaSnapshotNotifier notifier =
      SpaceQuotaSnapshotNotifierFactory.getInstance().create(getConfiguration());
    return notifier;
  }

  public boolean isCatalogJanitorEnabled() {
    return catalogJanitorChore != null ? catalogJanitorChore.getEnabled() : false;
  }

  boolean isCleanerChoreEnabled() {
    boolean hfileCleanerFlag = true, logCleanerFlag = true;

    if (getHFileCleaner() != null) {
      hfileCleanerFlag = getHFileCleaner().getEnabled();
    }

    if (logCleaner != null) {
      logCleanerFlag = logCleaner.getEnabled();
    }

    return (hfileCleanerFlag && logCleanerFlag);
  }

  @Override
  public ServerManager getServerManager() {
    return this.serverManager;
  }

  @Override
  public MasterFileSystem getMasterFileSystem() {
    return this.fileSystemManager;
  }

  @Override
  public MasterWalManager getMasterWalManager() {
    return this.walManager;
  }

  @Override
  public SplitWALManager getSplitWALManager() {
    return splitWALManager;
  }

  @Override
  public TableStateManager getTableStateManager() {
    return tableStateManager;
  }
  /*
   * Start up all services. If any of these threads gets an unhandled exception then they just die
   * with a logged message. This should be fine because in general, we do not expect the master to
   * get such unhandled exceptions as OOMEs; it should be lightly loaded. See what HRegionServer
   * does if need to install an unexpected exception handler.
   */
  private void startServiceThreads() throws IOException {
    // Start the executor service pools
    final int masterOpenRegionPoolSize = conf.getInt(HConstants.MASTER_OPEN_REGION_THREADS,
      HConstants.MASTER_OPEN_REGION_THREADS_DEFAULT);
    executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.MASTER_OPEN_REGION).setCorePoolSize(masterOpenRegionPoolSize));
    final int masterCloseRegionPoolSize = conf.getInt(HConstants.MASTER_CLOSE_REGION_THREADS,
      HConstants.MASTER_CLOSE_REGION_THREADS_DEFAULT);
    executorService.startExecutorService(
      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_CLOSE_REGION)
        .setCorePoolSize(masterCloseRegionPoolSize));
    final int masterServerOpThreads = conf.getInt(HConstants.MASTER_SERVER_OPERATIONS_THREADS,
      HConstants.MASTER_SERVER_OPERATIONS_THREADS_DEFAULT);
    executorService.startExecutorService(
      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SERVER_OPERATIONS)
        .setCorePoolSize(masterServerOpThreads));
    final int masterServerMetaOpsThreads =
      conf.getInt(HConstants.MASTER_META_SERVER_OPERATIONS_THREADS,
        HConstants.MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT);
    executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.MASTER_META_SERVER_OPERATIONS)
      .setCorePoolSize(masterServerMetaOpsThreads));
    final int masterLogReplayThreads = conf.getInt(HConstants.MASTER_LOG_REPLAY_OPS_THREADS,
      HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT);
    executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.M_LOG_REPLAY_OPS).setCorePoolSize(masterLogReplayThreads));
    final int masterSnapshotThreads = conf.getInt(SnapshotManager.SNAPSHOT_POOL_THREADS_KEY,
      SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT);
    executorService.startExecutorService(
      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SNAPSHOT_OPERATIONS)
        .setCorePoolSize(masterSnapshotThreads).setAllowCoreThreadTimeout(true));
    final int masterMergeDispatchThreads = conf.getInt(HConstants.MASTER_MERGE_DISPATCH_THREADS,
      HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT);
    executorService.startExecutorService(
      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_MERGE_OPERATIONS)
        .setCorePoolSize(masterMergeDispatchThreads).setAllowCoreThreadTimeout(true));
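    // Each pool size above is configurable; e.g. overriding the property named by
    // HConstants.MASTER_OPEN_REGION_THREADS in hbase-site.xml replaces
    // MASTER_OPEN_REGION_THREADS_DEFAULT for the open-region executor, and likewise for the
    // close/server/meta/log-replay/snapshot/merge pools.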
    // We depend on there being only one instance of this executor running
    // at a time. To do concurrency, would need fencing of enable/disable of
    // tables.
    // Any time changing this maxThreads to > 1, please see the comment at
    // AccessController#postCompletedCreateTableAction
    executorService.startExecutorService(executorService.new ExecutorConfig()
      .setExecutorType(ExecutorType.MASTER_TABLE_OPERATIONS).setCorePoolSize(1));
    startProcedureExecutor();

    // Create log cleaner thread pool
    logCleanerPool = DirScanPool.getLogCleanerScanPool(conf);
    Map<String, Object> params = new HashMap<>();
    params.put(MASTER, this);
    // Start log cleaner thread
    int cleanerInterval =
      conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL);
    this.logCleaner =
      new LogCleaner(cleanerInterval, this, conf, getMasterWalManager().getFileSystem(),
        getMasterWalManager().getOldLogDir(), logCleanerPool, params);
    getChoreService().scheduleChore(logCleaner);

    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);

    // Create custom archive hfile cleaners
    String[] paths = conf.getStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS);
    // todo: handle the overlap issues for the custom paths

    if (paths != null && paths.length > 0) {
      if (conf.getStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS) == null) {
        Set<String> cleanerClasses = new HashSet<>();
        String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
        if (cleaners != null) {
          Collections.addAll(cleanerClasses, cleaners);
        }
        conf.setStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS,
          cleanerClasses.toArray(new String[cleanerClasses.size()]));
        LOG.info("Archive custom cleaner paths: {}, plugins: {}", Arrays.asList(paths),
          cleanerClasses);
      }
      // share the hfile cleaner pool in custom paths
      sharedHFileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf.get(CUSTOM_POOL_SIZE, "6"));
      for (int i = 0; i < paths.length; i++) {
        Path path = new Path(paths[i].trim());
        HFileCleaner cleaner =
          new HFileCleaner("ArchiveCustomHFileCleaner-" + path.getName(), cleanerInterval, this,
            conf, getMasterFileSystem().getFileSystem(), new Path(archiveDir, path),
            HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS, sharedHFileCleanerPool, params, null);
        hfileCleaners.add(cleaner);
        hfileCleanerPaths.add(path);
      }
    }

    // Create the whole archive dir cleaner thread pool
    exclusiveHFileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf);
    hfileCleaners.add(0,
      new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem().getFileSystem(),
        archiveDir, exclusiveHFileCleanerPool, params, hfileCleanerPaths));
    hfileCleanerPaths.add(0, archiveDir);
    // Schedule all the hfile cleaners
    for (HFileCleaner hFileCleaner : hfileCleaners) {
      getChoreService().scheduleChore(hFileCleaner);
    }

    // Regions Reopen based on very high storeFileRefCount is considered enabled
    // only if hbase.regions.recovery.store.file.ref.count has value > 0
    final int maxStoreFileRefCount = conf.getInt(HConstants.STORE_FILE_REF_COUNT_THRESHOLD,
      HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD);
    if (maxStoreFileRefCount > 0) {
      this.regionsRecoveryChore = new RegionsRecoveryChore(this, conf, this);
      getChoreService().scheduleChore(this.regionsRecoveryChore);
    } else {
      LOG.info(
        "Reopening regions with very high storeFileRefCount is disabled. "
          + "Provide threshold value > 0 for {} to enable it.",
        HConstants.STORE_FILE_REF_COUNT_THRESHOLD);
    }

    this.regionsRecoveryConfigManager = new RegionsRecoveryConfigManager(this);

    replicationBarrierCleaner =
      new ReplicationBarrierCleaner(conf, this, getConnection(), replicationPeerManager);
    getChoreService().scheduleChore(replicationBarrierCleaner);

    final boolean isSnapshotChoreEnabled = this.snapshotCleanupStateStore.get();
    this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager());
    if (isSnapshotChoreEnabled) {
      getChoreService().scheduleChore(this.snapshotCleanerChore);
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("Snapshot Cleaner Chore is disabled. Not starting up the chore.");
      }
    }
    serviceStarted = true;
    if (LOG.isTraceEnabled()) {
      LOG.trace("Started service threads");
    }
  }

  protected void stopServiceThreads() {
    if (masterJettyServer != null) {
      LOG.info("Stopping master jetty server");
      try {
        masterJettyServer.stop();
      } catch (Exception e) {
        LOG.error("Failed to stop master jetty server", e);
      }
    }
    stopChoreService();
    stopExecutorService();
    if (exclusiveHFileCleanerPool != null) {
      exclusiveHFileCleanerPool.shutdownNow();
      exclusiveHFileCleanerPool = null;
    }
    if (logCleanerPool != null) {
      logCleanerPool.shutdownNow();
      logCleanerPool = null;
    }
    if (sharedHFileCleanerPool != null) {
      sharedHFileCleanerPool.shutdownNow();
      sharedHFileCleanerPool = null;
    }
    if (maintenanceRegionServer != null) {
      maintenanceRegionServer.getRegionServer().stop(HBASE_MASTER_CLEANER_INTERVAL);
    }

    LOG.debug("Stopping service threads");
    // stop procedure executor prior to other services such as server manager and assignment
    // manager, as these services are important for some running procedures. See HBASE-24117 for
    // example.
    stopProcedureExecutor();

    if (regionNormalizerManager != null) {
      regionNormalizerManager.stop();
    }
    if (this.quotaManager != null) {
      this.quotaManager.stop();
    }

    if (this.activeMasterManager != null) {
      this.activeMasterManager.stop();
    }
    if (this.serverManager != null) {
      this.serverManager.stop();
    }
    if (this.assignmentManager != null) {
      this.assignmentManager.stop();
    }

    if (masterRegion != null) {
      masterRegion.close(isAborted());
    }
    if (this.walManager != null) {
      this.walManager.stop();
    }
    if (this.fileSystemManager != null) {
      this.fileSystemManager.stop();
    }
    if (this.mpmHost != null) {
      this.mpmHost.stop("server shutting down.");
    }
    if (this.regionServerTracker != null) {
      this.regionServerTracker.stop();
    }
  }

  private void createProcedureExecutor() throws IOException {
    final String procedureDispatcherClassName =
      conf.get(HBASE_MASTER_RSPROC_DISPATCHER_CLASS, DEFAULT_HBASE_MASTER_RSPROC_DISPATCHER_CLASS);
    final RSProcedureDispatcher procedureDispatcher = ReflectionUtils.instantiateWithCustomCtor(
      procedureDispatcherClassName, new Class[] { MasterServices.class }, new Object[] { this });
    final MasterProcedureEnv procEnv = new MasterProcedureEnv(this, procedureDispatcher);
    procedureStore = new RegionProcedureStore(this, masterRegion,
      new MasterProcedureEnv.FsUtilsLeaseRecovery(this));
    procedureStore.registerListener(new ProcedureStoreListener() {

      @Override
      public void abortProcess() {
        abort("The Procedure Store lost the lease", null);
      }
    });
    MasterProcedureScheduler procedureScheduler = procEnv.getProcedureScheduler();
    procedureExecutor = new ProcedureExecutor<>(conf, procEnv, procedureStore, procedureScheduler);
    configurationManager.registerObserver(procEnv);

    int cpus = Runtime.getRuntime().availableProcessors();
    final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, Math.max(
      (cpus > 0 ? cpus / 4 : 0), MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
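    // Worked example: with 16 available processors and no explicit
    // MASTER_PROCEDURE_THREADS setting, this evaluates to
    // max(16 / 4, DEFAULT_MIN_MASTER_PROCEDURE_THREADS) worker threads.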
    final boolean abortOnCorruption =
      conf.getBoolean(MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
        MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
    procedureStore.start(numThreads);
    // Just initialize it but do not start the workers, we will start the workers later by calling
    // startProcedureExecutor. See the javadoc for finishActiveMasterInitialization for more
    // details.
    procedureExecutor.init(numThreads, abortOnCorruption);
    if (!procEnv.getRemoteDispatcher().start()) {
      throw new HBaseIOException("Failed start of remote dispatcher");
    }
  }

  // will be overridden in UT
  protected void startProcedureExecutor() throws IOException {
    procedureExecutor.startWorkers();
  }

  /**
   * Turn on/off Snapshot Cleanup Chore
   * @param on indicates whether Snapshot Cleanup Chore is to be run
   */
  void switchSnapshotCleanup(final boolean on, final boolean synchronous) throws IOException {
    if (synchronous) {
      synchronized (this.snapshotCleanerChore) {
        switchSnapshotCleanup(on);
      }
    } else {
      switchSnapshotCleanup(on);
    }
  }

  private void switchSnapshotCleanup(final boolean on) throws IOException {
    snapshotCleanupStateStore.set(on);
    if (on) {
      getChoreService().scheduleChore(this.snapshotCleanerChore);
    } else {
      this.snapshotCleanerChore.cancel();
    }
  }

  private void stopProcedureExecutor() {
    if (procedureExecutor != null) {
      configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
      procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
      procedureExecutor.stop();
      procedureExecutor.join();
      procedureExecutor = null;
    }

    if (procedureStore != null) {
      procedureStore.stop(isAborted());
      procedureStore = null;
    }
  }

  protected void stopChores() {
    shutdownChore(mobFileCleanerChore);
    shutdownChore(mobFileCompactionChore);
    shutdownChore(balancerChore);
    if (regionNormalizerManager != null) {
      shutdownChore(regionNormalizerManager.getRegionNormalizerChore());
    }
    shutdownChore(clusterStatusChore);
    shutdownChore(catalogJanitorChore);
    shutdownChore(clusterStatusPublisherChore);
    shutdownChore(snapshotQuotaChore);
    shutdownChore(logCleaner);
    if (hfileCleaners != null) {
      for (ScheduledChore chore : hfileCleaners) {
        chore.shutdown();
      }
      hfileCleaners = null;
    }
    shutdownChore(replicationBarrierCleaner);
    shutdownChore(snapshotCleanerChore);
    shutdownChore(hbckChore);
    shutdownChore(regionsRecoveryChore);
    shutdownChore(rollingUpgradeChore);
    shutdownChore(oldWALsDirSizeChore);
  }

  /** Returns the remote side's InetAddress. */
  InetAddress getRemoteInetAddress(final int port, final long serverStartCode)
    throws UnknownHostException {
    // Do it out here in its own little method so can fake an address when
    // mocking up in tests.
    InetAddress ia = RpcServer.getRemoteIp();

    // The call could be from the local regionserver,
    // in which case, there is no remote address.
    if (ia == null && serverStartCode == startcode) {
      InetSocketAddress isa = rpcServices.getSocketAddress();
      if (isa != null && isa.getPort() == port) {
        ia = isa.getAddress();
      }
    }
    return ia;
  }

  /** Returns Maximum time we should run balancer for */
  private int getMaxBalancingTime() {
    // if max balancing time isn't set, defaulting it to period time
    int maxBalancingTime =
      getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, getConfiguration()
        .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD));
    return maxBalancingTime;
  }

  /** Returns Maximum number of regions in transition */
  private int getMaxRegionsInTransition() {
    int numRegions = this.assignmentManager.getRegionStates().getRegionAssignments().size();
    return Math.max((int) Math.floor(numRegions * this.maxRitPercent), 1);
  }

  /**
   * It first sleeps to the next balance plan start time. Meanwhile, it throttles by the max number
   * of regions in transition to protect availability.
   * @param nextBalanceStartTime The next balance plan start time
   * @param maxRegionsInTransition max number of regions in transition
   * @param cutoffTime when to exit balancer
   */
  private void balanceThrottling(long nextBalanceStartTime, int maxRegionsInTransition,
    long cutoffTime) {
    boolean interrupted = false;

    // Sleep to next balance plan start time
    // But if there are zero regions in transition, it can skip sleep to speed up.
    while (
      !interrupted && EnvironmentEdgeManager.currentTime() < nextBalanceStartTime
        && this.assignmentManager.getRegionStates().hasRegionsInTransition()
    ) {
      try {
        Thread.sleep(100);
      } catch (InterruptedException ie) {
        interrupted = true;
      }
    }

    // Throttling by max number regions in transition
    while (
      !interrupted && maxRegionsInTransition > 0
        && this.assignmentManager.getRegionStates().getRegionsInTransitionCount()
            >= maxRegionsInTransition
        && EnvironmentEdgeManager.currentTime() <= cutoffTime
    ) {
      try {
        // sleep if the number of regions in transition exceeds the limit
        Thread.sleep(100);
      } catch (InterruptedException ie) {
        interrupted = true;
      }
    }

    if (interrupted) Thread.currentThread().interrupt();
  }
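  // Putting the two helpers above together (numbers are illustrative): with maxRitPercent = 0.01
  // and 2000 assigned regions, getMaxRegionsInTransition() returns max(floor(2000 * 0.01), 1) =
  // 20, so balanceThrottling() keeps sleeping while 20 or more regions are in transition, until
  // the cutoff time is reached.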
  public BalanceResponse balance() throws IOException {
    return balance(BalanceRequest.defaultInstance());
  }

  /**
   * Trigger a normal balance, see {@link HMaster#balance()}. If the balance is not executed this
   * time, the metrics related to the balance will be updated. When balance is running, related
   * metrics will be updated at the same time. But if some checking logic failed and caused the
   * balancer to exit early, we lose the chance to update balancer metrics. This would leave users
   * missing the latest balancer info.
   */
  public BalanceResponse balanceOrUpdateMetrics() throws IOException {
    synchronized (this.balancer) {
      BalanceResponse response = balance();
      if (!response.isBalancerRan()) {
        Map<TableName, Map<ServerName, List<RegionInfo>>> assignments =
          this.assignmentManager.getRegionStates().getAssignmentsForBalancer(this.tableStateManager,
            this.serverManager.getOnlineServersList());
        for (Map<ServerName, List<RegionInfo>> serverMap : assignments.values()) {
          serverMap.keySet().removeAll(this.serverManager.getDrainingServersList());
        }
        this.balancer.updateBalancerLoadInfo(assignments);
      }
      return response;
    }
  }

  /**
   * Checks master state before initiating action over region topology.
   * @param action the name of the action under consideration, for logging.
   * @return {@code true} when the caller should exit early, {@code false} otherwise.
   */
  @Override
  public boolean skipRegionManagementAction(final String action) {
    // Note: this method could be `default` on MasterServices, but for the logging.
    if (!isInitialized()) {
      LOG.debug("Master has not been initialized, don't run {}.", action);
      return true;
    }
    if (this.getServerManager().isClusterShutdown()) {
      LOG.info("Cluster is shutting down, don't run {}.", action);
      return true;
    }
    if (isInMaintenanceMode()) {
      LOG.info("Master is in maintenance mode, don't run {}.", action);
      return true;
    }
    return false;
  }

  public BalanceResponse balance(BalanceRequest request) throws IOException {
    checkInitialized();

    BalanceResponse.Builder responseBuilder = BalanceResponse.newBuilder();

    if (loadBalancerStateStore == null || !(loadBalancerStateStore.get() || request.isDryRun())) {
      return responseBuilder.build();
    }

    if (skipRegionManagementAction("balancer")) {
      return responseBuilder.build();
    }

    synchronized (this.balancer) {
      // Only allow one balance run at a time.
      if (this.assignmentManager.hasRegionsInTransition()) {
        List<RegionStateNode> regionsInTransition = assignmentManager.getRegionsInTransition();
        // if hbase:meta region is in transition, result of assignment cannot be recorded
        // ignore the force flag in that case
        boolean metaInTransition = assignmentManager.isMetaRegionInTransition();
        List<RegionStateNode> toPrint = regionsInTransition;
        int max = 5;
        boolean truncated = false;
        if (regionsInTransition.size() > max) {
          toPrint = regionsInTransition.subList(0, max);
          truncated = true;
        }

        if (!request.isIgnoreRegionsInTransition() || metaInTransition) {
          LOG.info("Not running balancer (ignoreRIT=false" + ", metaRIT=" + metaInTransition
            + ") because " + regionsInTransition.size() + " region(s) in transition: " + toPrint
            + (truncated ? "(truncated list)" : ""));
          return responseBuilder.build();
        }
      }
      if (this.serverManager.areDeadServersInProgress()) {
        LOG.info("Not running balancer because processing dead regionserver(s): "
          + this.serverManager.getDeadServers());
        return responseBuilder.build();
      }

      if (this.cpHost != null) {
        try {
          if (this.cpHost.preBalance(request)) {
            LOG.debug("Coprocessor bypassing balancer request");
            return responseBuilder.build();
          }
        } catch (IOException ioe) {
          LOG.error("Error invoking master coprocessor preBalance()", ioe);
          return responseBuilder.build();
        }
      }

      Map<TableName, Map<ServerName, List<RegionInfo>>> assignments =
        this.assignmentManager.getRegionStates().getAssignmentsForBalancer(tableStateManager,
          this.serverManager.getOnlineServersList());
      for (Map<ServerName, List<RegionInfo>> serverMap : assignments.values()) {
        serverMap.keySet().removeAll(this.serverManager.getDrainingServersList());
      }

      // Give the balancer the current cluster state.
      this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());

      List<RegionPlan> plans = this.balancer.balanceCluster(assignments);

      responseBuilder.setBalancerRan(true).setMovesCalculated(plans == null ? 0 : plans.size());

      if (skipRegionManagementAction("balancer")) {
        // make one last check that the cluster isn't shutting down before proceeding.
        return responseBuilder.build();
      }

      // For dry run we don't actually want to execute the moves, but we do want
      // to execute the coprocessor below
      List<RegionPlan> sucRPs =
        request.isDryRun() ? Collections.emptyList() : executeRegionPlansWithThrottling(plans);

      if (this.cpHost != null) {
        try {
          this.cpHost.postBalance(request, sucRPs);
        } catch (IOException ioe) {
          // balancing already succeeded so don't change the result
          LOG.error("Error invoking master coprocessor postBalance()", ioe);
        }
      }

      responseBuilder.setMovesExecuted(sucRPs.size());
    }

    // If LoadBalancer did not generate any plans, it means the cluster is already balanced.
    // Return the response indicating success.
    return responseBuilder.build();
  }

  /**
   * Execute region plans with throttling
   * @param plans to execute
   * @return succeeded plans
   */
  public List<RegionPlan> executeRegionPlansWithThrottling(List<RegionPlan> plans) {
    List<RegionPlan> successRegionPlans = new ArrayList<>();
    int maxRegionsInTransition = getMaxRegionsInTransition();
    long balanceStartTime = EnvironmentEdgeManager.currentTime();
    long cutoffTime = balanceStartTime + this.maxBalancingTime;
    int rpCount = 0; // number of RegionPlans balanced so far
    if (plans != null && !plans.isEmpty()) {
      int balanceInterval = this.maxBalancingTime / plans.size();
      LOG.info(
        "Balancer plans size is " + plans.size() + ", the balance interval is " + balanceInterval
          + " ms, and the max number regions in transition is " + maxRegionsInTransition);

      for (RegionPlan plan : plans) {
        LOG.info("balance " + plan);
        // TODO: bulk assign
        try {
          this.assignmentManager.balance(plan);
          this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
          this.balancer.throttle(plan);
        } catch (HBaseIOException hioe) {
          // we should ignore failed plans here, to avoid aborting the whole set of balance plans;
          // later calls of balance() can pick up the failed and skipped plans
          LOG.warn("Failed balance plan {}, skipping...", plan, hioe);
        } catch (Exception e) {
          LOG.warn("Failed throttling assigning a new plan.", e);
        }
        // rpCount records balance plans processed, does not care if a plan succeeds
        rpCount++;
        successRegionPlans.add(plan);

        if (this.maxBalancingTime > 0) {
          balanceThrottling(balanceStartTime + rpCount * balanceInterval, maxRegionsInTransition,
            cutoffTime);
        }

        // if performing next balance exceeds cutoff time, exit the loop
        if (
          this.maxBalancingTime > 0 && rpCount < plans.size()
            && EnvironmentEdgeManager.currentTime() > cutoffTime
        ) {
          // TODO: After balance, there should not be a cutoff time (keeping it as
          // a security net for now)
          LOG.debug(
            "No more balancing till next balance run; maxBalanceTime=" + this.maxBalancingTime);
          break;
        }
      }
    }
    LOG.debug("Balancer is going into sleep until next period in {}ms", getConfiguration()
      .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD));
    return successRegionPlans;
  }
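  // Illustrative pacing arithmetic for the method above: with maxBalancingTime = 300000 ms and
  // 100 plans, balanceInterval is 3000 ms, so after the k-th plan the loop throttles toward a
  // start time of balanceStartTime + k * 3000, and it bails out once currentTime passes
  // cutoffTime.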
  @Override
  public RegionNormalizerManager getRegionNormalizerManager() {
    return regionNormalizerManager;
  }

  @Override
  public boolean normalizeRegions(final NormalizeTableFilterParams ntfp,
    final boolean isHighPriority) throws IOException {
    if (regionNormalizerManager == null || !regionNormalizerManager.isNormalizerOn()) {
      LOG.debug("Region normalization is disabled, don't run region normalizer.");
      return false;
    }
    if (skipRegionManagementAction("region normalizer")) {
      return false;
    }
    if (assignmentManager.hasRegionsInTransition()) {
      return false;
    }

    final Set<TableName> matchingTables = getTableDescriptors(new LinkedList<>(),
      ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false).stream()
        .map(TableDescriptor::getTableName).collect(Collectors.toSet());
    final Set<TableName> allEnabledTables =
      tableStateManager.getTablesInStates(TableState.State.ENABLED);
    final List<TableName> targetTables =
      new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables));
    Collections.shuffle(targetTables);
    return regionNormalizerManager.normalizeRegions(targetTables, isHighPriority);
  }

  /** Returns Client info for use as prefix on an audit log string; who did an action */
  @Override
  public String getClientIdAuditPrefix() {
    return "Client=" + RpcServer.getRequestUserName().orElse(null) + "/"
      + RpcServer.getRemoteAddress().orElse(null);
  }

  /**
   * Switch for the background CatalogJanitor thread. Used for testing. The thread will continue to
   * run. It will just be a noop if disabled.
   * @param b If false, the catalog janitor won't do anything.
   */
  public void setCatalogJanitorEnabled(final boolean b) {
    this.catalogJanitorChore.setEnabled(b);
  }

  @Override
  public long mergeRegions(final RegionInfo[] regionsToMerge, final boolean forcible, final long ng,
    final long nonce) throws IOException {
    checkInitialized();

    final String regionNamesToLog = RegionInfo.getShortNameToLog(regionsToMerge);

    if (!isSplitOrMergeEnabled(MasterSwitchType.MERGE)) {
      LOG.warn("Merge switch is off! skip merge of " + regionNamesToLog);
      throw new DoNotRetryIOException(
        "Merge of " + regionNamesToLog + " failed because merge switch is off");
    }

    if (!getTableDescriptors().get(regionsToMerge[0].getTable()).isMergeEnabled()) {
      LOG.warn("Merge is disabled for the table! Skipping merge of {}", regionNamesToLog);
      throw new DoNotRetryIOException(
        "Merge of " + regionNamesToLog + " failed as region merge is disabled for the table");
    }

    return MasterProcedureUtil.submitProcedure(new NonceProcedureRunnable(this, ng, nonce) {
      @Override
      protected void run() throws IOException {
        getMaster().getMasterCoprocessorHost().preMergeRegions(regionsToMerge);
        String aid = getClientIdAuditPrefix();
        LOG.info("{} merge regions {}", aid, regionNamesToLog);
        submitProcedure(new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(),
          regionsToMerge, forcible));
        getMaster().getMasterCoprocessorHost().postMergeRegions(regionsToMerge);
      }

      @Override
      protected String getDescription() {
        return "MergeTableProcedure";
      }
    });
  }
  @Override
  public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, final long nonceGroup,
    final long nonce) throws IOException {
    checkInitialized();

    if (!isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) {
      LOG.warn("Split switch is off! skip split of " + regionInfo);
      throw new DoNotRetryIOException(
        "Split region " + regionInfo.getRegionNameAsString() + " failed due to split switch off");
    }

    if (!getTableDescriptors().get(regionInfo.getTable()).isSplitEnabled()) {
      LOG.warn("Split is disabled for the table! Skipping split of {}", regionInfo);
      throw new DoNotRetryIOException("Split region " + regionInfo.getRegionNameAsString()
        + " failed as region split is disabled for the table");
    }

    return MasterProcedureUtil
      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
        @Override
        protected void run() throws IOException {
          getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow);
          LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString());

          // Execute the operation asynchronously
          submitProcedure(getAssignmentManager().createSplitProcedure(regionInfo, splitRow));
        }

        @Override
        protected String getDescription() {
          return "SplitTableProcedure";
        }
      });
  }

  private void warmUpRegion(ServerName server, RegionInfo region) {
    FutureUtils.addListener(asyncClusterConnection.getRegionServerAdmin(server)
      .warmupRegion(RequestConverter.buildWarmupRegionRequest(region)), (r, e) -> {
        if (e != null) {
          LOG.warn("Failed to warm up region {} on server {}", region, server, e);
        }
      });
  }
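  // Note that warmUpRegion() is fire-and-forget: the listener above only logs a failure, so an
  // unsuccessful warmup never blocks or fails the move that follows; it merely loses the
  // warm-up optimization.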
  // Public so can be accessed by tests. Blocks until move is done.
  // Replace with an async implementation from which you can get
  // a success/failure result.
  @InterfaceAudience.Private
  public void move(final byte[] encodedRegionName, byte[] destServerName) throws IOException {
    RegionState regionState =
      assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName));

    RegionInfo hri;
    if (regionState != null) {
      hri = regionState.getRegion();
    } else {
      throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
    }

    ServerName dest;
    List<ServerName> exclude = hri.getTable().isSystemTable()
      ? assignmentManager.getExcludedServersForSystemTable()
      : new ArrayList<>(1);
    if (
      destServerName != null && exclude.contains(ServerName.valueOf(Bytes.toString(destServerName)))
    ) {
      LOG.info(Bytes.toString(encodedRegionName) + " can not move to "
        + Bytes.toString(destServerName) + " because the server is in exclude list");
      destServerName = null;
    }
    if (destServerName == null || destServerName.length == 0) {
      LOG.info("Passed destination servername is null/empty so " + "choosing a server at random");
      exclude.add(regionState.getServerName());
      final List<ServerName> destServers = this.serverManager.createDestinationServersList(exclude);
      dest = balancer.randomAssignment(hri, destServers);
      if (dest == null) {
        LOG.debug("Unable to determine a plan to assign " + hri);
        return;
      }
    } else {
      ServerName candidate = ServerName.valueOf(Bytes.toString(destServerName));
      dest = balancer.randomAssignment(hri, Lists.newArrayList(candidate));
      if (dest == null) {
        LOG.debug("Unable to determine a plan to assign " + hri);
        return;
      }
      // TODO: deal with table on master for rs group.
      if (dest.equals(serverName)) {
        // To avoid unnecessary region moving later by balancer. Don't put user
        // regions on master.
        LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
          + " to avoid unnecessary region moving later by load balancer,"
          + " because it should not be on master");
        return;
      }
    }

    if (dest.equals(regionState.getServerName())) {
      LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
        + " because region already assigned to the same server " + dest + ".");
      return;
    }

    // Now we can do the move
    RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);
    assert rp.getDestination() != null : rp.toString() + " " + dest;

    try {
      checkInitialized();
      if (this.cpHost != null) {
        this.cpHost.preMove(hri, rp.getSource(), rp.getDestination());
      }

      TransitRegionStateProcedure proc =
        this.assignmentManager.createMoveRegionProcedure(rp.getRegionInfo(), rp.getDestination());
      if (conf.getBoolean(WARMUP_BEFORE_MOVE, DEFAULT_WARMUP_BEFORE_MOVE)) {
        // Warmup the region on the destination before initiating the move.
        // A region server could reject the close request because it either does not
        // have the specified region or the region is being split.
        LOG.info(getClientIdAuditPrefix() + " move " + rp + ", warming up region on "
          + rp.getDestination());
        warmUpRegion(rp.getDestination(), hri);
      }
      LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
      Future<byte[]> future = ProcedureSyncWait.submitProcedure(this.procedureExecutor, proc);
      try {
        // Is this going to work? Will we throw exception on error?
        // TODO: CompletableFuture rather than this stunted Future.
        future.get();
      } catch (InterruptedException | ExecutionException e) {
        throw new HBaseIOException(e);
      }
      if (this.cpHost != null) {
        this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
      }
    } catch (IOException ioe) {
      if (ioe instanceof HBaseIOException) {
        throw (HBaseIOException) ioe;
      }
      throw new HBaseIOException(ioe);
    }
  }

  @Override
  public long createTable(final TableDescriptor tableDescriptor, final byte[][] splitKeys,
    final long nonceGroup, final long nonce) throws IOException {
    checkInitialized();
    TableDescriptor desc = getMasterCoprocessorHost().preCreateTableRegionsInfos(tableDescriptor);
    if (desc == null) {
      throw new IOException("Creation for " + tableDescriptor + " is canceled by CP");
    }
    String namespace = desc.getTableName().getNamespaceAsString();
    this.clusterSchemaService.getNamespace(namespace);

    RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(desc, splitKeys);
    TableDescriptorChecker.sanityCheck(conf, desc);

    return MasterProcedureUtil
      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
        @Override
        protected void run() throws IOException {
          getMaster().getMasterCoprocessorHost().preCreateTable(desc, newRegions);

          LOG.info(getClientIdAuditPrefix() + " create " + desc);

          // TODO: We can handle/merge duplicate requests, and differentiate the case of
          // TableExistsException by saying if the schema is the same or not.
          //
          // We need to wait for the procedure to potentially fail due to "prepare" sanity
          // checks. This will block only the beginning of the procedure. See HBASE-19953.
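          // For instance, if the table already exists, the procedure fails in its prepare step
          // and latch.await() below surfaces that failure directly to this caller rather than
          // handing back a procId for a doomed procedure.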
          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
          submitProcedure(
            new CreateTableProcedure(procedureExecutor.getEnvironment(), desc, newRegions, latch));
          latch.await();

          getMaster().getMasterCoprocessorHost().postCreateTable(desc, newRegions);
        }

        @Override
        protected String getDescription() {
          return "CreateTableProcedure";
        }
      });
  }

  @Override
  public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
    if (isStopped()) {
      throw new MasterNotRunningException();
    }

    TableName tableName = tableDescriptor.getTableName();
    if (!(tableName.isSystemTable())) {
      throw new IllegalArgumentException(
        "Only system table creation can use this createSystemTable API");
    }

    RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(tableDescriptor, null);

    LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);

    // This special create table is called locally to master. Therefore, no RPC means no need
    // to use nonce to detect duplicated RPC call.
    long procId = this.procedureExecutor.submitProcedure(
      new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions));

    return procId;
  }
2555 startupTaskGroup = TaskMonitor.createTaskGroup(true, "Master startup"); 2556 try { 2557 if (activeMasterManager.blockUntilBecomingActiveMaster(timeout, startupTaskGroup)) { 2558 finishActiveMasterInitialization(); 2559 } 2560 } catch (Throwable t) { 2561 startupTaskGroup.abort("Failed to become active master due to: " + t.getMessage()); 2562 LOG.error(HBaseMarkers.FATAL, "Failed to become active master", t); 2563 // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility 2564 if ( 2565 t instanceof NoClassDefFoundError 2566 && t.getMessage().contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction") 2567 ) { 2568 // improved error message for this special case 2569 abort("HBase is having a problem with its Hadoop jars. You may need to recompile " 2570 + "HBase against Hadoop version " + org.apache.hadoop.util.VersionInfo.getVersion() 2571 + " or change your hadoop jars to start properly", t); 2572 } else { 2573 abort("Unhandled exception. Starting shutdown.", t); 2574 } 2575 } 2576 } 2577 2578 private static boolean isCatalogTable(final TableName tableName) { 2579 return tableName.equals(TableName.META_TABLE_NAME); 2580 } 2581 2582 @Override 2583 public long deleteTable(final TableName tableName, final long nonceGroup, final long nonce) 2584 throws IOException { 2585 checkInitialized(); 2586 2587 return MasterProcedureUtil 2588 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { 2589 @Override 2590 protected void run() throws IOException { 2591 getMaster().getMasterCoprocessorHost().preDeleteTable(tableName); 2592 2593 LOG.info(getClientIdAuditPrefix() + " delete " + tableName); 2594 2595 // TODO: We can handle/merge duplicate requests 2596 // 2597 // We need to wait for the procedure to potentially fail due to "prepare" sanity 2598 // checks. This will block only the beginning of the procedure. See HBASE-19953.
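// Note that the enclosing NonceProcedureRunnable keys this submission by (nonceGroup, nonce),
// so a client retry of the same RPC should resolve to the procedure already running rather
// than scheduling a second delete.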
2599 ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); 2600 submitProcedure( 2601 new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, latch)); 2602 latch.await(); 2603 2604 getMaster().getMasterCoprocessorHost().postDeleteTable(tableName); 2605 } 2606 2607 @Override 2608 protected String getDescription() { 2609 return "DeleteTableProcedure"; 2610 } 2611 }); 2612 } 2613 2614 @Override 2615 public long truncateTable(final TableName tableName, final boolean preserveSplits, 2616 final long nonceGroup, final long nonce) throws IOException { 2617 checkInitialized(); 2618 2619 return MasterProcedureUtil 2620 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { 2621 @Override 2622 protected void run() throws IOException { 2623 getMaster().getMasterCoprocessorHost().preTruncateTable(tableName); 2624 2625 LOG.info(getClientIdAuditPrefix() + " truncate " + tableName); 2626 ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0); 2627 submitProcedure(new TruncateTableProcedure(procedureExecutor.getEnvironment(), tableName, 2628 preserveSplits, latch)); 2629 latch.await(); 2630 2631 getMaster().getMasterCoprocessorHost().postTruncateTable(tableName); 2632 } 2633 2634 @Override 2635 protected String getDescription() { 2636 return "TruncateTableProcedure"; 2637 } 2638 }); 2639 } 2640 2641 @Override 2642 public long truncateRegion(final RegionInfo regionInfo, final long nonceGroup, final long nonce) 2643 throws IOException { 2644 checkInitialized(); 2645 2646 return MasterProcedureUtil 2647 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { 2648 @Override 2649 protected void run() throws IOException { 2650 getMaster().getMasterCoprocessorHost().preTruncateRegion(regionInfo); 2651 2652 LOG.info( 2653 getClientIdAuditPrefix() + " truncate region " + regionInfo.getRegionNameAsString()); 2654 2655 // Execute the operation asynchronously 2656 ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0); 2657 submitProcedure( 2658 new TruncateRegionProcedure(procedureExecutor.getEnvironment(), regionInfo, latch)); 2659 latch.await(); 2660 2661 getMaster().getMasterCoprocessorHost().postTruncateRegion(regionInfo); 2662 } 2663 2664 @Override 2665 protected String getDescription() { 2666 return "TruncateRegionProcedure"; 2667 } 2668 }); 2669 } 2670 2671 @Override 2672 public long addColumn(final TableName tableName, final ColumnFamilyDescriptor column, 2673 final long nonceGroup, final long nonce) throws IOException { 2674 checkInitialized(); 2675 checkTableExists(tableName); 2676 2677 return modifyTable(tableName, new TableDescriptorGetter() { 2678 2679 @Override 2680 public TableDescriptor get() throws IOException { 2681 TableDescriptor old = getTableDescriptors().get(tableName); 2682 if (old.hasColumnFamily(column.getName())) { 2683 throw new InvalidFamilyOperationException("Column family '" + column.getNameAsString() 2684 + "' in table '" + tableName + "' already exists so cannot be added"); 2685 } 2686 2687 return TableDescriptorBuilder.newBuilder(old).setColumnFamily(column).build(); 2688 } 2689 }, nonceGroup, nonce, true); 2690 } 2691 2692 /** 2693 * Implement to return TableDescriptor after pre-checks 2694 */ 2695 protected interface TableDescriptorGetter { 2696 TableDescriptor get() throws IOException; 2697 } 2698 2699 @Override 2700 public long modifyColumn(final TableName tableName, final ColumnFamilyDescriptor descriptor, 2701 final long nonceGroup, final 
long nonce) throws IOException { 2702 checkInitialized(); 2703 checkTableExists(tableName); 2704 return modifyTable(tableName, new TableDescriptorGetter() { 2705 2706 @Override 2707 public TableDescriptor get() throws IOException { 2708 TableDescriptor old = getTableDescriptors().get(tableName); 2709 if (!old.hasColumnFamily(descriptor.getName())) { 2710 throw new InvalidFamilyOperationException("Family '" + descriptor.getNameAsString() 2711 + "' does not exist, so it cannot be modified"); 2712 } 2713 2714 return TableDescriptorBuilder.newBuilder(old).modifyColumnFamily(descriptor).build(); 2715 } 2716 }, nonceGroup, nonce, true); 2717 } 2718 2719 @Override 2720 public long modifyColumnStoreFileTracker(TableName tableName, byte[] family, String dstSFT, 2721 long nonceGroup, long nonce) throws IOException { 2722 checkInitialized(); 2723 return MasterProcedureUtil 2724 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { 2725 2726 @Override 2727 protected void run() throws IOException { 2728 String sft = getMaster().getMasterCoprocessorHost() 2729 .preModifyColumnFamilyStoreFileTracker(tableName, family, dstSFT); 2730 LOG.info("{} modify column {} store file tracker of table {} to {}", 2731 getClientIdAuditPrefix(), Bytes.toStringBinary(family), tableName, sft); 2732 submitProcedure(new ModifyColumnFamilyStoreFileTrackerProcedure( 2733 procedureExecutor.getEnvironment(), tableName, family, sft)); 2734 getMaster().getMasterCoprocessorHost().postModifyColumnFamilyStoreFileTracker(tableName, 2735 family, dstSFT); 2736 } 2737 2738 @Override 2739 protected String getDescription() { 2740 return "ModifyColumnFamilyStoreFileTrackerProcedure"; 2741 } 2742 }); 2743 } 2744 2745 @Override 2746 public long deleteColumn(final TableName tableName, final byte[] columnName, 2747 final long nonceGroup, final long nonce) throws IOException { 2748 checkInitialized(); 2749 checkTableExists(tableName); 2750 2751 return modifyTable(tableName, new TableDescriptorGetter() { 2752 2753 @Override 2754 public TableDescriptor get() throws IOException { 2755 TableDescriptor old = getTableDescriptors().get(tableName); 2756 2757 if (!old.hasColumnFamily(columnName)) { 2758 throw new InvalidFamilyOperationException( 2759 "Family '" + Bytes.toString(columnName) + "' does not exist, so it cannot be deleted"); 2760 } 2761 if (old.getColumnFamilyCount() == 1) { 2762 throw new InvalidFamilyOperationException("Family '" + Bytes.toString(columnName) 2763 + "' is the only column family in the table, so it cannot be deleted"); 2764 } 2765 return TableDescriptorBuilder.newBuilder(old).removeColumnFamily(columnName).build(); 2766 } 2767 }, nonceGroup, nonce, true); 2768 } 2769 2770 @Override 2771 public long enableTable(final TableName tableName, final long nonceGroup, final long nonce) 2772 throws IOException { 2773 checkInitialized(); 2774 2775 return MasterProcedureUtil 2776 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { 2777 @Override 2778 protected void run() throws IOException { 2779 getMaster().getMasterCoprocessorHost().preEnableTable(tableName); 2780 2781 // Normally, it would make sense for this authorization check to exist inside 2782 // AccessController, but because the authorization check is done based on internal state 2783 // (rather than explicit permissions) we'll do the check here instead of in the 2784 // coprocessor. 
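// The block below refuses to enable a table whose space quota is in violation with the
// DISABLE policy, since re-enabling it would undo the enforcement that disabled it in the
// first place. System tables are exempt (HBASE-28183), and the check is skipped while the
// quota subsystem is still initializing.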
2785 MasterQuotaManager quotaManager = getMasterQuotaManager(); 2786 if (quotaManager != null) { 2787 if (quotaManager.isQuotaInitialized()) { 2788 // skip checking quotas for system tables, see: 2789 // https://issues.apache.org/jira/browse/HBASE-28183 2790 if (!tableName.isSystemTable()) { 2791 SpaceQuotaSnapshot currSnapshotOfTable = 2792 QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName); 2793 if (currSnapshotOfTable != null) { 2794 SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus(); 2795 if ( 2796 quotaStatus.isInViolation() 2797 && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null) 2798 ) { 2799 throw new AccessDeniedException("Enabling the table '" + tableName 2800 + "' is disallowed due to a violated space quota."); 2801 } 2802 } 2803 } 2804 } else if (LOG.isTraceEnabled()) { 2805 LOG 2806 .trace("Unable to check for space quotas as the MasterQuotaManager is not enabled"); 2807 } 2808 } 2809 2810 LOG.info(getClientIdAuditPrefix() + " enable " + tableName); 2811 2812 // Execute the operation asynchronously - client will check the progress of the operation 2813 // In case the request is from a <1.1 client, before returning 2814 // we want to make sure that the table is prepared to be 2815 // enabled (the table is locked and the table state is set). 2816 // Note: if the procedure throws exception, we will catch it and rethrow. 2817 final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch(); 2818 submitProcedure( 2819 new EnableTableProcedure(procedureExecutor.getEnvironment(), tableName, prepareLatch)); 2820 prepareLatch.await(); 2821 2822 getMaster().getMasterCoprocessorHost().postEnableTable(tableName); 2823 } 2824 2825 @Override 2826 protected String getDescription() { 2827 return "EnableTableProcedure"; 2828 } 2829 }); 2830 } 2831 2832 @Override 2833 public long disableTable(final TableName tableName, final long nonceGroup, final long nonce) 2834 throws IOException { 2835 checkInitialized(); 2836 2837 return MasterProcedureUtil 2838 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { 2839 @Override 2840 protected void run() throws IOException { 2841 getMaster().getMasterCoprocessorHost().preDisableTable(tableName); 2842 2843 LOG.info(getClientIdAuditPrefix() + " disable " + tableName); 2844 2845 // Execute the operation asynchronously - client will check the progress of the operation 2846 // In case the request is from a <1.1 client, before returning 2847 // we want to make sure that the table is prepared to be 2848 // disabled (the table is locked and the table state is set). 2849 // Note: if the procedure throws exception, we will catch it and rethrow. 2850 // 2851 // We need to wait for the procedure to potentially fail due to "prepare" sanity 2852 // checks. This will block only the beginning of the procedure. See HBASE-19953.
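// The 'false' argument below is DisableTableProcedure's skipTableStateCheck flag; leaving
// it off means the prepare step verifies the table is currently ENABLED and fails fast
// otherwise.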
2853 final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createBlockingLatch(); 2854 submitProcedure(new DisableTableProcedure(procedureExecutor.getEnvironment(), tableName, 2855 false, prepareLatch)); 2856 prepareLatch.await(); 2857 2858 getMaster().getMasterCoprocessorHost().postDisableTable(tableName); 2859 } 2860 2861 @Override 2862 protected String getDescription() { 2863 return "DisableTableProcedure"; 2864 } 2865 }); 2866 } 2867 2868 private long modifyTable(final TableName tableName, 2869 final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce, 2870 final boolean shouldCheckDescriptor) throws IOException { 2871 return modifyTable(tableName, newDescriptorGetter, nonceGroup, nonce, shouldCheckDescriptor, 2872 true); 2873 } 2874 2875 private long modifyTable(final TableName tableName, 2876 final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce, 2877 final boolean shouldCheckDescriptor, final boolean reopenRegions) throws IOException { 2878 return MasterProcedureUtil 2879 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { 2880 @Override 2881 protected void run() throws IOException { 2882 TableDescriptor oldDescriptor = getMaster().getTableDescriptors().get(tableName); 2883 TableDescriptor newDescriptor = getMaster().getMasterCoprocessorHost() 2884 .preModifyTable(tableName, oldDescriptor, newDescriptorGetter.get()); 2885 TableDescriptorChecker.sanityCheck(conf, newDescriptor); 2886 LOG.info("{} modify table {} from {} to {}", getClientIdAuditPrefix(), tableName, 2887 oldDescriptor, newDescriptor); 2888 2889 // Execute the operation synchronously - wait for the operation to complete before 2890 // continuing. 2891 // 2892 // We need to wait for the procedure to potentially fail due to "prepare" sanity 2893 // checks. This will block only the beginning of the procedure. See HBASE-19953.
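// When reopenRegions is false, the new descriptor is persisted without reopening the
// table's regions, so online regions keep serving with the old schema until they are next
// reopened; callers accept that trade-off for an otherwise non-disruptive change.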
2894 ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); 2895 submitProcedure(new ModifyTableProcedure(procedureExecutor.getEnvironment(), 2896 newDescriptor, latch, oldDescriptor, shouldCheckDescriptor, reopenRegions)); 2897 latch.await(); 2898 2899 getMaster().getMasterCoprocessorHost().postModifyTable(tableName, oldDescriptor, 2900 newDescriptor); 2901 } 2902 2903 @Override 2904 protected String getDescription() { 2905 return "ModifyTableProcedure"; 2906 } 2907 }); 2908 2909 } 2910 2911 @Override 2912 public long modifyTable(final TableName tableName, final TableDescriptor newDescriptor, 2913 final long nonceGroup, final long nonce, final boolean reopenRegions) throws IOException { 2914 checkInitialized(); 2915 return modifyTable(tableName, new TableDescriptorGetter() { 2916 @Override 2917 public TableDescriptor get() throws IOException { 2918 return newDescriptor; 2919 } 2920 }, nonceGroup, nonce, false, reopenRegions); 2921 2922 } 2923 2924 @Override 2925 public long modifyTableStoreFileTracker(TableName tableName, String dstSFT, long nonceGroup, 2926 long nonce) throws IOException { 2927 checkInitialized(); 2928 return MasterProcedureUtil 2929 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { 2930 2931 @Override 2932 protected void run() throws IOException { 2933 String sft = getMaster().getMasterCoprocessorHost() 2934 .preModifyTableStoreFileTracker(tableName, dstSFT); 2935 LOG.info("{} modify table store file tracker of table {} to {}", getClientIdAuditPrefix(), 2936 tableName, sft); 2937 submitProcedure(new ModifyTableStoreFileTrackerProcedure( 2938 procedureExecutor.getEnvironment(), tableName, sft)); 2939 getMaster().getMasterCoprocessorHost().postModifyTableStoreFileTracker(tableName, sft); 2940 } 2941 2942 @Override 2943 protected String getDescription() { 2944 return "ModifyTableStoreFileTrackerProcedure"; 2945 } 2946 }); 2947 } 2948 2949 public long restoreSnapshot(final SnapshotDescription snapshotDesc, final long nonceGroup, 2950 final long nonce, final boolean restoreAcl, final String customSFT) throws IOException { 2951 checkInitialized(); 2952 getSnapshotManager().checkSnapshotSupport(); 2953 2954 // Ensure the namespace exists. Will throw an exception if it does not.
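// The target table name is recovered from the snapshot metadata; SnapshotManager then
// decides whether this becomes a restore of the existing table or a clone to a new one.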
2955 final TableName dstTable = TableName.valueOf(snapshotDesc.getTable()); 2956 getClusterSchema().getNamespace(dstTable.getNamespaceAsString()); 2957 2958 return MasterProcedureUtil 2959 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { 2960 @Override 2961 protected void run() throws IOException { 2962 setProcId(getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), 2963 restoreAcl, customSFT)); 2964 } 2965 2966 @Override 2967 protected String getDescription() { 2968 return "RestoreSnapshotProcedure"; 2969 } 2970 }); 2971 } 2972 2973 private void checkTableExists(final TableName tableName) 2974 throws IOException, TableNotFoundException { 2975 if (!tableDescriptors.exists(tableName)) { 2976 throw new TableNotFoundException(tableName); 2977 } 2978 } 2979 2980 @Override 2981 public void checkTableModifiable(final TableName tableName) 2982 throws IOException, TableNotFoundException, TableNotDisabledException { 2983 if (isCatalogTable(tableName)) { 2984 throw new IOException("Can't modify catalog tables"); 2985 } 2986 checkTableExists(tableName); 2987 TableState ts = getTableStateManager().getTableState(tableName); 2988 if (!ts.isDisabled()) { 2989 throw new TableNotDisabledException("Not DISABLED; " + ts); 2990 } 2991 } 2992 2993 public void reloadRegionServerQuotas() { 2994 // multiple reloads are harmless, so no need for NonceProcedureRunnable 2995 getLiveRegionServers() 2996 .forEach(sn -> procedureExecutor.submitProcedure(new ReloadQuotasProcedure(sn))); 2997 } 2998 2999 public ClusterMetrics getClusterMetricsWithoutCoprocessor() throws InterruptedIOException { 3000 return getClusterMetricsWithoutCoprocessor(EnumSet.allOf(Option.class)); 3001 } 3002 3003 public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet<Option> options) 3004 throws InterruptedIOException { 3005 ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder(); 3006 // HBase 1.x clients can't submit the request with Options, 3007 // so we return all information to the client if the list of Options is empty. 3008 if (options.isEmpty()) { 3009 options = EnumSet.allOf(Option.class); 3010 } 3011 3012 // TASKS and/or LIVE_SERVERS will populate this map, which will be given to the builder if 3013 // not null after option processing completes. 3014 Map<ServerName, ServerMetrics> serverMetricsMap = null; 3015 3016 for (Option opt : options) { 3017 switch (opt) { 3018 case HBASE_VERSION: 3019 builder.setHBaseVersion(VersionInfo.getVersion()); 3020 break; 3021 case CLUSTER_ID: 3022 builder.setClusterId(getClusterId()); 3023 break; 3024 case MASTER: 3025 builder.setMasterName(getServerName()); 3026 break; 3027 case BACKUP_MASTERS: 3028 builder.setBackerMasterNames(getBackupMasters()); 3029 break; 3030 case TASKS: { 3031 // Master tasks 3032 builder.setMasterTasks(TaskMonitor.get().getTasks().stream() 3033 .map(task -> ServerTaskBuilder.newBuilder().setDescription(task.getDescription()) 3034 .setStatus(task.getStatus()) 3035 .setState(ServerTask.State.valueOf(task.getState().name())) 3036 .setStartTime(task.getStartTime()).setCompletionTime(task.getCompletionTimestamp()) 3037 .build()) 3038 .collect(Collectors.toList())); 3039 // TASKS is also synonymous with LIVE_SERVERS for now because task information for 3040 // regionservers is carried in ServerLoad.
3041 // Add entries to serverMetricsMap for all live servers, if we haven't already done so 3042 if (serverMetricsMap == null) { 3043 serverMetricsMap = getOnlineServers(); 3044 } 3045 break; 3046 } 3047 case LIVE_SERVERS: { 3048 // Add entries to serverMetricsMap for all live servers, if we haven't already done so 3049 if (serverMetricsMap == null) { 3050 serverMetricsMap = getOnlineServers(); 3051 } 3052 break; 3053 } 3054 case DEAD_SERVERS: { 3055 if (serverManager != null) { 3056 builder.setDeadServerNames( 3057 new ArrayList<>(serverManager.getDeadServers().copyServerNames())); 3058 } 3059 break; 3060 } 3061 case UNKNOWN_SERVERS: { 3062 if (serverManager != null) { 3063 builder.setUnknownServerNames(getUnknownServers()); 3064 } 3065 break; 3066 } 3067 case MASTER_COPROCESSORS: { 3068 if (cpHost != null) { 3069 builder.setMasterCoprocessorNames(Arrays.asList(getMasterCoprocessors())); 3070 } 3071 break; 3072 } 3073 case REGIONS_IN_TRANSITION: { 3074 if (assignmentManager != null) { 3075 builder.setRegionsInTransition( 3076 assignmentManager.getRegionStates().getRegionsStateInTransition()); 3077 } 3078 break; 3079 } 3080 case BALANCER_ON: { 3081 if (loadBalancerStateStore != null) { 3082 builder.setBalancerOn(loadBalancerStateStore.get()); 3083 } 3084 break; 3085 } 3086 case MASTER_INFO_PORT: { 3087 if (infoServer != null) { 3088 builder.setMasterInfoPort(infoServer.getPort()); 3089 } 3090 break; 3091 } 3092 case SERVERS_NAME: { 3093 if (serverManager != null) { 3094 builder.setServerNames(serverManager.getOnlineServersList()); 3095 } 3096 break; 3097 } 3098 case TABLE_TO_REGIONS_COUNT: { 3099 if (isActiveMaster() && isInitialized() && assignmentManager != null) { 3100 try { 3101 Map<TableName, RegionStatesCount> tableRegionStatesCountMap = new HashMap<>(); 3102 Map<String, TableDescriptor> tableDescriptorMap = getTableDescriptors().getAll(); 3103 for (TableDescriptor tableDescriptor : tableDescriptorMap.values()) { 3104 TableName tableName = tableDescriptor.getTableName(); 3105 RegionStatesCount regionStatesCount = 3106 assignmentManager.getRegionStatesCount(tableName); 3107 tableRegionStatesCountMap.put(tableName, regionStatesCount); 3108 } 3109 builder.setTableRegionStatesCount(tableRegionStatesCountMap); 3110 } catch (IOException e) { 3111 LOG.error("Error while populating TABLE_TO_REGIONS_COUNT for Cluster Metrics.", e); 3112 } 3113 } 3114 break; 3115 } 3116 case DECOMMISSIONED_SERVERS: { 3117 if (serverManager != null) { 3118 builder.setDecommissionedServerNames(serverManager.getDrainingServersList()); 3119 } 3120 break; 3121 } 3122 } 3123 } 3124 3125 if (serverMetricsMap != null) { 3126 builder.setLiveServerMetrics(serverMetricsMap); 3127 } 3128 3129 return builder.build(); 3130 } 3131 3132 private List<ServerName> getUnknownServers() { 3133 if (serverManager != null) { 3134 final Set<ServerName> serverNames = getAssignmentManager().getRegionStates().getRegionStates() 3135 .stream().map(RegionState::getServerName).collect(Collectors.toSet()); 3136 final List<ServerName> unknownServerNames = serverNames.stream() 3137 .filter(sn -> sn != null && serverManager.isServerUnknown(sn)).collect(Collectors.toList()); 3138 return unknownServerNames; 3139 } 3140 return null; 3141 } 3142 3143 private Map<ServerName, ServerMetrics> getOnlineServers() { 3144 if (serverManager != null) { 3145 final Map<ServerName, ServerMetrics> map = new HashMap<>(); 3146 serverManager.getOnlineServers().entrySet().forEach(e -> map.put(e.getKey(), e.getValue())); 3147 return map; 3148 } 3149 return null; 3150
} 3151 3152 /** Returns cluster status */ 3153 public ClusterMetrics getClusterMetrics() throws IOException { 3154 return getClusterMetrics(EnumSet.allOf(Option.class)); 3155 } 3156 3157 public ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException { 3158 if (cpHost != null) { 3159 cpHost.preGetClusterMetrics(); 3160 } 3161 ClusterMetrics status = getClusterMetricsWithoutCoprocessor(options); 3162 if (cpHost != null) { 3163 cpHost.postGetClusterMetrics(status); 3164 } 3165 return status; 3166 } 3167 3168 /** Returns info port of active master or 0 if any exception occurs. */ 3169 public int getActiveMasterInfoPort() { 3170 return activeMasterManager.getActiveMasterInfoPort(); 3171 } 3172 3173 /** 3174 * @param sn is ServerName of the backup master 3175 * @return info port of backup master or 0 if any exception occurs. 3176 */ 3177 public int getBackupMasterInfoPort(final ServerName sn) { 3178 return activeMasterManager.getBackupMasterInfoPort(sn); 3179 } 3180 3181 /** 3182 * The set of loaded coprocessors is stored in a static set. Since it's statically allocated, it 3183 * does not require that HMaster's cpHost be initialized prior to accessing it. 3184 * @return a String representation of the set of names of the loaded coprocessors. 3185 */ 3186 public static String getLoadedCoprocessors() { 3187 return CoprocessorHost.getLoadedCoprocessors().toString(); 3188 } 3189 3190 /** Returns timestamp in millis when HMaster was started. */ 3191 public long getMasterStartTime() { 3192 return startcode; 3193 } 3194 3195 /** Returns timestamp in millis when HMaster became the active master. */ 3196 @Override 3197 public long getMasterActiveTime() { 3198 return masterActiveTime; 3199 } 3200 3201 /** Returns timestamp in millis when HMaster finished becoming the active master. */ 3202 public long getMasterFinishedInitializationTime() { 3203 return masterFinishedInitializationTime; 3204 } 3205 3206 public int getNumWALFiles() { 3207 return 0; 3208 } 3209 3210 public ProcedureStore getProcedureStore() { 3211 return procedureStore; 3212 } 3213 3214 public int getRegionServerInfoPort(final ServerName sn) { 3215 int port = this.serverManager.getInfoPort(sn); 3216 return port == 0 3217 ? conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT) 3218 : port; 3219 } 3220 3221 @Override 3222 public String getRegionServerVersion(ServerName sn) { 3223 // Will return "0.0.0" if the server is not online, to prevent moving system regions to a 3224 // region server of unknown version. 3225 return this.serverManager.getVersion(sn); 3226 } 3227 3228 @Override 3229 public void checkIfShouldMoveSystemRegionAsync() { 3230 assignmentManager.checkIfShouldMoveSystemRegionAsync(); 3231 } 3232 3233 /** Returns array of coprocessor SimpleNames. */ 3234 public String[] getMasterCoprocessors() { 3235 Set<String> masterCoprocessors = getMasterCoprocessorHost().getCoprocessors(); 3236 return masterCoprocessors.toArray(new String[masterCoprocessors.size()]); 3237 } 3238 3239 @Override 3240 public void abort(String reason, Throwable cause) { 3241 if (!setAbortRequested() || isStopped()) { 3242 LOG.debug("Abort called but aborted={}, stopped={}", isAborted(), isStopped()); 3243 return; 3244 } 3245 if (cpHost != null) { 3246 // HBASE-4014: dump a list of loaded coprocessors.
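// A misbehaving coprocessor can be the reason for the abort in the first place, so the
// list of loaded coprocessors is dumped up front to give the post-mortem a head start.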
3247 LOG.error(HBaseMarkers.FATAL, 3248 "Master server abort: loaded coprocessors are: " + getLoadedCoprocessors()); 3249 } 3250 String msg = "***** ABORTING master " + this + ": " + reason + " *****"; 3251 if (cause != null) { 3252 LOG.error(HBaseMarkers.FATAL, msg, cause); 3253 } else { 3254 LOG.error(HBaseMarkers.FATAL, msg); 3255 } 3256 3257 try { 3258 stopMaster(); 3259 } catch (IOException e) { 3260 LOG.error("Exception occurred while stopping master", e); 3261 } 3262 } 3263 3264 @Override 3265 public MasterCoprocessorHost getMasterCoprocessorHost() { 3266 return cpHost; 3267 } 3268 3269 @Override 3270 public MasterQuotaManager getMasterQuotaManager() { 3271 return quotaManager; 3272 } 3273 3274 @Override 3275 public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() { 3276 return procedureExecutor; 3277 } 3278 3279 @Override 3280 public ServerName getServerName() { 3281 return this.serverName; 3282 } 3283 3284 @Override 3285 public AssignmentManager getAssignmentManager() { 3286 return this.assignmentManager; 3287 } 3288 3289 @Override 3290 public CatalogJanitor getCatalogJanitor() { 3291 return this.catalogJanitorChore; 3292 } 3293 3294 public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() { 3295 return rsFatals; 3296 } 3297 3298 public TaskGroup getStartupProgress() { 3299 return startupTaskGroup; 3300 } 3301 3302 /** 3303 * Shutdown the cluster. Master runs a coordinated stop of all RegionServers and then itself. 3304 */ 3305 public void shutdown() throws IOException { 3306 TraceUtil.trace(() -> { 3307 if (cpHost != null) { 3308 cpHost.preShutdown(); 3309 } 3310 3311 // Tell the servermanager cluster shutdown has been called. This makes it so when Master is 3312 // the last running server, it'll stop itself. Next, we broadcast the cluster shutdown by 3313 // setting the cluster status as down. RegionServers will notice this change in state and 3314 // will start shutting themselves down. When the last one has exited, the Master can go down. 3315 if (this.serverManager != null) { 3316 this.serverManager.shutdownCluster(); 3317 } 3318 if (this.clusterStatusTracker != null) { 3319 try { 3320 this.clusterStatusTracker.setClusterDown(); 3321 } catch (KeeperException e) { 3322 LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e); 3323 } 3324 } 3325 // Stop the procedure executor. Will stop any ongoing assign, unassign, server crash etc., 3326 // processing so we can go down. 3327 if (this.procedureExecutor != null) { 3328 this.procedureExecutor.stop(); 3329 } 3330 // Shutdown our cluster connection. This will kill any hosted RPCs that might be going on; 3331 // this is what we want especially if the Master is in startup phase doing call outs to 3332 // hbase:meta, etc. when cluster is down. Without this connection close, we'd have to wait on 3333 // the rpc to timeout.
3334 if (this.asyncClusterConnection != null) { 3335 this.asyncClusterConnection.close(); 3336 } 3337 }, "HMaster.shutdown"); 3338 } 3339 3340 public void stopMaster() throws IOException { 3341 if (cpHost != null) { 3342 cpHost.preStopMaster(); 3343 } 3344 stop("Stopped by " + Thread.currentThread().getName()); 3345 } 3346 3347 @Override 3348 public void stop(String msg) { 3349 if (!this.stopped) { 3350 LOG.info("***** STOPPING master '" + this + "' *****"); 3351 this.stopped = true; 3352 LOG.info("STOPPED: " + msg); 3353 // Wakes run() if it is sleeping 3354 sleeper.skipSleepCycle(); 3355 if (this.activeMasterManager != null) { 3356 this.activeMasterManager.stop(); 3357 } 3358 } 3359 } 3360 3361 protected void checkServiceStarted() throws ServerNotRunningYetException { 3362 if (!serviceStarted) { 3363 throw new ServerNotRunningYetException("Server is not running yet"); 3364 } 3365 } 3366 3367 void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException, 3368 MasterNotRunningException, MasterStoppedException { 3369 checkServiceStarted(); 3370 if (!isInitialized()) { 3371 throw new PleaseHoldException("Master is initializing"); 3372 } 3373 if (isStopped()) { 3374 throw new MasterStoppedException(); 3375 } 3376 } 3377 3378 /** 3379 * Report whether this master is currently the active master or not. If not active master, we are 3380 * parked on ZK waiting to become active. This method is used for testing. 3381 * @return true if active master, false if not. 3382 */ 3383 @Override 3384 public boolean isActiveMaster() { 3385 return activeMaster; 3386 } 3387 3388 /** 3389 * Report whether this master has completed its initialization and is ready. If ready, the 3390 * master is also the active master. A standby master is never ready. This method is used for 3391 * testing. 3392 * @return true if master is ready to go, false if not. 3393 */ 3394 @Override 3395 public boolean isInitialized() { 3396 return initialized.isReady(); 3397 } 3398 3399 /** 3400 * Report whether this master is started. This method is used for testing. 3401 * @return true if master is started, false if not. 3402 */ 3403 public boolean isOnline() { 3404 return serviceStarted; 3405 } 3406 3407 /** 3408 * Report whether this master is in maintenance mode. 3409 * @return true if master is in maintenanceMode 3410 */ 3411 @Override 3412 public boolean isInMaintenanceMode() { 3413 return maintenanceMode; 3414 } 3415 3416 public void setInitialized(boolean isInitialized) { 3417 procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized); 3418 } 3419 3420 /** 3421 * Mainly used in procedure related tests, where we will restart ProcedureExecutor and 3422 * AssignmentManager, but we do not want to restart the master (to speed up the test), so we need 3423 * to disable rpc for a while, otherwise some critical rpc requests such as 3424 * reportRegionStateTransition could fail and cause region server to abort. 3425 */ 3426 @RestrictedApi(explanation = "Should only be called in tests", link = "", 3427 allowedOnPath = ".*/src/test/.*") 3428 public void setServiceStarted(boolean started) { 3429 this.serviceStarted = started; 3430 } 3431 3432 @Override 3433 public ProcedureEvent<?> getInitializedEvent() { 3434 return initialized; 3435 } 3436 3437 /** 3438 * Compute the average load across all region servers. Currently, this uses a very naive 3439 * computation - just uses the number of regions being served, ignoring stats about number of 3440 * requests.
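 * For example, 100 regions served across 4 region servers is an average load of 25.0,
 * regardless of how requests are distributed among them.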
3441 * @return the average load 3442 */ 3443 public double getAverageLoad() { 3444 if (this.assignmentManager == null) { 3445 return 0; 3446 } 3447 3448 RegionStates regionStates = this.assignmentManager.getRegionStates(); 3449 if (regionStates == null) { 3450 return 0; 3451 } 3452 return regionStates.getAverageLoad(); 3453 } 3454 3455 @Override 3456 public boolean registerService(Service instance) { 3457 /* 3458 * No stacking of instances is allowed for a single service name 3459 */ 3460 Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType(); 3461 String serviceName = CoprocessorRpcUtils.getServiceName(serviceDesc); 3462 if (coprocessorServiceHandlers.containsKey(serviceName)) { 3463 LOG.error("Coprocessor service " + serviceName 3464 + " already registered, rejecting request from " + instance); 3465 return false; 3466 } 3467 3468 coprocessorServiceHandlers.put(serviceName, instance); 3469 if (LOG.isDebugEnabled()) { 3470 LOG.debug("Registered master coprocessor service: service=" + serviceName); 3471 } 3472 return true; 3473 } 3474 3475 /** 3476 * Utility for constructing an instance of the passed HMaster class. 3477 * @return HMaster instance. 3478 */ 3479 public static HMaster constructMaster(Class<? extends HMaster> masterClass, 3480 final Configuration conf) { 3481 try { 3482 Constructor<? extends HMaster> c = masterClass.getConstructor(Configuration.class); 3483 return c.newInstance(conf); 3484 } catch (Exception e) { 3485 Throwable error = e; 3486 if ( 3487 e instanceof InvocationTargetException 3488 && ((InvocationTargetException) e).getTargetException() != null 3489 ) { 3490 error = ((InvocationTargetException) e).getTargetException(); 3491 } 3492 throw new RuntimeException("Failed construction of Master: " + masterClass.toString() + ". ", 3493 error); 3494 } 3495 } 3496 3497 /** 3498 * @see org.apache.hadoop.hbase.master.HMasterCommandLine 3499 */ 3500 public static void main(String[] args) { 3501 LOG.info("STARTING service " + HMaster.class.getSimpleName()); 3502 VersionInfo.logVersion(); 3503 new HMasterCommandLine(HMaster.class).doMain(args); 3504 } 3505 3506 public HFileCleaner getHFileCleaner() { 3507 return this.hfileCleaners.get(0); 3508 } 3509 3510 public List<HFileCleaner> getHFileCleaners() { 3511 return this.hfileCleaners; 3512 } 3513 3514 public LogCleaner getLogCleaner() { 3515 return this.logCleaner; 3516 } 3517 3518 /** Returns the underlying snapshot manager */ 3519 @Override 3520 public SnapshotManager getSnapshotManager() { 3521 return this.snapshotManager; 3522 } 3523 3524 /** Returns the underlying MasterProcedureManagerHost */ 3525 @Override 3526 public MasterProcedureManagerHost getMasterProcedureManagerHost() { 3527 return mpmHost; 3528 } 3529 3530 @Override 3531 public ClusterSchema getClusterSchema() { 3532 return this.clusterSchemaService; 3533 } 3534 3535 /** 3536 * Create a new Namespace. 3537 * @param namespaceDescriptor descriptor for new Namespace 3538 * @param nonceGroup Identifier for the source of the request, a client or process. 3539 * @param nonce A unique identifier for this operation from the client or process 3540 * identified by <code>nonceGroup</code> (the source must ensure each 3541 * operation gets a unique id). 
3542 * @return procedure id 3543 */ 3544 long createNamespace(final NamespaceDescriptor namespaceDescriptor, final long nonceGroup, 3545 final long nonce) throws IOException { 3546 checkInitialized(); 3547 3548 TableName.isLegalNamespaceName(Bytes.toBytes(namespaceDescriptor.getName())); 3549 3550 return MasterProcedureUtil 3551 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { 3552 @Override 3553 protected void run() throws IOException { 3554 getMaster().getMasterCoprocessorHost().preCreateNamespace(namespaceDescriptor); 3555 // We need to wait for the procedure to potentially fail due to "prepare" sanity 3556 // checks. This will block only the beginning of the procedure. See HBASE-19953. 3557 ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); 3558 LOG.info(getClientIdAuditPrefix() + " creating " + namespaceDescriptor); 3559 // Execute the operation synchronously - wait for the operation to complete before 3560 // continuing. 3561 setProcId(getClusterSchema().createNamespace(namespaceDescriptor, getNonceKey(), latch)); 3562 latch.await(); 3563 getMaster().getMasterCoprocessorHost().postCreateNamespace(namespaceDescriptor); 3564 } 3565 3566 @Override 3567 protected String getDescription() { 3568 return "CreateNamespaceProcedure"; 3569 } 3570 }); 3571 } 3572 3573 /** 3574 * Modify an existing Namespace. 3575 * @param nonceGroup Identifier for the source of the request, a client or process. 3576 * @param nonce A unique identifier for this operation from the client or process identified 3577 * by <code>nonceGroup</code> (the source must ensure each operation gets a 3578 * unique id). 3579 * @return procedure id 3580 */ 3581 long modifyNamespace(final NamespaceDescriptor newNsDescriptor, final long nonceGroup, 3582 final long nonce) throws IOException { 3583 checkInitialized(); 3584 3585 TableName.isLegalNamespaceName(Bytes.toBytes(newNsDescriptor.getName())); 3586 3587 return MasterProcedureUtil 3588 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { 3589 @Override 3590 protected void run() throws IOException { 3591 NamespaceDescriptor oldNsDescriptor = getNamespace(newNsDescriptor.getName()); 3592 getMaster().getMasterCoprocessorHost().preModifyNamespace(oldNsDescriptor, 3593 newNsDescriptor); 3594 // We need to wait for the procedure to potentially fail due to "prepare" sanity 3595 // checks. This will block only the beginning of the procedure. See HBASE-19953. 3596 ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); 3597 LOG.info(getClientIdAuditPrefix() + " modify " + newNsDescriptor); 3598 // Execute the operation synchronously - wait for the operation to complete before 3599 // continuing. 3600 setProcId(getClusterSchema().modifyNamespace(newNsDescriptor, getNonceKey(), latch)); 3601 latch.await(); 3602 getMaster().getMasterCoprocessorHost().postModifyNamespace(oldNsDescriptor, 3603 newNsDescriptor); 3604 } 3605 3606 @Override 3607 protected String getDescription() { 3608 return "ModifyNamespaceProcedure"; 3609 } 3610 }); 3611 } 3612 3613 /** 3614 * Delete an existing Namespace. Only empty Namespaces (no tables) can be removed. 3615 * @param nonceGroup Identifier for the source of the request, a client or process. 3616 * @param nonce A unique identifier for this operation from the client or process identified 3617 * by <code>nonceGroup</code> (the source must ensure each operation gets a 3618 * unique id). 
3619 * @return procedure id 3620 */ 3621 long deleteNamespace(final String name, final long nonceGroup, final long nonce) 3622 throws IOException { 3623 checkInitialized(); 3624 3625 return MasterProcedureUtil 3626 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { 3627 @Override 3628 protected void run() throws IOException { 3629 getMaster().getMasterCoprocessorHost().preDeleteNamespace(name); 3630 LOG.info(getClientIdAuditPrefix() + " delete " + name); 3631 // Execute the operation synchronously - wait for the operation to complete before 3632 // continuing. 3633 // 3634 // We need to wait for the procedure to potentially fail due to "prepare" sanity 3635 // checks. This will block only the beginning of the procedure. See HBASE-19953. 3636 ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); 3637 setProcId(submitProcedure( 3638 new DeleteNamespaceProcedure(procedureExecutor.getEnvironment(), name, latch))); 3639 latch.await(); 3640 // Will not be invoked in the face of Exception thrown by the Procedure's execution 3641 getMaster().getMasterCoprocessorHost().postDeleteNamespace(name); 3642 } 3643 3644 @Override 3645 protected String getDescription() { 3646 return "DeleteNamespaceProcedure"; 3647 } 3648 }); 3649 } 3650 3651 /** 3652 * Get a Namespace 3653 * @param name Name of the Namespace 3654 * @return Namespace descriptor for <code>name</code> 3655 */ 3656 NamespaceDescriptor getNamespace(String name) throws IOException { 3657 checkInitialized(); 3658 if (this.cpHost != null) this.cpHost.preGetNamespaceDescriptor(name); 3659 NamespaceDescriptor nsd = this.clusterSchemaService.getNamespace(name); 3660 if (this.cpHost != null) this.cpHost.postGetNamespaceDescriptor(nsd); 3661 return nsd; 3662 } 3663 3664 /** 3665 * Get all Namespaces 3666 * @return All Namespace descriptors 3667 */ 3668 List<NamespaceDescriptor> getNamespaces() throws IOException { 3669 checkInitialized(); 3670 final List<NamespaceDescriptor> nsds = new ArrayList<>(); 3671 if (cpHost != null) { 3672 cpHost.preListNamespaceDescriptors(nsds); 3673 } 3674 nsds.addAll(this.clusterSchemaService.getNamespaces()); 3675 if (this.cpHost != null) { 3676 this.cpHost.postListNamespaceDescriptors(nsds); 3677 } 3678 return nsds; 3679 } 3680 3681 /** 3682 * List namespace names 3683 * @return All namespace names 3684 */ 3685 public List<String> listNamespaces() throws IOException { 3686 checkInitialized(); 3687 List<String> namespaces = new ArrayList<>(); 3688 if (cpHost != null) { 3689 cpHost.preListNamespaces(namespaces); 3690 } 3691 for (NamespaceDescriptor namespace : clusterSchemaService.getNamespaces()) { 3692 namespaces.add(namespace.getName()); 3693 } 3694 if (cpHost != null) { 3695 cpHost.postListNamespaces(namespaces); 3696 } 3697 return namespaces; 3698 } 3699 3700 @Override 3701 public List<TableName> listTableNamesByNamespace(String name) throws IOException { 3702 checkInitialized(); 3703 return listTableNames(name, null, true); 3704 } 3705 3706 @Override 3707 public List<TableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException { 3708 checkInitialized(); 3709 return listTableDescriptors(name, null, null, true); 3710 } 3711 3712 @Override 3713 public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning) 3714 throws IOException { 3715 if (cpHost != null) { 3716 cpHost.preAbortProcedure(this.procedureExecutor, procId); 3717 } 3718 3719 final boolean result = this.procedureExecutor.abort(procId, 
mayInterruptIfRunning); 3720 3721 if (cpHost != null) { 3722 cpHost.postAbortProcedure(); 3723 } 3724 3725 return result; 3726 } 3727 3728 @Override 3729 public List<Procedure<?>> getProcedures() throws IOException { 3730 if (cpHost != null) { 3731 cpHost.preGetProcedures(); 3732 } 3733 3734 @SuppressWarnings({ "unchecked", "rawtypes" }) 3735 List<Procedure<?>> procList = (List) this.procedureExecutor.getProcedures(); 3736 3737 if (cpHost != null) { 3738 cpHost.postGetProcedures(procList); 3739 } 3740 3741 return procList; 3742 } 3743 3744 @Override 3745 public List<LockedResource> getLocks() throws IOException { 3746 if (cpHost != null) { 3747 cpHost.preGetLocks(); 3748 } 3749 3750 MasterProcedureScheduler procedureScheduler = 3751 procedureExecutor.getEnvironment().getProcedureScheduler(); 3752 3753 final List<LockedResource> lockedResources = procedureScheduler.getLocks(); 3754 3755 if (cpHost != null) { 3756 cpHost.postGetLocks(lockedResources); 3757 } 3758 3759 return lockedResources; 3760 } 3761 3762 /** 3763 * Returns the list of table descriptors that match the specified request 3764 * @param namespace the namespace to query, or null if querying for all 3765 * @param regex The regular expression to match against, or null if querying for all 3766 * @param tableNameList the list of table names, or null if querying for all 3767 * @param includeSysTables False to match only against userspace tables 3768 * @return the list of table descriptors 3769 */ 3770 public List<TableDescriptor> listTableDescriptors(final String namespace, final String regex, 3771 final List<TableName> tableNameList, final boolean includeSysTables) throws IOException { 3772 List<TableDescriptor> htds = new ArrayList<>(); 3773 if (cpHost != null) { 3774 cpHost.preGetTableDescriptors(tableNameList, htds, regex); 3775 } 3776 htds = getTableDescriptors(htds, namespace, regex, tableNameList, includeSysTables); 3777 if (cpHost != null) { 3778 cpHost.postGetTableDescriptors(tableNameList, htds, regex); 3779 } 3780 return htds; 3781 } 3782 3783 /** 3784 * Returns the list of table names that match the specified request 3785 * @param regex The regular expression to match against, or null if querying for all 3786 * @param namespace the namespace to query, or null if querying for all 3787 * @param includeSysTables False to match only against userspace tables 3788 * @return the list of table names 3789 */ 3790 public List<TableName> listTableNames(final String namespace, final String regex, 3791 final boolean includeSysTables) throws IOException { 3792 List<TableDescriptor> htds = new ArrayList<>(); 3793 if (cpHost != null) { 3794 cpHost.preGetTableNames(htds, regex); 3795 } 3796 htds = getTableDescriptors(htds, namespace, regex, null, includeSysTables); 3797 if (cpHost != null) { 3798 cpHost.postGetTableNames(htds, regex); 3799 } 3800 List<TableName> result = new ArrayList<>(htds.size()); 3801 for (TableDescriptor htd : htds) 3802 result.add(htd.getTableName()); 3803 return result; 3804 } 3805 3806 /** 3807 * Return a list of table descriptors after applying any provided filter parameters. Note 3808 * that the user-facing description of this filter logic is presented on the class-level javadoc 3809 * of {@link NormalizeTableFilterParams}.
3810 */ 3811 private List<TableDescriptor> getTableDescriptors(final List<TableDescriptor> htds, 3812 final String namespace, final String regex, final List<TableName> tableNameList, 3813 final boolean includeSysTables) throws IOException { 3814 if (tableNameList == null || tableNameList.isEmpty()) { 3815 // request for all TableDescriptors 3816 Collection<TableDescriptor> allHtds; 3817 if (namespace != null && namespace.length() > 0) { 3818 // Do a check on the namespace existence. Will fail if it does not exist. 3819 this.clusterSchemaService.getNamespace(namespace); 3820 allHtds = tableDescriptors.getByNamespace(namespace).values(); 3821 } else { 3822 allHtds = tableDescriptors.getAll().values(); 3823 } 3824 for (TableDescriptor desc : allHtds) { 3825 if ( 3826 tableStateManager.isTablePresent(desc.getTableName()) 3827 && (includeSysTables || !desc.getTableName().isSystemTable()) 3828 ) { 3829 htds.add(desc); 3830 } 3831 } 3832 } else { 3833 for (TableName s : tableNameList) { 3834 if (tableStateManager.isTablePresent(s)) { 3835 TableDescriptor desc = tableDescriptors.get(s); 3836 if (desc != null) { 3837 htds.add(desc); 3838 } 3839 } 3840 } 3841 } 3842 3843 // Retains only those matched by regular expression. 3844 if (regex != null) filterTablesByRegex(htds, Pattern.compile(regex)); 3845 return htds; 3846 } 3847 3848 /** 3849 * Removes the table descriptors that don't match the pattern. 3850 * @param descriptors list of table descriptors to filter 3851 * @param pattern the regex to use 3852 */ 3853 private static void filterTablesByRegex(final Collection<TableDescriptor> descriptors, 3854 final Pattern pattern) { 3855 final String defaultNS = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR; 3856 Iterator<TableDescriptor> itr = descriptors.iterator(); 3857 while (itr.hasNext()) { 3858 TableDescriptor htd = itr.next(); 3859 String tableName = htd.getTableName().getNameAsString(); 3860 boolean matched = pattern.matcher(tableName).matches(); 3861 if (!matched && htd.getTableName().getNamespaceAsString().equals(defaultNS)) { 3862 matched = pattern.matcher(defaultNS + TableName.NAMESPACE_DELIM + tableName).matches(); 3863 } 3864 if (!matched) { 3865 itr.remove(); 3866 } 3867 } 3868 } 3869 3870 @Override 3871 public long getLastMajorCompactionTimestamp(TableName table) throws IOException { 3872 return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) 3873 .getLastMajorCompactionTimestamp(table); 3874 } 3875 3876 @Override 3877 public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException { 3878 return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) 3879 .getLastMajorCompactionTimestamp(regionName); 3880 } 3881 3882 /** 3883 * Gets the mob file compaction state for a specific table. Whether all the mob files are selected 3884 * is only known during compaction execution, and the statistic is taken just before the 3885 * compaction starts, so it is hard to know the compaction type at that time; rough statistics are 3886 * therefore used for mob file compaction. Only two compaction states are reported, 3887 * CompactionState.MAJOR_AND_MINOR and CompactionState.NONE. 3888 * @param tableName The current table name. 3889 * @return CompactionState.MAJOR_AND_MINOR if the given table has mob compactions in progress, CompactionState.NONE otherwise.
3890 */ 3891 public GetRegionInfoResponse.CompactionState getMobCompactionState(TableName tableName) { 3892 AtomicInteger compactionsCount = mobCompactionStates.get(tableName); 3893 if (compactionsCount != null && compactionsCount.get() != 0) { 3894 return GetRegionInfoResponse.CompactionState.MAJOR_AND_MINOR; 3895 } 3896 return GetRegionInfoResponse.CompactionState.NONE; 3897 } 3898 3899 public void reportMobCompactionStart(TableName tableName) throws IOException { 3900 IdLock.Entry lockEntry = null; 3901 try { 3902 lockEntry = mobCompactionLock.getLockEntry(tableName.hashCode()); 3903 AtomicInteger compactionsCount = mobCompactionStates.get(tableName); 3904 if (compactionsCount == null) { 3905 compactionsCount = new AtomicInteger(0); 3906 mobCompactionStates.put(tableName, compactionsCount); 3907 } 3908 compactionsCount.incrementAndGet(); 3909 } finally { 3910 if (lockEntry != null) { 3911 mobCompactionLock.releaseLockEntry(lockEntry); 3912 } 3913 } 3914 } 3915 3916 public void reportMobCompactionEnd(TableName tableName) throws IOException { 3917 IdLock.Entry lockEntry = null; 3918 try { 3919 lockEntry = mobCompactionLock.getLockEntry(tableName.hashCode()); 3920 AtomicInteger compactionsCount = mobCompactionStates.get(tableName); 3921 if (compactionsCount != null) { 3922 int count = compactionsCount.decrementAndGet(); 3923 // remove the entry if the count is 0. 3924 if (count == 0) { 3925 mobCompactionStates.remove(tableName); 3926 } 3927 } 3928 } finally { 3929 if (lockEntry != null) { 3930 mobCompactionLock.releaseLockEntry(lockEntry); 3931 } 3932 } 3933 } 3934 3935 /** 3936 * Queries the state of the {@link LoadBalancerStateStore}. If the balancer is not initialized, 3937 * false is returned. 3938 * @return The state of the load balancer, or false if the load balancer isn't defined. 3939 */ 3940 public boolean isBalancerOn() { 3941 return !isInMaintenanceMode() && loadBalancerStateStore != null && loadBalancerStateStore.get(); 3942 } 3943 3944 /** 3945 * Queries the state of the {@link RegionNormalizerStateStore}. If it's not initialized, false is 3946 * returned. 3947 */ 3948 public boolean isNormalizerOn() { 3949 return !isInMaintenanceMode() && getRegionNormalizerManager().isNormalizerOn(); 3950 } 3951 3952 /** 3953 * Queries the state of the {@link SplitOrMergeStateStore}. If it is not initialized, false is 3954 * returned. If switchType is illegal, false will be returned. 3955 * @param switchType see {@link org.apache.hadoop.hbase.client.MasterSwitchType} 3956 * @return The state of the switch 3957 */ 3958 @Override 3959 public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) { 3960 return !isInMaintenanceMode() && splitOrMergeStateStore != null 3961 && splitOrMergeStateStore.isSplitOrMergeEnabled(switchType); 3962 } 3963 3964 /** 3965 * Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned. 3966 * <p/> 3967 * Note that the base load balancer will always be {@link RSGroupBasedLoadBalancer} now, so 3968 * this method will return the balancer used inside each rs group. 3969 * @return The name of the {@link LoadBalancer} in use.
3970 */ 3971 public String getLoadBalancerClassName() { 3972 return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, 3973 LoadBalancerFactory.getDefaultLoadBalancerClass().getName()); 3974 } 3975 3976 public SplitOrMergeStateStore getSplitOrMergeStateStore() { 3977 return splitOrMergeStateStore; 3978 } 3979 3980 @Override 3981 public RSGroupBasedLoadBalancer getLoadBalancer() { 3982 return balancer; 3983 } 3984 3985 @Override 3986 public FavoredNodesManager getFavoredNodesManager() { 3987 return balancer.getFavoredNodesManager(); 3988 } 3989 3990 private long executePeerProcedure(AbstractPeerProcedure<?> procedure) throws IOException { 3991 if (!isReplicationPeerModificationEnabled()) { 3992 throw new IOException("Replication peer modification disabled"); 3993 } 3994 long procId = procedureExecutor.submitProcedure(procedure); 3995 procedure.getLatch().await(); 3996 return procId; 3997 } 3998 3999 @Override 4000 public long addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled) 4001 throws ReplicationException, IOException { 4002 LOG.info(getClientIdAuditPrefix() + " creating replication peer, id=" + peerId + ", config=" 4003 + peerConfig + ", state=" + (enabled ? "ENABLED" : "DISABLED")); 4004 return executePeerProcedure(new AddPeerProcedure(peerId, peerConfig, enabled)); 4005 } 4006 4007 @Override 4008 public long removeReplicationPeer(String peerId) throws ReplicationException, IOException { 4009 LOG.info(getClientIdAuditPrefix() + " removing replication peer, id=" + peerId); 4010 return executePeerProcedure(new RemovePeerProcedure(peerId)); 4011 } 4012 4013 @Override 4014 public long enableReplicationPeer(String peerId) throws ReplicationException, IOException { 4015 LOG.info(getClientIdAuditPrefix() + " enable replication peer, id=" + peerId); 4016 return executePeerProcedure(new EnablePeerProcedure(peerId)); 4017 } 4018 4019 @Override 4020 public long disableReplicationPeer(String peerId) throws ReplicationException, IOException { 4021 LOG.info(getClientIdAuditPrefix() + " disable replication peer, id=" + peerId); 4022 return executePeerProcedure(new DisablePeerProcedure(peerId)); 4023 } 4024 4025 @Override 4026 public ReplicationPeerConfig getReplicationPeerConfig(String peerId) 4027 throws ReplicationException, IOException { 4028 if (cpHost != null) { 4029 cpHost.preGetReplicationPeerConfig(peerId); 4030 } 4031 LOG.info(getClientIdAuditPrefix() + " get replication peer config, id=" + peerId); 4032 ReplicationPeerConfig peerConfig = this.replicationPeerManager.getPeerConfig(peerId) 4033 .orElseThrow(() -> new ReplicationPeerNotFoundException(peerId)); 4034 if (cpHost != null) { 4035 cpHost.postGetReplicationPeerConfig(peerId); 4036 } 4037 return peerConfig; 4038 } 4039 4040 @Override 4041 public long updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig) 4042 throws ReplicationException, IOException { 4043 LOG.info(getClientIdAuditPrefix() + " update replication peer config, id=" + peerId 4044 + ", config=" + peerConfig); 4045 return executePeerProcedure(new UpdatePeerConfigProcedure(peerId, peerConfig)); 4046 } 4047 4048 @Override 4049 public List<ReplicationPeerDescription> listReplicationPeers(String regex) 4050 throws ReplicationException, IOException { 4051 if (cpHost != null) { 4052 cpHost.preListReplicationPeers(regex); 4053 } 4054 LOG.debug("{} list replication peers, regex={}", getClientIdAuditPrefix(), regex); 4055 Pattern pattern = regex == null ? 
null : Pattern.compile(regex); 4056 List<ReplicationPeerDescription> peers = this.replicationPeerManager.listPeers(pattern); 4057 if (cpHost != null) { 4058 cpHost.postListReplicationPeers(regex); 4059 } 4060 return peers; 4061 } 4062 4063 @Override 4064 public long transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state) 4065 throws ReplicationException, IOException { 4066 LOG.info( 4067 getClientIdAuditPrefix() 4068 + " transit current cluster state to {} in a synchronous replication peer id={}", 4069 state, peerId); 4070 return executePeerProcedure(new TransitPeerSyncReplicationStateProcedure(peerId, state)); 4071 } 4072 4073 @Override 4074 public boolean replicationPeerModificationSwitch(boolean on) throws IOException { 4075 return replicationPeerModificationStateStore.set(on); 4076 } 4077 4078 @Override 4079 public boolean isReplicationPeerModificationEnabled() { 4080 return replicationPeerModificationStateStore.get(); 4081 } 4082 4083 /** 4084 * Mark region server(s) as decommissioned (previously called 'draining') to prevent additional 4085 * regions from getting assigned to them. Also unload the regions on the servers asynchronously. 4086 * @param servers Region servers to decommission. 4087 */ 4088 public void decommissionRegionServers(final List<ServerName> servers, final boolean offload) 4089 throws IOException { 4090 List<ServerName> serversAdded = new ArrayList<>(servers.size()); 4091 // Place the decommission marker first. 4092 String parentZnode = getZooKeeper().getZNodePaths().drainingZNode; 4093 for (ServerName server : servers) { 4094 try { 4095 String node = ZNodePaths.joinZNode(parentZnode, server.getServerName()); 4096 ZKUtil.createAndFailSilent(getZooKeeper(), node); 4097 } catch (KeeperException ke) { 4098 throw new HBaseIOException( 4099 this.zooKeeper.prefix("Unable to decommission '" + server.getServerName() + "'."), ke); 4100 } 4101 if (this.serverManager.addServerToDrainList(server)) { 4102 serversAdded.add(server); 4103 } 4104 } 4105 // Move the regions off the decommissioned servers. 4106 if (offload) { 4107 final List<ServerName> destServers = this.serverManager.createDestinationServersList(); 4108 for (ServerName server : serversAdded) { 4109 final List<RegionInfo> regionsOnServer = this.assignmentManager.getRegionsOnServer(server); 4110 for (RegionInfo hri : regionsOnServer) { 4111 ServerName dest = balancer.randomAssignment(hri, destServers); 4112 if (dest == null) { 4113 throw new HBaseIOException("Unable to determine a plan to move " + hri); 4114 } 4115 RegionPlan rp = new RegionPlan(hri, server, dest); 4116 this.assignmentManager.moveAsync(rp); 4117 } 4118 } 4119 } 4120 } 4121 4122 /** 4123 * List region servers marked as decommissioned (previously called 'draining') to not get regions 4124 * assigned to them. 4125 * @return List of decommissioned servers. 4126 */ 4127 public List<ServerName> listDecommissionedRegionServers() { 4128 return this.serverManager.getDrainingServersList(); 4129 } 4130 4131 /** 4132 * Remove decommission marker (previously called 'draining') from a region server to allow region 4133 * assignments. Load regions onto the server asynchronously if a list of regions is given. 4134 * @param server Region server to remove decommission marker from. 4135 */ 4136 public void recommissionRegionServer(final ServerName server, 4137 final List<byte[]> encodedRegionNames) throws IOException { 4138 // Remove the server from decommissioned (draining) server list.
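// The decommission marker lives under the draining znode (typically
// /hbase/draining/<server-name>); deleting it is what allows regions to be assigned to
// this server again.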
  /**
   * Remove decommission marker (previously called 'draining') from a region server to allow region
   * assignments. Load regions onto the server asynchronously if a list of regions is given.
   * @param server Region server to remove decommission marker from.
   */
  public void recommissionRegionServer(final ServerName server,
    final List<byte[]> encodedRegionNames) throws IOException {
    // Remove the server from the decommissioned (draining) server list.
    String parentZnode = getZooKeeper().getZNodePaths().drainingZNode;
    String node = ZNodePaths.joinZNode(parentZnode, server.getServerName());
    try {
      ZKUtil.deleteNodeFailSilent(getZooKeeper(), node);
    } catch (KeeperException ke) {
      throw new HBaseIOException(
        this.zooKeeper.prefix("Unable to recommission '" + server.getServerName() + "'."), ke);
    }
    this.serverManager.removeServerFromDrainList(server);

    // Load the regions onto the server if we are given a list of regions.
    if (encodedRegionNames == null || encodedRegionNames.isEmpty()) {
      return;
    }
    if (!this.serverManager.isServerOnline(server)) {
      return;
    }
    for (byte[] encodedRegionName : encodedRegionNames) {
      RegionState regionState =
        assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName));
      if (regionState == null) {
        LOG.warn("Unknown region " + Bytes.toStringBinary(encodedRegionName));
        continue;
      }
      RegionInfo hri = regionState.getRegion();
      if (server.equals(regionState.getServerName())) {
        LOG.info("Skipping move of region " + hri.getRegionNameAsString()
          + " because region already assigned to the same server " + server + ".");
        continue;
      }
      RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), server);
      this.assignmentManager.moveAsync(rp);
    }
  }

  @Override
  public LockManager getLockManager() {
    return lockManager;
  }

  public QuotaObserverChore getQuotaObserverChore() {
    return this.quotaObserverChore;
  }

  public SpaceQuotaSnapshotNotifier getSpaceQuotaSnapshotNotifier() {
    return this.spaceQuotaSnapshotNotifier;
  }

  @SuppressWarnings("unchecked")
  private RemoteProcedure<MasterProcedureEnv, ?> getRemoteProcedure(long procId) {
    Procedure<?> procedure = procedureExecutor.getProcedure(procId);
    if (procedure == null) {
      return null;
    }
    assert procedure instanceof RemoteProcedure;
    return (RemoteProcedure<MasterProcedureEnv, ?>) procedure;
  }

  public void remoteProcedureCompleted(long procId) {
    LOG.debug("Remote procedure done, pid={}", procId);
    RemoteProcedure<MasterProcedureEnv, ?> procedure = getRemoteProcedure(procId);
    if (procedure != null) {
      procedure.remoteOperationCompleted(procedureExecutor.getEnvironment());
    }
  }

  public void remoteProcedureFailed(long procId, RemoteProcedureException error) {
    LOG.debug("Remote procedure failed, pid={}", procId, error);
    RemoteProcedure<MasterProcedureEnv, ?> procedure = getRemoteProcedure(procId);
    if (procedure != null) {
      procedure.remoteOperationFailed(procedureExecutor.getEnvironment(), error);
    }
  }
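
  // Editorial note (not part of the original source): the (nonceGroup, nonce) pair taken by
  // reopenRegions() below makes the submission idempotent; a caller retrying with the same pair
  // should get back the already-registered procedure id instead of scheduling a duplicate.
  // A hypothetical retry sequence (values made up for illustration):
  //
  //   long nonceGroup = ThreadLocalRandom.current().nextLong();
  //   long nonce = ThreadLocalRandom.current().nextLong();
  //   long procId = master.reopenRegions(tableName, regionNames, nonceGroup, nonce);
  //   long retryId = master.reopenRegions(tableName, regionNames, nonceGroup, nonce);
  //   // procId == retryId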
  /**
   * Reopen regions provided in the argument.
   * @param tableName   The current table name
   * @param regionNames The region names of the regions to reopen
   * @param nonceGroup  Identifier for the source of the request, a client or process
   * @param nonce       A unique identifier for this operation from the client or process identified
   *                    by <code>nonceGroup</code> (the source must ensure each operation gets a
   *                    unique id).
   * @return procedure Id
   * @throws IOException if reopening region fails while running procedure
   */
  long reopenRegions(final TableName tableName, final List<byte[]> regionNames,
    final long nonceGroup, final long nonce) throws IOException {
    return MasterProcedureUtil
      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {

        @Override
        protected void run() throws IOException {
          submitProcedure(new ReopenTableRegionsProcedure(tableName, regionNames));
        }

        @Override
        protected String getDescription() {
          return "ReopenTableRegionsProcedure";
        }
      });
  }

  @Override
  public ReplicationPeerManager getReplicationPeerManager() {
    return replicationPeerManager;
  }

  @Override
  public ReplicationLogCleanerBarrier getReplicationLogCleanerBarrier() {
    return replicationLogCleanerBarrier;
  }

  @Override
  public Semaphore getSyncReplicationPeerLock() {
    return syncReplicationPeerLock;
  }

  public HashMap<String, List<Pair<ServerName, ReplicationLoadSource>>>
    getReplicationLoad(ServerName[] serverNames) {
    List<ReplicationPeerDescription> peerList = this.getReplicationPeerManager().listPeers(null);
    if (peerList == null) {
      return null;
    }
    HashMap<String, List<Pair<ServerName, ReplicationLoadSource>>> replicationLoadSourceMap =
      new HashMap<>(peerList.size());
    peerList.stream()
      .forEach(peer -> replicationLoadSourceMap.put(peer.getPeerId(), new ArrayList<>()));
    for (ServerName serverName : serverNames) {
      List<ReplicationLoadSource> replicationLoadSources =
        getServerManager().getLoad(serverName).getReplicationLoadSourceList();
      for (ReplicationLoadSource replicationLoadSource : replicationLoadSources) {
        List<Pair<ServerName, ReplicationLoadSource>> replicationLoadSourceList =
          replicationLoadSourceMap.get(replicationLoadSource.getPeerID());
        if (replicationLoadSourceList == null) {
          LOG.debug(
            "{} does not exist, but it exists in znode(/hbase/replication/rs). When the rs "
              + "restarts, peerId is deleted, so we just need to ignore it",
            replicationLoadSource.getPeerID());
          continue;
        }
        replicationLoadSourceList.add(new Pair<>(serverName, replicationLoadSource));
      }
    }
    for (List<Pair<ServerName, ReplicationLoadSource>> loads : replicationLoadSourceMap.values()) {
      if (loads.size() > 0) {
        loads.sort(Comparator.comparingLong(load -> (-1) * load.getSecond().getReplicationLag()));
      }
    }
    return replicationLoadSourceMap;
  }
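
  // Editorial note (not part of the original source): negating the lag inside comparingLong()
  // in the sort above yields a descending order, i.e. the laggiest replication source comes
  // first. An equivalent, arguably clearer spelling would be:
  //
  //   loads.sort(Comparator
  //     .comparingLong((Pair<ServerName, ReplicationLoadSource> load) ->
  //       load.getSecond().getReplicationLag())
  //     .reversed());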
  /**
   * This method modifies the master's configuration in order to inject replication-related
   * features.
   */
  @InterfaceAudience.Private
  public static void decorateMasterConfiguration(Configuration conf) {
    String plugins = conf.get(HBASE_MASTER_LOGCLEANER_PLUGINS);
    String cleanerClass = ReplicationLogCleaner.class.getCanonicalName();
    if (plugins == null || !plugins.contains(cleanerClass)) {
      // Guard against an unset plugin list; otherwise we would prepend the literal "null".
      conf.set(HBASE_MASTER_LOGCLEANER_PLUGINS,
        plugins == null ? cleanerClass : plugins + "," + cleanerClass);
    }
    if (ReplicationUtils.isReplicationForBulkLoadDataEnabled(conf)) {
      plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
      cleanerClass = ReplicationHFileCleaner.class.getCanonicalName();
      if (plugins == null || !plugins.contains(cleanerClass)) {
        conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
          plugins == null ? cleanerClass : plugins + "," + cleanerClass);
      }
    }
  }

  public SnapshotQuotaObserverChore getSnapshotQuotaObserverChore() {
    return this.snapshotQuotaChore;
  }

  public ActiveMasterManager getActiveMasterManager() {
    return activeMasterManager;
  }

  @Override
  public SyncReplicationReplayWALManager getSyncReplicationReplayWALManager() {
    return this.syncReplicationReplayWALManager;
  }

  @Override
  public HbckChore getHbckChore() {
    return this.hbckChore;
  }

  @Override
  public void runReplicationBarrierCleaner() {
    ReplicationBarrierCleaner rbc = this.replicationBarrierCleaner;
    if (rbc != null) {
      rbc.chore();
    }
  }

  @Override
  public RSGroupInfoManager getRSGroupInfoManager() {
    return rsGroupInfoManager;
  }

  /**
   * Get the compaction state of the table.
   * @param tableName The table name
   * @return CompactionState Compaction state of the table
   */
  public CompactionState getCompactionState(final TableName tableName) {
    CompactionState compactionState = CompactionState.NONE;
    try {
      List<RegionInfo> regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName);
      for (RegionInfo regionInfo : regions) {
        ServerName serverName =
          assignmentManager.getRegionStates().getRegionServerOfRegion(regionInfo);
        if (serverName == null) {
          continue;
        }
        ServerMetrics sl = serverManager.getLoad(serverName);
        if (sl == null) {
          continue;
        }
        RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName());
        if (regionMetrics == null) {
          LOG.warn("Cannot get compaction details for the region: {}, it may not be online.",
            regionInfo.getRegionNameAsString());
          continue;
        }
        if (regionMetrics.getCompactionState() == CompactionState.MAJOR) {
          if (compactionState == CompactionState.MINOR) {
            compactionState = CompactionState.MAJOR_AND_MINOR;
          } else {
            compactionState = CompactionState.MAJOR;
          }
        } else if (regionMetrics.getCompactionState() == CompactionState.MINOR) {
          if (compactionState == CompactionState.MAJOR) {
            compactionState = CompactionState.MAJOR_AND_MINOR;
          } else {
            compactionState = CompactionState.MINOR;
          }
        }
      }
    } catch (Exception e) {
      compactionState = null;
      LOG.error("Exception when getting compaction state for " + tableName.getNameAsString(), e);
    }
    return compactionState;
  }
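
  // Editorial note (not part of the original source): getCompactionState() above folds the
  // per-region states into a single table-level value. The aggregation is monotone:
  //
  //   seen MAJOR only           -> MAJOR
  //   seen MINOR only           -> MINOR
  //   seen both MAJOR and MINOR -> MAJOR_AND_MINOR
  //   seen neither              -> NONE (the initial value)
  //
  // Any exception degrades the result to null rather than guessing.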
  @Override
  public MetaLocationSyncer getMetaLocationSyncer() {
    return metaLocationSyncer;
  }

  @RestrictedApi(explanation = "Should only be called in tests", link = "",
    allowedOnPath = ".*/src/test/.*")
  public MasterRegion getMasterRegion() {
    return masterRegion;
  }

  @Override
  public void onConfigurationChange(Configuration newConf) {
    try {
      Superusers.initialize(newConf);
    } catch (IOException e) {
      LOG.warn("Failed to initialize SuperUsers on reloading of the configuration");
    }
    // append the quotas observer back to the master coprocessor key
    setQuotasObserver(newConf);
    // update region server coprocessor if the configuration has changed.
    if (
      CoprocessorConfigurationUtil.checkConfigurationChange(getConfiguration(), newConf,
        CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY) && !maintenanceMode
    ) {
      LOG.info("Update the master coprocessor(s) because the configuration has changed");
      initializeCoprocessorHost(newConf);
    }
  }

  @Override
  protected NamedQueueRecorder createNamedQueueRecord() {
    final boolean isBalancerDecisionRecording =
      conf.getBoolean(BaseLoadBalancer.BALANCER_DECISION_BUFFER_ENABLED,
        BaseLoadBalancer.DEFAULT_BALANCER_DECISION_BUFFER_ENABLED);
    final boolean isBalancerRejectionRecording =
      conf.getBoolean(BaseLoadBalancer.BALANCER_REJECTION_BUFFER_ENABLED,
        BaseLoadBalancer.DEFAULT_BALANCER_REJECTION_BUFFER_ENABLED);
    if (isBalancerDecisionRecording || isBalancerRejectionRecording) {
      return NamedQueueRecorder.getInstance(conf);
    } else {
      return null;
    }
  }

  @Override
  protected boolean clusterMode() {
    return true;
  }

  public String getClusterId() {
    if (activeMaster) {
      return clusterId;
    }
    return cachedClusterId.getFromCacheOrFetch();
  }

  public Optional<ServerName> getActiveMaster() {
    return activeMasterManager.getActiveMasterServerName();
  }

  public List<ServerName> getBackupMasters() {
    return activeMasterManager.getBackupMasters();
  }

  @Override
  public Iterator<ServerName> getBootstrapNodes() {
    return regionServerTracker.getRegionServers().iterator();
  }

  @Override
  public List<HRegionLocation> getMetaLocations() {
    return metaRegionLocationCache.getMetaRegionLocations();
  }

  @Override
  public void flushMasterStore() throws IOException {
    LOG.info("Force flush master local region.");
    if (this.cpHost != null) {
      try {
        cpHost.preMasterStoreFlush();
      } catch (IOException ioe) {
        LOG.error("Error invoking master coprocessor preMasterStoreFlush()", ioe);
      }
    }
    masterRegion.flush(true);
    if (this.cpHost != null) {
      try {
        cpHost.postMasterStoreFlush();
      } catch (IOException ioe) {
        LOG.error("Error invoking master coprocessor postMasterStoreFlush()", ioe);
      }
    }
  }

  public Collection<ServerName> getLiveRegionServers() {
    return regionServerTracker.getRegionServers();
  }
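
  // Illustrative sketch (editorial addition, not part of the original source):
  // onConfigurationChange() above is reached through the dynamic-configuration reload path.
  // An operator-side trigger might look like the following (Admin.updateConfiguration() is
  // the public client API; connection setup is an assumption):
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //       Admin admin = conn.getAdmin()) {
  //     admin.updateConfiguration(masterServerName); // re-read config, then fire the callbacks
  //   }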
  @RestrictedApi(explanation = "Should only be called in tests", link = "",
    allowedOnPath = ".*/src/test/.*")
  void setLoadBalancer(RSGroupBasedLoadBalancer loadBalancer) {
    this.balancer = loadBalancer;
  }

  @RestrictedApi(explanation = "Should only be called in tests", link = "",
    allowedOnPath = ".*/src/test/.*")
  void setAssignmentManager(AssignmentManager assignmentManager) {
    this.assignmentManager = assignmentManager;
  }

  @RestrictedApi(explanation = "Should only be called in tests", link = "",
    allowedOnPath = ".*/src/test/.*")
  static void setDisableBalancerChoreForTest(boolean disable) {
    disableBalancerChoreForTest = disable;
  }

  private void setQuotasObserver(Configuration conf) {
    // Add the Observer that deletes quotas on table deletion before starting all CPs by
    // default with quota support, unless the user specifically asks not to load this Observer.
    if (QuotaUtil.isQuotaEnabled(conf)) {
      updateConfigurationForQuotasObserver(conf);
    }
  }

  private void initializeCoprocessorHost(Configuration conf) {
    // initialize master side coprocessors before we start handling requests
    this.cpHost = new MasterCoprocessorHost(this, conf);
  }

  @Override
  public long flushTable(TableName tableName, List<byte[]> columnFamilies, long nonceGroup,
    long nonce) throws IOException {
    checkInitialized();

    if (
      !getConfiguration().getBoolean(MasterFlushTableProcedureManager.FLUSH_PROCEDURE_ENABLED,
        MasterFlushTableProcedureManager.FLUSH_PROCEDURE_ENABLED_DEFAULT)
    ) {
      throw new DoNotRetryIOException("FlushTableProcedureV2 is DISABLED");
    }

    return MasterProcedureUtil
      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
        @Override
        protected void run() throws IOException {
          getMaster().getMasterCoprocessorHost().preTableFlush(tableName);
          LOG.info(getClientIdAuditPrefix() + " flush " + tableName);
          submitProcedure(
            new FlushTableProcedure(procedureExecutor.getEnvironment(), tableName, columnFamilies));
          getMaster().getMasterCoprocessorHost().postTableFlush(tableName);
        }

        @Override
        protected String getDescription() {
          return "FlushTableProcedure";
        }
      });
  }
}
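
// Illustrative usage sketch (editorial addition, not part of the original source): when the
// flush procedure is enabled (per the FLUSH_PROCEDURE_ENABLED check in flushTable() above),
// this entry point serves the client-side table-flush path, e.g.:
//
//   try (Connection conn = ConnectionFactory.createConnection(conf);
//       Admin admin = conn.getAdmin()) {
//     admin.flush(TableName.valueOf("t1"));                      // all column families
//     admin.flush(TableName.valueOf("t1"), Bytes.toBytes("cf")); // one family, newer clients
//   }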