/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK;
import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS;
import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK;
import static org.apache.hadoop.hbase.master.cleaner.HFileCleaner.CUSTOM_POOL_SIZE;
import static org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY;

import com.google.errorprone.annotations.RestrictedApi;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.StatusCode;
import io.opentelemetry.context.Scope;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import javax.servlet.http.HttpServlet;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HBaseServerBase;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.PleaseRestartMasterException;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ServerTask;
import org.apache.hadoop.hbase.ServerTaskBuilder;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.MasterStoppedException;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
import org.apache.hadoop.hbase.http.HttpServer;
import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
import org.apache.hadoop.hbase.master.balancer.BalancerChore;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerStateStore;
import org.apache.hadoop.hbase.master.balancer.MaintenanceLoadBalancer;
import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
import org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore;
import org.apache.hadoop.hbase.master.hbck.HbckChore;
import org.apache.hadoop.hbase.master.http.MasterDumpServlet;
import org.apache.hadoop.hbase.master.http.MasterRedirectServlet;
import org.apache.hadoop.hbase.master.http.MasterStatusServlet;
import org.apache.hadoop.hbase.master.http.api_v1.ResourceConfigFactory;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.migrate.RollingUpgradeChore;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerStateStore;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.InitMetaProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
import org.apache.hadoop.hbase.master.region.MasterRegion;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
import org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure;
import org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
import org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
import org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
import org.apache.hadoop.hbase.master.replication.MigrateReplicationQueueFromZkToTableProcedure;
import org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
import org.apache.hadoop.hbase.master.replication.ReplicationPeerModificationStateStore;
import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager;
import org.apache.hadoop.hbase.master.replication.TransitPeerSyncReplicationStateProcedure;
import org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
import org.apache.hadoop.hbase.master.slowlog.SlowLogMasterService;
import org.apache.hadoop.hbase.master.snapshot.SnapshotCleanupStateStore;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator;
import org.apache.hadoop.hbase.master.zksyncer.MasterAddressSyncer;
import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer;
import org.apache.hadoop.hbase.mob.MobFileCleanerChore;
import org.apache.hadoop.hbase.mob.MobFileCompactionChore;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskGroup;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
import org.apache.hadoop.hbase.procedure2.LockedResource;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
import org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.quotas.MasterQuotasObserver;
import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
import org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
import org.apache.hadoop.hbase.regionserver.storefiletracker.ModifyColumnFamilyStoreFileTrackerProcedure;
import org.apache.hadoop.hbase.regionserver.storefiletracker.ModifyTableStoreFileTrackerProcedure;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.replication.ReplicationUtils;
import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.replication.ZKReplicationQueueStorageForMigration;
import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
import org.apache.hadoop.hbase.replication.master.ReplicationSinkTrackerTableCreator;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp.ReplicationSyncUpToolInfo;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint;
import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.SecurityConstants;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.CoprocessorConfigurationUtil;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FutureUtils;
import org.apache.hadoop.hbase.util.HBaseFsck;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.IdLock;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JsonMapper;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;
import org.apache.hadoop.hbase.util.TableDescriptorChecker;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams;
import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
import org.apache.hbase.thirdparty.com.google.gson.JsonParseException;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors;
import org.apache.hbase.thirdparty.com.google.protobuf.Service;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector;
import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder;
import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext;
import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig;
import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer;

import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;

/**
 * HMaster is the "master server" for HBase. An HBase cluster has one active master. If many
 * masters are started, all compete. Whichever wins goes on to run the cluster. All others park
 * themselves in their constructor until master or cluster shutdown or until the active master
 * loses its lease in zookeeper. Thereafter, all running masters jostle to take over the master
 * role.
 * <p/>
 * The Master can be asked to shut down the cluster. See {@link #shutdown()}. In this case it will
 * tell all regionservers to go down and then wait on them all reporting in that they are down.
 * This master will then shut itself down.
 * <p/>
 * You can also shut down just this master. Call {@link #stopMaster()}.
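 * <p/>
 * A minimal programmatic sketch of the lifecycle (illustration only: real deployments go through
 * {@code HMasterCommandLine} via {@code bin/hbase master start}, checked exceptions are omitted,
 * and the Thread-based start/stop behavior is inherited from {@code HBaseServerBase}):
 *
 * <pre>{@code
 * Configuration conf = HBaseConfiguration.create();
 * HMaster master = new HMaster(conf); // competes for active mastership as described above
 * master.start();                     // runs the main loop in run()
 * // ... later, stop just this master, or ask the whole cluster to go down:
 * master.stopMaster(); // or master.shutdown();
 * }</pre>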
 * @see org.apache.zookeeper.Watcher
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class HMaster extends HBaseServerBase<MasterRpcServices> implements MasterServices {

  private static final Logger LOG = LoggerFactory.getLogger(HMaster.class);

  // MASTER is the name of the webapp and the attribute name used to stuff this
  // instance into a web context !! AND OTHER PLACES !!
  public static final String MASTER = "master";

  // Manager and zk listener for master election
  private final ActiveMasterManager activeMasterManager;
  // Region server tracker
  private final RegionServerTracker regionServerTracker;
  // Draining region server tracker
  private DrainingServerTracker drainingServerTracker;
  // Tracker for load balancer state
  LoadBalancerStateStore loadBalancerStateStore;
  // Tracker for meta location, if any client ZK quorum specified
  private MetaLocationSyncer metaLocationSyncer;
  // Tracker for active master location, if any client ZK quorum specified
  @InterfaceAudience.Private
  MasterAddressSyncer masterAddressSyncer;
  // Tracker for auto snapshot cleanup state
  SnapshotCleanupStateStore snapshotCleanupStateStore;

  // Tracker for split and merge state
  private SplitOrMergeStateStore splitOrMergeStateStore;

  private ClusterSchemaService clusterSchemaService;

  public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS =
    "hbase.master.wait.on.service.seconds";
  public static final int DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS = 5 * 60;

  public static final String HBASE_MASTER_CLEANER_INTERVAL = "hbase.master.cleaner.interval";

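  // The default cleaner interval below is expressed in milliseconds: 600,000 ms, i.e. ten minutes.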
  public static final int DEFAULT_HBASE_MASTER_CLEANER_INTERVAL = 600 * 1000;

  private String clusterId;

  // Metrics for the HMaster
  final MetricsMaster metricsMaster;
  // file system manager for the master FS operations
  private MasterFileSystem fileSystemManager;
  private MasterWalManager walManager;

  // Manager for procedure-based WAL splitting; can be null if the current mode
  // is zk-based WAL splitting. SplitWALManager will replace SplitLogManager
  // and MasterWalManager, which means the zk-based WAL splitting code will be
  // useless after we switch to the procedure-based one. Our eventual goal
  // is to remove all the zk-based WAL splitting code.
  private SplitWALManager splitWALManager;

  // server manager to deal with region server info
  private volatile ServerManager serverManager;

  // manager of assignment nodes in zookeeper
  private AssignmentManager assignmentManager;

  private RSGroupInfoManager rsGroupInfoManager;

  // manager of replication
  private ReplicationPeerManager replicationPeerManager;

  private SyncReplicationReplayWALManager syncReplicationReplayWALManager;

  // buffer for "fatal error" notices from region servers
  // in the cluster. This is only used for assisting
  // operations/debugging.
  MemoryBoundedLogMessageBuffer rsFatals;

  // flag set after we become the active master (used for testing)
  private volatile boolean activeMaster = false;

  // flag set after we complete initialization once active
  private final ProcedureEvent<?> initialized = new ProcedureEvent<>("master initialized");

  // flag set after master services are started,
  // though initialization may not have completed yet.
  volatile boolean serviceStarted = false;

  // Maximum time we should run balancer for
  private final int maxBalancingTime;
  // Maximum percent of regions in transition when balancing
  private final double maxRitPercent;

  private final LockManager lockManager = new LockManager(this);

  private RSGroupBasedLoadBalancer balancer;
  private BalancerChore balancerChore;
  private static boolean disableBalancerChoreForTest = false;
  private RegionNormalizerManager regionNormalizerManager;
  private ClusterStatusChore clusterStatusChore;
  private ClusterStatusPublisher clusterStatusPublisherChore = null;
  private SnapshotCleanerChore snapshotCleanerChore = null;

  private HbckChore hbckChore;
  CatalogJanitor catalogJanitorChore;
  // Threadpool for scanning the Old logs directory, used by the LogCleaner
  private DirScanPool logCleanerPool;
  private LogCleaner logCleaner;
  // HFile cleaners for the custom hfile archive paths and the default archive path
  // The archive path cleaner is the first element
  private List<HFileCleaner> hfileCleaners = new ArrayList<>();
  // The hfile cleaner paths, including custom paths and the default archive path
  private List<Path> hfileCleanerPaths = new ArrayList<>();
  // The shared hfile cleaner pool for the custom archive paths
  private DirScanPool sharedHFileCleanerPool;
  // The exclusive hfile cleaner pool for scanning the archive directory
  private DirScanPool exclusiveHFileCleanerPool;
  private ReplicationBarrierCleaner replicationBarrierCleaner;
  private MobFileCleanerChore mobFileCleanerChore;
  private MobFileCompactionChore mobFileCompactionChore;
  private RollingUpgradeChore rollingUpgradeChore;
  // used to synchronize the mobCompactionStates
  private final IdLock mobCompactionLock = new IdLock();
  // Tracks mob compactions per table: the key is the table name, the value is
  // the number of compactions currently running in that table.
  private Map<TableName, AtomicInteger> mobCompactionStates = Maps.newConcurrentMap();

  volatile MasterCoprocessorHost cpHost;

  private final boolean preLoadTableDescriptors;

  // Timestamp for when this hmaster became the active master
  private long masterActiveTime;

  // Timestamp for when this HMaster finished becoming the active master
  private long masterFinishedInitializationTime;

  Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();

  // monitor for snapshot of hbase tables
  SnapshotManager snapshotManager;
  // monitor for distributed procedures
  private MasterProcedureManagerHost mpmHost;

  private RegionsRecoveryChore regionsRecoveryChore = null;

  private RegionsRecoveryConfigManager regionsRecoveryConfigManager = null;
  // it is assigned after the 'initialized' guard is set to true, so it should be volatile
  private volatile MasterQuotaManager quotaManager;
  private SpaceQuotaSnapshotNotifier spaceQuotaSnapshotNotifier;
  private QuotaObserverChore quotaObserverChore;
  private SnapshotQuotaObserverChore snapshotQuotaChore;

  private ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
  private ProcedureStore procedureStore;

  // the master local storage to store procedure data, meta region locations, etc.
  private MasterRegion masterRegion;

  private RegionServerList rsListStorage;

  // handle table states
  private TableStateManager tableStateManager;

  /** jetty server for master to redirect requests to regionserver infoServer */
  private Server masterJettyServer;

  // Determine if we should do normal startup or minimal "single-user" mode with no region
  // servers and no user tables. Useful for repair and recovery of hbase:meta.
  private final boolean maintenanceMode;
  static final String MAINTENANCE_MODE = "hbase.master.maintenance_mode";
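  // The constructor below checks both the Configuration and the JVM system properties for
  // MAINTENANCE_MODE, so maintenance mode can be enabled either via hbase-site.xml or with
  // -Dhbase.master.maintenance_mode=true on the command line.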

  // the in-process region server that carries system regions in maintenance mode
  private JVMClusterUtil.RegionServerThread maintenanceRegionServer;

  // Cached clusterId on standby masters to serve clusterId requests from clients.
  private final CachedClusterId cachedClusterId;

  public static final String WARMUP_BEFORE_MOVE = "hbase.master.warmup.before.move";
  private static final boolean DEFAULT_WARMUP_BEFORE_MOVE = true;

  private TaskGroup startupTaskGroup;

  /**
   * Store whether we allow replication peer modification operations.
   */
  private ReplicationPeerModificationStateStore replicationPeerModificationStateStore;

  /**
   * Initializes the HMaster. The steps are as follows:
   * <p>
   * <ol>
   * <li>Initialize the local HRegionServer
   * <li>Start the ActiveMasterManager.
   * </ol>
   * <p>
   * Remaining steps of initialization occur in {@link #finishActiveMasterInitialization()} after
   * the master becomes the active one.
   */
  public HMaster(final Configuration conf) throws IOException {
    super(conf, "Master");
    final Span span = TraceUtil.createSpan("HMaster.cxtor");
    try (Scope ignored = span.makeCurrent()) {
      if (conf.getBoolean(MAINTENANCE_MODE, false)) {
        LOG.info("Detected {}=true via configuration.", MAINTENANCE_MODE);
        maintenanceMode = true;
      } else if (Boolean.getBoolean(MAINTENANCE_MODE)) {
        LOG.info("Detected {}=true via system properties.", MAINTENANCE_MODE);
        maintenanceMode = true;
      } else {
        maintenanceMode = false;
      }
      this.rsFatals = new MemoryBoundedLogMessageBuffer(
        conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));
      LOG.info("hbase.rootdir={}, hbase.cluster.distributed={}",
        CommonFSUtils.getRootDir(this.conf),
        this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));

      // Disable usage of meta replicas in the master
      this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);

      decorateMasterConfiguration(this.conf);

      // Hack! Maps DFSClient => Master for logs. HDFS made this
      // config param for task trackers, but we can piggyback off of it.
      if (this.conf.get("mapreduce.task.attempt.id") == null) {
        this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
      }

      this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));

      // preload table descriptors at startup
      this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);

      this.maxBalancingTime = getMaxBalancingTime();
      this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT,
        HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT);

      // Do we publish the status?
      boolean shouldPublish =
        conf.getBoolean(HConstants.STATUS_PUBLISHED, HConstants.STATUS_PUBLISHED_DEFAULT);
      Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
        conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.Publisher.class);

      if (shouldPublish) {
        if (publisherClass == null) {
          LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but "
            + ClusterStatusPublisher.STATUS_PUBLISHER_CLASS
            + " is not set - not publishing status");
        } else {
          clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
          LOG.debug("Created {}", this.clusterStatusPublisherChore);
          getChoreService().scheduleChore(clusterStatusPublisherChore);
        }
      }
      this.activeMasterManager = createActiveMasterManager(zooKeeper, serverName, this);
      cachedClusterId = new CachedClusterId(this, conf);
      this.regionServerTracker = new RegionServerTracker(zooKeeper, this);
      this.rpcServices.start(zooKeeper);
      span.setStatus(StatusCode.OK);
    } catch (Throwable t) {
      // Make sure we log the exception. HMaster is often started via reflection and the
      // cause of failed startup is lost.
      TraceUtil.setError(span, t);
      LOG.error("Failed construction of Master", t);
      throw t;
    } finally {
      span.end();
    }
  }

  /**
   * Protected so that custom implementations in tests can override the default
   * ActiveMasterManager implementation.
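   * <p>
   * A sketch of such an override ({@code MockActiveMasterManager} here is a hypothetical test
   * double, not a class shipped with HBase):
   *
   * <pre>{@code
   * HMaster master = new HMaster(conf) {
   *   @Override
   *   protected ActiveMasterManager createActiveMasterManager(ZKWatcher zk, ServerName sn,
   *     org.apache.hadoop.hbase.Server server) throws InterruptedIOException {
   *     return new MockActiveMasterManager(zk, sn, server);
   *   }
   * };
   * }</pre>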
   */
  protected ActiveMasterManager createActiveMasterManager(ZKWatcher zk, ServerName sn,
    org.apache.hadoop.hbase.Server server) throws InterruptedIOException {
    return new ActiveMasterManager(zk, sn, server);
  }

  @Override
  protected String getUseThisHostnameInstead(Configuration conf) {
    return conf.get(MASTER_HOSTNAME_KEY);
  }

  private void registerConfigurationObservers() {
    configurationManager.registerObserver(this.rpcServices);
    configurationManager.registerObserver(this);
  }

  // Main run loop. Calls through to the regionserver run loop AFTER becoming active Master; will
  // block in here until then.
  @Override
  public void run() {
    try {
      installShutdownHook();
      registerConfigurationObservers();
      Threads.setDaemonThreadRunning(new Thread(TraceUtil.tracedRunnable(() -> {
        try {
          int infoPort = putUpJettyServer();
          startActiveMasterManager(infoPort);
        } catch (Throwable t) {
          // Make sure we log the exception.
          String error = "Failed to become Active Master";
          LOG.error(error, t);
          // Abort should have been called already.
          if (!isAborted()) {
            abort(error, t);
          }
        }
      }, "HMaster.becomeActiveMaster")), getName() + ":becomeActiveMaster");
      while (!isStopped() && !isAborted()) {
        sleeper.sleep();
      }
      final Span span = TraceUtil.createSpan("HMaster exiting main loop");
      try (Scope ignored = span.makeCurrent()) {
        stopInfoServer();
        closeClusterConnection();
        stopServiceThreads();
        if (this.rpcServices != null) {
          this.rpcServices.stop();
        }
        closeZooKeeper();
        closeTableDescriptors();
        span.setStatus(StatusCode.OK);
      } finally {
        span.end();
      }
    } finally {
      if (this.clusterSchemaService != null) {
        // If on way out, then we are no longer active master.
        this.clusterSchemaService.stopAsync();
        try {
          this.clusterSchemaService
            .awaitTerminated(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,
              DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);
        } catch (TimeoutException te) {
          LOG.warn("Failed shutdown of clusterSchemaService", te);
        }
      }
      this.activeMaster = false;
    }
  }

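  // The redirect server below is governed by three settings: "hbase.master.infoserver.redirect"
  // toggles it, "hbase.master.info.port.orig" carries the port the master's info server was
  // originally configured with, and "hbase.master.info.bindAddress" selects the bind address.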
  // Returns the actual infoPort; -1 means the info server is disabled.
  private int putUpJettyServer() throws IOException {
    if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
      return -1;
    }
    final int infoPort =
      conf.getInt("hbase.master.info.port.orig", HConstants.DEFAULT_MASTER_INFOPORT);
    // -1 is for disabling info server, so no redirecting
    if (infoPort < 0 || infoServer == null) {
      return -1;
    }
    if (infoPort == infoServer.getPort()) {
      // server is already running
      return infoPort;
    }
    final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
      String msg = "Failed to start redirecting jetty server. Address " + addr
        + " does not belong to this host. Correct configuration parameter: "
        + "hbase.master.info.bindAddress";
      LOG.error(msg);
      throw new IOException(msg);
    }

    // TODO I'm pretty sure we could just add another binding to the InfoServer run by
    // the RegionServer and have it run the RedirectServlet instead of standing up
    // a second entire stack here.
    masterJettyServer = new Server();
    final ServerConnector connector = new ServerConnector(masterJettyServer);
    connector.setHost(addr);
    connector.setPort(infoPort);
    masterJettyServer.addConnector(connector);
    masterJettyServer.setStopAtShutdown(true);
    masterJettyServer.setHandler(HttpServer.buildGzipHandler(masterJettyServer.getHandler()));

    final String redirectHostname =
      StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead;

    final MasterRedirectServlet redirect = new MasterRedirectServlet(infoServer, redirectHostname);
    final WebAppContext context =
      new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);
    context.addServlet(new ServletHolder(redirect), "/*");
    context.setServer(masterJettyServer);

    try {
      masterJettyServer.start();
    } catch (Exception e) {
      throw new IOException("Failed to start redirecting jetty server", e);
    }
    return connector.getLocalPort();
  }

  /**
   * For compatibility: if login with the regionserver credentials fails, try the master ones.
   */
  @Override
  protected void login(UserProvider user, String host) throws IOException {
    try {
      user.login(SecurityConstants.REGIONSERVER_KRB_KEYTAB_FILE,
        SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, host);
    } catch (IOException ie) {
      user.login(SecurityConstants.MASTER_KRB_KEYTAB_FILE, SecurityConstants.MASTER_KRB_PRINCIPAL,
        host);
    }
  }

  public MasterRpcServices getMasterRpcServices() {
    return rpcServices;
  }

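  /**
   * Turn the load balancer on or off; delegates to {@code MasterRpcServices#switchBalancer} with
   * {@code BalanceSwitchMode.ASYNC}.
   * @param b the new balancer switch value
   * @return the previous balancer switch value, as reported by the underlying RPC implementation
   */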
  public boolean balanceSwitch(final boolean b) throws IOException {
    return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
  }

  @Override
  protected String getProcessName() {
    return MASTER;
  }

  @Override
  protected boolean canCreateBaseZNode() {
    return true;
  }

  @Override
  protected boolean canUpdateTableDescriptor() {
    return true;
  }

  @Override
  protected boolean cacheTableDescriptor() {
    return true;
  }

  protected MasterRpcServices createRpcServices() throws IOException {
    return new MasterRpcServices(this);
  }

  @Override
  protected void configureInfoServer(InfoServer infoServer) {
    infoServer.addUnprivilegedServlet("master-status", "/master-status", MasterStatusServlet.class);
    infoServer.addUnprivilegedServlet("api_v1", "/api/v1/*", buildApiV1Servlet());

    infoServer.setAttribute(MASTER, this);
  }

  private ServletHolder buildApiV1Servlet() {
    final ResourceConfig config = ResourceConfigFactory.createResourceConfig(conf, this);
    return new ServletHolder(new ServletContainer(config));
  }

  @Override
  protected Class<? extends HttpServlet> getDumpServlet() {
    return MasterDumpServlet.class;
  }

  @Override
  public MetricsMaster getMasterMetrics() {
    return metricsMaster;
  }

  /**
   * Initialize all ZK based system trackers. This does not include {@link RegionServerTracker};
   * that should have already been initialized along with {@link ServerManager}.
   */
  private void initializeZKBasedSystemTrackers()
    throws IOException, KeeperException, ReplicationException, DeserializationException {
    if (maintenanceMode) {
      // in maintenance mode, always use MaintenanceLoadBalancer.
      conf.unset(LoadBalancer.HBASE_RSGROUP_LOADBALANCER_CLASS);
      conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, MaintenanceLoadBalancer.class,
        LoadBalancer.class);
    }
    this.balancer = new RSGroupBasedLoadBalancer();
    this.loadBalancerStateStore = new LoadBalancerStateStore(masterRegion, zooKeeper);

    this.regionNormalizerManager =
      RegionNormalizerFactory.createNormalizerManager(conf, masterRegion, zooKeeper, this);
    this.configurationManager.registerObserver(regionNormalizerManager);
    this.regionNormalizerManager.start();

    this.splitOrMergeStateStore = new SplitOrMergeStateStore(masterRegion, zooKeeper, conf);

    // This is for backwards compatibility. We no longer need the coprocessor for rs group, but if
    // the user wants to load it, we need to enable rs group.
    String[] cpClasses = conf.getStrings(MasterCoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
    if (cpClasses != null) {
      for (String cpClass : cpClasses) {
        if (RSGroupAdminEndpoint.class.getName().equals(cpClass)) {
          RSGroupUtil.enableRSGroup(conf);
          break;
        }
      }
    }
    this.rsGroupInfoManager = RSGroupInfoManager.create(this);

    this.replicationPeerManager = ReplicationPeerManager.create(this, clusterId);
    this.configurationManager.registerObserver(replicationPeerManager);
    this.replicationPeerModificationStateStore =
      new ReplicationPeerModificationStateStore(masterRegion);

    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);
    this.drainingServerTracker.start();

    this.snapshotCleanupStateStore = new SnapshotCleanupStateStore(masterRegion, zooKeeper);

    String clientQuorumServers = conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM);
    boolean clientZkObserverMode = conf.getBoolean(HConstants.CLIENT_ZOOKEEPER_OBSERVER_MODE,
      HConstants.DEFAULT_CLIENT_ZOOKEEPER_OBSERVER_MODE);
    if (clientQuorumServers != null && !clientZkObserverMode) {
      // we need to take care of the ZK information synchronization
      // if the given client ZK nodes are not observer nodes
      ZKWatcher clientZkWatcher = new ZKWatcher(conf,
        getProcessName() + ":" + rpcServices.getSocketAddress().getPort() + "-clientZK", this,
        false, true);
      this.metaLocationSyncer = new MetaLocationSyncer(zooKeeper, clientZkWatcher, this);
      this.metaLocationSyncer.start();
      this.masterAddressSyncer = new MasterAddressSyncer(zooKeeper, clientZkWatcher, this);
      this.masterAddressSyncer.start();
      // setting the cluster id is a one-time effort
      ZKClusterId.setClusterId(clientZkWatcher, fileSystemManager.getClusterId());
    }

    // Set the cluster as up. If new RSs, they'll be waiting on this before
    // going ahead with their startup.
    boolean wasUp = this.clusterStatusTracker.isClusterUp();
    if (!wasUp) this.clusterStatusTracker.setClusterUp();

    LOG.info("Active/primary master=" + this.serverName + ", sessionid=0x"
      + Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId())
      + ", setting cluster-up flag (Was=" + wasUp + ")");

    // create/initialize the snapshot manager and other procedure managers
    this.snapshotManager = new SnapshotManager();
    this.mpmHost = new MasterProcedureManagerHost();
    this.mpmHost.register(this.snapshotManager);
    this.mpmHost.register(new MasterFlushTableProcedureManager());
    this.mpmHost.loadProcedures(conf);
    this.mpmHost.initialize(this, this.metricsMaster);
  }

  // Will be overridden in tests to inject a customized AssignmentManager
  @InterfaceAudience.Private
  protected AssignmentManager createAssignmentManager(MasterServices master,
    MasterRegion masterRegion) {
    return new AssignmentManager(master, masterRegion);
  }

  private void tryMigrateMetaLocationsFromZooKeeper() throws IOException, KeeperException {
    // try to migrate data from zookeeper
    try (ResultScanner scanner =
      masterRegion.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) {
      if (scanner.next() != null) {
        // Notice that all replicas for a region are in the same row, so the migration can be
        // done within a single-row put, which means if we have data in the catalog family then
        // we can be sure that the migration is done.
        LOG.info("The {} family in master local region already has data in it, skip migrating...",
          HConstants.CATALOG_FAMILY_STR);
        return;
      }
    }
    // start migrating
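    // Build a single Put keyed by the first meta region's row: each replica znode found on
    // zookeeper contributes its RegionInfo, its location (server name, if any) and its state
    // column, all qualified by replica id, mirroring the layout used by RegionStateStore.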
    byte[] row = CatalogFamilyFormat.getMetaKeyForRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
    Put put = new Put(row);
    List<String> metaReplicaNodes = zooKeeper.getMetaReplicaNodes();
    StringBuilder info = new StringBuilder("Migrating meta locations:");
    for (String metaReplicaNode : metaReplicaNodes) {
      int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZNode(metaReplicaNode);
      RegionState state = MetaTableLocator.getMetaRegionState(zooKeeper, replicaId);
      info.append(" ").append(state);
      put.setTimestamp(state.getStamp());
      MetaTableAccessor.addRegionInfo(put, state.getRegion());
      if (state.getServerName() != null) {
        MetaTableAccessor.addLocation(put, state.getServerName(), HConstants.NO_SEQNUM, replicaId);
      }
      put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
        .setFamily(HConstants.CATALOG_FAMILY)
        .setQualifier(RegionStateStore.getStateColumn(replicaId)).setTimestamp(put.getTimestamp())
        .setType(Cell.Type.Put).setValue(Bytes.toBytes(state.getState().name())).build());
    }
    if (!put.isEmpty()) {
      LOG.info(info.toString());
      masterRegion.update(r -> r.put(put));
    } else {
      LOG.info("No meta location available on zookeeper, skip migrating...");
    }
  }

  /**
   * Finish initialization of HMaster after becoming the primary master.
   * <p/>
   * The startup order is a bit complicated but very important, do not change it unless you know
   * what you are doing.
   * <ol>
   * <li>Initialize file system based components - file system manager, wal manager, table
   * descriptors, etc</li>
   * <li>Publish cluster id</li>
   * <li>Here comes the most complicated part - initialize server manager, assignment manager and
   * region server tracker
   * <ol type='i'>
   * <li>Create server manager</li>
   * <li>Create master local region</li>
   * <li>Create procedure executor, load the procedures, but do not start workers. We will start it
   * later after we finish scheduling SCPs to avoid scheduling duplicated SCPs for the same
   * server</li>
   * <li>Create assignment manager and start it, load the meta region state, but do not load data
   * from meta region</li>
   * <li>Start region server tracker, construct the online servers set and find out dead servers
   * and schedule SCP for them. The online servers will be constructed by scanning zk, and we will
   * also scan the wal directory and load from master local region to find out possible live
   * region servers, and the differences between these two sets are the dead servers</li>
   * </ol>
   * </li>
   * <li>If this is a new deploy, schedule an InitMetaProcedure to initialize meta</li>
   * <li>Start necessary service threads - balancer, catalog janitor, executor services, and also
   * the procedure executor, etc. Notice that the balancer must be created first as assignment
   * manager may use it when assigning regions.</li>
   * <li>Wait for meta to be initialized if necessary, start table state manager.</li>
   * <li>Wait for enough region servers to check in</li>
   * <li>Let assignment manager load data from meta and construct region states</li>
   * <li>Start all other things such as chore services, etc</li>
   * </ol>
   * <p/>
   * Notice that now we will not schedule a special procedure to make meta online (unless this is
   * the first deploy, where meta has not been created yet); we will rely on SCP to bring meta
   * online.
   */
  private void finishActiveMasterInitialization() throws IOException, InterruptedException,
    KeeperException, ReplicationException, DeserializationException {
    /*
     * We are active master now... go initialize components we need to run.
     */
    startupTaskGroup.addTask("Initializing Master file system");

    this.masterActiveTime = EnvironmentEdgeManager.currentTime();
    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.

    // always initialize the MemStoreLAB as we now use a region to store data in master, see
    // localStore.
    initializeMemStoreChunkCreator(null);
    this.fileSystemManager = new MasterFileSystem(conf);
    this.walManager = new MasterWalManager(this);

    // warm-up HTDs cache on master initialization
    if (preLoadTableDescriptors) {
      startupTaskGroup.addTask("Pre-loading table descriptors");
      this.tableDescriptors.getAll();
    }

    // Publish cluster ID; set it in Master too. The superclass RegionServer does this later but
    // only after it has checked in with the Master. At least a few tests ask Master for clusterId
    // before it has called its run method and before RegionServer has done the reportForDuty.
    ClusterId clusterId = fileSystemManager.getClusterId();
    startupTaskGroup.addTask("Publishing Cluster ID " + clusterId + " in ZooKeeper");
    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
    this.clusterId = clusterId.toString();

    // Precaution. Put in place the old hbck1 lock file to fence out old hbase1s running their
    // hbck1s against an hbase2 cluster; it could do damage. To skip this behavior, set
    // hbase.write.hbck1.lock.file to false.
    if (this.conf.getBoolean("hbase.write.hbck1.lock.file", true)) {
      Pair<Path, FSDataOutputStream> result = null;
      try {
        result = HBaseFsck.checkAndMarkRunningHbck(this.conf,
          HBaseFsck.createLockRetryCounterFactory(this.conf).create());
      } finally {
        if (result != null) {
          Closeables.close(result.getSecond(), true);
        }
      }
    }

    startupTaskGroup.addTask("Initialize ServerManager and schedule SCP for crashed servers");
    // The below two managers must be created before loading procedures, as they will be used
    // during loading.
    // initialize master local region
    masterRegion = MasterRegionFactory.create(this);
    rsListStorage = new MasterRegionServerList(masterRegion, this);

    this.serverManager = createServerManager(this, rsListStorage);
    this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);
    if (
      !conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)
    ) {
      this.splitWALManager = new SplitWALManager(this);
    }

    tryMigrateMetaLocationsFromZooKeeper();

    createProcedureExecutor();
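    // Group the procedures recovered from the procedure store by concrete class; the map is
    // consulted below to hand in-flight TransitRegionStateProcedures to the assignment manager
    // and the server names of in-flight ServerCrashProcedures to the region server tracker.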
    Map<Class<?>, List<Procedure<MasterProcedureEnv>>> procsByType = procedureExecutor
      .getActiveProceduresNoCopy().stream().collect(Collectors.groupingBy(p -> p.getClass()));

    // Create Assignment Manager
    this.assignmentManager = createAssignmentManager(this, masterRegion);
    this.assignmentManager.start();
    // TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as
    // completed, it could still be in the procedure list. This is a bit strange but is another
    // story, need to verify the implementation for ProcedureExecutor and ProcedureStore.
    List<TransitRegionStateProcedure> ritList =
      procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()).stream()
        .filter(p -> !p.isFinished()).map(p -> (TransitRegionStateProcedure) p)
        .collect(Collectors.toList());
    this.assignmentManager.setupRIT(ritList);

    // Start RegionServerTracker with listing of servers found with existing SCPs -- these should
    // be registered in the deadServers set -- and the servernames loaded from the WAL directory
    // and master local region that COULD BE 'alive' (we'll schedule SCPs for each and let SCP
    // figure it out).
    // We also pass dirs that are already 'splitting'... so we can do some checks down in tracker.
    // TODO: Generate the splitting and live Set in one pass instead of two as we currently do.
    this.regionServerTracker.upgrade(
      procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()
        .map(p -> (ServerCrashProcedure) p).map(p -> p.getServerName()).collect(Collectors.toSet()),
      Sets.union(rsListStorage.getAll(), walManager.getLiveServersFromWALDir()),
      walManager.getSplittingServersFromWALDir());
    // This manager must be accessed AFTER hbase:meta is confirmed online.
    this.tableStateManager = new TableStateManager(this);

1024    startupTaskGroup.addTask("Initializing ZK system trackers");
1025    initializeZKBasedSystemTrackers();
1026    startupTaskGroup.addTask("Loading last flushed sequence id of regions");
1027    try {
1028      this.serverManager.loadLastFlushedSequenceIds();
1029    } catch (IOException e) {
1030      LOG.info("Failed to load last flushed sequence id of regions" + " from file system", e);
1031    }
1032    // Set ourselves as active Master now our claim has succeeded up in zk.
1033    this.activeMaster = true;
1034
1035    // Start the Zombie master detector after setting master as active, see HBASE-21535
1036    Thread zombieDetector = new Thread(new MasterInitializationMonitor(this),
1037      "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime());
1038    zombieDetector.setDaemon(true);
1039    zombieDetector.start();
1040
1041    if (!maintenanceMode) {
1042      startupTaskGroup.addTask("Initializing master coprocessors");
1043      setQuotasObserver(conf);
1044      initializeCoprocessorHost(conf);
1045    } else {
1046      // start an in process region server for carrying system regions
1047      maintenanceRegionServer =
1048        JVMClusterUtil.createRegionServerThread(getConfiguration(), HRegionServer.class, 0);
1049      maintenanceRegionServer.start();
1050    }
1051
1052    // Checking if meta needs initializing.
1053    startupTaskGroup.addTask("Initializing meta table if this is a new deploy");
1054    InitMetaProcedure initMetaProc = null;
1055    // Print out state of hbase:meta on startup; helps debugging.
1056    if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) {
1057      Optional<InitMetaProcedure> optProc = procedureExecutor.getProcedures().stream()
1058        .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny();
1059      initMetaProc = optProc.orElseGet(() -> {
1060        // schedule an init meta procedure if meta has not been deployed yet
1061        InitMetaProcedure temp = new InitMetaProcedure();
1062        procedureExecutor.submitProcedure(temp);
1063        return temp;
1064      });
1065    }
1066
1067    // initialize load balancer
1068    this.balancer.setMasterServices(this);
1069    this.balancer.initialize();
1070    this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
1071
1072    // try migrate replication data
1073    ZKReplicationQueueStorageForMigration oldReplicationQueueStorage =
1074      new ZKReplicationQueueStorageForMigration(zooKeeper, conf);
1075    // check whether there are something to migrate and we haven't scheduled a migration procedure
1076    // yet
1077    if (
1078      oldReplicationQueueStorage.hasData() && procedureExecutor.getProcedures().stream()
1079        .allMatch(p -> !(p instanceof MigrateReplicationQueueFromZkToTableProcedure))
1080    ) {
1081      procedureExecutor.submitProcedure(new MigrateReplicationQueueFromZkToTableProcedure());
1082    }
1083    // start up all service threads.
1084    startupTaskGroup.addTask("Initializing master service threads");
1085    startServiceThreads();
1086    // wait meta to be initialized after we start procedure executor
1087    if (initMetaProc != null) {
1088      initMetaProc.await();
1089    }
1090    // Wake up this server to check in
1091    sleeper.skipSleepCycle();
1092
1093    // Wait for region servers to report in.
1094    // With this as part of master initialization, it precludes our being able to start a single
1095    // server that is both Master and RegionServer. Needs more thought. TODO.
1096    String statusStr = "Wait for region servers to report in";
1097    MonitoredTask waitRegionServer = startupTaskGroup.addTask(statusStr);
1098    LOG.info(Objects.toString(waitRegionServer));
1099    waitForRegionServers(waitRegionServer);
1100
1101    // Check if master is shutting down because issue initializing regionservers or balancer.
1102    if (isStopped()) {
1103      return;
1104    }
1105
1106    startupTaskGroup.addTask("Starting assignment manager");
1107    // FIRST HBASE:META READ!!!!
1108    // The below cannot make progress w/o hbase:meta being online.
1109    // This is the FIRST attempt at going to hbase:meta. Meta on-lining is going on in background
1110    // as procedures run -- in particular SCPs for crashed servers... One should put up hbase:meta
1111    // if it is down. It may take a while to come online. So, wait here until meta if for sure
1112    // available. That's what waitForMetaOnline does.
1113    if (!waitForMetaOnline()) {
1114      return;
1115    }
1116
1117    TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME);
1118    final ColumnFamilyDescriptor tableFamilyDesc =
1119      metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY);
1120    final ColumnFamilyDescriptor replBarrierFamilyDesc =
1121      metaDescriptor.getColumnFamily(HConstants.REPLICATION_BARRIER_FAMILY);
1122
1123    this.assignmentManager.joinCluster();
1124    // The below depends on hbase:meta being online.
1125    this.assignmentManager.processOfflineRegions();
1126    // this must be called after the above processOfflineRegions to prevent race
1127    this.assignmentManager.wakeMetaLoadedEvent();
1128
    // This is for migrating from a version without HBASE-25099, and also for honoring the
    // configuration first.
1131    if (conf.get(HConstants.META_REPLICAS_NUM) != null) {
1132      int replicasNumInConf =
1133        conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM);
1134      TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME);
1135      if (metaDesc.getRegionReplication() != replicasNumInConf) {
1136        // it is possible that we already have some replicas before upgrading, so we must set the
1137        // region replication number in meta TableDescriptor directly first, without creating a
1138        // ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas.
1139        int existingReplicasCount =
1140          assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size();
1141        if (existingReplicasCount > metaDesc.getRegionReplication()) {
1142          LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)"
1143            + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount);
1144          metaDesc = TableDescriptorBuilder.newBuilder(metaDesc)
1145            .setRegionReplication(existingReplicasCount).build();
1146          tableDescriptors.update(metaDesc);
1147        }
1148        // check again, and issue a ModifyTableProcedure if needed
1149        if (metaDesc.getRegionReplication() != replicasNumInConf) {
1150          LOG.info(
1151            "The {} config is {} while the replica count in TableDescriptor is {}"
1152              + " for hbase:meta, altering...",
1153            HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication());
1154          procedureExecutor.submitProcedure(new ModifyTableProcedure(
1155            procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc)
1156              .setRegionReplication(replicasNumInConf).build(),
1157            null, metaDesc, false));
1158        }
1159      }
1160    }
1161    // Initialize after meta is up as below scans meta
1162    FavoredNodesManager fnm = getFavoredNodesManager();
1163    if (fnm != null) {
1164      fnm.initializeFromMeta();
1165    }
1166
1167    // set cluster status again after user regions are assigned
1168    this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
1169
1170    // Start balancer and meta catalog janitor after meta and regions have been assigned.
1171    startupTaskGroup.addTask("Starting balancer and catalog janitor");
1172    this.clusterStatusChore = new ClusterStatusChore(this, balancer);
1173    getChoreService().scheduleChore(clusterStatusChore);
1174    this.balancerChore = new BalancerChore(this);
1175    if (!disableBalancerChoreForTest) {
1176      getChoreService().scheduleChore(balancerChore);
1177    }
1178    if (regionNormalizerManager != null) {
1179      getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore());
1180    }
1181    this.catalogJanitorChore = new CatalogJanitor(this);
1182    getChoreService().scheduleChore(catalogJanitorChore);
1183    this.hbckChore = new HbckChore(this);
1184    getChoreService().scheduleChore(hbckChore);
1185    this.serverManager.startChore();
1186
    // Only for rolling upgrades, where we need to migrate the data in the namespace table to the
    // meta table.
1188    if (!waitForNamespaceOnline()) {
1189      return;
1190    }
1191    startupTaskGroup.addTask("Starting cluster schema service");
1192    try {
1193      initClusterSchemaService();
1194    } catch (IllegalStateException e) {
1195      if (
1196        e.getCause() != null && e.getCause() instanceof NoSuchColumnFamilyException
1197          && tableFamilyDesc == null && replBarrierFamilyDesc == null
1198      ) {
1199        LOG.info("ClusterSchema service could not be initialized. This is "
1200          + "expected during HBase 1 to 2 upgrade", e);
1201      } else {
1202        throw e;
1203      }
1204    }
1205
1206    if (this.cpHost != null) {
1207      try {
1208        this.cpHost.preMasterInitialization();
1209      } catch (IOException e) {
1210        LOG.error("Coprocessor preMasterInitialization() hook failed", e);
1211      }
1212    }
1213
1214    LOG.info(String.format("Master has completed initialization %.3fsec",
1215      (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f));
1216    this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime();
1217    configurationManager.registerObserver(this.balancer);
1218    configurationManager.registerObserver(this.logCleanerPool);
1219    configurationManager.registerObserver(this.logCleaner);
1220    configurationManager.registerObserver(this.regionsRecoveryConfigManager);
1221    configurationManager.registerObserver(this.exclusiveHFileCleanerPool);
1222    if (this.sharedHFileCleanerPool != null) {
1223      configurationManager.registerObserver(this.sharedHFileCleanerPool);
1224    }
1225    if (this.hfileCleaners != null) {
1226      for (HFileCleaner cleaner : hfileCleaners) {
1227        configurationManager.registerObserver(cleaner);
1228      }
1229    }
1230    // Set master as 'initialized'.
1231    setInitialized(true);
1232    startupTaskGroup.markComplete("Initialization successful");
1233    MonitoredTask status =
1234      TaskMonitor.get().createStatus("Progress after master initialized", false, true);
1235
1236    if (tableFamilyDesc == null && replBarrierFamilyDesc == null) {
1237      // create missing CFs in meta table after master is set to 'initialized'.
1238      createMissingCFsInMetaDuringUpgrade(metaDescriptor);
1239
1240      // Throwing this Exception to abort active master is painful but this
1241      // seems the only way to add missing CFs in meta while upgrading from
1242      // HBase 1 to 2 (where HBase 2 has HBASE-23055 & HBASE-23782 checked-in).
1243      // So, why do we abort active master after adding missing CFs in meta?
1244      // When we reach here, we would have already bypassed NoSuchColumnFamilyException
1245      // in initClusterSchemaService(), meaning ClusterSchemaService is not
1246      // correctly initialized but we bypassed it. Similarly, we bypassed
      // tableStateManager.start() as well. Hence, it is better to abort the
      // current active master, because our main task - adding missing CFs
      // in the meta table - is done (possible only after the master state is
      // set as initialized), at the expense of bypassing a few important
      // tasks in the active master init routine. So now we abort the active
      // master so that the next active master init will not face any issues
      // and all mandatory services will be started during the master init
      // phase.
1254      throw new PleaseRestartMasterException("Aborting active master after missing"
1255        + " CFs are successfully added in meta. Subsequent active master "
1256        + "initialization should be uninterrupted");
1257    }
1258
1259    if (maintenanceMode) {
1260      LOG.info("Detected repair mode, skipping final initialization steps.");
1261      return;
1262    }
1263
1264    assignmentManager.checkIfShouldMoveSystemRegionAsync();
1265    status.setStatus("Starting quota manager");
1266    initQuotaManager();
1267    if (QuotaUtil.isQuotaEnabled(conf)) {
1268      // Create the quota snapshot notifier
1269      spaceQuotaSnapshotNotifier = createQuotaSnapshotNotifier();
1270      spaceQuotaSnapshotNotifier.initialize(getConnection());
1271      this.quotaObserverChore = new QuotaObserverChore(this, getMasterMetrics());
1272      // Start the chore to read the region FS space reports and act on them
1273      getChoreService().scheduleChore(quotaObserverChore);
1274
1275      this.snapshotQuotaChore = new SnapshotQuotaObserverChore(this, getMasterMetrics());
1276      // Start the chore to read snapshots and add their usage to table/NS quotas
1277      getChoreService().scheduleChore(snapshotQuotaChore);
1278    }
1279    final SlowLogMasterService slowLogMasterService = new SlowLogMasterService(conf, this);
1280    slowLogMasterService.init();
1281
1282    WALEventTrackerTableCreator.createIfNeededAndNotExists(conf, this);
1283    // Create REPLICATION.SINK_TRACKER table if needed.
1284    ReplicationSinkTrackerTableCreator.createIfNeededAndNotExists(conf, this);
1285
    // Clear dead servers that have the same host name and port as an online server, because we
    // do not remove a dead server with the same hostname and port as an RS that is trying to
    // check in before master initialization. See HBASE-5916.
1289    this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();
1290
1291    // Check and set the znode ACLs if needed in case we are overtaking a non-secure configuration
1292    status.setStatus("Checking ZNode ACLs");
1293    zooKeeper.checkAndSetZNodeAcls();
1294
1295    status.setStatus("Initializing MOB Cleaner");
1296    initMobCleaner();
1297
1298    // delete the stale data for replication sync up tool if necessary
1299    status.setStatus("Cleanup ReplicationSyncUp status if necessary");
1300    Path replicationSyncUpInfoFile =
1301      new Path(new Path(dataRootDir, ReplicationSyncUp.INFO_DIR), ReplicationSyncUp.INFO_FILE);
1302    if (dataFs.exists(replicationSyncUpInfoFile)) {
1303      // info file is available, load the timestamp and use it to clean up stale data in replication
1304      // queue storage.
1305      byte[] data;
1306      try (FSDataInputStream in = dataFs.open(replicationSyncUpInfoFile)) {
1307        data = ByteStreams.toByteArray(in);
1308      }
1309      ReplicationSyncUpToolInfo info = null;
1310      try {
1311        info = JsonMapper.fromJson(Bytes.toString(data), ReplicationSyncUpToolInfo.class);
1312      } catch (JsonParseException e) {
        // usually this should be a partial file, which means the ReplicationSyncUp tool did not
        // finish properly, so it is not a problem. Here we do not clean up the status since we do
        // not know why the tool did not finish properly, so let users clean the status up
        // manually
        LOG.warn("Failed to parse replication sync up info file, ignoring and continuing...", e);
1318      }
1319      if (info != null) {
1320        LOG.info("Remove last sequence ids and hfile references which are written before {}({})",
1321          info.getStartTimeMs(), DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.systemDefault())
1322            .format(Instant.ofEpochMilli(info.getStartTimeMs())));
1323        replicationPeerManager.getQueueStorage()
1324          .removeLastSequenceIdsAndHFileRefsBefore(info.getStartTimeMs());
1325        // delete the file after removing the stale data, so next time we do not need to do this
1326        // again.
1327        dataFs.delete(replicationSyncUpInfoFile, false);
1328      }
1329    }
1330    status.setStatus("Calling postStartMaster coprocessors");
1331    if (this.cpHost != null) {
1332      // don't let cp initialization errors kill the master
1333      try {
1334        this.cpHost.postStartMaster();
1335      } catch (IOException ioe) {
1336        LOG.error("Coprocessor postStartMaster() hook failed", ioe);
1337      }
1338    }
1339
1340    zombieDetector.interrupt();
1341
    /*
     * After the master has started up, let's do balancer post-startup initialization. Since this
     * runs in the activeMasterManager thread, it should be fine.
     */
1346    long start = EnvironmentEdgeManager.currentTime();
1347    this.balancer.postMasterStartupInitialize();
1348    if (LOG.isDebugEnabled()) {
1349      LOG.debug("Balancer post startup initialization complete, took "
1350        + ((EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds");
1351    }
1352
1353    this.rollingUpgradeChore = new RollingUpgradeChore(this);
1354    getChoreService().scheduleChore(rollingUpgradeChore);
1355    status.markComplete("Progress after master initialized complete");
1356  }
1357
1358  private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor)
1359    throws IOException {
1360    TableDescriptor newMetaDesc = TableDescriptorBuilder.newBuilder(metaDescriptor)
1361      .setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf))
1362      .setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()).build();
1363    long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, 0, 0, false);
1364    int tries = 30;
1365    while (
1366      !(getMasterProcedureExecutor().isFinished(pid)) && getMasterProcedureExecutor().isRunning()
1367        && tries > 0
1368    ) {
1369      try {
1370        Thread.sleep(1000);
1371      } catch (InterruptedException e) {
1372        throw new IOException("Wait interrupted", e);
1373      }
1374      tries--;
1375    }
1376    if (tries <= 0) {
1377      throw new HBaseIOException(
        "Failed to add table and rep_barrier CFs to meta within the given time.");
1379    } else {
1380      Procedure<?> result = getMasterProcedureExecutor().getResult(pid);
1381      if (result != null && result.isFailed()) {
1382        throw new IOException("Failed to add table and rep_barrier CFs to meta. "
1383          + MasterProcedureUtil.unwrapRemoteIOException(result));
1384      }
1385    }
1386  }
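
  /*
   * Note on the wait above: 30 iterations of a one-second sleep bound the wait at roughly 30
   * seconds. If the ModifyTableProcedure has not finished by then (and the executor is still
   * running), the upgrade path fails fast with an HBaseIOException rather than hanging master
   * startup indefinitely.
   */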
1387
1388  /**
1389   * Check hbase:meta is up and ready for reading. For use during Master startup only.
1390   * @return True if meta is UP and online and startup can progress. Otherwise, meta is not online
1391   *         and we will hold here until operator intervention.
1392   */
1393  @InterfaceAudience.Private
1394  public boolean waitForMetaOnline() {
1395    return isRegionOnline(RegionInfoBuilder.FIRST_META_REGIONINFO);
1396  }
1397
1398  /**
   * @return True if region is online and scannable, else false if an error or shutdown (otherwise
   *         we just block in here holding up all forward-progress).
1401   */
1402  private boolean isRegionOnline(RegionInfo ri) {
1403    RetryCounter rc = null;
1404    while (!isStopped()) {
1405      RegionState rs = this.assignmentManager.getRegionStates().getRegionState(ri);
1406      if (rs.isOpened()) {
1407        if (this.getServerManager().isServerOnline(rs.getServerName())) {
1408          return true;
1409        }
1410      }
1411      // Region is not OPEN.
1412      Optional<Procedure<MasterProcedureEnv>> optProc = this.procedureExecutor.getProcedures()
1413        .stream().filter(p -> p instanceof ServerCrashProcedure).findAny();
1414      // TODO: Add a page to refguide on how to do repair. Have this log message point to it.
1415      // Page will talk about loss of edits, how to schedule at least the meta WAL recovery, and
1416      // then how to assign including how to break region lock if one held.
1417      LOG.warn(
1418        "{} is NOT online; state={}; ServerCrashProcedures={}. Master startup cannot "
1419          + "progress, in holding-pattern until region onlined.",
1420        ri.getRegionNameAsString(), rs, optProc.isPresent());
1421      // Check once-a-minute.
1422      if (rc == null) {
1423        rc = new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60_000).create();
1424      }
1425      Threads.sleep(rc.getBackoffTimeAndIncrementAttempts());
1426    }
1427    return false;
1428  }
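
  /*
   * A minimal sketch of the backoff used above, assuming RetryCounterFactory's usual exponential
   * policy (the exact growth curve is an implementation detail of RetryCounter):
   *
   *   RetryCounter rc = new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60_000).create();
   *   // Early sleeps start around 1000 ms and grow until capped at 60,000 ms, so a prolonged
   *   // meta outage is re-checked roughly once a minute, effectively forever.
   *   Threads.sleep(rc.getBackoffTimeAndIncrementAttempts());
   */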
1429
1430  /**
   * Check hbase:namespace table is assigned. If not, startup will hang looking for the ns table.
1432   * <p/>
   * This is for rolling upgrades; later we will migrate the data in the ns table to the ns family
   * of the meta table. If this is a new cluster, this method will return immediately as there will
   * be no namespace table/region.
1436   * @return True if namespace table is up/online.
1437   */
1438  private boolean waitForNamespaceOnline() throws IOException {
1439    TableState nsTableState =
1440      MetaTableAccessor.getTableState(getConnection(), TableName.NAMESPACE_TABLE_NAME);
1441    if (nsTableState == null || nsTableState.isDisabled()) {
1442      // this means we have already migrated the data and disabled or deleted the namespace table,
1443      // or this is a new deploy which does not have a namespace table from the beginning.
1444      return true;
1445    }
1446    List<RegionInfo> ris =
1447      this.assignmentManager.getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME);
1448    if (ris.isEmpty()) {
      // maybe this will not happen any more, but anyway, no harm in adding a check here...
1450      return true;
1451    }
1452    // Else there are namespace regions up in meta. Ensure they are assigned before we go on.
1453    for (RegionInfo ri : ris) {
1454      if (!isRegionOnline(ri)) {
1455        return false;
1456      }
1457    }
1458    return true;
1459  }
1460
1461  /**
1462   * Adds the {@code MasterQuotasObserver} to the list of configured Master observers to
1463   * automatically remove quotas for a table when that table is deleted.
1464   */
1465  @InterfaceAudience.Private
1466  public void updateConfigurationForQuotasObserver(Configuration conf) {
1467    // We're configured to not delete quotas on table deletion, so we don't need to add the obs.
1468    if (
1469      !conf.getBoolean(MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE,
1470        MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT)
1471    ) {
1472      return;
1473    }
1474    String[] masterCoprocs = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
1475    final int length = null == masterCoprocs ? 0 : masterCoprocs.length;
1476    String[] updatedCoprocs = new String[length + 1];
1477    if (length > 0) {
1478      System.arraycopy(masterCoprocs, 0, updatedCoprocs, 0, masterCoprocs.length);
1479    }
1480    updatedCoprocs[length] = MasterQuotasObserver.class.getName();
1481    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, updatedCoprocs);
1482  }
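
  /*
   * Illustrative sketch of the effect of updateConfigurationForQuotasObserver above: the observer
   * is appended to any coprocessors already configured rather than replacing them. The
   * "com.example.MyObserver" class below is hypothetical:
   *
   *   Configuration conf = HBaseConfiguration.create();
   *   conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, "com.example.MyObserver");
   *   master.updateConfigurationForQuotasObserver(conf);
   *   // conf now lists: com.example.MyObserver plus whatever
   *   // MasterQuotasObserver.class.getName() resolves to
   */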
1483
1484  private void initMobCleaner() {
1485    this.mobFileCleanerChore = new MobFileCleanerChore(this);
1486    getChoreService().scheduleChore(mobFileCleanerChore);
1487    this.mobFileCompactionChore = new MobFileCompactionChore(this);
1488    getChoreService().scheduleChore(mobFileCompactionChore);
1489  }
1490
1491  /**
1492   * <p>
1493   * Create a {@link ServerManager} instance.
1494   * </p>
1495   * <p>
1496   * Will be overridden in tests.
1497   * </p>
1498   */
1499  @InterfaceAudience.Private
1500  protected ServerManager createServerManager(MasterServices master, RegionServerList storage)
1501    throws IOException {
    // We put this out here in a method so we can do a Mockito.spy and stub it out
    // w/ a mocked up ServerManager.
1504    setupClusterConnection();
1505    return new ServerManager(master, storage);
1506  }
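
  /*
   * A minimal sketch of how a test might use the hook above (assumes Mockito on the classpath;
   * the anonymous subclass is hypothetical):
   *
   *   HMaster master = new HMaster(conf) {
   *     @Override
   *     protected ServerManager createServerManager(MasterServices m, RegionServerList storage)
   *       throws IOException {
   *       setupClusterConnection();
   *       return Mockito.spy(new ServerManager(m, storage));
   *     }
   *   };
   */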
1507
1508  private void waitForRegionServers(final MonitoredTask status)
1509    throws IOException, InterruptedException {
1510    this.serverManager.waitForRegionServers(status);
1511  }
1512
1513  // Will be overridden in tests
1514  @InterfaceAudience.Private
1515  protected void initClusterSchemaService() throws IOException, InterruptedException {
1516    this.clusterSchemaService = new ClusterSchemaServiceImpl(this);
1517    this.clusterSchemaService.startAsync();
1518    try {
1519      this.clusterSchemaService
1520        .awaitRunning(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,
1521          DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);
1522    } catch (TimeoutException toe) {
      throw new IOException("Timed out starting ClusterSchemaService", toe);
1524    }
1525  }
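
  /*
   * Sketch: the wait above is bounded by the HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS key. A test
   * that wants the master to fail fast on a broken schema service could lower it; the value below
   * is an arbitrary example, in seconds:
   *
   *   conf.setInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, 5);
   */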
1526
1527  private void initQuotaManager() throws IOException {
1528    MasterQuotaManager quotaManager = new MasterQuotaManager(this);
1529    quotaManager.start();
1530    this.quotaManager = quotaManager;
1531  }
1532
  private SpaceQuotaSnapshotNotifier createQuotaSnapshotNotifier() {
    return SpaceQuotaSnapshotNotifierFactory.getInstance().create(getConfiguration());
  }
1538
1539  public boolean isCatalogJanitorEnabled() {
    return catalogJanitorChore != null && catalogJanitorChore.getEnabled();
1541  }
1542
1543  boolean isCleanerChoreEnabled() {
1544    boolean hfileCleanerFlag = true, logCleanerFlag = true;
1545
1546    if (getHFileCleaner() != null) {
1547      hfileCleanerFlag = getHFileCleaner().getEnabled();
1548    }
1549
1550    if (logCleaner != null) {
1551      logCleanerFlag = logCleaner.getEnabled();
1552    }
1553
1554    return (hfileCleanerFlag && logCleanerFlag);
1555  }
1556
1557  @Override
1558  public ServerManager getServerManager() {
1559    return this.serverManager;
1560  }
1561
1562  @Override
1563  public MasterFileSystem getMasterFileSystem() {
1564    return this.fileSystemManager;
1565  }
1566
1567  @Override
1568  public MasterWalManager getMasterWalManager() {
1569    return this.walManager;
1570  }
1571
1572  @Override
1573  public SplitWALManager getSplitWALManager() {
1574    return splitWALManager;
1575  }
1576
1577  @Override
1578  public TableStateManager getTableStateManager() {
1579    return tableStateManager;
1580  }
1581
1582  /*
1583   * Start up all services. If any of these threads gets an unhandled exception then they just die
1584   * with a logged message. This should be fine because in general, we do not expect the master to
   * get such unhandled exceptions as OOMEs; it should be lightly loaded. See what HRegionServer
   * does if we need to install an unhandled exception handler.
1587   */
1588  private void startServiceThreads() throws IOException {
1589    // Start the executor service pools
1590    final int masterOpenRegionPoolSize = conf.getInt(HConstants.MASTER_OPEN_REGION_THREADS,
1591      HConstants.MASTER_OPEN_REGION_THREADS_DEFAULT);
1592    executorService.startExecutorService(executorService.new ExecutorConfig()
1593      .setExecutorType(ExecutorType.MASTER_OPEN_REGION).setCorePoolSize(masterOpenRegionPoolSize));
1594    final int masterCloseRegionPoolSize = conf.getInt(HConstants.MASTER_CLOSE_REGION_THREADS,
1595      HConstants.MASTER_CLOSE_REGION_THREADS_DEFAULT);
1596    executorService.startExecutorService(
1597      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_CLOSE_REGION)
1598        .setCorePoolSize(masterCloseRegionPoolSize));
1599    final int masterServerOpThreads = conf.getInt(HConstants.MASTER_SERVER_OPERATIONS_THREADS,
1600      HConstants.MASTER_SERVER_OPERATIONS_THREADS_DEFAULT);
1601    executorService.startExecutorService(
1602      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SERVER_OPERATIONS)
1603        .setCorePoolSize(masterServerOpThreads));
1604    final int masterServerMetaOpsThreads =
1605      conf.getInt(HConstants.MASTER_META_SERVER_OPERATIONS_THREADS,
1606        HConstants.MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT);
1607    executorService.startExecutorService(executorService.new ExecutorConfig()
1608      .setExecutorType(ExecutorType.MASTER_META_SERVER_OPERATIONS)
1609      .setCorePoolSize(masterServerMetaOpsThreads));
1610    final int masterLogReplayThreads = conf.getInt(HConstants.MASTER_LOG_REPLAY_OPS_THREADS,
1611      HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT);
1612    executorService.startExecutorService(executorService.new ExecutorConfig()
1613      .setExecutorType(ExecutorType.M_LOG_REPLAY_OPS).setCorePoolSize(masterLogReplayThreads));
1614    final int masterSnapshotThreads = conf.getInt(SnapshotManager.SNAPSHOT_POOL_THREADS_KEY,
1615      SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT);
1616    executorService.startExecutorService(
1617      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SNAPSHOT_OPERATIONS)
1618        .setCorePoolSize(masterSnapshotThreads).setAllowCoreThreadTimeout(true));
1619    final int masterMergeDispatchThreads = conf.getInt(HConstants.MASTER_MERGE_DISPATCH_THREADS,
1620      HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT);
1621    executorService.startExecutorService(
1622      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_MERGE_OPERATIONS)
1623        .setCorePoolSize(masterMergeDispatchThreads).setAllowCoreThreadTimeout(true));
1624
1625    // We depend on there being only one instance of this executor running
1626    // at a time. To do concurrency, would need fencing of enable/disable of
1627    // tables.
    // Any time changing this maxThreads to > 1, please see the comment at
1629    // AccessController#postCompletedCreateTableAction
1630    executorService.startExecutorService(executorService.new ExecutorConfig()
1631      .setExecutorType(ExecutorType.MASTER_TABLE_OPERATIONS).setCorePoolSize(1));
1632    startProcedureExecutor();
1633
1634    // Create log cleaner thread pool
1635    logCleanerPool = DirScanPool.getLogCleanerScanPool(conf);
1636    Map<String, Object> params = new HashMap<>();
1637    params.put(MASTER, this);
1638    // Start log cleaner thread
1639    int cleanerInterval =
1640      conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL);
1641    this.logCleaner =
1642      new LogCleaner(cleanerInterval, this, conf, getMasterWalManager().getFileSystem(),
1643        getMasterWalManager().getOldLogDir(), logCleanerPool, params);
1644    getChoreService().scheduleChore(logCleaner);
1645
1646    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
1647
1648    // Create custom archive hfile cleaners
1649    String[] paths = conf.getStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS);
    // TODO: handle the overlap issues for the custom paths
1651
1652    if (paths != null && paths.length > 0) {
1653      if (conf.getStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS) == null) {
1654        Set<String> cleanerClasses = new HashSet<>();
1655        String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
1656        if (cleaners != null) {
1657          Collections.addAll(cleanerClasses, cleaners);
1658        }
1659        conf.setStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS,
1660          cleanerClasses.toArray(new String[cleanerClasses.size()]));
1661        LOG.info("Archive custom cleaner paths: {}, plugins: {}", Arrays.asList(paths),
1662          cleanerClasses);
1663      }
      // share the hfile cleaner pool among the custom paths
1665      sharedHFileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf.get(CUSTOM_POOL_SIZE, "6"));
1666      for (int i = 0; i < paths.length; i++) {
1667        Path path = new Path(paths[i].trim());
1668        HFileCleaner cleaner =
1669          new HFileCleaner("ArchiveCustomHFileCleaner-" + path.getName(), cleanerInterval, this,
1670            conf, getMasterFileSystem().getFileSystem(), new Path(archiveDir, path),
1671            HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS, sharedHFileCleanerPool, params, null);
1672        hfileCleaners.add(cleaner);
1673        hfileCleanerPaths.add(path);
1674      }
1675    }
1676
1677    // Create the whole archive dir cleaner thread pool
1678    exclusiveHFileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf);
1679    hfileCleaners.add(0,
1680      new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem().getFileSystem(),
1681        archiveDir, exclusiveHFileCleanerPool, params, hfileCleanerPaths));
1682    hfileCleanerPaths.add(0, archiveDir);
1683    // Schedule all the hfile cleaners
1684    for (HFileCleaner hFileCleaner : hfileCleaners) {
1685      getChoreService().scheduleChore(hFileCleaner);
1686    }
1687
    // Reopening regions based on a very high storeFileRefCount is considered enabled
    // only if hbase.regions.recovery.store.file.ref.count has a value > 0
1690    final int maxStoreFileRefCount = conf.getInt(HConstants.STORE_FILE_REF_COUNT_THRESHOLD,
1691      HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD);
1692    if (maxStoreFileRefCount > 0) {
1693      this.regionsRecoveryChore = new RegionsRecoveryChore(this, conf, this);
1694      getChoreService().scheduleChore(this.regionsRecoveryChore);
1695    } else {
1696      LOG.info(
1697        "Reopening regions with very high storeFileRefCount is disabled. "
1698          + "Provide threshold value > 0 for {} to enable it.",
1699        HConstants.STORE_FILE_REF_COUNT_THRESHOLD);
1700    }
1701
1702    this.regionsRecoveryConfigManager = new RegionsRecoveryConfigManager(this);
1703
1704    replicationBarrierCleaner =
1705      new ReplicationBarrierCleaner(conf, this, getConnection(), replicationPeerManager);
1706    getChoreService().scheduleChore(replicationBarrierCleaner);
1707
1708    final boolean isSnapshotChoreEnabled = this.snapshotCleanupStateStore.get();
1709    this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager());
1710    if (isSnapshotChoreEnabled) {
1711      getChoreService().scheduleChore(this.snapshotCleanerChore);
1712    } else {
1713      if (LOG.isTraceEnabled()) {
        LOG.trace("Snapshot Cleaner Chore is disabled. Not starting up the chore.");
1715      }
1716    }
1717    serviceStarted = true;
1718    if (LOG.isTraceEnabled()) {
1719      LOG.trace("Started service threads");
1720    }
1721  }
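
  /*
   * Sketch of sizing the executor pools started above from configuration before master startup
   * (the exact key strings live in HConstants; the values here are arbitrary examples):
   *
   *   conf.setInt(HConstants.MASTER_OPEN_REGION_THREADS, 10);
   *   conf.setInt(HConstants.MASTER_CLOSE_REGION_THREADS, 10);
   *   conf.setInt(HConstants.MASTER_SERVER_OPERATIONS_THREADS, 8);
   */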
1722
1723  protected void stopServiceThreads() {
1724    if (masterJettyServer != null) {
1725      LOG.info("Stopping master jetty server");
1726      try {
1727        masterJettyServer.stop();
1728      } catch (Exception e) {
1729        LOG.error("Failed to stop master jetty server", e);
1730      }
1731    }
1732    stopChoreService();
1733    stopExecutorService();
1734    if (exclusiveHFileCleanerPool != null) {
1735      exclusiveHFileCleanerPool.shutdownNow();
1736      exclusiveHFileCleanerPool = null;
1737    }
1738    if (logCleanerPool != null) {
1739      logCleanerPool.shutdownNow();
1740      logCleanerPool = null;
1741    }
1742    if (sharedHFileCleanerPool != null) {
1743      sharedHFileCleanerPool.shutdownNow();
1744      sharedHFileCleanerPool = null;
1745    }
1746    if (maintenanceRegionServer != null) {
1747      maintenanceRegionServer.getRegionServer().stop(HBASE_MASTER_CLEANER_INTERVAL);
1748    }
1749
1750    LOG.debug("Stopping service threads");
1751    // stop procedure executor prior to other services such as server manager and assignment
1752    // manager, as these services are important for some running procedures. See HBASE-24117 for
1753    // example.
1754    stopProcedureExecutor();
1755
1756    if (regionNormalizerManager != null) {
1757      regionNormalizerManager.stop();
1758    }
1759    if (this.quotaManager != null) {
1760      this.quotaManager.stop();
1761    }
1762
1763    if (this.activeMasterManager != null) {
1764      this.activeMasterManager.stop();
1765    }
1766    if (this.serverManager != null) {
1767      this.serverManager.stop();
1768    }
1769    if (this.assignmentManager != null) {
1770      this.assignmentManager.stop();
1771    }
1772
1773    if (masterRegion != null) {
1774      masterRegion.close(isAborted());
1775    }
1776    if (this.walManager != null) {
1777      this.walManager.stop();
1778    }
1779    if (this.fileSystemManager != null) {
1780      this.fileSystemManager.stop();
1781    }
1782    if (this.mpmHost != null) {
1783      this.mpmHost.stop("server shutting down.");
1784    }
1785    if (this.regionServerTracker != null) {
1786      this.regionServerTracker.stop();
1787    }
1788  }
1789
1790  private void createProcedureExecutor() throws IOException {
1791    MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
1792    procedureStore = new RegionProcedureStore(this, masterRegion,
1793      new MasterProcedureEnv.FsUtilsLeaseRecovery(this));
1794    procedureStore.registerListener(new ProcedureStoreListener() {
1795
1796      @Override
1797      public void abortProcess() {
1798        abort("The Procedure Store lost the lease", null);
1799      }
1800    });
1801    MasterProcedureScheduler procedureScheduler = procEnv.getProcedureScheduler();
1802    procedureExecutor = new ProcedureExecutor<>(conf, procEnv, procedureStore, procedureScheduler);
1803    configurationManager.registerObserver(procEnv);
1804
1805    int cpus = Runtime.getRuntime().availableProcessors();
1806    final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, Math.max(
1807      (cpus > 0 ? cpus / 4 : 0), MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
1808    final boolean abortOnCorruption =
1809      conf.getBoolean(MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
1810        MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
1811    procedureStore.start(numThreads);
    // Just initialize it but do not start the workers; we will start the workers later by calling
    // startProcedureExecutor. See the javadoc for finishActiveMasterInitialization for more
1814    // details.
1815    procedureExecutor.init(numThreads, abortOnCorruption);
1816    if (!procEnv.getRemoteDispatcher().start()) {
1817      throw new HBaseIOException("Failed start of remote dispatcher");
1818    }
1819  }
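
  /*
   * Worked example of the sizing above: with 16 available processors and
   * MASTER_PROCEDURE_THREADS unset, numThreads = max(16 / 4, DEFAULT_MIN_MASTER_PROCEDURE_THREADS),
   * so the configured minimum wins whenever cpus / 4 falls below it; an explicit
   * MASTER_PROCEDURE_THREADS setting overrides the computation entirely.
   */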
1820
  // Will be overridden in unit tests.
1822  protected void startProcedureExecutor() throws IOException {
1823    procedureExecutor.startWorkers();
1824  }
1825
1826  /**
1827   * Turn on/off Snapshot Cleanup Chore
1828   * @param on indicates whether Snapshot Cleanup Chore is to be run
1829   */
1830  void switchSnapshotCleanup(final boolean on, final boolean synchronous) throws IOException {
1831    if (synchronous) {
1832      synchronized (this.snapshotCleanerChore) {
1833        switchSnapshotCleanup(on);
1834      }
1835    } else {
1836      switchSnapshotCleanup(on);
1837    }
1838  }
1839
1840  private void switchSnapshotCleanup(final boolean on) throws IOException {
1841    snapshotCleanupStateStore.set(on);
1842    if (on) {
1843      getChoreService().scheduleChore(this.snapshotCleanerChore);
1844    } else {
1845      this.snapshotCleanerChore.cancel();
1846    }
1847  }
1848
1849  private void stopProcedureExecutor() {
1850    if (procedureExecutor != null) {
1851      configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
1852      procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
1853      procedureExecutor.stop();
1854      procedureExecutor.join();
1855      procedureExecutor = null;
1856    }
1857
1858    if (procedureStore != null) {
1859      procedureStore.stop(isAborted());
1860      procedureStore = null;
1861    }
1862  }
1863
1864  protected void stopChores() {
1865    shutdownChore(mobFileCleanerChore);
1866    shutdownChore(mobFileCompactionChore);
1867    shutdownChore(balancerChore);
1868    if (regionNormalizerManager != null) {
1869      shutdownChore(regionNormalizerManager.getRegionNormalizerChore());
1870    }
1871    shutdownChore(clusterStatusChore);
1872    shutdownChore(catalogJanitorChore);
1873    shutdownChore(clusterStatusPublisherChore);
1874    shutdownChore(snapshotQuotaChore);
1875    shutdownChore(logCleaner);
1876    if (hfileCleaners != null) {
1877      for (ScheduledChore chore : hfileCleaners) {
1878        chore.shutdown();
1879      }
1880      hfileCleaners = null;
1881    }
1882    shutdownChore(replicationBarrierCleaner);
1883    shutdownChore(snapshotCleanerChore);
1884    shutdownChore(hbckChore);
1885    shutdownChore(regionsRecoveryChore);
1886    shutdownChore(rollingUpgradeChore);
1887  }
1888
1889  /** Returns Get remote side's InetAddress */
1890  InetAddress getRemoteInetAddress(final int port, final long serverStartCode)
1891    throws UnknownHostException {
    // Do it out here in its own little method so we can fake an address when
    // mocking up in tests.
1894    InetAddress ia = RpcServer.getRemoteIp();
1895
1896    // The call could be from the local regionserver,
1897    // in which case, there is no remote address.
1898    if (ia == null && serverStartCode == startcode) {
1899      InetSocketAddress isa = rpcServices.getSocketAddress();
1900      if (isa != null && isa.getPort() == port) {
1901        ia = isa.getAddress();
1902      }
1903    }
1904    return ia;
1905  }
1906
1907  /** Returns Maximum time we should run balancer for */
1908  private int getMaxBalancingTime() {
    // if max balancing time isn't set, default it to the balancer period
1910    int maxBalancingTime =
1911      getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, getConfiguration()
1912        .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD));
1913    return maxBalancingTime;
1914  }
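
  /*
   * Example of the fallback above: with hbase.balancer.max.balancing unset, the max balancing
   * time equals the balancer period, so a single balancer run may consume up to one full period
   * before the next chore run starts.
   */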
1915
1916  /** Returns Maximum number of regions in transition */
1917  private int getMaxRegionsInTransition() {
1918    int numRegions = this.assignmentManager.getRegionStates().getRegionAssignments().size();
1919    return Math.max((int) Math.floor(numRegions * this.maxRitPercent), 1);
1920  }
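
  /*
   * Worked example: with 10,000 assigned regions and maxRitPercent = 0.05, the limit is
   * max(floor(10000 * 0.05), 1) = 500 regions in transition; the lower bound of 1 keeps very
   * small clusters from computing a limit of 0.
   */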
1921
1922  /**
   * First sleeps until the next balance plan start time. Meanwhile, throttles by the max number of
   * regions in transition to protect availability.
1925   * @param nextBalanceStartTime   The next balance plan start time
1926   * @param maxRegionsInTransition max number of regions in transition
1927   * @param cutoffTime             when to exit balancer
1928   */
1929  private void balanceThrottling(long nextBalanceStartTime, int maxRegionsInTransition,
1930    long cutoffTime) {
1931    boolean interrupted = false;
1932
    // Sleep until the next balance plan start time.
    // But if there are zero regions in transition, it can skip the sleep to speed up.
1935    while (
1936      !interrupted && EnvironmentEdgeManager.currentTime() < nextBalanceStartTime
1937        && this.assignmentManager.getRegionStates().hasRegionsInTransition()
1938    ) {
1939      try {
1940        Thread.sleep(100);
1941      } catch (InterruptedException ie) {
1942        interrupted = true;
1943      }
1944    }
1945
    // Throttle by the max number of regions in transition
1947    while (
1948      !interrupted && maxRegionsInTransition > 0
1949        && this.assignmentManager.getRegionStates().getRegionsInTransitionCount()
1950            >= maxRegionsInTransition
1951        && EnvironmentEdgeManager.currentTime() <= cutoffTime
1952    ) {
1953      try {
1954        // sleep if the number of regions in transition exceeds the limit
1955        Thread.sleep(100);
1956      } catch (InterruptedException ie) {
1957        interrupted = true;
1958      }
1959    }
1960
    if (interrupted) {
      Thread.currentThread().interrupt();
    }
1962  }
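
  /*
   * Sketch of the throttling above: with nextBalanceStartTime 3 seconds out and a limit of 500
   * regions in transition, the method polls every 100 ms, first until the 3-second mark passes
   * (or regions in transition drain to zero), then while the RIT count stays at or above 500 and
   * the cutoff time has not elapsed.
   */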
1963
1964  public BalanceResponse balance() throws IOException {
1965    return balance(BalanceRequest.defaultInstance());
1966  }
1967
1968  /**
   * Trigger a normal balance; see {@link HMaster#balance()}. If the balance is not executed this
   * time, the metrics related to the balance will still be updated. When the balance is running,
   * related metrics are updated at the same time. But if some checking logic fails and causes the
   * balancer to exit early, we lose the chance to update balancer metrics, leaving users without
   * the latest balancer info.
1974   */
1975  public BalanceResponse balanceOrUpdateMetrics() throws IOException {
1976    synchronized (this.balancer) {
1977      BalanceResponse response = balance();
1978      if (!response.isBalancerRan()) {
1979        Map<TableName, Map<ServerName, List<RegionInfo>>> assignments =
1980          this.assignmentManager.getRegionStates().getAssignmentsForBalancer(this.tableStateManager,
1981            this.serverManager.getOnlineServersList());
1982        for (Map<ServerName, List<RegionInfo>> serverMap : assignments.values()) {
1983          serverMap.keySet().removeAll(this.serverManager.getDrainingServersList());
1984        }
1985        this.balancer.updateBalancerLoadInfo(assignments);
1986      }
1987      return response;
1988    }
1989  }
1990
1991  /**
1992   * Checks master state before initiating action over region topology.
1993   * @param action the name of the action under consideration, for logging.
1994   * @return {@code true} when the caller should exit early, {@code false} otherwise.
1995   */
1996  @Override
1997  public boolean skipRegionManagementAction(final String action) {
1998    // Note: this method could be `default` on MasterServices if but for logging.
1999    if (!isInitialized()) {
2000      LOG.debug("Master has not been initialized, don't run {}.", action);
2001      return true;
2002    }
2003    if (this.getServerManager().isClusterShutdown()) {
2004      LOG.info("Cluster is shutting down, don't run {}.", action);
2005      return true;
2006    }
2007    if (isInMaintenanceMode()) {
2008      LOG.info("Master is in maintenance mode, don't run {}.", action);
2009      return true;
2010    }
2011    return false;
2012  }
2013
2014  public BalanceResponse balance(BalanceRequest request) throws IOException {
2015    checkInitialized();
2016
2017    BalanceResponse.Builder responseBuilder = BalanceResponse.newBuilder();
2018
2019    if (loadBalancerStateStore == null || !(loadBalancerStateStore.get() || request.isDryRun())) {
2020      return responseBuilder.build();
2021    }
2022
2023    if (skipRegionManagementAction("balancer")) {
2024      return responseBuilder.build();
2025    }
2026
2027    synchronized (this.balancer) {
      // Only allow one balance run at a time.
2029      if (this.assignmentManager.hasRegionsInTransition()) {
2030        List<RegionStateNode> regionsInTransition = assignmentManager.getRegionsInTransition();
        // if the hbase:meta region is in transition, the result of the assignment cannot be
        // recorded, so ignore the force flag in that case
2033        boolean metaInTransition = assignmentManager.isMetaRegionInTransition();
2034        List<RegionStateNode> toPrint = regionsInTransition;
2035        int max = 5;
2036        boolean truncated = false;
2037        if (regionsInTransition.size() > max) {
2038          toPrint = regionsInTransition.subList(0, max);
2039          truncated = true;
2040        }
2041
2042        if (!request.isIgnoreRegionsInTransition() || metaInTransition) {
2043          LOG.info("Not running balancer (ignoreRIT=false" + ", metaRIT=" + metaInTransition
2044            + ") because " + regionsInTransition.size() + " region(s) in transition: " + toPrint
2045            + (truncated ? "(truncated list)" : ""));
2046          return responseBuilder.build();
2047        }
2048      }
2049      if (this.serverManager.areDeadServersInProgress()) {
2050        LOG.info("Not running balancer because processing dead regionserver(s): "
2051          + this.serverManager.getDeadServers());
2052        return responseBuilder.build();
2053      }
2054
2055      if (this.cpHost != null) {
2056        try {
2057          if (this.cpHost.preBalance(request)) {
2058            LOG.debug("Coprocessor bypassing balancer request");
2059            return responseBuilder.build();
2060          }
2061        } catch (IOException ioe) {
2062          LOG.error("Error invoking master coprocessor preBalance()", ioe);
2063          return responseBuilder.build();
2064        }
2065      }
2066
2067      Map<TableName, Map<ServerName, List<RegionInfo>>> assignments =
2068        this.assignmentManager.getRegionStates().getAssignmentsForBalancer(tableStateManager,
2069          this.serverManager.getOnlineServersList());
2070      for (Map<ServerName, List<RegionInfo>> serverMap : assignments.values()) {
2071        serverMap.keySet().removeAll(this.serverManager.getDrainingServersList());
2072      }
2073
2074      // Give the balancer the current cluster state.
2075      this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
2076
2077      List<RegionPlan> plans = this.balancer.balanceCluster(assignments);
2078
2079      responseBuilder.setBalancerRan(true).setMovesCalculated(plans == null ? 0 : plans.size());
2080
2081      if (skipRegionManagementAction("balancer")) {
2082        // make one last check that the cluster isn't shutting down before proceeding.
2083        return responseBuilder.build();
2084      }
2085
2086      // For dry run we don't actually want to execute the moves, but we do want
2087      // to execute the coprocessor below
2088      List<RegionPlan> sucRPs =
2089        request.isDryRun() ? Collections.emptyList() : executeRegionPlansWithThrottling(plans);
2090
2091      if (this.cpHost != null) {
2092        try {
2093          this.cpHost.postBalance(request, sucRPs);
2094        } catch (IOException ioe) {
2095          // balancing already succeeded so don't change the result
2096          LOG.error("Error invoking master coprocessor postBalance()", ioe);
2097        }
2098      }
2099
2100      responseBuilder.setMovesExecuted(sucRPs.size());
2101    }
2102
    // If the LoadBalancer did not generate any plans, the cluster is already balanced;
    // return the response as a success.
2105    return responseBuilder.build();
2106  }
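
  /*
   * A minimal usage sketch for the method above, assuming BalanceRequest exposes a builder
   * alongside the defaultInstance() used elsewhere in this class: a dry run computes and reports
   * plans without executing any region moves.
   *
   *   BalanceResponse response = master.balance(BalanceRequest.newBuilder().setDryRun(true).build());
   *   LOG.info("ran={} calculated={} executed={}", response.isBalancerRan(),
   *     response.getMovesCalculated(), response.getMovesExecuted());
   */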
2107
2108  /**
2109   * Execute region plans with throttling
2110   * @param plans to execute
2111   * @return succeeded plans
2112   */
2113  public List<RegionPlan> executeRegionPlansWithThrottling(List<RegionPlan> plans) {
2114    List<RegionPlan> successRegionPlans = new ArrayList<>();
2115    int maxRegionsInTransition = getMaxRegionsInTransition();
2116    long balanceStartTime = EnvironmentEdgeManager.currentTime();
2117    long cutoffTime = balanceStartTime + this.maxBalancingTime;
2118    int rpCount = 0; // number of RegionPlans balanced so far
2119    if (plans != null && !plans.isEmpty()) {
2120      int balanceInterval = this.maxBalancingTime / plans.size();
2121      LOG.info(
2122        "Balancer plans size is " + plans.size() + ", the balance interval is " + balanceInterval
2123          + " ms, and the max number regions in transition is " + maxRegionsInTransition);
2124
2125      for (RegionPlan plan : plans) {
2126        LOG.info("balance " + plan);
2127        // TODO: bulk assign
2128        try {
2129          this.assignmentManager.balance(plan);
2130        } catch (HBaseIOException hioe) {
          // Ignore failed plans here to avoid aborting the whole set of balance plans;
          // later calls of balance() can pick up the failed and skipped plans.
2133          LOG.warn("Failed balance plan {}, skipping...", plan, hioe);
2134        }
        // rpCount records balance plans processed, regardless of whether a plan succeeds
2136        rpCount++;
2137        successRegionPlans.add(plan);
2138
2139        if (this.maxBalancingTime > 0) {
2140          balanceThrottling(balanceStartTime + rpCount * balanceInterval, maxRegionsInTransition,
2141            cutoffTime);
2142        }
2143
2144        // if performing next balance exceeds cutoff time, exit the loop
2145        if (
2146          this.maxBalancingTime > 0 && rpCount < plans.size()
2147            && EnvironmentEdgeManager.currentTime() > cutoffTime
2148        ) {
          // TODO: After balance, there should not be a cutoff time (keeping it as
          // a safety net for now)
2151          LOG.debug(
2152            "No more balancing till next balance run; maxBalanceTime=" + this.maxBalancingTime);
2153          break;
2154        }
2155      }
2156    }
2157    LOG.debug("Balancer is going into sleep until next period in {}ms", getConfiguration()
2158      .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD));
2159    return successRegionPlans;
2160  }
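
  /*
   * Worked example of the pacing above: with maxBalancingTime = 300000 ms and 100 plans,
   * balanceInterval = 3000 ms, so after the rpCount-th plan the loop sleeps until
   * balanceStartTime + rpCount * 3000 ms (unless regions in transition have already drained),
   * spreading the moves across the balancing window.
   */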
2161
2162  @Override
2163  public RegionNormalizerManager getRegionNormalizerManager() {
2164    return regionNormalizerManager;
2165  }
2166
2167  @Override
2168  public boolean normalizeRegions(final NormalizeTableFilterParams ntfp,
2169    final boolean isHighPriority) throws IOException {
2170    if (regionNormalizerManager == null || !regionNormalizerManager.isNormalizerOn()) {
2171      LOG.debug("Region normalization is disabled, don't run region normalizer.");
2172      return false;
2173    }
2174    if (skipRegionManagementAction("region normalizer")) {
2175      return false;
2176    }
2177    if (assignmentManager.hasRegionsInTransition()) {
2178      return false;
2179    }
2180
2181    final Set<TableName> matchingTables = getTableDescriptors(new LinkedList<>(),
2182      ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false).stream()
2183        .map(TableDescriptor::getTableName).collect(Collectors.toSet());
2184    final Set<TableName> allEnabledTables =
2185      tableStateManager.getTablesInStates(TableState.State.ENABLED);
2186    final List<TableName> targetTables =
2187      new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables));
2188    Collections.shuffle(targetTables);
2189    return regionNormalizerManager.normalizeRegions(targetTables, isHighPriority);
2190  }
2191
2192  /** Returns Client info for use as prefix on an audit log string; who did an action */
2193  @Override
2194  public String getClientIdAuditPrefix() {
2195    return "Client=" + RpcServer.getRequestUserName().orElse(null) + "/"
2196      + RpcServer.getRemoteAddress().orElse(null);
2197  }
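
  /*
   * Example prefix produced above (values illustrative): "Client=alice/10.0.0.12". When the user
   * or remote address is unavailable (e.g. a call made locally within the master), the
   * corresponding part renders as "null".
   */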
2198
2199  /**
   * Switch for the background CatalogJanitor thread. Used for testing. The thread will continue to
   * run; it will just be a no-op if disabled.
2202   * @param b If false, the catalog janitor won't do anything.
2203   */
2204  public void setCatalogJanitorEnabled(final boolean b) {
2205    this.catalogJanitorChore.setEnabled(b);
2206  }
2207
2208  @Override
2209  public long mergeRegions(final RegionInfo[] regionsToMerge, final boolean forcible, final long ng,
2210    final long nonce) throws IOException {
2211    checkInitialized();
2212
2213    if (!isSplitOrMergeEnabled(MasterSwitchType.MERGE)) {
2214      String regionsStr = Arrays.deepToString(regionsToMerge);
2215      LOG.warn("Merge switch is off! skip merge of " + regionsStr);
2216      throw new DoNotRetryIOException(
2217        "Merge of " + regionsStr + " failed because merge switch is off");
2218    }
2219
2220    final String mergeRegionsStr = Arrays.stream(regionsToMerge).map(RegionInfo::getEncodedName)
2221      .collect(Collectors.joining(", "));
2222    return MasterProcedureUtil.submitProcedure(new NonceProcedureRunnable(this, ng, nonce) {
2223      @Override
2224      protected void run() throws IOException {
2225        getMaster().getMasterCoprocessorHost().preMergeRegions(regionsToMerge);
2226        String aid = getClientIdAuditPrefix();
2227        LOG.info("{} merge regions {}", aid, mergeRegionsStr);
2228        submitProcedure(new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(),
2229          regionsToMerge, forcible));
2230        getMaster().getMasterCoprocessorHost().postMergeRegions(regionsToMerge);
2231      }
2232
2233      @Override
2234      protected String getDescription() {
2235        return "MergeTableProcedure";
2236      }
2237    });
2238  }
2239
2240  @Override
2241  public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, final long nonceGroup,
2242    final long nonce) throws IOException {
2243    checkInitialized();
2244
2245    if (!isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) {
2246      LOG.warn("Split switch is off! skip split of " + regionInfo);
2247      throw new DoNotRetryIOException(
2248        "Split region " + regionInfo.getRegionNameAsString() + " failed due to split switch off");
2249    }
2250
2251    return MasterProcedureUtil
2252      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2253        @Override
2254        protected void run() throws IOException {
2255          getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow);
2256          LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString());
2257
2258          // Execute the operation asynchronously
2259          submitProcedure(getAssignmentManager().createSplitProcedure(regionInfo, splitRow));
2260        }
2261
2262        @Override
2263        protected String getDescription() {
2264          return "SplitTableProcedure";
2265        }
2266      });
2267  }
2268
2269  private void warmUpRegion(ServerName server, RegionInfo region) {
2270    FutureUtils.addListener(asyncClusterConnection.getRegionServerAdmin(server)
2271      .warmupRegion(RequestConverter.buildWarmupRegionRequest(region)), (r, e) -> {
2272        if (e != null) {
2273          LOG.warn("Failed to warm up region {} on server {}", region, server, e);
2274        }
2275      });
2276  }
2277
  // Public so it can be accessed by tests. Blocks until the move is done.
  // TODO: Replace with an async implementation from which you can get
  // a success/failure result.
2281  @InterfaceAudience.Private
2282  public void move(final byte[] encodedRegionName, byte[] destServerName) throws IOException {
2283    RegionState regionState =
2284      assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName));
2285
2286    RegionInfo hri;
2287    if (regionState != null) {
2288      hri = regionState.getRegion();
2289    } else {
2290      throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
2291    }
2292
2293    ServerName dest;
2294    List<ServerName> exclude = hri.getTable().isSystemTable()
2295      ? assignmentManager.getExcludedServersForSystemTable()
2296      : new ArrayList<>(1);
2297    if (
2298      destServerName != null && exclude.contains(ServerName.valueOf(Bytes.toString(destServerName)))
2299    ) {
2300      LOG.info(Bytes.toString(encodedRegionName) + " can not move to "
2301        + Bytes.toString(destServerName) + " because the server is in exclude list");
2302      destServerName = null;
2303    }
2304    if (destServerName == null || destServerName.length == 0) {
      LOG.info("Passed destination servername is null/empty so choosing a server at random");
2306      exclude.add(regionState.getServerName());
2307      final List<ServerName> destServers = this.serverManager.createDestinationServersList(exclude);
2308      dest = balancer.randomAssignment(hri, destServers);
2309      if (dest == null) {
2310        LOG.debug("Unable to determine a plan to assign " + hri);
2311        return;
2312      }
2313    } else {
2314      ServerName candidate = ServerName.valueOf(Bytes.toString(destServerName));
2315      dest = balancer.randomAssignment(hri, Lists.newArrayList(candidate));
2316      if (dest == null) {
2317        LOG.debug("Unable to determine a plan to assign " + hri);
2318        return;
2319      }
2320      // TODO: deal with table on master for rs group.
2321      if (dest.equals(serverName)) {
        // To avoid unnecessary region moving later by the balancer, don't put user
        // regions on the master.
2324        LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
2325          + " to avoid unnecessary region moving later by load balancer,"
2326          + " because it should not be on master");
2327        return;
2328      }
2329    }
2330
2331    if (dest.equals(regionState.getServerName())) {
2332      LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
2333        + " because region already assigned to the same server " + dest + ".");
2334      return;
2335    }
2336
2337    // Now we can do the move
2338    RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);
2339    assert rp.getDestination() != null : rp.toString() + " " + dest;
2340
2341    try {
2342      checkInitialized();
2343      if (this.cpHost != null) {
2344        this.cpHost.preMove(hri, rp.getSource(), rp.getDestination());
2345      }
2346
2347      TransitRegionStateProcedure proc =
2348        this.assignmentManager.createMoveRegionProcedure(rp.getRegionInfo(), rp.getDestination());
2349      if (conf.getBoolean(WARMUP_BEFORE_MOVE, DEFAULT_WARMUP_BEFORE_MOVE)) {
2350        // Warmup the region on the destination before initiating the move.
2351        // A region server could reject the close request because it either does not
2352        // have the specified region or the region is being split.
2353        LOG.info(getClientIdAuditPrefix() + " move " + rp + ", warming up region on "
2354          + rp.getDestination());
2355        warmUpRegion(rp.getDestination(), hri);
2356      }
2357      LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
2358      Future<byte[]> future = ProcedureSyncWait.submitProcedure(this.procedureExecutor, proc);
2359      try {
2360        // Is this going to work? Will we throw exception on error?
2361        // TODO: CompletableFuture rather than this stunted Future.
2362        future.get();
2363      } catch (InterruptedException | ExecutionException e) {
2364        throw new HBaseIOException(e);
2365      }
2366      if (this.cpHost != null) {
2367        this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
2368      }
2369    } catch (IOException ioe) {
2370      if (ioe instanceof HBaseIOException) {
2371        throw (HBaseIOException) ioe;
2372      }
2373      throw new HBaseIOException(ioe);
2374    }
2375  }
2376
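  // The (nonceGroup, nonce) pair below deduplicates retried client RPCs: a resubmission with
  // the same pair is matched to the original procedure instead of queueing a second
  // CreateTableProcedure. A hedged client-side sketch (hypothetical table and family names;
  // the Admin API generates the nonces internally):
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //       Admin admin = conn.getAdmin()) {
  //     admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
  //       .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build());
  //   }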
2377  @Override
2378  public long createTable(final TableDescriptor tableDescriptor, final byte[][] splitKeys,
2379    final long nonceGroup, final long nonce) throws IOException {
2380    checkInitialized();
2381    TableDescriptor desc = getMasterCoprocessorHost().preCreateTableRegionsInfos(tableDescriptor);
2382    if (desc == null) {
2383      throw new IOException("Creation for " + tableDescriptor + " is canceled by CP");
2384    }
2385    String namespace = desc.getTableName().getNamespaceAsString();
2386    this.clusterSchemaService.getNamespace(namespace);
2387
2388    RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(desc, splitKeys);
2389    TableDescriptorChecker.sanityCheck(conf, desc);
2390
2391    return MasterProcedureUtil
2392      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2393        @Override
2394        protected void run() throws IOException {
2395          getMaster().getMasterCoprocessorHost().preCreateTable(desc, newRegions);
2396
2397          LOG.info(getClientIdAuditPrefix() + " create " + desc);
2398
2399          // TODO: We can handle/merge duplicate requests, and differentiate the case of
2400          // TableExistsException by saying if the schema is the same or not.
2401          //
2402          // We need to wait for the procedure to potentially fail due to "prepare" sanity
2403          // checks. This will block only the beginning of the procedure. See HBASE-19953.
2404          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
2405          submitProcedure(
2406            new CreateTableProcedure(procedureExecutor.getEnvironment(), desc, newRegions, latch));
2407          latch.await();
2408
2409          getMaster().getMasterCoprocessorHost().postCreateTable(desc, newRegions);
2410        }
2411
2412        @Override
2413        protected String getDescription() {
2414          return "CreateTableProcedure";
2415        }
2416      });
2417  }
2418
2419  @Override
2420  public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
2421    if (isStopped()) {
2422      throw new MasterNotRunningException();
2423    }
2424
2425    TableName tableName = tableDescriptor.getTableName();
    if (!tableName.isSystemTable()) {
2427      throw new IllegalArgumentException(
2428        "Only system table creation can use this createSystemTable API");
2429    }
2430
2431    RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(tableDescriptor, null);
2432
2433    LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);
2434
    // This special create-table path is invoked locally on the master. Since no RPC is
    // involved, there is no need for a nonce to detect duplicate calls.
2437    long procId = this.procedureExecutor.submitProcedure(
2438      new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions));
2439
2440    return procId;
2441  }
2442
2443  private void startActiveMasterManager(int infoPort) throws KeeperException {
2444    String backupZNode = ZNodePaths.joinZNode(zooKeeper.getZNodePaths().backupMasterAddressesZNode,
2445      serverName.toString());
2446    /*
2447     * Add a ZNode for ourselves in the backup master directory since we may not become the active
     * master. If so, we want the actual active master to know we are a backup master, so that it
2449     * won't assign regions to us if so configured. If we become the active master later,
2450     * ActiveMasterManager will delete this node explicitly. If we crash before then, ZooKeeper will
2451     * delete this node for us since it is ephemeral.
2452     */
2453    LOG.info("Adding backup master ZNode " + backupZNode);
2454    if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode, serverName, infoPort)) {
2455      LOG.warn("Failed create of " + backupZNode + " by " + serverName);
2456    }
2457    this.activeMasterManager.setInfoPort(infoPort);
2458    int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
    // If we're a backup master, stall until a primary has written its address
2460    if (conf.getBoolean(HConstants.MASTER_TYPE_BACKUP, HConstants.DEFAULT_MASTER_TYPE_BACKUP)) {
2461      LOG.debug("HMaster started in backup mode. Stalling until master znode is written.");
2462      // This will only be a minute or so while the cluster starts up,
2463      // so don't worry about setting watches on the parent znode
2464      while (!activeMasterManager.hasActiveMaster()) {
2465        LOG.debug("Waiting for master address and cluster state znode to be written.");
2466        Threads.sleep(timeout);
2467      }
2468    }
2469
2470    // Here for the master startup process, we use TaskGroup to monitor the whole progress.
2471    // The UI is similar to how Hadoop designed the startup page for the NameNode.
2472    // See HBASE-21521 for more details.
    // We do not clean up the startupTaskGroup; the startup progress information is kept
    // in memory permanently.
2475    startupTaskGroup = TaskMonitor.createTaskGroup(true, "Master startup");
2476    try {
2477      if (activeMasterManager.blockUntilBecomingActiveMaster(timeout, startupTaskGroup)) {
2478        finishActiveMasterInitialization();
2479      }
2480    } catch (Throwable t) {
      startupTaskGroup.abort("Failed to become active master due to: " + t.getMessage());
2482      LOG.error(HBaseMarkers.FATAL, "Failed to become active master", t);
2483      // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility
2484      if (
2485        t instanceof NoClassDefFoundError
2486          && t.getMessage().contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction")
2487      ) {
2488        // improved error message for this special case
2489        abort("HBase is having a problem with its Hadoop jars.  You may need to recompile "
2490          + "HBase against Hadoop version " + org.apache.hadoop.util.VersionInfo.getVersion()
2491          + " or change your hadoop jars to start properly", t);
2492      } else {
2493        abort("Unhandled exception. Starting shutdown.", t);
2494      }
2495    }
2496  }
2497
2498  private static boolean isCatalogTable(final TableName tableName) {
2499    return tableName.equals(TableName.META_TABLE_NAME);
2500  }
2501
2502  @Override
2503  public long deleteTable(final TableName tableName, final long nonceGroup, final long nonce)
2504    throws IOException {
2505    checkInitialized();
2506
2507    return MasterProcedureUtil
2508      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2509        @Override
2510        protected void run() throws IOException {
2511          getMaster().getMasterCoprocessorHost().preDeleteTable(tableName);
2512
2513          LOG.info(getClientIdAuditPrefix() + " delete " + tableName);
2514
2515          // TODO: We can handle/merge duplicate request
2516          //
2517          // We need to wait for the procedure to potentially fail due to "prepare" sanity
2518          // checks. This will block only the beginning of the procedure. See HBASE-19953.
2519          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
2520          submitProcedure(
2521            new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, latch));
2522          latch.await();
2523
2524          getMaster().getMasterCoprocessorHost().postDeleteTable(tableName);
2525        }
2526
2527        @Override
2528        protected String getDescription() {
2529          return "DeleteTableProcedure";
2530        }
2531      });
2532  }
2533
2534  @Override
2535  public long truncateTable(final TableName tableName, final boolean preserveSplits,
2536    final long nonceGroup, final long nonce) throws IOException {
2537    checkInitialized();
2538
2539    return MasterProcedureUtil
2540      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2541        @Override
2542        protected void run() throws IOException {
2543          getMaster().getMasterCoprocessorHost().preTruncateTable(tableName);
2544
2545          LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);
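          // createLatch(2, 0): assumed semantics - only clients older than 2.0 (which lack
          // async procedure support) get a blocking latch; newer clients poll the returned
          // procedure id instead.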
2546          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0);
2547          submitProcedure(new TruncateTableProcedure(procedureExecutor.getEnvironment(), tableName,
2548            preserveSplits, latch));
2549          latch.await();
2550
2551          getMaster().getMasterCoprocessorHost().postTruncateTable(tableName);
2552        }
2553
2554        @Override
2555        protected String getDescription() {
2556          return "TruncateTableProcedure";
2557        }
2558      });
2559  }
2560
2561  @Override
2562  public long addColumn(final TableName tableName, final ColumnFamilyDescriptor column,
2563    final long nonceGroup, final long nonce) throws IOException {
2564    checkInitialized();
2565    checkTableExists(tableName);
2566
2567    return modifyTable(tableName, new TableDescriptorGetter() {
2568
2569      @Override
2570      public TableDescriptor get() throws IOException {
2571        TableDescriptor old = getTableDescriptors().get(tableName);
2572        if (old.hasColumnFamily(column.getName())) {
2573          throw new InvalidFamilyOperationException("Column family '" + column.getNameAsString()
2574            + "' in table '" + tableName + "' already exists so cannot be added");
2575        }
2576
2577        return TableDescriptorBuilder.newBuilder(old).setColumnFamily(column).build();
2578      }
2579    }, nonceGroup, nonce, true);
2580  }
2581
2582  /**
   * Implemented by callers to return a TableDescriptor once pre-checks have passed.
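   * <p>
   * A hedged sketch of typical usage (hypothetical family name), mirroring the anonymous
   * implementations used by {@code addColumn} and friends in this class:
   *
   * <pre>
   * TableDescriptorGetter getter = () -> TableDescriptorBuilder
   *   .newBuilder(getTableDescriptors().get(tableName))
   *   .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
   * </pre>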
2584   */
2585  protected interface TableDescriptorGetter {
2586    TableDescriptor get() throws IOException;
2587  }
2588
2589  @Override
2590  public long modifyColumn(final TableName tableName, final ColumnFamilyDescriptor descriptor,
2591    final long nonceGroup, final long nonce) throws IOException {
2592    checkInitialized();
2593    checkTableExists(tableName);
2594    return modifyTable(tableName, new TableDescriptorGetter() {
2595
2596      @Override
2597      public TableDescriptor get() throws IOException {
2598        TableDescriptor old = getTableDescriptors().get(tableName);
2599        if (!old.hasColumnFamily(descriptor.getName())) {
2600          throw new InvalidFamilyOperationException("Family '" + descriptor.getNameAsString()
2601            + "' does not exist, so it cannot be modified");
2602        }
2603
2604        return TableDescriptorBuilder.newBuilder(old).modifyColumnFamily(descriptor).build();
2605      }
2606    }, nonceGroup, nonce, true);
2607  }
2608
2609  @Override
2610  public long modifyColumnStoreFileTracker(TableName tableName, byte[] family, String dstSFT,
2611    long nonceGroup, long nonce) throws IOException {
2612    checkInitialized();
2613    return MasterProcedureUtil
2614      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2615
2616        @Override
2617        protected void run() throws IOException {
2618          String sft = getMaster().getMasterCoprocessorHost()
2619            .preModifyColumnFamilyStoreFileTracker(tableName, family, dstSFT);
2620          LOG.info("{} modify column {} store file tracker of table {} to {}",
2621            getClientIdAuditPrefix(), Bytes.toStringBinary(family), tableName, sft);
2622          submitProcedure(new ModifyColumnFamilyStoreFileTrackerProcedure(
2623            procedureExecutor.getEnvironment(), tableName, family, sft));
2624          getMaster().getMasterCoprocessorHost().postModifyColumnFamilyStoreFileTracker(tableName,
2625            family, dstSFT);
2626        }
2627
2628        @Override
2629        protected String getDescription() {
2630          return "ModifyColumnFamilyStoreFileTrackerProcedure";
2631        }
2632      });
2633  }
2634
2635  @Override
2636  public long deleteColumn(final TableName tableName, final byte[] columnName,
2637    final long nonceGroup, final long nonce) throws IOException {
2638    checkInitialized();
2639    checkTableExists(tableName);
2640
2641    return modifyTable(tableName, new TableDescriptorGetter() {
2642
2643      @Override
2644      public TableDescriptor get() throws IOException {
2645        TableDescriptor old = getTableDescriptors().get(tableName);
2646
2647        if (!old.hasColumnFamily(columnName)) {
2648          throw new InvalidFamilyOperationException(
2649            "Family '" + Bytes.toString(columnName) + "' does not exist, so it cannot be deleted");
2650        }
2651        if (old.getColumnFamilyCount() == 1) {
2652          throw new InvalidFamilyOperationException("Family '" + Bytes.toString(columnName)
2653            + "' is the only column family in the table, so it cannot be deleted");
2654        }
2655        return TableDescriptorBuilder.newBuilder(old).removeColumnFamily(columnName).build();
2656      }
2657    }, nonceGroup, nonce, true);
2658  }
2659
2660  @Override
2661  public long enableTable(final TableName tableName, final long nonceGroup, final long nonce)
2662    throws IOException {
2663    checkInitialized();
2664
2665    return MasterProcedureUtil
2666      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2667        @Override
2668        protected void run() throws IOException {
2669          getMaster().getMasterCoprocessorHost().preEnableTable(tableName);
2670
2671          // Normally, it would make sense for this authorization check to exist inside
2672          // AccessController, but because the authorization check is done based on internal state
2673          // (rather than explicit permissions) we'll do the check here instead of in the
2674          // coprocessor.
2675          MasterQuotaManager quotaManager = getMasterQuotaManager();
2676          if (quotaManager != null) {
2677            if (quotaManager.isQuotaInitialized()) {
2678              SpaceQuotaSnapshot currSnapshotOfTable =
2679                QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName);
2680              if (currSnapshotOfTable != null) {
2681                SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus();
2682                if (
2683                  quotaStatus.isInViolation()
2684                    && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)
2685                ) {
2686                  throw new AccessDeniedException("Enabling the table '" + tableName
2687                    + "' is disallowed due to a violated space quota.");
2688                }
2689              }
            } else if (LOG.isTraceEnabled()) {
              LOG.trace(
                "Unable to check for space quotas as the MasterQuotaManager is not yet initialized");
2693            }
2694          }
2695
2696          LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
2697
2698          // Execute the operation asynchronously - client will check the progress of the operation
          // In case the request is from a <1.1 client, before returning we want to make
          // sure that the table is prepared to be enabled (the table is locked and the
          // table state is set).
2702          // Note: if the procedure throws exception, we will catch it and rethrow.
2703          final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch();
2704          submitProcedure(
2705            new EnableTableProcedure(procedureExecutor.getEnvironment(), tableName, prepareLatch));
2706          prepareLatch.await();
2707
2708          getMaster().getMasterCoprocessorHost().postEnableTable(tableName);
2709        }
2710
2711        @Override
2712        protected String getDescription() {
2713          return "EnableTableProcedure";
2714        }
2715      });
2716  }
2717
2718  @Override
2719  public long disableTable(final TableName tableName, final long nonceGroup, final long nonce)
2720    throws IOException {
2721    checkInitialized();
2722
2723    return MasterProcedureUtil
2724      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2725        @Override
2726        protected void run() throws IOException {
2727          getMaster().getMasterCoprocessorHost().preDisableTable(tableName);
2728
2729          LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
2730
2731          // Execute the operation asynchronously - client will check the progress of the operation
          // In case the request is from a <1.1 client, before returning we want to make
          // sure that the table is prepared to be disabled (the table is locked and the
          // table state is set).
2735          // Note: if the procedure throws exception, we will catch it and rethrow.
2736          //
2737          // We need to wait for the procedure to potentially fail due to "prepare" sanity
2738          // checks. This will block only the beginning of the procedure. See HBASE-19953.
2739          final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createBlockingLatch();
2740          submitProcedure(new DisableTableProcedure(procedureExecutor.getEnvironment(), tableName,
2741            false, prepareLatch));
2742          prepareLatch.await();
2743
2744          getMaster().getMasterCoprocessorHost().postDisableTable(tableName);
2745        }
2746
2747        @Override
2748        protected String getDescription() {
2749          return "DisableTableProcedure";
2750        }
2751      });
2752  }
2753
2754  private long modifyTable(final TableName tableName,
2755    final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce,
2756    final boolean shouldCheckDescriptor) throws IOException {
2757    return MasterProcedureUtil
2758      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2759        @Override
2760        protected void run() throws IOException {
2761          TableDescriptor oldDescriptor = getMaster().getTableDescriptors().get(tableName);
2762          TableDescriptor newDescriptor = getMaster().getMasterCoprocessorHost()
2763            .preModifyTable(tableName, oldDescriptor, newDescriptorGetter.get());
2764          TableDescriptorChecker.sanityCheck(conf, newDescriptor);
2765          LOG.info("{} modify table {} from {} to {}", getClientIdAuditPrefix(), tableName,
2766            oldDescriptor, newDescriptor);
2767
          // Execute the operation synchronously - wait for the operation to complete before
          // continuing.
2770          //
2771          // We need to wait for the procedure to potentially fail due to "prepare" sanity
2772          // checks. This will block only the beginning of the procedure. See HBASE-19953.
2773          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
2774          submitProcedure(new ModifyTableProcedure(procedureExecutor.getEnvironment(),
2775            newDescriptor, latch, oldDescriptor, shouldCheckDescriptor));
2776          latch.await();
2777
2778          getMaster().getMasterCoprocessorHost().postModifyTable(tableName, oldDescriptor,
2779            newDescriptor);
2780        }
2781
2782        @Override
2783        protected String getDescription() {
2784          return "ModifyTableProcedure";
2785        }
2786      });
2787
2788  }
2789
2790  @Override
2791  public long modifyTable(final TableName tableName, final TableDescriptor newDescriptor,
2792    final long nonceGroup, final long nonce) throws IOException {
2793    checkInitialized();
2794    return modifyTable(tableName, new TableDescriptorGetter() {
2795      @Override
2796      public TableDescriptor get() throws IOException {
2797        return newDescriptor;
2798      }
2799    }, nonceGroup, nonce, false);
2800
2801  }
2802
2803  @Override
2804  public long modifyTableStoreFileTracker(TableName tableName, String dstSFT, long nonceGroup,
2805    long nonce) throws IOException {
2806    checkInitialized();
2807    return MasterProcedureUtil
2808      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2809
2810        @Override
2811        protected void run() throws IOException {
2812          String sft = getMaster().getMasterCoprocessorHost()
2813            .preModifyTableStoreFileTracker(tableName, dstSFT);
2814          LOG.info("{} modify table store file tracker of table {} to {}", getClientIdAuditPrefix(),
2815            tableName, sft);
2816          submitProcedure(new ModifyTableStoreFileTrackerProcedure(
2817            procedureExecutor.getEnvironment(), tableName, sft));
2818          getMaster().getMasterCoprocessorHost().postModifyTableStoreFileTracker(tableName, sft);
2819        }
2820
2821        @Override
2822        protected String getDescription() {
2823          return "ModifyTableStoreFileTrackerProcedure";
2824        }
2825      });
2826  }
2827
2828  public long restoreSnapshot(final SnapshotDescription snapshotDesc, final long nonceGroup,
2829    final long nonce, final boolean restoreAcl, final String customSFT) throws IOException {
2830    checkInitialized();
2831    getSnapshotManager().checkSnapshotSupport();
2832
    // Ensure the namespace exists. Will throw an exception if the namespace is unknown.
2834    final TableName dstTable = TableName.valueOf(snapshotDesc.getTable());
2835    getClusterSchema().getNamespace(dstTable.getNamespaceAsString());
2836
2837    return MasterProcedureUtil
2838      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2839        @Override
2840        protected void run() throws IOException {
2841          setProcId(getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(),
2842            restoreAcl, customSFT));
2843        }
2844
2845        @Override
2846        protected String getDescription() {
2847          return "RestoreSnapshotProcedure";
2848        }
2849      });
2850  }
2851
2852  private void checkTableExists(final TableName tableName)
2853    throws IOException, TableNotFoundException {
2854    if (!tableDescriptors.exists(tableName)) {
2855      throw new TableNotFoundException(tableName);
2856    }
2857  }
2858
2859  @Override
2860  public void checkTableModifiable(final TableName tableName)
2861    throws IOException, TableNotFoundException, TableNotDisabledException {
2862    if (isCatalogTable(tableName)) {
2863      throw new IOException("Can't modify catalog tables");
2864    }
2865    checkTableExists(tableName);
2866    TableState ts = getTableStateManager().getTableState(tableName);
2867    if (!ts.isDisabled()) {
2868      throw new TableNotDisabledException("Not DISABLED; " + ts);
2869    }
2870  }
2871
2872  public ClusterMetrics getClusterMetricsWithoutCoprocessor() throws InterruptedIOException {
2873    return getClusterMetricsWithoutCoprocessor(EnumSet.allOf(Option.class));
2874  }
2875
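  // A hedged usage sketch: callers can narrow the requested options to keep the response
  // small, e.g. getClusterMetricsWithoutCoprocessor(EnumSet.of(Option.LIVE_SERVERS,
  // Option.DEAD_SERVERS)); an empty set is treated as a request for everything below.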
2876  public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet<Option> options)
2877    throws InterruptedIOException {
2878    ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder();
    // Given that HBase 1.x clients can't submit the request with Options,
    // we return all information to the client if the list of Options is empty.
2881    if (options.isEmpty()) {
2882      options = EnumSet.allOf(Option.class);
2883    }
2884
2885    // TASKS and/or LIVE_SERVERS will populate this map, which will be given to the builder if
2886    // not null after option processing completes.
2887    Map<ServerName, ServerMetrics> serverMetricsMap = null;
2888
2889    for (Option opt : options) {
2890      switch (opt) {
2891        case HBASE_VERSION:
2892          builder.setHBaseVersion(VersionInfo.getVersion());
2893          break;
2894        case CLUSTER_ID:
2895          builder.setClusterId(getClusterId());
2896          break;
2897        case MASTER:
2898          builder.setMasterName(getServerName());
2899          break;
2900        case BACKUP_MASTERS:
2901          builder.setBackerMasterNames(getBackupMasters());
2902          break;
2903        case TASKS: {
2904          // Master tasks
2905          builder.setMasterTasks(TaskMonitor.get().getTasks().stream()
2906            .map(task -> ServerTaskBuilder.newBuilder().setDescription(task.getDescription())
2907              .setStatus(task.getStatus())
2908              .setState(ServerTask.State.valueOf(task.getState().name()))
2909              .setStartTime(task.getStartTime()).setCompletionTime(task.getCompletionTimestamp())
2910              .build())
2911            .collect(Collectors.toList()));
2912          // TASKS is also synonymous with LIVE_SERVERS for now because task information for
2913          // regionservers is carried in ServerLoad.
2914          // Add entries to serverMetricsMap for all live servers, if we haven't already done so
2915          if (serverMetricsMap == null) {
2916            serverMetricsMap = getOnlineServers();
2917          }
2918          break;
2919        }
2920        case LIVE_SERVERS: {
2921          // Add entries to serverMetricsMap for all live servers, if we haven't already done so
2922          if (serverMetricsMap == null) {
2923            serverMetricsMap = getOnlineServers();
2924          }
2925          break;
2926        }
2927        case DEAD_SERVERS: {
2928          if (serverManager != null) {
2929            builder.setDeadServerNames(
2930              new ArrayList<>(serverManager.getDeadServers().copyServerNames()));
2931          }
2932          break;
2933        }
2934        case UNKNOWN_SERVERS: {
2935          if (serverManager != null) {
2936            builder.setUnknownServerNames(getUnknownServers());
2937          }
2938          break;
2939        }
2940        case MASTER_COPROCESSORS: {
2941          if (cpHost != null) {
2942            builder.setMasterCoprocessorNames(Arrays.asList(getMasterCoprocessors()));
2943          }
2944          break;
2945        }
2946        case REGIONS_IN_TRANSITION: {
2947          if (assignmentManager != null) {
2948            builder.setRegionsInTransition(
2949              assignmentManager.getRegionStates().getRegionsStateInTransition());
2950          }
2951          break;
2952        }
2953        case BALANCER_ON: {
2954          if (loadBalancerStateStore != null) {
2955            builder.setBalancerOn(loadBalancerStateStore.get());
2956          }
2957          break;
2958        }
2959        case MASTER_INFO_PORT: {
2960          if (infoServer != null) {
2961            builder.setMasterInfoPort(infoServer.getPort());
2962          }
2963          break;
2964        }
2965        case SERVERS_NAME: {
2966          if (serverManager != null) {
2967            builder.setServerNames(serverManager.getOnlineServersList());
2968          }
2969          break;
2970        }
2971        case TABLE_TO_REGIONS_COUNT: {
2972          if (isActiveMaster() && isInitialized() && assignmentManager != null) {
2973            try {
2974              Map<TableName, RegionStatesCount> tableRegionStatesCountMap = new HashMap<>();
2975              Map<String, TableDescriptor> tableDescriptorMap = getTableDescriptors().getAll();
2976              for (TableDescriptor tableDescriptor : tableDescriptorMap.values()) {
2977                TableName tableName = tableDescriptor.getTableName();
2978                RegionStatesCount regionStatesCount =
2979                  assignmentManager.getRegionStatesCount(tableName);
2980                tableRegionStatesCountMap.put(tableName, regionStatesCount);
2981              }
2982              builder.setTableRegionStatesCount(tableRegionStatesCountMap);
2983            } catch (IOException e) {
2984              LOG.error("Error while populating TABLE_TO_REGIONS_COUNT for Cluster Metrics..", e);
2985            }
2986          }
2987          break;
2988        }
2989        case DECOMMISSIONED_SERVERS: {
2990          if (serverManager != null) {
2991            builder.setDecommissionedServerNames(serverManager.getDrainingServersList());
2992          }
2993          break;
2994        }
2995      }
2996    }
2997
2998    if (serverMetricsMap != null) {
2999      builder.setLiveServerMetrics(serverMetricsMap);
3000    }
3001
3002    return builder.build();
3003  }
3004
3005  private List<ServerName> getUnknownServers() {
3006    if (serverManager != null) {
3007      final Set<ServerName> serverNames = getAssignmentManager().getRegionStates().getRegionStates()
3008        .stream().map(RegionState::getServerName).collect(Collectors.toSet());
3009      final List<ServerName> unknownServerNames = serverNames.stream()
3010        .filter(sn -> sn != null && serverManager.isServerUnknown(sn)).collect(Collectors.toList());
3011      return unknownServerNames;
3012    }
3013    return null;
3014  }
3015
  private Map<ServerName, ServerMetrics> getOnlineServers() {
    if (serverManager != null) {
      // Defensive copy of the live-server map.
      return new HashMap<>(serverManager.getOnlineServers());
    }
    return null;
  }
3024
3025  /** Returns cluster status */
3026  public ClusterMetrics getClusterMetrics() throws IOException {
3027    return getClusterMetrics(EnumSet.allOf(Option.class));
3028  }
3029
3030  public ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException {
3031    if (cpHost != null) {
3032      cpHost.preGetClusterMetrics();
3033    }
3034    ClusterMetrics status = getClusterMetricsWithoutCoprocessor(options);
3035    if (cpHost != null) {
3036      cpHost.postGetClusterMetrics(status);
3037    }
3038    return status;
3039  }
3040
3041  /** Returns info port of active master or 0 if any exception occurs. */
3042  public int getActiveMasterInfoPort() {
3043    return activeMasterManager.getActiveMasterInfoPort();
3044  }
3045
3046  /**
   * @param sn the ServerName of the backup master
3048   * @return info port of backup master or 0 if any exception occurs.
3049   */
3050  public int getBackupMasterInfoPort(final ServerName sn) {
3051    return activeMasterManager.getBackupMasterInfoPort(sn);
3052  }
3053
3054  /**
3055   * The set of loaded coprocessors is stored in a static set. Since it's statically allocated, it
3056   * does not require that HMaster's cpHost be initialized prior to accessing it.
3057   * @return a String representation of the set of names of the loaded coprocessors.
3058   */
3059  public static String getLoadedCoprocessors() {
3060    return CoprocessorHost.getLoadedCoprocessors().toString();
3061  }
3062
3063  /** Returns timestamp in millis when HMaster was started. */
3064  public long getMasterStartTime() {
3065    return startcode;
3066  }
3067
3068  /** Returns timestamp in millis when HMaster became the active master. */
3069  public long getMasterActiveTime() {
3070    return masterActiveTime;
3071  }
3072
3073  /** Returns timestamp in millis when HMaster finished becoming the active master */
3074  public long getMasterFinishedInitializationTime() {
3075    return masterFinishedInitializationTime;
3076  }
3077
3078  public int getNumWALFiles() {
3079    return 0;
3080  }
3081
3082  public ProcedureStore getProcedureStore() {
3083    return procedureStore;
3084  }
3085
3086  public int getRegionServerInfoPort(final ServerName sn) {
3087    int port = this.serverManager.getInfoPort(sn);
3088    return port == 0
3089      ? conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT)
3090      : port;
3091  }
3092
3093  @Override
3094  public String getRegionServerVersion(ServerName sn) {
3095    // Will return "0.0.0" if the server is not online to prevent move system region to unknown
3096    // version RS.
3097    return this.serverManager.getVersion(sn);
3098  }
3099
3100  @Override
3101  public void checkIfShouldMoveSystemRegionAsync() {
3102    assignmentManager.checkIfShouldMoveSystemRegionAsync();
3103  }
3104
3105  /** Returns array of coprocessor SimpleNames. */
3106  public String[] getMasterCoprocessors() {
3107    Set<String> masterCoprocessors = getMasterCoprocessorHost().getCoprocessors();
3108    return masterCoprocessors.toArray(new String[masterCoprocessors.size()]);
3109  }
3110
3111  @Override
3112  public void abort(String reason, Throwable cause) {
3113    if (!setAbortRequested() || isStopped()) {
3114      LOG.debug("Abort called but aborted={}, stopped={}", isAborted(), isStopped());
3115      return;
3116    }
3117    if (cpHost != null) {
3118      // HBASE-4014: dump a list of loaded coprocessors.
3119      LOG.error(HBaseMarkers.FATAL,
3120        "Master server abort: loaded coprocessors are: " + getLoadedCoprocessors());
3121    }
3122    String msg = "***** ABORTING master " + this + ": " + reason + " *****";
3123    if (cause != null) {
3124      LOG.error(HBaseMarkers.FATAL, msg, cause);
3125    } else {
3126      LOG.error(HBaseMarkers.FATAL, msg);
3127    }
3128
3129    try {
3130      stopMaster();
3131    } catch (IOException e) {
3132      LOG.error("Exception occurred while stopping master", e);
3133    }
3134  }
3135
3136  @Override
3137  public MasterCoprocessorHost getMasterCoprocessorHost() {
3138    return cpHost;
3139  }
3140
3141  @Override
3142  public MasterQuotaManager getMasterQuotaManager() {
3143    return quotaManager;
3144  }
3145
3146  @Override
3147  public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
3148    return procedureExecutor;
3149  }
3150
3151  @Override
3152  public ServerName getServerName() {
3153    return this.serverName;
3154  }
3155
3156  @Override
3157  public AssignmentManager getAssignmentManager() {
3158    return this.assignmentManager;
3159  }
3160
3161  @Override
3162  public CatalogJanitor getCatalogJanitor() {
3163    return this.catalogJanitorChore;
3164  }
3165
3166  public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
3167    return rsFatals;
3168  }
3169
3170  public TaskGroup getStartupProgress() {
3171    return startupTaskGroup;
3172  }
3173
3174  /**
3175   * Shutdown the cluster. Master runs a coordinated stop of all RegionServers and then itself.
3176   */
3177  public void shutdown() throws IOException {
3178    TraceUtil.trace(() -> {
3179      if (cpHost != null) {
3180        cpHost.preShutdown();
3181      }
3182
      // Tell the ServerManager that cluster shutdown has been called. This makes it so that when
      // the Master is the last running server, it'll stop itself. Next, we broadcast the cluster
      // shutdown by setting the cluster status as down. RegionServers will notice this change in
      // state and will start shutting themselves down. When the last one has exited, the Master
      // can go down.
3187      if (this.serverManager != null) {
3188        this.serverManager.shutdownCluster();
3189      }
3190      if (this.clusterStatusTracker != null) {
3191        try {
3192          this.clusterStatusTracker.setClusterDown();
3193        } catch (KeeperException e) {
3194          LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
3195        }
3196      }
3197      // Stop the procedure executor. Will stop any ongoing assign, unassign, server crash etc.,
3198      // processing so we can go down.
3199      if (this.procedureExecutor != null) {
3200        this.procedureExecutor.stop();
3201      }
3202      // Shutdown our cluster connection. This will kill any hosted RPCs that might be going on;
3203      // this is what we want especially if the Master is in startup phase doing call outs to
      // hbase:meta, etc. when cluster is down. Without this connection close, we'd have to wait on
3205      // the rpc to timeout.
3206      if (this.asyncClusterConnection != null) {
3207        this.asyncClusterConnection.close();
3208      }
3209    }, "HMaster.shutdown");
3210  }
3211
3212  public void stopMaster() throws IOException {
3213    if (cpHost != null) {
3214      cpHost.preStopMaster();
3215    }
3216    stop("Stopped by " + Thread.currentThread().getName());
3217  }
3218
3219  @Override
3220  public void stop(String msg) {
3221    if (!this.stopped) {
3222      LOG.info("***** STOPPING master '" + this + "' *****");
3223      this.stopped = true;
3224      LOG.info("STOPPED: " + msg);
3225      // Wakes run() if it is sleeping
3226      sleeper.skipSleepCycle();
3227      if (this.activeMasterManager != null) {
3228        this.activeMasterManager.stop();
3229      }
3230    }
3231  }
3232
3233  protected void checkServiceStarted() throws ServerNotRunningYetException {
3234    if (!serviceStarted) {
3235      throw new ServerNotRunningYetException("Server is not running yet");
3236    }
3237  }
3238
3239  void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException,
3240    MasterNotRunningException, MasterStoppedException {
3241    checkServiceStarted();
3242    if (!isInitialized()) {
3243      throw new PleaseHoldException("Master is initializing");
3244    }
3245    if (isStopped()) {
3246      throw new MasterStoppedException();
3247    }
3248  }
3249
3250  /**
3251   * Report whether this master is currently the active master or not. If not active master, we are
3252   * parked on ZK waiting to become active. This method is used for testing.
3253   * @return true if active master, false if not.
3254   */
3255  @Override
3256  public boolean isActiveMaster() {
3257    return activeMaster;
3258  }
3259
3260  /**
3261   * Report whether this master has completed with its initialization and is ready. If ready, the
3262   * master is also the active master. A standby master is never ready. This method is used for
3263   * testing.
3264   * @return true if master is ready to go, false if not.
3265   */
3266  @Override
3267  public boolean isInitialized() {
3268    return initialized.isReady();
3269  }
3270
3271  /**
   * Report whether this master is started. This method is used for testing.
   * @return true if master has started, false if not.
3274   */
3275  public boolean isOnline() {
3276    return serviceStarted;
3277  }
3278
3279  /**
3280   * Report whether this master is in maintenance mode.
3281   * @return true if master is in maintenanceMode
3282   */
3283  @Override
3284  public boolean isInMaintenanceMode() {
3285    return maintenanceMode;
3286  }
3287
3288  public void setInitialized(boolean isInitialized) {
3289    procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
3290  }
3291
3292  @Override
3293  public ProcedureEvent<?> getInitializedEvent() {
3294    return initialized;
3295  }
3296
3297  /**
3298   * Compute the average load across all region servers. Currently, this uses a very naive
3299   * computation - just uses the number of regions being served, ignoring stats about number of
3300   * requests.
3301   * @return the average load
3302   */
3303  public double getAverageLoad() {
3304    if (this.assignmentManager == null) {
3305      return 0;
3306    }
3307
3308    RegionStates regionStates = this.assignmentManager.getRegionStates();
3309    if (regionStates == null) {
3310      return 0;
3311    }
3312    return regionStates.getAverageLoad();
3313  }
3314
3315  @Override
3316  public boolean registerService(Service instance) {
3317    /*
3318     * No stacking of instances is allowed for a single service name
3319     */
3320    Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType();
3321    String serviceName = CoprocessorRpcUtils.getServiceName(serviceDesc);
3322    if (coprocessorServiceHandlers.containsKey(serviceName)) {
3323      LOG.error("Coprocessor service " + serviceName
3324        + " already registered, rejecting request from " + instance);
3325      return false;
3326    }
3327
3328    coprocessorServiceHandlers.put(serviceName, instance);
3329    if (LOG.isDebugEnabled()) {
3330      LOG.debug("Registered master coprocessor service: service=" + serviceName);
3331    }
3332    return true;
3333  }
3334
3335  /**
3336   * Utility for constructing an instance of the passed HMaster class.
3337   * @return HMaster instance.
3338   */
3339  public static HMaster constructMaster(Class<? extends HMaster> masterClass,
3340    final Configuration conf) {
3341    try {
3342      Constructor<? extends HMaster> c = masterClass.getConstructor(Configuration.class);
3343      return c.newInstance(conf);
3344    } catch (Exception e) {
3345      Throwable error = e;
3346      if (
3347        e instanceof InvocationTargetException
3348          && ((InvocationTargetException) e).getTargetException() != null
3349      ) {
3350        error = ((InvocationTargetException) e).getTargetException();
3351      }
3352      throw new RuntimeException("Failed construction of Master: " + masterClass.toString() + ". ",
3353        error);
3354    }
3355  }
3356
3357  /**
3358   * @see org.apache.hadoop.hbase.master.HMasterCommandLine
3359   */
3360  public static void main(String[] args) {
3361    LOG.info("STARTING service " + HMaster.class.getSimpleName());
3362    VersionInfo.logVersion();
3363    new HMasterCommandLine(HMaster.class).doMain(args);
3364  }
3365
3366  public HFileCleaner getHFileCleaner() {
3367    return this.hfileCleaners.get(0);
3368  }
3369
3370  public List<HFileCleaner> getHFileCleaners() {
3371    return this.hfileCleaners;
3372  }
3373
3374  public LogCleaner getLogCleaner() {
3375    return this.logCleaner;
3376  }
3377
3378  /** Returns the underlying snapshot manager */
3379  @Override
3380  public SnapshotManager getSnapshotManager() {
3381    return this.snapshotManager;
3382  }
3383
3384  /** Returns the underlying MasterProcedureManagerHost */
3385  @Override
3386  public MasterProcedureManagerHost getMasterProcedureManagerHost() {
3387    return mpmHost;
3388  }
3389
3390  @Override
3391  public ClusterSchema getClusterSchema() {
3392    return this.clusterSchemaService;
3393  }
3394
3395  /**
3396   * Create a new Namespace.
3397   * @param namespaceDescriptor descriptor for new Namespace
3398   * @param nonceGroup          Identifier for the source of the request, a client or process.
3399   * @param nonce               A unique identifier for this operation from the client or process
3400   *                            identified by <code>nonceGroup</code> (the source must ensure each
3401   *                            operation gets a unique id).
3402   * @return procedure id
3403   */
3404  long createNamespace(final NamespaceDescriptor namespaceDescriptor, final long nonceGroup,
3405    final long nonce) throws IOException {
3406    checkInitialized();
3407
3408    TableName.isLegalNamespaceName(Bytes.toBytes(namespaceDescriptor.getName()));
3409
3410    return MasterProcedureUtil
3411      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
3412        @Override
3413        protected void run() throws IOException {
3414          getMaster().getMasterCoprocessorHost().preCreateNamespace(namespaceDescriptor);
3415          // We need to wait for the procedure to potentially fail due to "prepare" sanity
3416          // checks. This will block only the beginning of the procedure. See HBASE-19953.
3417          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
3418          LOG.info(getClientIdAuditPrefix() + " creating " + namespaceDescriptor);
3419          // Execute the operation synchronously - wait for the operation to complete before
3420          // continuing.
3421          setProcId(getClusterSchema().createNamespace(namespaceDescriptor, getNonceKey(), latch));
3422          latch.await();
3423          getMaster().getMasterCoprocessorHost().postCreateNamespace(namespaceDescriptor);
3424        }
3425
3426        @Override
3427        protected String getDescription() {
3428          return "CreateNamespaceProcedure";
3429        }
3430      });
3431  }
3432
3433  /**
3434   * Modify an existing Namespace.
3435   * @param nonceGroup Identifier for the source of the request, a client or process.
3436   * @param nonce      A unique identifier for this operation from the client or process identified
3437   *                   by <code>nonceGroup</code> (the source must ensure each operation gets a
3438   *                   unique id).
3439   * @return procedure id
3440   */
3441  long modifyNamespace(final NamespaceDescriptor newNsDescriptor, final long nonceGroup,
3442    final long nonce) throws IOException {
3443    checkInitialized();
3444
3445    TableName.isLegalNamespaceName(Bytes.toBytes(newNsDescriptor.getName()));
3446
3447    return MasterProcedureUtil
3448      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
3449        @Override
3450        protected void run() throws IOException {
3451          NamespaceDescriptor oldNsDescriptor = getNamespace(newNsDescriptor.getName());
3452          getMaster().getMasterCoprocessorHost().preModifyNamespace(oldNsDescriptor,
3453            newNsDescriptor);
3454          // We need to wait for the procedure to potentially fail due to "prepare" sanity
3455          // checks. This will block only the beginning of the procedure. See HBASE-19953.
3456          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
3457          LOG.info(getClientIdAuditPrefix() + " modify " + newNsDescriptor);
3458          // Execute the operation synchronously - wait for the operation to complete before
3459          // continuing.
3460          setProcId(getClusterSchema().modifyNamespace(newNsDescriptor, getNonceKey(), latch));
3461          latch.await();
3462          getMaster().getMasterCoprocessorHost().postModifyNamespace(oldNsDescriptor,
3463            newNsDescriptor);
3464        }
3465
3466        @Override
3467        protected String getDescription() {
3468          return "ModifyNamespaceProcedure";
3469        }
3470      });
3471  }
3472
3473  /**
3474   * Delete an existing Namespace. Only empty Namespaces (no tables) can be removed.
3475   * @param nonceGroup Identifier for the source of the request, a client or process.
3476   * @param nonce      A unique identifier for this operation from the client or process identified
3477   *                   by <code>nonceGroup</code> (the source must ensure each operation gets a
3478   *                   unique id).
3479   * @return procedure id
3480   */
3481  long deleteNamespace(final String name, final long nonceGroup, final long nonce)
3482    throws IOException {
3483    checkInitialized();
3484
3485    return MasterProcedureUtil
3486      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
3487        @Override
3488        protected void run() throws IOException {
3489          getMaster().getMasterCoprocessorHost().preDeleteNamespace(name);
3490          LOG.info(getClientIdAuditPrefix() + " delete " + name);
3491          // Execute the operation synchronously - wait for the operation to complete before
3492          // continuing.
3493          //
3494          // We need to wait for the procedure to potentially fail due to "prepare" sanity
3495          // checks. This will block only the beginning of the procedure. See HBASE-19953.
3496          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
3497          setProcId(submitProcedure(
3498            new DeleteNamespaceProcedure(procedureExecutor.getEnvironment(), name, latch)));
3499          latch.await();
3500          // Will not be invoked in the face of Exception thrown by the Procedure's execution
3501          getMaster().getMasterCoprocessorHost().postDeleteNamespace(name);
3502        }
3503
3504        @Override
3505        protected String getDescription() {
3506          return "DeleteNamespaceProcedure";
3507        }
3508      });
3509  }
3510
3511  /**
3512   * Get a Namespace
3513   * @param name Name of the Namespace
3514   * @return Namespace descriptor for <code>name</code>
3515   */
3516  NamespaceDescriptor getNamespace(String name) throws IOException {
3517    checkInitialized();
    if (this.cpHost != null) {
      this.cpHost.preGetNamespaceDescriptor(name);
    }
    NamespaceDescriptor nsd = this.clusterSchemaService.getNamespace(name);
    if (this.cpHost != null) {
      this.cpHost.postGetNamespaceDescriptor(nsd);
    }
3521    return nsd;
3522  }
3523
3524  /**
3525   * Get all Namespaces
3526   * @return All Namespace descriptors
3527   */
3528  List<NamespaceDescriptor> getNamespaces() throws IOException {
3529    checkInitialized();
3530    final List<NamespaceDescriptor> nsds = new ArrayList<>();
3531    if (cpHost != null) {
3532      cpHost.preListNamespaceDescriptors(nsds);
3533    }
3534    nsds.addAll(this.clusterSchemaService.getNamespaces());
3535    if (this.cpHost != null) {
3536      this.cpHost.postListNamespaceDescriptors(nsds);
3537    }
3538    return nsds;
3539  }
3540
3541  /**
3542   * List namespace names
3543   * @return All namespace names
3544   */
3545  public List<String> listNamespaces() throws IOException {
3546    checkInitialized();
3547    List<String> namespaces = new ArrayList<>();
3548    if (cpHost != null) {
3549      cpHost.preListNamespaces(namespaces);
3550    }
3551    for (NamespaceDescriptor namespace : clusterSchemaService.getNamespaces()) {
3552      namespaces.add(namespace.getName());
3553    }
3554    if (cpHost != null) {
3555      cpHost.postListNamespaces(namespaces);
3556    }
3557    return namespaces;
3558  }
3559
3560  @Override
3561  public List<TableName> listTableNamesByNamespace(String name) throws IOException {
3562    checkInitialized();
3563    return listTableNames(name, null, true);
3564  }
3565
3566  @Override
3567  public List<TableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
3568    checkInitialized();
3569    return listTableDescriptors(name, null, null, true);
3570  }
3571
3572  @Override
3573  public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
3574    throws IOException {
3575    if (cpHost != null) {
3576      cpHost.preAbortProcedure(this.procedureExecutor, procId);
3577    }
3578
3579    final boolean result = this.procedureExecutor.abort(procId, mayInterruptIfRunning);
3580
3581    if (cpHost != null) {
3582      cpHost.postAbortProcedure();
3583    }
3584
3585    return result;
3586  }
3587
3588  @Override
3589  public List<Procedure<?>> getProcedures() throws IOException {
3590    if (cpHost != null) {
3591      cpHost.preGetProcedures();
3592    }
3593
3594    @SuppressWarnings({ "unchecked", "rawtypes" })
3595    List<Procedure<?>> procList = (List) this.procedureExecutor.getProcedures();
3596
3597    if (cpHost != null) {
3598      cpHost.postGetProcedures(procList);
3599    }
3600
3601    return procList;
3602  }
3603
3604  @Override
3605  public List<LockedResource> getLocks() throws IOException {
3606    if (cpHost != null) {
3607      cpHost.preGetLocks();
3608    }
3609
3610    MasterProcedureScheduler procedureScheduler =
3611      procedureExecutor.getEnvironment().getProcedureScheduler();
3612
3613    final List<LockedResource> lockedResources = procedureScheduler.getLocks();
3614
3615    if (cpHost != null) {
3616      cpHost.postGetLocks(lockedResources);
3617    }
3618
3619    return lockedResources;
3620  }
3621
3622  /**
3623   * Returns the list of table descriptors that match the specified request
3624   * @param namespace        the namespace to query, or null if querying for all
3625   * @param regex            The regular expression to match against, or null if querying for all
3626   * @param tableNameList    the list of table names, or null if querying for all
3627   * @param includeSysTables False to match only against userspace tables
3628   * @return the list of table descriptors
3629   */
3630  public List<TableDescriptor> listTableDescriptors(final String namespace, final String regex,
3631    final List<TableName> tableNameList, final boolean includeSysTables) throws IOException {
3632    List<TableDescriptor> htds = new ArrayList<>();
3633    if (cpHost != null) {
3634      cpHost.preGetTableDescriptors(tableNameList, htds, regex);
3635    }
3636    htds = getTableDescriptors(htds, namespace, regex, tableNameList, includeSysTables);
3637    if (cpHost != null) {
3638      cpHost.postGetTableDescriptors(tableNameList, htds, regex);
3639    }
3640    return htds;
3641  }
3642
3643  /**
3644   * Returns the list of table names that match the specified request
3645   * @param regex            The regular expression to match against, or null if querying for all
3646   * @param namespace        the namespace to query, or null if querying for all
3647   * @param includeSysTables False to match only against userspace tables
3648   * @return the list of table names
3649   */
3650  public List<TableName> listTableNames(final String namespace, final String regex,
3651    final boolean includeSysTables) throws IOException {
3652    List<TableDescriptor> htds = new ArrayList<>();
3653    if (cpHost != null) {
3654      cpHost.preGetTableNames(htds, regex);
3655    }
3656    htds = getTableDescriptors(htds, namespace, regex, null, includeSysTables);
3657    if (cpHost != null) {
3658      cpHost.postGetTableNames(htds, regex);
3659    }
3660    List<TableName> result = new ArrayList<>(htds.size());
    for (TableDescriptor htd : htds) {
      result.add(htd.getTableName());
    }
3663    return result;
3664  }
3665
3666  /**
   * Return a list of table descriptors after applying any provided filter parameters. Note
3668   * that the user-facing description of this filter logic is presented on the class-level javadoc
3669   * of {@link NormalizeTableFilterParams}.
3670   */
3671  private List<TableDescriptor> getTableDescriptors(final List<TableDescriptor> htds,
3672    final String namespace, final String regex, final List<TableName> tableNameList,
3673    final boolean includeSysTables) throws IOException {
3674    if (tableNameList == null || tableNameList.isEmpty()) {
3675      // request for all TableDescriptors
3676      Collection<TableDescriptor> allHtds;
3677      if (namespace != null && namespace.length() > 0) {
        // Do a check on the namespace existence. Will fail if it does not exist.
3679        this.clusterSchemaService.getNamespace(namespace);
3680        allHtds = tableDescriptors.getByNamespace(namespace).values();
3681      } else {
3682        allHtds = tableDescriptors.getAll().values();
3683      }
3684      for (TableDescriptor desc : allHtds) {
3685        if (
3686          tableStateManager.isTablePresent(desc.getTableName())
3687            && (includeSysTables || !desc.getTableName().isSystemTable())
3688        ) {
3689          htds.add(desc);
3690        }
3691      }
3692    } else {
3693      for (TableName s : tableNameList) {
3694        if (tableStateManager.isTablePresent(s)) {
3695          TableDescriptor desc = tableDescriptors.get(s);
3696          if (desc != null) {
3697            htds.add(desc);
3698          }
3699        }
3700      }
3701    }
3702
3703    // Retains only those matched by regular expression.
    if (regex != null) {
      filterTablesByRegex(htds, Pattern.compile(regex));
    }
3705    return htds;
3706  }
3707
3708  /**
3709   * Removes the table descriptors that don't match the pattern.
3710   * @param descriptors list of table descriptors to filter
3711   * @param pattern     the regex to use
3712   */
3713  private static void filterTablesByRegex(final Collection<TableDescriptor> descriptors,
3714    final Pattern pattern) {
3715    final String defaultNS = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR;
3716    Iterator<TableDescriptor> itr = descriptors.iterator();
3717    while (itr.hasNext()) {
3718      TableDescriptor htd = itr.next();
3719      String tableName = htd.getTableName().getNameAsString();
3720      boolean matched = pattern.matcher(tableName).matches();
3721      if (!matched && htd.getTableName().getNamespaceAsString().equals(defaultNS)) {
3722        matched = pattern.matcher(defaultNS + TableName.NAMESPACE_DELIM + tableName).matches();
3723      }
3724      if (!matched) {
3725        itr.remove();
3726      }
3727    }
3728  }
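
  /*
   * Matching example (illustrative): for a table "t1" in the default namespace, both the
   * pattern "t1" and the pattern "default:t1" match, because unqualified default-namespace
   * names are retried with the "default:" prefix:
   *
   *   Pattern.compile("t1").matcher("t1").matches();                 // direct match
   *   Pattern.compile("default:t1").matcher("default:t1").matches(); // prefixed retry
   */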
3729
3730  @Override
3731  public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
3732    return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
3733      .getLastMajorCompactionTimestamp(table);
3734  }
3735
3736  @Override
3737  public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
3738    return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
3739      .getLastMajorCompactionTimestamp(regionName);
3740  }
3741
3742  /**
   * Gets the mob file compaction state for a specific table. Whether all the mob files are
   * selected is only known while the compaction executes, but the statistic is recorded just
   * before the compaction starts, when the compaction type cannot yet be determined; so a rough
   * statistic is kept for mob file compactions. Only two compaction states are reported:
   * CompactionState.MAJOR_AND_MINOR and CompactionState.NONE.
   * @param tableName The current table name.
   * @return MAJOR_AND_MINOR if the table is currently under mob file compaction, NONE otherwise.
3750   */
3751  public GetRegionInfoResponse.CompactionState getMobCompactionState(TableName tableName) {
3752    AtomicInteger compactionsCount = mobCompactionStates.get(tableName);
3753    if (compactionsCount != null && compactionsCount.get() != 0) {
3754      return GetRegionInfoResponse.CompactionState.MAJOR_AND_MINOR;
3755    }
3756    return GetRegionInfoResponse.CompactionState.NONE;
3757  }
3758
3759  public void reportMobCompactionStart(TableName tableName) throws IOException {
3760    IdLock.Entry lockEntry = null;
3761    try {
3762      lockEntry = mobCompactionLock.getLockEntry(tableName.hashCode());
3763      AtomicInteger compactionsCount = mobCompactionStates.get(tableName);
3764      if (compactionsCount == null) {
3765        compactionsCount = new AtomicInteger(0);
3766        mobCompactionStates.put(tableName, compactionsCount);
3767      }
3768      compactionsCount.incrementAndGet();
3769    } finally {
3770      if (lockEntry != null) {
3771        mobCompactionLock.releaseLockEntry(lockEntry);
3772      }
3773    }
3774  }
3775
3776  public void reportMobCompactionEnd(TableName tableName) throws IOException {
3777    IdLock.Entry lockEntry = null;
3778    try {
3779      lockEntry = mobCompactionLock.getLockEntry(tableName.hashCode());
3780      AtomicInteger compactionsCount = mobCompactionStates.get(tableName);
3781      if (compactionsCount != null) {
3782        int count = compactionsCount.decrementAndGet();
3783        // remove the entry if the count is 0.
3784        if (count == 0) {
3785          mobCompactionStates.remove(tableName);
3786        }
3787      }
3788    } finally {
3789      if (lockEntry != null) {
3790        mobCompactionLock.releaseLockEntry(lockEntry);
3791      }
3792    }
3793  }
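
  /*
   * Protocol sketch (illustrative): the mob compaction state is a per-table reference count, so
   * concurrent compactions nest correctly. Assuming a TableName "tn":
   *
   *   master.reportMobCompactionStart(tn); // count 0 -> 1, state becomes MAJOR_AND_MINOR
   *   master.reportMobCompactionStart(tn); // count 1 -> 2
   *   master.reportMobCompactionEnd(tn);   // count 2 -> 1, state still MAJOR_AND_MINOR
   *   master.reportMobCompactionEnd(tn);   // count 1 -> 0, entry removed, state NONE
   */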
3794
3795  /**
3796   * Queries the state of the {@link LoadBalancerStateStore}. If the balancer is not initialized,
3797   * false is returned.
3798   * @return The state of the load balancer, or false if the load balancer isn't defined.
3799   */
3800  public boolean isBalancerOn() {
3801    return !isInMaintenanceMode() && loadBalancerStateStore != null && loadBalancerStateStore.get();
3802  }
3803
3804  /**
3805   * Queries the state of the {@link RegionNormalizerStateStore}. If it's not initialized, false is
3806   * returned.
3807   */
3808  public boolean isNormalizerOn() {
3809    return !isInMaintenanceMode() && getRegionNormalizerManager().isNormalizerOn();
3810  }
3811
3812  /**
   * Queries the state of the {@link SplitOrMergeStateStore}. If it is not initialized, or if the
   * given switchType is illegal, false is returned.
3815   * @param switchType see {@link org.apache.hadoop.hbase.client.MasterSwitchType}
3816   * @return The state of the switch
3817   */
3818  @Override
3819  public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) {
3820    return !isInMaintenanceMode() && splitOrMergeStateStore != null
3821      && splitOrMergeStateStore.isSplitOrMergeEnabled(switchType);
3822  }
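
  /*
   * Example (illustrative): callers typically guard split or merge scheduling with this check:
   *
   *   if (master.isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) {
   *     // safe to schedule a split
   *   }
   */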
3823
3824  /**
3825   * Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned.
3826   * <p/>
   * Note that the base load balancer is now always {@link RSGroupBasedLoadBalancer}, so this
   * method returns the name of the balancer used inside each rs group.
3829   * @return The name of the {@link LoadBalancer} in use.
3830   */
3831  public String getLoadBalancerClassName() {
3832    return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
3833      LoadBalancerFactory.getDefaultLoadBalancerClass().getName());
3834  }
3835
3836  public SplitOrMergeStateStore getSplitOrMergeStateStore() {
3837    return splitOrMergeStateStore;
3838  }
3839
3840  @Override
3841  public RSGroupBasedLoadBalancer getLoadBalancer() {
3842    return balancer;
3843  }
3844
3845  @Override
3846  public FavoredNodesManager getFavoredNodesManager() {
3847    return balancer.getFavoredNodesManager();
3848  }
3849
3850  private long executePeerProcedure(AbstractPeerProcedure<?> procedure) throws IOException {
3851    if (!isReplicationPeerModificationEnabled()) {
3852      throw new IOException("Replication peer modification disabled");
3853    }
3854    long procId = procedureExecutor.submitProcedure(procedure);
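    // The latch is released by the procedure itself; awaiting it here makes the peer
    // modification appear synchronous to the caller.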
3855    procedure.getLatch().await();
3856    return procId;
3857  }
3858
3859  @Override
3860  public long addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
3861    throws ReplicationException, IOException {
3862    LOG.info(getClientIdAuditPrefix() + " creating replication peer, id=" + peerId + ", config="
3863      + peerConfig + ", state=" + (enabled ? "ENABLED" : "DISABLED"));
3864    return executePeerProcedure(new AddPeerProcedure(peerId, peerConfig, enabled));
3865  }
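
  /*
   * Usage sketch (illustrative; the cluster key below is a placeholder):
   *
   *   ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
   *     .setClusterKey("zk1.example.com:2181:/hbase")
   *     .build();
   *   long procId = master.addReplicationPeer("peer_1", peerConfig, true);
   */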
3866
3867  @Override
3868  public long removeReplicationPeer(String peerId) throws ReplicationException, IOException {
3869    LOG.info(getClientIdAuditPrefix() + " removing replication peer, id=" + peerId);
3870    return executePeerProcedure(new RemovePeerProcedure(peerId));
3871  }
3872
3873  @Override
3874  public long enableReplicationPeer(String peerId) throws ReplicationException, IOException {
3875    LOG.info(getClientIdAuditPrefix() + " enable replication peer, id=" + peerId);
3876    return executePeerProcedure(new EnablePeerProcedure(peerId));
3877  }
3878
3879  @Override
3880  public long disableReplicationPeer(String peerId) throws ReplicationException, IOException {
3881    LOG.info(getClientIdAuditPrefix() + " disable replication peer, id=" + peerId);
3882    return executePeerProcedure(new DisablePeerProcedure(peerId));
3883  }
3884
3885  @Override
3886  public ReplicationPeerConfig getReplicationPeerConfig(String peerId)
3887    throws ReplicationException, IOException {
3888    if (cpHost != null) {
3889      cpHost.preGetReplicationPeerConfig(peerId);
3890    }
3891    LOG.info(getClientIdAuditPrefix() + " get replication peer config, id=" + peerId);
3892    ReplicationPeerConfig peerConfig = this.replicationPeerManager.getPeerConfig(peerId)
3893      .orElseThrow(() -> new ReplicationPeerNotFoundException(peerId));
3894    if (cpHost != null) {
3895      cpHost.postGetReplicationPeerConfig(peerId);
3896    }
3897    return peerConfig;
3898  }
3899
3900  @Override
3901  public long updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
3902    throws ReplicationException, IOException {
    LOG.info(getClientIdAuditPrefix() + " updating replication peer config, id=" + peerId
      + ", config=" + peerConfig);
3905    return executePeerProcedure(new UpdatePeerConfigProcedure(peerId, peerConfig));
3906  }
3907
3908  @Override
3909  public List<ReplicationPeerDescription> listReplicationPeers(String regex)
3910    throws ReplicationException, IOException {
3911    if (cpHost != null) {
3912      cpHost.preListReplicationPeers(regex);
3913    }
3914    LOG.debug("{} list replication peers, regex={}", getClientIdAuditPrefix(), regex);
3915    Pattern pattern = regex == null ? null : Pattern.compile(regex);
3916    List<ReplicationPeerDescription> peers = this.replicationPeerManager.listPeers(pattern);
3917    if (cpHost != null) {
3918      cpHost.postListReplicationPeers(regex);
3919    }
3920    return peers;
3921  }
3922
3923  @Override
3924  public long transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
3925    throws ReplicationException, IOException {
    LOG.info(
      getClientIdAuditPrefix()
        + " transiting current cluster state to {} for synchronous replication peer id={}",
      state, peerId);
3930    return executePeerProcedure(new TransitPeerSyncReplicationStateProcedure(peerId, state));
3931  }
3932
3933  @Override
3934  public boolean replicationPeerModificationSwitch(boolean on) throws IOException {
3935    return replicationPeerModificationStateStore.set(on);
3936  }
3937
3938  @Override
3939  public boolean isReplicationPeerModificationEnabled() {
3940    return replicationPeerModificationStateStore.get();
3941  }
3942
3943  /**
3944   * Mark region server(s) as decommissioned (previously called 'draining') to prevent additional
   * regions from getting assigned to them. Also unload the regions on the servers asynchronously
   * if <code>offload</code> is true.
   * @param servers Region servers to decommission.
   * @param offload True to asynchronously unload the regions on the given servers.
3947   */
3948  public void decommissionRegionServers(final List<ServerName> servers, final boolean offload)
3949    throws IOException {
3950    List<ServerName> serversAdded = new ArrayList<>(servers.size());
3951    // Place the decommission marker first.
3952    String parentZnode = getZooKeeper().getZNodePaths().drainingZNode;
3953    for (ServerName server : servers) {
3954      try {
3955        String node = ZNodePaths.joinZNode(parentZnode, server.getServerName());
3956        ZKUtil.createAndFailSilent(getZooKeeper(), node);
3957      } catch (KeeperException ke) {
3958        throw new HBaseIOException(
3959          this.zooKeeper.prefix("Unable to decommission '" + server.getServerName() + "'."), ke);
3960      }
3961      if (this.serverManager.addServerToDrainList(server)) {
3962        serversAdded.add(server);
3963      }
3964    }
3965    // Move the regions off the decommissioned servers.
3966    if (offload) {
3967      final List<ServerName> destServers = this.serverManager.createDestinationServersList();
3968      for (ServerName server : serversAdded) {
3969        final List<RegionInfo> regionsOnServer = this.assignmentManager.getRegionsOnServer(server);
3970        for (RegionInfo hri : regionsOnServer) {
3971          ServerName dest = balancer.randomAssignment(hri, destServers);
3972          if (dest == null) {
3973            throw new HBaseIOException("Unable to determine a plan to move " + hri);
3974          }
3975          RegionPlan rp = new RegionPlan(hri, server, dest);
3976          this.assignmentManager.moveAsync(rp);
3977        }
3978      }
3979    }
3980  }
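
  /*
   * Usage sketch (illustrative; the server name is a placeholder): decommission a server and
   * asynchronously move its regions elsewhere.
   *
   *   ServerName rs = ServerName.valueOf("rs1.example.com", 16020, 1700000000000L);
   *   master.decommissionRegionServers(Collections.singletonList(rs), true);
   */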
3981
3982  /**
3983   * List region servers marked as decommissioned (previously called 'draining') to not get regions
3984   * assigned to them.
3985   * @return List of decommissioned servers.
3986   */
3987  public List<ServerName> listDecommissionedRegionServers() {
3988    return this.serverManager.getDrainingServersList();
3989  }
3990
3991  /**
   * Remove decommission marker (previously called 'draining') from a region server to allow region
   * assignments. Also load regions onto the server asynchronously if a list of regions is given.
   * @param server             Region server to remove decommission marker from.
   * @param encodedRegionNames Encoded names of the regions to load onto the server; may be null or
   *                           empty to skip loading.
3995   */
3996  public void recommissionRegionServer(final ServerName server,
3997    final List<byte[]> encodedRegionNames) throws IOException {
3998    // Remove the server from decommissioned (draining) server list.
3999    String parentZnode = getZooKeeper().getZNodePaths().drainingZNode;
4000    String node = ZNodePaths.joinZNode(parentZnode, server.getServerName());
4001    try {
4002      ZKUtil.deleteNodeFailSilent(getZooKeeper(), node);
4003    } catch (KeeperException ke) {
4004      throw new HBaseIOException(
4005        this.zooKeeper.prefix("Unable to recommission '" + server.getServerName() + "'."), ke);
4006    }
4007    this.serverManager.removeServerFromDrainList(server);
4008
4009    // Load the regions onto the server if we are given a list of regions.
4010    if (encodedRegionNames == null || encodedRegionNames.isEmpty()) {
4011      return;
4012    }
4013    if (!this.serverManager.isServerOnline(server)) {
4014      return;
4015    }
4016    for (byte[] encodedRegionName : encodedRegionNames) {
4017      RegionState regionState =
4018        assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName));
4019      if (regionState == null) {
4020        LOG.warn("Unknown region " + Bytes.toStringBinary(encodedRegionName));
4021        continue;
4022      }
4023      RegionInfo hri = regionState.getRegion();
4024      if (server.equals(regionState.getServerName())) {
4025        LOG.info("Skipping move of region " + hri.getRegionNameAsString()
4026          + " because region already assigned to the same server " + server + ".");
4027        continue;
4028      }
4029      RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), server);
4030      this.assignmentManager.moveAsync(rp);
4031    }
4032  }
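
  /*
   * Usage sketch (illustrative): passing null (or an empty list) only clears the decommission
   * marker; no regions are proactively moved back.
   *
   *   master.recommissionRegionServer(rs, null);
   */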
4033
4034  @Override
4035  public LockManager getLockManager() {
4036    return lockManager;
4037  }
4038
4039  public QuotaObserverChore getQuotaObserverChore() {
4040    return this.quotaObserverChore;
4041  }
4042
4043  public SpaceQuotaSnapshotNotifier getSpaceQuotaSnapshotNotifier() {
4044    return this.spaceQuotaSnapshotNotifier;
4045  }
4046
4047  @SuppressWarnings("unchecked")
4048  private RemoteProcedure<MasterProcedureEnv, ?> getRemoteProcedure(long procId) {
4049    Procedure<?> procedure = procedureExecutor.getProcedure(procId);
4050    if (procedure == null) {
4051      return null;
4052    }
4053    assert procedure instanceof RemoteProcedure;
4054    return (RemoteProcedure<MasterProcedureEnv, ?>) procedure;
4055  }
4056
4057  public void remoteProcedureCompleted(long procId) {
4058    LOG.debug("Remote procedure done, pid={}", procId);
4059    RemoteProcedure<MasterProcedureEnv, ?> procedure = getRemoteProcedure(procId);
4060    if (procedure != null) {
4061      procedure.remoteOperationCompleted(procedureExecutor.getEnvironment());
4062    }
4063  }
4064
4065  public void remoteProcedureFailed(long procId, RemoteProcedureException error) {
4066    LOG.debug("Remote procedure failed, pid={}", procId, error);
4067    RemoteProcedure<MasterProcedureEnv, ?> procedure = getRemoteProcedure(procId);
4068    if (procedure != null) {
4069      procedure.remoteOperationFailed(procedureExecutor.getEnvironment(), error);
4070    }
4071  }
4072
4073  /**
4074   * Reopen regions provided in the argument
4075   * @param tableName   The current table name
4076   * @param regionNames The region names of the regions to reopen
4077   * @param nonceGroup  Identifier for the source of the request, a client or process
4078   * @param nonce       A unique identifier for this operation from the client or process identified
4079   *                    by <code>nonceGroup</code> (the source must ensure each operation gets a
4080   *                    unique id).
4081   * @return procedure Id
   * @throws IOException if reopening the regions fails while running the procedure
4083   */
4084  long reopenRegions(final TableName tableName, final List<byte[]> regionNames,
4085    final long nonceGroup, final long nonce) throws IOException {
4087    return MasterProcedureUtil
4088      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
4089
4090        @Override
4091        protected void run() throws IOException {
4092          submitProcedure(new ReopenTableRegionsProcedure(tableName, regionNames));
4093        }
4094
4095        @Override
4096        protected String getDescription() {
4097          return "ReopenTableRegionsProcedure";
4098        }
4099
4100      });
4102  }
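
  /*
   * Usage sketch (illustrative): the region names are full region names as bytes, for example
   * from RegionInfo.getRegionName().
   *
   *   long procId = master.reopenRegions(tableName,
   *     Collections.singletonList(regionInfo.getRegionName()), nonceGroup, nonce);
   */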
4103
4104  @Override
4105  public ReplicationPeerManager getReplicationPeerManager() {
4106    return replicationPeerManager;
4107  }
4108
4109  public HashMap<String, List<Pair<ServerName, ReplicationLoadSource>>>
4110    getReplicationLoad(ServerName[] serverNames) {
4111    List<ReplicationPeerDescription> peerList = this.getReplicationPeerManager().listPeers(null);
4112    if (peerList == null) {
4113      return null;
4114    }
4115    HashMap<String, List<Pair<ServerName, ReplicationLoadSource>>> replicationLoadSourceMap =
4116      new HashMap<>(peerList.size());
    peerList.forEach(peer -> replicationLoadSourceMap.put(peer.getPeerId(), new ArrayList<>()));
4119    for (ServerName serverName : serverNames) {
4120      List<ReplicationLoadSource> replicationLoadSources =
4121        getServerManager().getLoad(serverName).getReplicationLoadSourceList();
4122      for (ReplicationLoadSource replicationLoadSource : replicationLoadSources) {
4123        List<Pair<ServerName, ReplicationLoadSource>> replicationLoadSourceList =
4124          replicationLoadSourceMap.get(replicationLoadSource.getPeerID());
4125        if (replicationLoadSourceList == null) {
4126          LOG.debug("{} does not exist, but it exists "
4127            + "in znode(/hbase/replication/rs). when the rs restarts, peerId is deleted, so "
4128            + "we just need to ignore it", replicationLoadSource.getPeerID());
4129          continue;
4130        }
4131        replicationLoadSourceList.add(new Pair<>(serverName, replicationLoadSource));
4132      }
4133    }
4134    for (List<Pair<ServerName, ReplicationLoadSource>> loads : replicationLoadSourceMap.values()) {
      if (!loads.isEmpty()) {
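        // Sort each peer's load sources by replication lag, largest lag first.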
4136        loads.sort(Comparator.comparingLong(load -> (-1) * load.getSecond().getReplicationLag()));
4137      }
4138    }
4139    return replicationLoadSourceMap;
4140  }
4141
4142  /**
   * This method modifies the master's configuration in order to inject replication-related
   * features.
4144   */
4145  @InterfaceAudience.Private
4146  public static void decorateMasterConfiguration(Configuration conf) {
4147    String plugins = conf.get(HBASE_MASTER_LOGCLEANER_PLUGINS);
4148    String cleanerClass = ReplicationLogCleaner.class.getCanonicalName();
    if (plugins == null || !plugins.contains(cleanerClass)) {
      // Guard against a null plugins value so we do not prepend the literal string "null".
      conf.set(HBASE_MASTER_LOGCLEANER_PLUGINS,
        plugins == null ? cleanerClass : plugins + "," + cleanerClass);
    }
4152    if (ReplicationUtils.isReplicationForBulkLoadDataEnabled(conf)) {
4153      plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
4154      cleanerClass = ReplicationHFileCleaner.class.getCanonicalName();
      if (plugins == null || !plugins.contains(cleanerClass)) {
        conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
          plugins == null ? cleanerClass : plugins + "," + cleanerClass);
      }
4158    }
4159  }
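
  /*
   * Effect sketch (illustrative; "org.example.MyCleaner" is a placeholder): if
   * hbase.master.logcleaner.plugins is set to "org.example.MyCleaner", this method appends the
   * replication log cleaner, leaving the key set to
   * "org.example.MyCleaner,org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner".
   */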
4160
4161  public SnapshotQuotaObserverChore getSnapshotQuotaObserverChore() {
4162    return this.snapshotQuotaChore;
4163  }
4164
4165  public ActiveMasterManager getActiveMasterManager() {
4166    return activeMasterManager;
4167  }
4168
4169  @Override
4170  public SyncReplicationReplayWALManager getSyncReplicationReplayWALManager() {
4171    return this.syncReplicationReplayWALManager;
4172  }
4173
4174  public HbckChore getHbckChore() {
4175    return this.hbckChore;
4176  }
4177
4178  @Override
4179  public void runReplicationBarrierCleaner() {
4180    ReplicationBarrierCleaner rbc = this.replicationBarrierCleaner;
4181    if (rbc != null) {
4182      rbc.chore();
4183    }
4184  }
4185
4186  @Override
4187  public RSGroupInfoManager getRSGroupInfoManager() {
4188    return rsGroupInfoManager;
4189  }
4190
4191  /**
4192   * Get the compaction state of the table
4193   * @param tableName The table name
4194   * @return CompactionState Compaction state of the table
4195   */
4196  public CompactionState getCompactionState(final TableName tableName) {
4197    CompactionState compactionState = CompactionState.NONE;
4198    try {
4199      List<RegionInfo> regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName);
4200      for (RegionInfo regionInfo : regions) {
4201        ServerName serverName =
4202          assignmentManager.getRegionStates().getRegionServerOfRegion(regionInfo);
4203        if (serverName == null) {
4204          continue;
4205        }
4206        ServerMetrics sl = serverManager.getLoad(serverName);
4207        if (sl == null) {
4208          continue;
4209        }
        RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName());
        if (regionMetrics == null) {
          // The server may not have reported metrics for this region yet.
          continue;
        }
        if (regionMetrics.getCompactionState() == CompactionState.MAJOR) {
4212          if (compactionState == CompactionState.MINOR) {
4213            compactionState = CompactionState.MAJOR_AND_MINOR;
4214          } else {
4215            compactionState = CompactionState.MAJOR;
4216          }
4217        } else if (regionMetrics.getCompactionState() == CompactionState.MINOR) {
4218          if (compactionState == CompactionState.MAJOR) {
4219            compactionState = CompactionState.MAJOR_AND_MINOR;
4220          } else {
4221            compactionState = CompactionState.MINOR;
4222          }
4223        }
4224      }
4225    } catch (Exception e) {
4226      compactionState = null;
4227      LOG.error("Exception when get compaction state for " + tableName.getNameAsString(), e);
4228    }
4229    return compactionState;
4230  }
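
  /*
   * State-merging note (illustrative): the table-level state is the join of the per-region
   * states observed above, e.g.
   *
   *   NONE  + MAJOR -> MAJOR
   *   MINOR + MAJOR -> MAJOR_AND_MINOR
   *   MAJOR + MINOR -> MAJOR_AND_MINOR
   */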
4231
4232  @Override
4233  public MetaLocationSyncer getMetaLocationSyncer() {
4234    return metaLocationSyncer;
4235  }
4236
4237  @RestrictedApi(explanation = "Should only be called in tests", link = "",
4238      allowedOnPath = ".*/src/test/.*")
4239  public MasterRegion getMasterRegion() {
4240    return masterRegion;
4241  }
4242
4243  @Override
4244  public void onConfigurationChange(Configuration newConf) {
4245    try {
4246      Superusers.initialize(newConf);
4247    } catch (IOException e) {
4248      LOG.warn("Failed to initialize SuperUsers on reloading of the configuration");
4249    }
4250    // append the quotas observer back to the master coprocessor key
4251    setQuotasObserver(newConf);
4252    // update region server coprocessor if the configuration has changed.
4253    if (
4254      CoprocessorConfigurationUtil.checkConfigurationChange(getConfiguration(), newConf,
4255        CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY) && !maintenanceMode
4256    ) {
4257      LOG.info("Update the master coprocessor(s) because the configuration has changed");
4258      initializeCoprocessorHost(newConf);
4259    }
4260  }
4261
4262  @Override
4263  protected NamedQueueRecorder createNamedQueueRecord() {
4264    final boolean isBalancerDecisionRecording =
4265      conf.getBoolean(BaseLoadBalancer.BALANCER_DECISION_BUFFER_ENABLED,
4266        BaseLoadBalancer.DEFAULT_BALANCER_DECISION_BUFFER_ENABLED);
4267    final boolean isBalancerRejectionRecording =
4268      conf.getBoolean(BaseLoadBalancer.BALANCER_REJECTION_BUFFER_ENABLED,
4269        BaseLoadBalancer.DEFAULT_BALANCER_REJECTION_BUFFER_ENABLED);
4270    if (isBalancerDecisionRecording || isBalancerRejectionRecording) {
4271      return NamedQueueRecorder.getInstance(conf);
4272    } else {
4273      return null;
4274    }
4275  }
4276
4277  @Override
4278  protected boolean clusterMode() {
4279    return true;
4280  }
4281
4282  public String getClusterId() {
4283    if (activeMaster) {
4284      return clusterId;
4285    }
4286    return cachedClusterId.getFromCacheOrFetch();
4287  }
4288
4289  public Optional<ServerName> getActiveMaster() {
4290    return activeMasterManager.getActiveMasterServerName();
4291  }
4292
4293  public List<ServerName> getBackupMasters() {
4294    return activeMasterManager.getBackupMasters();
4295  }
4296
4297  @Override
4298  public Iterator<ServerName> getBootstrapNodes() {
4299    return regionServerTracker.getRegionServers().iterator();
4300  }
4301
4302  @Override
4303  public List<HRegionLocation> getMetaLocations() {
4304    return metaRegionLocationCache.getMetaRegionLocations();
4305  }
4306
4307  @Override
4308  public void flushMasterStore() throws IOException {
4309    LOG.info("Force flush master local region.");
4310    if (this.cpHost != null) {
4311      try {
4312        cpHost.preMasterStoreFlush();
4313      } catch (IOException ioe) {
4314        LOG.error("Error invoking master coprocessor preMasterStoreFlush()", ioe);
4315      }
4316    }
4317    masterRegion.flush(true);
4318    if (this.cpHost != null) {
4319      try {
4320        cpHost.postMasterStoreFlush();
4321      } catch (IOException ioe) {
4322        LOG.error("Error invoking master coprocessor postMasterStoreFlush()", ioe);
4323      }
4324    }
4325  }
4326
4327  public Collection<ServerName> getLiveRegionServers() {
4328    return regionServerTracker.getRegionServers();
4329  }
4330
4331  @RestrictedApi(explanation = "Should only be called in tests", link = "",
4332      allowedOnPath = ".*/src/test/.*")
4333  void setLoadBalancer(RSGroupBasedLoadBalancer loadBalancer) {
4334    this.balancer = loadBalancer;
4335  }
4336
4337  @RestrictedApi(explanation = "Should only be called in tests", link = "",
4338      allowedOnPath = ".*/src/test/.*")
4339  void setAssignmentManager(AssignmentManager assignmentManager) {
4340    this.assignmentManager = assignmentManager;
4341  }
4342
4343  @RestrictedApi(explanation = "Should only be called in tests", link = "",
4344      allowedOnPath = ".*/src/test/.*")
4345  static void setDisableBalancerChoreForTest(boolean disable) {
4346    disableBalancerChoreForTest = disable;
4347  }
4348
4349  private void setQuotasObserver(Configuration conf) {
4350    // Add the Observer to delete quotas on table deletion before starting all CPs by
4351    // default with quota support, avoiding if user specifically asks to not load this Observer.
4352    if (QuotaUtil.isQuotaEnabled(conf)) {
4353      updateConfigurationForQuotasObserver(conf);
4354    }
4355  }
4356
4357  private void initializeCoprocessorHost(Configuration conf) {
4358    // initialize master side coprocessors before we start handling requests
4359    this.cpHost = new MasterCoprocessorHost(this, conf);
4360  }
4361}