/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK;
import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS;
import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK;
import static org.apache.hadoop.hbase.master.cleaner.HFileCleaner.CUSTOM_POOL_SIZE;
import static org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY;

import com.google.errorprone.annotations.RestrictedApi;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.StatusCode;
import io.opentelemetry.context.Scope;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import javax.servlet.http.HttpServlet;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HBaseServerBase;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.PleaseRestartMasterException;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ServerTask;
import org.apache.hadoop.hbase.ServerTaskBuilder;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.MasterStoppedException;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
import org.apache.hadoop.hbase.http.HttpServer;
import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
import org.apache.hadoop.hbase.master.balancer.BalancerChore;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerStateStore;
import org.apache.hadoop.hbase.master.balancer.MaintenanceLoadBalancer;
import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
import org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore;
import org.apache.hadoop.hbase.master.hbck.HbckChore;
import org.apache.hadoop.hbase.master.http.MasterDumpServlet;
import org.apache.hadoop.hbase.master.http.MasterRedirectServlet;
import org.apache.hadoop.hbase.master.http.MasterStatusServlet;
import org.apache.hadoop.hbase.master.http.api_v1.ResourceConfigFactory;
import org.apache.hadoop.hbase.master.http.hbck.HbckConfigFactory;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.migrate.RollingUpgradeChore;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerStateStore;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.FlushTableProcedure;
import org.apache.hadoop.hbase.master.procedure.InitMetaProcedure;
import org.apache.hadoop.hbase.master.procedure.LogRollProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
import org.apache.hadoop.hbase.master.procedure.ReloadQuotasProcedure;
import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.master.procedure.TruncateRegionProcedure;
import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
import org.apache.hadoop.hbase.master.region.MasterRegion;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
import org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure;
import org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
import org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
import org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
import org.apache.hadoop.hbase.master.replication.MigrateReplicationQueueFromZkToTableProcedure;
import org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
import org.apache.hadoop.hbase.master.replication.ReplicationPeerModificationStateStore;
import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager;
import org.apache.hadoop.hbase.master.replication.TransitPeerSyncReplicationStateProcedure;
import org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
import org.apache.hadoop.hbase.master.slowlog.SlowLogMasterService;
import org.apache.hadoop.hbase.master.snapshot.SnapshotCleanupStateStore;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator;
import org.apache.hadoop.hbase.master.zksyncer.MasterAddressSyncer;
import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer;
import org.apache.hadoop.hbase.mob.MobFileCleanerChore;
import org.apache.hadoop.hbase.mob.MobFileCompactionChore;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskGroup;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
import org.apache.hadoop.hbase.procedure2.LockedResource;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
import org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.quotas.MasterQuotasObserver;
import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
import org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
import org.apache.hadoop.hbase.regionserver.storefiletracker.ModifyColumnFamilyStoreFileTrackerProcedure;
import org.apache.hadoop.hbase.regionserver.storefiletracker.ModifyTableStoreFileTrackerProcedure;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.replication.ReplicationUtils;
import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.replication.ZKReplicationQueueStorageForMigration;
import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
import org.apache.hadoop.hbase.replication.master.ReplicationLogCleanerBarrier;
import org.apache.hadoop.hbase.replication.master.ReplicationSinkTrackerTableCreator;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp.ReplicationSyncUpToolInfo;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint;
import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.SecurityConstants;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.CoprocessorConfigurationUtil;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FutureUtils;
import org.apache.hadoop.hbase.util.HBaseFsck;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.IdLock;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JsonMapper;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;
import org.apache.hadoop.hbase.util.TableDescriptorChecker;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams;
import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
import org.apache.hbase.thirdparty.com.google.gson.JsonParseException;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors;
import org.apache.hbase.thirdparty.com.google.protobuf.Service;
import org.apache.hbase.thirdparty.org.eclipse.jetty.ee8.servlet.ServletHolder;
import org.apache.hbase.thirdparty.org.eclipse.jetty.ee8.webapp.WebAppContext;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector;
import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig;
import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer;

import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;

/**
 * HMaster is the "master server" for HBase. An HBase cluster has one active master. If many masters
 * are started, all compete. Whichever wins goes on to run the cluster. All others park themselves
 * in their constructor until master or cluster shutdown or until the active master loses its lease
 * in zookeeper. Thereafter, all running masters jostle to take over the master role.
 * <p/>
 * The Master can be asked to shut down the cluster. See {@link #shutdown()}. In this case it will
 * tell all regionservers to go down and then wait on them all reporting in that they are down. This
 * master will then shut itself down.
 * <p/>
 * You can also shut down just this master. Call {@link #stopMaster()}.
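 * <p/>
 * A minimal sketch of embedding a master in a JVM is below; real deployments normally start the
 * master via HMasterCommandLine, and this snippet is illustrative only:
 *
 * <pre>
 * Configuration conf = HBaseConfiguration.create();
 * HMaster master = new HMaster(conf);
 * master.start(); // enters the main loop in run()
 * </pre>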
 * @see org.apache.zookeeper.Watcher
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class HMaster extends HBaseServerBase<MasterRpcServices> implements MasterServices {

  private static final Logger LOG = LoggerFactory.getLogger(HMaster.class);

  // MASTER is name of the webapp and the attribute name used stuffing this
  // instance into a web context !! AND OTHER PLACES !!
  public static final String MASTER = "master";

  // Manager and zk listener for master election
  private final ActiveMasterManager activeMasterManager;
  // Region server tracker
  private final RegionServerTracker regionServerTracker;
  // Draining region server tracker
  private DrainingServerTracker drainingServerTracker;
  // Tracker for load balancer state
  LoadBalancerStateStore loadBalancerStateStore;
  // Tracker for meta location, if any client ZK quorum specified
  private MetaLocationSyncer metaLocationSyncer;
  // Tracker for active master location, if any client ZK quorum specified
  @InterfaceAudience.Private
  MasterAddressSyncer masterAddressSyncer;
  // Tracker for auto snapshot cleanup state
  SnapshotCleanupStateStore snapshotCleanupStateStore;

  // Tracker for split and merge state
  private SplitOrMergeStateStore splitOrMergeStateStore;

  private ClusterSchemaService clusterSchemaService;

  public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS =
    "hbase.master.wait.on.service.seconds";
  public static final int DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS = 5 * 60;

  public static final String HBASE_MASTER_CLEANER_INTERVAL = "hbase.master.cleaner.interval";

  public static final int DEFAULT_HBASE_MASTER_CLEANER_INTERVAL = 600 * 1000;
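
  // Illustrative example: the cleaner interval is in milliseconds, so a 5 minute interval would be
  // configured in hbase-site.xml as
  //   <property>
  //     <name>hbase.master.cleaner.interval</name>
  //     <value>300000</value>
  //   </property>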

  private String clusterId;

  // Metrics for the HMaster
  final MetricsMaster metricsMaster;
  // file system manager for the master FS operations
  private MasterFileSystem fileSystemManager;
  private MasterWalManager walManager;

  // Manager for procedure-based WAL splitting; null if zk-based WAL splitting is
  // in use. SplitWALManager will replace SplitLogManager and MasterWalManager,
  // which means the zk-based WAL splitting code will be useless after we switch
  // to the procedure-based one. Our eventual goal is to remove all the zk-based
  // WAL splitting code.
  private SplitWALManager splitWALManager;

  // server manager to deal with region server info
  private volatile ServerManager serverManager;

  // manager of assignment nodes in zookeeper
  private AssignmentManager assignmentManager;

  private RSGroupInfoManager rsGroupInfoManager;

  private final ReplicationLogCleanerBarrier replicationLogCleanerBarrier =
    new ReplicationLogCleanerBarrier();

  // Only allow to add one sync replication peer concurrently
  private final Semaphore syncReplicationPeerLock = new Semaphore(1);

  // manager of replication
  private ReplicationPeerManager replicationPeerManager;

  private SyncReplicationReplayWALManager syncReplicationReplayWALManager;

  // buffer for "fatal error" notices from region servers
  // in the cluster. This is only used for assisting
  // operations/debugging.
  MemoryBoundedLogMessageBuffer rsFatals;

  // flag set after we become the active master (used for testing)
  private volatile boolean activeMaster = false;

  // flag set after we complete initialization once active
  private final ProcedureEvent<?> initialized = new ProcedureEvent<>("master initialized");

  // flag set after master services are started,
  // initialization may have not completed yet.
  volatile boolean serviceStarted = false;

  // Maximum time we should run balancer for
  private final int maxBalancingTime;
  // Maximum percent of regions in transition when balancing
  private final double maxRitPercent;

  private final LockManager lockManager = new LockManager(this);

  private RSGroupBasedLoadBalancer balancer;
  private BalancerChore balancerChore;
  private static boolean disableBalancerChoreForTest = false;
  private RegionNormalizerManager regionNormalizerManager;
  private ClusterStatusChore clusterStatusChore;
  private ClusterStatusPublisher clusterStatusPublisherChore = null;
  private SnapshotCleanerChore snapshotCleanerChore = null;

  private HbckChore hbckChore;
  CatalogJanitor catalogJanitorChore;
  // Threadpool for scanning the Old logs directory, used by the LogCleaner
  private DirScanPool logCleanerPool;
  private LogCleaner logCleaner;
  // HFile cleaners for the custom hfile archive paths and the default archive path
  // The archive path cleaner is the first element
  private List<HFileCleaner> hfileCleaners = new ArrayList<>();
  // The hfile cleaner paths, including custom paths and the default archive path
  private List<Path> hfileCleanerPaths = new ArrayList<>();
  // The shared hfile cleaner pool for the custom archive paths
  private DirScanPool sharedHFileCleanerPool;
  // The exclusive hfile cleaner pool for scanning the archive directory
  private DirScanPool exclusiveHFileCleanerPool;
  private ReplicationBarrierCleaner replicationBarrierCleaner;
  private MobFileCleanerChore mobFileCleanerChore;
  private MobFileCompactionChore mobFileCompactionChore;
  private RollingUpgradeChore rollingUpgradeChore;
  // used to synchronize the mobCompactionStates
  private final IdLock mobCompactionLock = new IdLock();
  // save the information of mob compactions in tables.
  // the key is table name, the value is the number of compactions in that table.
  private Map<TableName, AtomicInteger> mobCompactionStates = Maps.newConcurrentMap();

  volatile MasterCoprocessorHost cpHost;

  private final boolean preLoadTableDescriptors;

  // Time stamp for when this master became active
  private long masterActiveTime;

  // Time stamp for when HMaster finishes becoming Active Master
  private long masterFinishedInitializationTime;

  Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();

  // monitor for snapshot of hbase tables
  SnapshotManager snapshotManager;
  // monitor for distributed procedures
  private MasterProcedureManagerHost mpmHost;

  private RegionsRecoveryChore regionsRecoveryChore = null;

  private RegionsRecoveryConfigManager regionsRecoveryConfigManager = null;
  // it is assigned after 'initialized' guard set to true, so should be volatile
  private volatile MasterQuotaManager quotaManager;
  private SpaceQuotaSnapshotNotifier spaceQuotaSnapshotNotifier;
  private QuotaObserverChore quotaObserverChore;
  private SnapshotQuotaObserverChore snapshotQuotaChore;
  private OldWALsDirSizeChore oldWALsDirSizeChore;

  private ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
  private ProcedureStore procedureStore;

  // the master local storage to store procedure data, meta region locations, etc.
  private MasterRegion masterRegion;

  private RegionServerList rsListStorage;

  // handle table states
  private TableStateManager tableStateManager;

  /** jetty server for master to redirect requests to regionserver infoServer */
  private Server masterJettyServer;

  // Determine if we should do normal startup or minimal "single-user" mode with no region
  // servers and no user tables. Useful for repair and recovery of hbase:meta
  private final boolean maintenanceMode;
  static final String MAINTENANCE_MODE = "hbase.master.maintenance_mode";
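  // Maintenance mode can be enabled either via the Configuration or as a JVM system property,
  // matching the two checks in the constructor; both of the following are illustrative:
  //   conf.setBoolean(MAINTENANCE_MODE, true); // hbase-site.xml / Configuration
  //   -Dhbase.master.maintenance_mode=true // system property on the master JVM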

  // the in process region server for carrying system regions in maintenanceMode
  private JVMClusterUtil.RegionServerThread maintenanceRegionServer;

  // Cached clusterId on stand by masters to serve clusterID requests from clients.
  private final CachedClusterId cachedClusterId;

  public static final String WARMUP_BEFORE_MOVE = "hbase.master.warmup.before.move";
  private static final boolean DEFAULT_WARMUP_BEFORE_MOVE = true;
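  // Illustrative only: warming up regions on the target server before a move can be disabled
  // with conf.setBoolean(WARMUP_BEFORE_MOVE, false);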

  /**
   * Use an RSProcedureDispatcher instance to initiate master -> rs remote procedure execution. Use
   * this config to extend RSProcedureDispatcher (mainly for testing purposes).
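   * <p>
   * A minimal, illustrative sketch (the dispatcher class name below is hypothetical; it must
   * extend RSProcedureDispatcher):
   *
   * <pre>
   * conf.set(HBASE_MASTER_RSPROC_DISPATCHER_CLASS, "org.example.TestRSProcedureDispatcher");
   * </pre>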
   */
  public static final String HBASE_MASTER_RSPROC_DISPATCHER_CLASS =
    "hbase.master.rsproc.dispatcher.class";
  private static final String DEFAULT_HBASE_MASTER_RSPROC_DISPATCHER_CLASS =
    RSProcedureDispatcher.class.getName();

  private TaskGroup startupTaskGroup;

  /**
   * Store whether we allow replication peer modification operations.
   */
  private ReplicationPeerModificationStateStore replicationPeerModificationStateStore;

  /**
   * Initializes the HMaster. The steps are as follows:
   * <p>
   * <ol>
   * <li>Initialize the local HRegionServer
   * <li>Start the ActiveMasterManager.
   * </ol>
   * <p>
   * Remaining steps of initialization occur in {@link #finishActiveMasterInitialization()} after
   * the master becomes the active one.
   */
  public HMaster(final Configuration conf) throws IOException {
    super(conf, "Master");
    final Span span = TraceUtil.createSpan("HMaster.cxtor");
    try (Scope ignored = span.makeCurrent()) {
      if (conf.getBoolean(MAINTENANCE_MODE, false)) {
        LOG.info("Detected {}=true via configuration.", MAINTENANCE_MODE);
        maintenanceMode = true;
      } else if (Boolean.getBoolean(MAINTENANCE_MODE)) {
        LOG.info("Detected {}=true via system properties.", MAINTENANCE_MODE);
        maintenanceMode = true;
      } else {
        maintenanceMode = false;
      }
      this.rsFatals = new MemoryBoundedLogMessageBuffer(
        conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));
      LOG.info("hbase.rootdir={}, hbase.cluster.distributed={}",
        CommonFSUtils.getRootDir(this.conf),
        this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));

      // Disable usage of meta replicas in the master
      this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);

      decorateMasterConfiguration(this.conf);

      // Hack! Maps DFSClient => Master for logs. HDFS made this
      // config param for task trackers, but we can piggyback off of it.
      if (this.conf.get("mapreduce.task.attempt.id") == null) {
        this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
      }

      this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));

      // preload table descriptor at startup
      this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);

      this.maxBalancingTime = getMaxBalancingTime();
      this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT,
        HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT);

      // Do we publish the status?
      boolean shouldPublish =
        conf.getBoolean(HConstants.STATUS_PUBLISHED, HConstants.STATUS_PUBLISHED_DEFAULT);
      Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
        conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.Publisher.class);

      if (shouldPublish) {
        if (publisherClass == null) {
          LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but "
            + ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS
            + " is not set - not publishing status");
        } else {
          clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
          LOG.debug("Created {}", this.clusterStatusPublisherChore);
          getChoreService().scheduleChore(clusterStatusPublisherChore);
        }
      }
      this.activeMasterManager = createActiveMasterManager(zooKeeper, serverName, this);
      cachedClusterId = new CachedClusterId(this, conf);
      this.regionServerTracker = new RegionServerTracker(zooKeeper, this);
      this.rpcServices.start(zooKeeper);
      span.setStatus(StatusCode.OK);
    } catch (Throwable t) {
      // Make sure we log the exception. HMaster is often started via reflection and the
      // cause of failed startup is lost.
      TraceUtil.setError(span, t);
      LOG.error("Failed construction of Master", t);
      throw t;
    } finally {
      span.end();
    }
  }

  /**
   * Protected so that custom implementations in tests can override the default
   * ActiveMasterManager implementation.
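   * <p>
   * For example, a test could subclass HMaster as sketched below (the manager subclass is
   * hypothetical):
   *
   * <pre>
   * &#64;Override
   * protected ActiveMasterManager createActiveMasterManager(ZKWatcher zk, ServerName sn,
   *   org.apache.hadoop.hbase.Server server) throws InterruptedIOException {
   *   return new StubActiveMasterManager(zk, sn, server);
   * }
   * </pre>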
   */
  protected ActiveMasterManager createActiveMasterManager(ZKWatcher zk, ServerName sn,
    org.apache.hadoop.hbase.Server server) throws InterruptedIOException {
    return new ActiveMasterManager(zk, sn, server);
  }

  @Override
  protected String getUseThisHostnameInstead(Configuration conf) {
    return conf.get(MASTER_HOSTNAME_KEY);
  }

  private void registerConfigurationObservers() {
    configurationManager.registerObserver(this.rpcServices);
    configurationManager.registerObserver(this);
  }

  // Main run loop. Calls through to the regionserver run loop AFTER becoming active Master; will
  // block in here until then.
  @Override
  public void run() {
    try {
      installShutdownHook();
      registerConfigurationObservers();
      Threads.setDaemonThreadRunning(new Thread(TraceUtil.tracedRunnable(() -> {
        try {
          int infoPort = putUpJettyServer();
          startActiveMasterManager(infoPort);
        } catch (Throwable t) {
          // Make sure we log the exception.
          String error = "Failed to become Active Master";
          LOG.error(error, t);
          // Abort should have been called already.
          if (!isAborted()) {
            abort(error, t);
          }
        }
      }, "HMaster.becomeActiveMaster")), getName() + ":becomeActiveMaster");
      while (!isStopped() && !isAborted()) {
        sleeper.sleep();
      }
      final Span span = TraceUtil.createSpan("HMaster exiting main loop");
      try (Scope ignored = span.makeCurrent()) {
        stopInfoServer();
        closeClusterConnection();
        stopServiceThreads();
        if (this.rpcServices != null) {
          this.rpcServices.stop();
        }
        closeZooKeeper();
        closeTableDescriptors();
        span.setStatus(StatusCode.OK);
      } finally {
        span.end();
      }
    } finally {
      if (this.clusterSchemaService != null) {
        // If on way out, then we are no longer active master.
        this.clusterSchemaService.stopAsync();
        try {
          this.clusterSchemaService
            .awaitTerminated(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,
              DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);
        } catch (TimeoutException te) {
          LOG.warn("Failed shutdown of clusterSchemaService", te);
        }
      }
      this.activeMaster = false;
    }
  }

  // return the actual infoPort, -1 means disable info server.
  private int putUpJettyServer() throws IOException {
    if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
      return -1;
    }
    final int infoPort =
      conf.getInt("hbase.master.info.port.orig", HConstants.DEFAULT_MASTER_INFOPORT);
    // -1 is for disabling info server, so no redirecting
    if (infoPort < 0 || infoServer == null) {
      return -1;
    }
    if (infoPort == infoServer.getPort()) {
      // server is already running
      return infoPort;
    }
    final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
      String msg = "Failed to start redirecting jetty server. Address " + addr
        + " does not belong to this host. Correct configuration parameter: "
        + "hbase.master.info.bindAddress";
      LOG.error(msg);
      throw new IOException(msg);
    }

    // TODO I'm pretty sure we could just add another binding to the InfoServer run by
    // the RegionServer and have it run the RedirectServlet instead of standing up
    // a second entire stack here.
    masterJettyServer = new Server();
    final ServerConnector connector = new ServerConnector(masterJettyServer);
    connector.setHost(addr);
    connector.setPort(infoPort);
    masterJettyServer.addConnector(connector);
    masterJettyServer.setStopAtShutdown(true);
    masterJettyServer.setHandler(HttpServer.buildGzipHandler(masterJettyServer.getHandler()));

    final String redirectHostname =
      StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead;

    final MasterRedirectServlet redirect = new MasterRedirectServlet(infoServer, redirectHostname);
    final WebAppContext context =
      new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);
    context.addServlet(new ServletHolder(redirect), "/*");
    context.setServer(masterJettyServer);

    try {
      masterJettyServer.start();
    } catch (Exception e) {
      throw new IOException("Failed to start redirecting jetty server", e);
    }
    return connector.getLocalPort();
  }

  /**
   * For compatibility: if login with the regionserver credentials fails, fall back to the master
   * credentials.
   */
  @Override
  protected void login(UserProvider user, String host) throws IOException {
    try {
      user.login(SecurityConstants.REGIONSERVER_KRB_KEYTAB_FILE,
        SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, host);
    } catch (IOException ie) {
      user.login(SecurityConstants.MASTER_KRB_KEYTAB_FILE, SecurityConstants.MASTER_KRB_PRINCIPAL,
        host);
    }
  }

  public MasterRpcServices getMasterRpcServices() {
    return rpcServices;
  }

  @Override
  protected MasterCoprocessorHost getCoprocessorHost() {
    return getMasterCoprocessorHost();
  }

  public boolean balanceSwitch(final boolean b) throws IOException {
    return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
  }

  @Override
  protected String getProcessName() {
    return MASTER;
  }

  @Override
  protected boolean canCreateBaseZNode() {
    return true;
  }

  @Override
  protected boolean canUpdateTableDescriptor() {
    return true;
  }

  @Override
  protected boolean cacheTableDescriptor() {
    return true;
  }

  protected MasterRpcServices createRpcServices() throws IOException {
    return new MasterRpcServices(this);
  }

  @Override
  protected void configureInfoServer(InfoServer infoServer) {
    infoServer.addUnprivilegedServlet("master-status", "/master-status", MasterStatusServlet.class);
    infoServer.addUnprivilegedServlet("api_v1", "/api/v1/*", buildApiV1Servlet());
    infoServer.addUnprivilegedServlet("hbck", "/hbck/*", buildHbckServlet());

    infoServer.setAttribute(MASTER, this);
  }

  private ServletHolder buildApiV1Servlet() {
    final ResourceConfig config = ResourceConfigFactory.createResourceConfig(conf, this);
    return new ServletHolder(new ServletContainer(config));
  }

  private ServletHolder buildHbckServlet() {
    final ResourceConfig config = HbckConfigFactory.createResourceConfig(conf, this);
    return new ServletHolder(new ServletContainer(config));
  }

  @Override
  protected Class<? extends HttpServlet> getDumpServlet() {
    return MasterDumpServlet.class;
  }

  @Override
  public MetricsMaster getMasterMetrics() {
    return metricsMaster;
  }

  /**
   * Initialize all ZK based system trackers. This does not include {@link RegionServerTracker},
   * which should have already been initialized along with {@link ServerManager}.
   */
  private void initializeZKBasedSystemTrackers()
    throws IOException, KeeperException, ReplicationException, DeserializationException {
    if (maintenanceMode) {
      // in maintenance mode, always use MaintenanceLoadBalancer.
      conf.unset(LoadBalancer.HBASE_RSGROUP_LOADBALANCER_CLASS);
      conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, MaintenanceLoadBalancer.class,
        LoadBalancer.class);
    }
    this.balancer = new RSGroupBasedLoadBalancer();
    this.loadBalancerStateStore = new LoadBalancerStateStore(masterRegion, zooKeeper);

    this.regionNormalizerManager =
      RegionNormalizerFactory.createNormalizerManager(conf, masterRegion, zooKeeper, this);
    this.configurationManager.registerObserver(regionNormalizerManager);
    this.regionNormalizerManager.start();

    this.splitOrMergeStateStore = new SplitOrMergeStateStore(masterRegion, zooKeeper, conf);

    // This is for backwards compatibility. We no longer need the CP for rs group, but if the user
    // wants to load it, we need to enable rs group.
    String[] cpClasses = conf.getStrings(MasterCoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
    if (cpClasses != null) {
      for (String cpClass : cpClasses) {
        if (RSGroupAdminEndpoint.class.getName().equals(cpClass)) {
          RSGroupUtil.enableRSGroup(conf);
          break;
        }
      }
    }
    this.rsGroupInfoManager = RSGroupInfoManager.create(this);

    this.replicationPeerManager = ReplicationPeerManager.create(this, clusterId);
    this.configurationManager.registerObserver(replicationPeerManager);
    this.replicationPeerModificationStateStore =
      new ReplicationPeerModificationStateStore(masterRegion);

    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);
    this.drainingServerTracker.start();

    this.snapshotCleanupStateStore = new SnapshotCleanupStateStore(masterRegion, zooKeeper);

    String clientQuorumServers = conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM);
    boolean clientZkObserverMode = conf.getBoolean(HConstants.CLIENT_ZOOKEEPER_OBSERVER_MODE,
      HConstants.DEFAULT_CLIENT_ZOOKEEPER_OBSERVER_MODE);
    if (clientQuorumServers != null && !clientZkObserverMode) {
      // we need to take care of the ZK information synchronization
      // if given client ZK are not observer nodes
      ZKWatcher clientZkWatcher = new ZKWatcher(conf,
        getProcessName() + ":" + rpcServices.getSocketAddress().getPort() + "-clientZK", this,
        false, true);
      this.metaLocationSyncer = new MetaLocationSyncer(zooKeeper, clientZkWatcher, this);
      this.metaLocationSyncer.start();
      this.masterAddressSyncer = new MasterAddressSyncer(zooKeeper, clientZkWatcher, this);
      this.masterAddressSyncer.start();
      // setting the cluster id is a one-time effort
      ZKClusterId.setClusterId(clientZkWatcher, fileSystemManager.getClusterId());
    }

    // Set the cluster as up. If new RSs, they'll be waiting on this before
    // going ahead with their startup.
    boolean wasUp = this.clusterStatusTracker.isClusterUp();
    if (!wasUp) this.clusterStatusTracker.setClusterUp();

    LOG.info("Active/primary master=" + this.serverName + ", sessionid=0x"
      + Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId())
      + ", setting cluster-up flag (Was=" + wasUp + ")");

    // create/initialize the snapshot manager and other procedure managers
    this.snapshotManager = new SnapshotManager();
    this.mpmHost = new MasterProcedureManagerHost();
    this.mpmHost.register(this.snapshotManager);
    this.mpmHost.register(new MasterFlushTableProcedureManager());
    this.mpmHost.loadProcedures(conf);
    this.mpmHost.initialize(this, this.metricsMaster);
  }

  // Will be overridden in tests to inject a customized AssignmentManager
  @InterfaceAudience.Private
  protected AssignmentManager createAssignmentManager(MasterServices master,
    MasterRegion masterRegion) {
    return new AssignmentManager(master, masterRegion);
  }

  private void tryMigrateMetaLocationsFromZooKeeper() throws IOException, KeeperException {
    // try migrate data from zookeeper
    try (ResultScanner scanner =
      masterRegion.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) {
      if (scanner.next() != null) {
        // notice that all replicas for a region are in the same row, so the migration can be
        // done within a single one-row put, which means that if we have data in the catalog
        // family then we can be sure that the migration is done.
897        LOG.info("The {} family in master local region already has data in it, skip migrating...",
898          HConstants.CATALOG_FAMILY_STR);
899        return;
900      }
901    }
902    // start migrating
903    byte[] row = CatalogFamilyFormat.getMetaKeyForRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
904    Put put = new Put(row);
905    List<String> metaReplicaNodes = zooKeeper.getMetaReplicaNodes();
906    StringBuilder info = new StringBuilder("Migrating meta locations:");
907    for (String metaReplicaNode : metaReplicaNodes) {
908      int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZNode(metaReplicaNode);
909      RegionState state = MetaTableLocator.getMetaRegionState(zooKeeper, replicaId);
910      info.append(" ").append(state);
911      put.setTimestamp(state.getStamp());
912      MetaTableAccessor.addRegionInfo(put, state.getRegion());
913      if (state.getServerName() != null) {
914        MetaTableAccessor.addLocation(put, state.getServerName(), HConstants.NO_SEQNUM, replicaId);
915      }
916      put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
917        .setFamily(HConstants.CATALOG_FAMILY)
918        .setQualifier(RegionStateStore.getStateColumn(replicaId)).setTimestamp(put.getTimestamp())
919        .setType(Cell.Type.Put).setValue(Bytes.toBytes(state.getState().name())).build());
920    }
921    if (!put.isEmpty()) {
922      LOG.info(info.toString());
923      masterRegion.update(r -> r.put(put));
924    } else {
925      LOG.info("No meta location available on zookeeper, skip migrating...");
926    }
927  }
928
  /**
   * Finish initialization of HMaster after becoming the primary master.
   * <p/>
   * The startup order is a bit complicated but very important, do not change it unless you know
   * what you are doing.
   * <ol>
   * <li>Initialize file system based components - file system manager, wal manager, table
   * descriptors, etc</li>
   * <li>Publish cluster id</li>
   * <li>Here comes the most complicated part - initialize server manager, assignment manager and
   * region server tracker
   * <ol type='i'>
   * <li>Create server manager</li>
   * <li>Create master local region</li>
   * <li>Create procedure executor, load the procedures, but do not start workers. We will start it
   * later after we finish scheduling SCPs to avoid scheduling duplicated SCPs for the same
   * server</li>
   * <li>Create assignment manager and start it, load the meta region state, but do not load data
   * from meta region</li>
   * <li>Start region server tracker, construct the online servers set and find out dead servers and
   * schedule SCP for them. The online servers will be constructed by scanning zk, and we will also
   * scan the wal directory and load from master local region to find out possible live region
   * servers, and the differences between these two sets are the dead servers</li>
   * </ol>
   * </li>
   * <li>If this is a new deploy, schedule an InitMetaProcedure to initialize meta</li>
   * <li>Start necessary service threads - balancer, catalog janitor, executor services, and also
   * the procedure executor, etc. Notice that the balancer must be created first as assignment
   * manager may use it when assigning regions.</li>
   * <li>Wait for meta to be initialized if necessary, start table state manager.</li>
   * <li>Wait for enough region servers to check-in</li>
   * <li>Let assignment manager load data from meta and construct region states</li>
   * <li>Start all other things such as chore services, etc</li>
   * </ol>
   * <p/>
   * Notice that now we will not schedule a special procedure to make meta online (unless this is
   * the first time, where meta has not been created yet); we will rely on SCP to bring meta online.
   */
  private void finishActiveMasterInitialization() throws IOException, InterruptedException,
    KeeperException, ReplicationException, DeserializationException {
    /*
     * We are active master now... go initialize components we need to run.
     */
    startupTaskGroup.addTask("Initializing Master file system");

    this.masterActiveTime = EnvironmentEdgeManager.currentTime();
    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.

    // always initialize the MemStoreLAB as we use a region to store data in master now, see
    // localStore.
    initializeMemStoreChunkCreator(null);
    this.fileSystemManager = new MasterFileSystem(conf);
    this.walManager = new MasterWalManager(this);

    // warm-up HTDs cache on master initialization
    if (preLoadTableDescriptors) {
      startupTaskGroup.addTask("Pre-loading table descriptors");
      this.tableDescriptors.getAll();
    }

    // Publish cluster ID; set it in Master too. The superclass RegionServer does this later but
    // only after it has checked in with the Master. At least a few tests ask Master for clusterId
    // before it has called its run method and before RegionServer has done the reportForDuty.
    ClusterId clusterId = fileSystemManager.getClusterId();
    startupTaskGroup.addTask("Publishing Cluster ID " + clusterId + " in ZooKeeper");
    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
    this.clusterId = clusterId.toString();

    // Precaution. Put in place the old hbck1 lock file to fence out old hbase1s running their
    // hbck1s against an hbase2 cluster; it could do damage. To skip this behavior, set
    // hbase.write.hbck1.lock.file to false.
    if (this.conf.getBoolean("hbase.write.hbck1.lock.file", true)) {
      Pair<Path, FSDataOutputStream> result = null;
      try {
        result = HBaseFsck.checkAndMarkRunningHbck(this.conf,
          HBaseFsck.createLockRetryCounterFactory(this.conf).create());
      } finally {
        if (result != null) {
          Closeables.close(result.getSecond(), true);
        }
      }
    }

    startupTaskGroup.addTask("Initialize ServerManager and schedule SCP for crash servers");
    // The below two managers must be created before loading procedures, as they will be used during
    // loading.
    // initialize master local region
    masterRegion = MasterRegionFactory.create(this);
    rsListStorage = new MasterRegionServerList(masterRegion, this);

    // Initialize the ServerManager and register it as a configuration observer
    this.serverManager = createServerManager(this, rsListStorage);
    this.configurationManager.registerObserver(this.serverManager);

    this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);
    if (
      !conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)
    ) {
      this.splitWALManager = new SplitWALManager(this);
    }

    tryMigrateMetaLocationsFromZooKeeper();

    createProcedureExecutor();
    Map<Class<?>, List<Procedure<MasterProcedureEnv>>> procsByType = procedureExecutor
      .getActiveProceduresNoCopy().stream().collect(Collectors.groupingBy(p -> p.getClass()));

    // Create Assignment Manager
    this.assignmentManager = createAssignmentManager(this, masterRegion);
    this.assignmentManager.start();
    // TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as
    // completed, it could still be in the procedure list. This is a bit strange but is another
    // story, need to verify the implementation for ProcedureExecutor and ProcedureStore.
    List<TransitRegionStateProcedure> ritList =
      procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()).stream()
        .filter(p -> !p.isFinished()).map(p -> (TransitRegionStateProcedure) p)
        .collect(Collectors.toList());
    this.assignmentManager.setupRIT(ritList);

    // Start RegionServerTracker with a listing of servers found with existing SCPs -- these should
    // be registered in the deadServers set -- and the servernames loaded from the WAL directory
    // and master local region that COULD BE 'alive' (we'll schedule SCPs for each and let SCP
    // figure it out).
    // We also pass dirs that are already 'splitting'... so we can do some checks down in tracker.
    // TODO: Generate the splitting and live Set in one pass instead of two as we currently do.
    this.regionServerTracker.upgrade(
      procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()
        .map(p -> (ServerCrashProcedure) p).collect(
          Collectors.toMap(ServerCrashProcedure::getServerName, Procedure::getSubmittedTime)),
      Sets.union(rsListStorage.getAll(), walManager.getLiveServersFromWALDir()),
      walManager.getSplittingServersFromWALDir());
    // This manager must be accessed AFTER hbase:meta is confirmed online.
    this.tableStateManager = new TableStateManager(this);

    startupTaskGroup.addTask("Initializing ZK system trackers");
    initializeZKBasedSystemTrackers();
    startupTaskGroup.addTask("Loading last flushed sequence id of regions");
    try {
      this.serverManager.loadLastFlushedSequenceIds();
    } catch (IOException e) {
1069      LOG.info("Failed to load last flushed sequence id of regions" + " from file system", e);
    }
    // Set ourselves as active Master now our claim has succeeded up in zk.
    this.activeMaster = true;

    // Start the Zombie master detector after setting master as active, see HBASE-21535
    Thread zombieDetector = new Thread(new MasterInitializationMonitor(this),
      "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime());
    zombieDetector.setDaemon(true);
    zombieDetector.start();

    if (!maintenanceMode) {
      startupTaskGroup.addTask("Initializing master coprocessors");
      setQuotasObserver(conf);
      this.cpHost = new MasterCoprocessorHost(this, conf);
    } else {
      // start an in process region server for carrying system regions
      maintenanceRegionServer =
        JVMClusterUtil.createRegionServerThread(getConfiguration(), HRegionServer.class, 0);
      maintenanceRegionServer.start();
    }

    // Checking if meta needs initializing.
    startupTaskGroup.addTask("Initializing meta table if this is a new deploy");
    InitMetaProcedure initMetaProc = null;
    // Print out state of hbase:meta on startup; helps debugging.
    if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) {
      Optional<InitMetaProcedure> optProc = procedureExecutor.getProcedures().stream()
        .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny();
      initMetaProc = optProc.orElseGet(() -> {
        // schedule an init meta procedure if meta has not been deployed yet
        InitMetaProcedure temp = new InitMetaProcedure();
        procedureExecutor.submitProcedure(temp);
        return temp;
      });
    }

    // initialize load balancer
    this.balancer.setMasterServices(this);
    this.balancer.initialize();
    this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());

    // try migrate replication data
    ZKReplicationQueueStorageForMigration oldReplicationQueueStorage =
      new ZKReplicationQueueStorageForMigration(zooKeeper, conf);
    // check whether there is something to migrate and we haven't scheduled a migration procedure
    // yet
    if (
      oldReplicationQueueStorage.hasData() && procedureExecutor.getProcedures().stream()
        .allMatch(p -> !(p instanceof MigrateReplicationQueueFromZkToTableProcedure))
    ) {
      procedureExecutor.submitProcedure(new MigrateReplicationQueueFromZkToTableProcedure());
    }
    // start up all service threads.
    startupTaskGroup.addTask("Initializing master service threads");
    startServiceThreads();
    // wait for meta to be initialized after we start the procedure executor
1126    if (initMetaProc != null) {
1127      initMetaProc.await();
1128      if (initMetaProc.isFailed() && initMetaProc.hasException()) {
1129        throw new IOException("Failed to initialize meta table", initMetaProc.getException());
1130      }
1131    }
1132    // Wake up this server to check in
1133    sleeper.skipSleepCycle();
1134
1135    // Wait for region servers to report in.
    // With this as part of master initialization, it precludes us from starting a single server
    // that is both Master and RegionServer. Needs more thought. TODO.
1138    String statusStr = "Wait for region servers to report in";
1139    MonitoredTask waitRegionServer = startupTaskGroup.addTask(statusStr);
1140    LOG.info(Objects.toString(waitRegionServer));
1141    waitForRegionServers(waitRegionServer);
1142
    // Check if master is shutting down because of an issue initializing regionservers or the
    // balancer.
1144    if (isStopped()) {
1145      return;
1146    }
1147
1148    startupTaskGroup.addTask("Starting assignment manager");
1149    // FIRST HBASE:META READ!!!!
1150    // The below cannot make progress w/o hbase:meta being online.
1151    // This is the FIRST attempt at going to hbase:meta. Meta on-lining is going on in background
1152    // as procedures run -- in particular SCPs for crashed servers... One should put up hbase:meta
    // if it is down. It may take a while to come online. So, wait here until meta is for sure
1154    // available. That's what waitForMetaOnline does.
1155    if (!waitForMetaOnline()) {
1156      return;
1157    }
1158
1159    TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME);
1160    final ColumnFamilyDescriptor tableFamilyDesc =
1161      metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY);
1162    final ColumnFamilyDescriptor replBarrierFamilyDesc =
1163      metaDescriptor.getColumnFamily(HConstants.REPLICATION_BARRIER_FAMILY);
1164
1165    this.assignmentManager.initializationPostMetaOnline();
1166    this.assignmentManager.joinCluster();
1167    // The below depends on hbase:meta being online.
1168    this.assignmentManager.processOfflineRegions();
    // this must be called after the above processOfflineRegions to prevent a race
1170    this.assignmentManager.wakeMetaLoadedEvent();
1171
1172    // for migrating from a version without HBASE-25099, and also for honoring the configuration
1173    // first.
1174    if (conf.get(HConstants.META_REPLICAS_NUM) != null) {
1175      int replicasNumInConf =
1176        conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM);
1177      TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME);
1178      if (metaDesc.getRegionReplication() != replicasNumInConf) {
1179        // it is possible that we already have some replicas before upgrading, so we must set the
1180        // region replication number in meta TableDescriptor directly first, without creating a
1181        // ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas.
1182        int existingReplicasCount =
1183          assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size();
1184        if (existingReplicasCount > metaDesc.getRegionReplication()) {
1185          LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)"
1186            + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount);
1187          metaDesc = TableDescriptorBuilder.newBuilder(metaDesc)
1188            .setRegionReplication(existingReplicasCount).build();
1189          tableDescriptors.update(metaDesc);
1190        }
1191        // check again, and issue a ModifyTableProcedure if needed
1192        if (metaDesc.getRegionReplication() != replicasNumInConf) {
1193          LOG.info(
1194            "The {} config is {} while the replica count in TableDescriptor is {}"
1195              + " for hbase:meta, altering...",
1196            HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication());
1197          procedureExecutor.submitProcedure(new ModifyTableProcedure(
1198            procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc)
1199              .setRegionReplication(replicasNumInConf).build(),
1200            null, metaDesc, false, true));
1201        }
1202      }
1203    }
1204    // Initialize after meta is up as below scans meta
1205    FavoredNodesManager fnm = getFavoredNodesManager();
1206    if (fnm != null) {
1207      fnm.initializeFromMeta();
1208    }
1209
1210    // set cluster status again after user regions are assigned
1211    this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
1212
1213    // Start balancer and meta catalog janitor after meta and regions have been assigned.
1214    startupTaskGroup.addTask("Starting balancer and catalog janitor");
1215    this.clusterStatusChore = new ClusterStatusChore(this, balancer);
1216    getChoreService().scheduleChore(clusterStatusChore);
1217    this.balancerChore = new BalancerChore(this);
1218    if (!disableBalancerChoreForTest) {
1219      getChoreService().scheduleChore(balancerChore);
1220    }
1221    if (regionNormalizerManager != null) {
1222      getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore());
1223    }
1224    this.catalogJanitorChore = new CatalogJanitor(this);
1225    getChoreService().scheduleChore(catalogJanitorChore);
1226    this.hbckChore = new HbckChore(this);
1227    getChoreService().scheduleChore(hbckChore);
1228    this.serverManager.startChore();
1229
    // Only for rolling upgrade, where we need to migrate the data in the namespace table to the
    // meta table.
1231    if (!waitForNamespaceOnline()) {
1232      return;
1233    }
1234    startupTaskGroup.addTask("Starting cluster schema service");
1235    try {
1236      initClusterSchemaService();
1237    } catch (IllegalStateException e) {
1238      if (
1239        e.getCause() != null && e.getCause() instanceof NoSuchColumnFamilyException
1240          && tableFamilyDesc == null && replBarrierFamilyDesc == null
1241      ) {
1242        LOG.info("ClusterSchema service could not be initialized. This is "
1243          + "expected during HBase 1 to 2 upgrade", e);
1244      } else {
1245        throw e;
1246      }
1247    }
1248
1249    if (this.cpHost != null) {
1250      try {
1251        this.cpHost.preMasterInitialization();
1252      } catch (IOException e) {
1253        LOG.error("Coprocessor preMasterInitialization() hook failed", e);
1254      }
1255    }
1256
1257    LOG.info(String.format("Master has completed initialization %.3fsec",
1258      (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f));
1259    this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime();
1260    configurationManager.registerObserver(this.balancer);
1261    configurationManager.registerObserver(this.logCleanerPool);
1262    configurationManager.registerObserver(this.logCleaner);
1263    configurationManager.registerObserver(this.regionsRecoveryConfigManager);
1264    configurationManager.registerObserver(this.exclusiveHFileCleanerPool);
1265    if (this.sharedHFileCleanerPool != null) {
1266      configurationManager.registerObserver(this.sharedHFileCleanerPool);
1267    }
1268    if (this.hfileCleaners != null) {
1269      for (HFileCleaner cleaner : hfileCleaners) {
1270        configurationManager.registerObserver(cleaner);
1271      }
1272    }
1273    // Set master as 'initialized'.
1274    setInitialized(true);
1275    startupTaskGroup.markComplete("Initialization successful");
1276    MonitoredTask status =
1277      TaskMonitor.get().createStatus("Progress after master initialized", false, true);
1278
1279    if (tableFamilyDesc == null && replBarrierFamilyDesc == null) {
1280      // create missing CFs in meta table after master is set to 'initialized'.
1281      createMissingCFsInMetaDuringUpgrade(metaDescriptor);
1282
1283      // Throwing this Exception to abort active master is painful but this
1284      // seems the only way to add missing CFs in meta while upgrading from
1285      // HBase 1 to 2 (where HBase 2 has HBASE-23055 & HBASE-23782 checked-in).
1286      // So, why do we abort active master after adding missing CFs in meta?
1287      // When we reach here, we would have already bypassed NoSuchColumnFamilyException
1288      // in initClusterSchemaService(), meaning ClusterSchemaService is not
1289      // correctly initialized but we bypassed it. Similarly, we bypassed
      // tableStateManager.start() as well. Hence, it is better to abort the
      // current active master because our main task - adding missing CFs
      // in meta table - is done (possible only after master state is set as
      // initialized) at the expense of bypassing a few important tasks as part
1294      // of active master init routine. So now we abort active master so that
1295      // next active master init will not face any issues and all mandatory
1296      // services will be started during master init phase.
1297      throw new PleaseRestartMasterException("Aborting active master after missing"
1298        + " CFs are successfully added in meta. Subsequent active master "
1299        + "initialization should be uninterrupted");
1300    }
1301
1302    if (maintenanceMode) {
1303      LOG.info("Detected repair mode, skipping final initialization steps.");
1304      return;
1305    }
1306
1307    assignmentManager.checkIfShouldMoveSystemRegionAsync();
1308    status.setStatus("Starting quota manager");
1309    initQuotaManager();
1310    if (QuotaUtil.isQuotaEnabled(conf)) {
1311      // Create the quota snapshot notifier
1312      spaceQuotaSnapshotNotifier = createQuotaSnapshotNotifier();
1313      spaceQuotaSnapshotNotifier.initialize(getConnection());
1314      this.quotaObserverChore = new QuotaObserverChore(this, getMasterMetrics());
1315      // Start the chore to read the region FS space reports and act on them
1316      getChoreService().scheduleChore(quotaObserverChore);
1317
1318      this.snapshotQuotaChore = new SnapshotQuotaObserverChore(this, getMasterMetrics());
1319      // Start the chore to read snapshots and add their usage to table/NS quotas
1320      getChoreService().scheduleChore(snapshotQuotaChore);
1321    }
1322    final SlowLogMasterService slowLogMasterService = new SlowLogMasterService(conf, this);
1323    slowLogMasterService.init();
1324
1325    WALEventTrackerTableCreator.createIfNeededAndNotExists(conf, this);
1326    // Create REPLICATION.SINK_TRACKER table if needed.
1327    ReplicationSinkTrackerTableCreator.createIfNeededAndNotExists(conf, this);
1328
    // Clear dead servers that have the same host name and port as an online server, because we do
    // not remove a dead server with the same hostname and port as a regionserver that tries to
    // check in before master initialization. See HBASE-5916.
1332    this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();
1333
1334    // Check and set the znode ACLs if needed in case we are overtaking a non-secure configuration
1335    status.setStatus("Checking ZNode ACLs");
1336    zooKeeper.checkAndSetZNodeAcls();
1337
1338    status.setStatus("Initializing MOB Cleaner");
1339    initMobCleaner();
1340
1341    // delete the stale data for replication sync up tool if necessary
1342    status.setStatus("Cleanup ReplicationSyncUp status if necessary");
1343    Path replicationSyncUpInfoFile =
1344      new Path(new Path(dataRootDir, ReplicationSyncUp.INFO_DIR), ReplicationSyncUp.INFO_FILE);
1345    if (dataFs.exists(replicationSyncUpInfoFile)) {
1346      // info file is available, load the timestamp and use it to clean up stale data in replication
1347      // queue storage.
1348      byte[] data;
1349      try (FSDataInputStream in = dataFs.open(replicationSyncUpInfoFile)) {
1350        data = ByteStreams.toByteArray(in);
1351      }
1352      ReplicationSyncUpToolInfo info = null;
1353      try {
1354        info = JsonMapper.fromJson(Bytes.toString(data), ReplicationSyncUpToolInfo.class);
1355      } catch (JsonParseException e) {
        // Usually this should be a partial file, which means the ReplicationSyncUp tool did not
        // finish properly, so not a problem. Here we do not clean up the status as we do not know
        // why the tool did not finish properly, so let users clean the status up manually.
        LOG.warn("Failed to parse replication sync up info file, ignore and continue...", e);
1361      }
1362      if (info != null) {
1363        LOG.info("Remove last sequence ids and hfile references which are written before {}({})",
1364          info.getStartTimeMs(), DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.systemDefault())
1365            .format(Instant.ofEpochMilli(info.getStartTimeMs())));
1366        replicationPeerManager.getQueueStorage()
1367          .removeLastSequenceIdsAndHFileRefsBefore(info.getStartTimeMs());
1368        // delete the file after removing the stale data, so next time we do not need to do this
1369        // again.
1370        dataFs.delete(replicationSyncUpInfoFile, false);
1371      }
1372    }
1373    status.setStatus("Calling postStartMaster coprocessors");
1374    if (this.cpHost != null) {
1375      // don't let cp initialization errors kill the master
1376      try {
1377        this.cpHost.postStartMaster();
1378      } catch (IOException ioe) {
1379        LOG.error("Coprocessor postStartMaster() hook failed", ioe);
1380      }
1381    }
1382
1383    zombieDetector.interrupt();
1384
1385    /*
     * After master has started up, let's do balancer post startup initialization. Since this runs in
1387     * activeMasterManager thread, it should be fine.
1388     */
1389    long start = EnvironmentEdgeManager.currentTime();
1390    this.balancer.postMasterStartupInitialize();
1391    if (LOG.isDebugEnabled()) {
1392      LOG.debug("Balancer post startup initialization complete, took "
1393        + ((EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds");
1394    }
1395
1396    this.rollingUpgradeChore = new RollingUpgradeChore(this);
1397    getChoreService().scheduleChore(rollingUpgradeChore);
1398
1399    this.oldWALsDirSizeChore = new OldWALsDirSizeChore(this);
1400    getChoreService().scheduleChore(this.oldWALsDirSizeChore);
1401
1402    status.markComplete("Progress after master initialized complete");
1403  }
1404
1405  /**
1406   * Used for testing only to set Mock objects.
1407   * @param hbckChore hbckChore
1408   */
1409  public void setHbckChoreForTesting(HbckChore hbckChore) {
1410    this.hbckChore = hbckChore;
1411  }
1412
1413  /**
1414   * Used for testing only to set Mock objects.
1415   * @param catalogJanitorChore catalogJanitorChore
1416   */
1417  public void setCatalogJanitorChoreForTesting(CatalogJanitor catalogJanitorChore) {
1418    this.catalogJanitorChore = catalogJanitorChore;
1419  }
1420
1421  private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor)
1422    throws IOException {
1423    TableDescriptor newMetaDesc = TableDescriptorBuilder.newBuilder(metaDescriptor)
1424      .setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf))
1425      .setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()).build();
1426    long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, 0, 0, false);
1427    waitForProcedureToComplete(pid, "Failed to add table and rep_barrier CFs to meta");
1428  }
1429
1430  private void waitForProcedureToComplete(long pid, String errorMessage) throws IOException {
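    // Poll once a second, giving the procedure up to 30 seconds to finish.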
1431    int tries = 30;
1432    while (
1433      !(getMasterProcedureExecutor().isFinished(pid)) && getMasterProcedureExecutor().isRunning()
1434        && tries > 0
1435    ) {
1436      try {
1437        Thread.sleep(1000);
1438      } catch (InterruptedException e) {
1439        throw new IOException("Wait interrupted", e);
1440      }
1441      tries--;
1442    }
    if (tries <= 0) {
      throw new HBaseIOException(errorMessage + "; procedure did not complete in the given time.");
1446    } else {
1447      Procedure<?> result = getMasterProcedureExecutor().getResult(pid);
1448      if (result != null && result.isFailed()) {
1449        throw new IOException(
1450          errorMessage + ". " + MasterProcedureUtil.unwrapRemoteIOException(result));
1451      }
1452    }
1453  }
1454
1455  /**
1456   * Check hbase:meta is up and ready for reading. For use during Master startup only.
1457   * @return True if meta is UP and online and startup can progress. Otherwise, meta is not online
1458   *         and we will hold here until operator intervention.
1459   */
1460  @InterfaceAudience.Private
1461  public boolean waitForMetaOnline() {
1462    return isRegionOnline(RegionInfoBuilder.FIRST_META_REGIONINFO);
1463  }
1464
1465  /**
   * @return True if region is online and scannable, else false if an error or shutdown (otherwise
   *         we just block in here holding up all forward-progress).
1468   */
1469  private boolean isRegionOnline(RegionInfo ri) {
1470    RetryCounter rc = null;
1471    while (!isStopped()) {
1472      RegionState rs = this.assignmentManager.getRegionStates().getRegionState(ri);
1473      if (rs != null && rs.isOpened()) {
1474        if (this.getServerManager().isServerOnline(rs.getServerName())) {
1475          return true;
1476        }
1477      }
1478      // Region is not OPEN.
1479      Optional<Procedure<MasterProcedureEnv>> optProc = this.procedureExecutor.getProcedures()
1480        .stream().filter(p -> p instanceof ServerCrashProcedure).findAny();
1481      // TODO: Add a page to refguide on how to do repair. Have this log message point to it.
1482      // Page will talk about loss of edits, how to schedule at least the meta WAL recovery, and
1483      // then how to assign including how to break region lock if one held.
1484      LOG.warn(
1485        "{} is NOT online; state={}; ServerCrashProcedures={}. Master startup cannot "
1486          + "progress, in holding-pattern until region onlined.",
1487        ri.getRegionNameAsString(), rs, optProc.isPresent());
1488      // Check once-a-minute.
1489      if (rc == null) {
1490        rc = new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60_000).create();
1491      }
1492      Threads.sleep(rc.getBackoffTimeAndIncrementAttempts());
1493    }
1494    return false;
1495  }
1496
1497  /**
   * Check hbase:namespace table is assigned. If not, startup will hang looking for the ns table.
1499   * <p/>
   * This is for rolling upgrades; later we will migrate the data in the ns table to the ns family
   * of the meta table. And if this is a new cluster, this method will return immediately as there
   * will be no namespace table/region.
1503   * @return True if namespace table is up/online.
1504   */
1505  private boolean waitForNamespaceOnline() throws IOException {
1506    TableState nsTableState =
1507      MetaTableAccessor.getTableState(getConnection(), TableName.NAMESPACE_TABLE_NAME);
1508    if (nsTableState == null || nsTableState.isDisabled()) {
1509      // this means we have already migrated the data and disabled or deleted the namespace table,
1510      // or this is a new deploy which does not have a namespace table from the beginning.
1511      return true;
1512    }
1513    List<RegionInfo> ris =
1514      this.assignmentManager.getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME);
1515    if (ris.isEmpty()) {
      // maybe this will not happen any more, but anyway, no harm in adding a check here...
1517      return true;
1518    }
1519    // Else there are namespace regions up in meta. Ensure they are assigned before we go on.
1520    for (RegionInfo ri : ris) {
1521      if (!isRegionOnline(ri)) {
1522        return false;
1523      }
1524    }
1525    return true;
1526  }
1527
1528  /**
1529   * Adds the {@code MasterQuotasObserver} to the list of configured Master observers to
1530   * automatically remove quotas for a table when that table is deleted.
1531   */
1532  @InterfaceAudience.Private
1533  public void updateConfigurationForQuotasObserver(Configuration conf) {
    // We're configured to not delete quotas on table deletion, so we don't need to add the
    // observer.
1535    if (
1536      !conf.getBoolean(MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE,
1537        MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT)
1538    ) {
1539      return;
1540    }
1541    String[] masterCoprocs = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
1542    final int length = null == masterCoprocs ? 0 : masterCoprocs.length;
1543    String[] updatedCoprocs = new String[length + 1];
1544    if (length > 0) {
1545      System.arraycopy(masterCoprocs, 0, updatedCoprocs, 0, masterCoprocs.length);
1546    }
1547    updatedCoprocs[length] = MasterQuotasObserver.class.getName();
1548    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, updatedCoprocs);
1549  }
1550
1551  private void initMobCleaner() {
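    // Schedule the chores that remove obsolete MOB files and periodically compact MOB files.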
1552    this.mobFileCleanerChore = new MobFileCleanerChore(this);
1553    getChoreService().scheduleChore(mobFileCleanerChore);
1554    this.mobFileCompactionChore = new MobFileCompactionChore(this);
1555    getChoreService().scheduleChore(mobFileCompactionChore);
1556  }
1557
1558  /**
1559   * <p>
1560   * Create a {@link ServerManager} instance.
1561   * </p>
1562   * <p>
1563   * Will be overridden in tests.
1564   * </p>
1565   */
1566  @InterfaceAudience.Private
1567  protected ServerManager createServerManager(MasterServices master, RegionServerList storage)
1568    throws IOException {
    // We put this out here in a method so we can do a Mockito.spy and stub it out
    // with a mocked-up ServerManager.
1571    setupClusterConnection();
1572    return new ServerManager(master, storage);
1573  }
1574
1575  private void waitForRegionServers(final MonitoredTask status)
1576    throws IOException, InterruptedException {
1577    this.serverManager.waitForRegionServers(status);
1578  }
1579
1580  // Will be overridden in tests
1581  @InterfaceAudience.Private
1582  protected void initClusterSchemaService() throws IOException, InterruptedException {
1583    this.clusterSchemaService = new ClusterSchemaServiceImpl(this);
1584    this.clusterSchemaService.startAsync();
1585    try {
1586      this.clusterSchemaService
1587        .awaitRunning(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,
1588          DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);
1589    } catch (TimeoutException toe) {
1590      throw new IOException("Timedout starting ClusterSchemaService", toe);
1591    }
1592  }
1593
1594  private void initQuotaManager() throws IOException {
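    // Start the manager before publishing it via the field, so callers never observe an
    // unstarted instance.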
1595    MasterQuotaManager quotaManager = new MasterQuotaManager(this);
1596    quotaManager.start();
1597    this.quotaManager = quotaManager;
1598  }
1599
1600  private SpaceQuotaSnapshotNotifier createQuotaSnapshotNotifier() {
1601    SpaceQuotaSnapshotNotifier notifier =
1602      SpaceQuotaSnapshotNotifierFactory.getInstance().create(getConfiguration());
1603    return notifier;
1604  }
1605
1606  public boolean isCatalogJanitorEnabled() {
    return catalogJanitorChore != null && catalogJanitorChore.getEnabled();
1608  }
1609
1610  boolean isCleanerChoreEnabled() {
1611    boolean hfileCleanerFlag = true, logCleanerFlag = true;
1612
1613    if (getHFileCleaner() != null) {
1614      hfileCleanerFlag = getHFileCleaner().getEnabled();
1615    }
1616
1617    if (logCleaner != null) {
1618      logCleanerFlag = logCleaner.getEnabled();
1619    }
1620
1621    return (hfileCleanerFlag && logCleanerFlag);
1622  }
1623
1624  @Override
1625  public ServerManager getServerManager() {
1626    return this.serverManager;
1627  }
1628
1629  @Override
1630  public MasterFileSystem getMasterFileSystem() {
1631    return this.fileSystemManager;
1632  }
1633
1634  @Override
1635  public MasterWalManager getMasterWalManager() {
1636    return this.walManager;
1637  }
1638
1639  @Override
1640  public boolean rotateSystemKeyIfChanged() throws IOException {
1641    // STUB - Feature not yet implemented
1642    return false;
1643  }
1644
1645  @Override
1646  public SplitWALManager getSplitWALManager() {
1647    return splitWALManager;
1648  }
1649
1650  @Override
1651  public TableStateManager getTableStateManager() {
1652    return tableStateManager;
1653  }
1654
1655  /*
1656   * Start up all services. If any of these threads gets an unhandled exception then they just die
1657   * with a logged message. This should be fine because in general, we do not expect the master to
1658   * get such unhandled exceptions as OOMEs; it should be lightly loaded. See what HRegionServer
   * does if we need to install an uncaught exception handler.
1660   */
1661  private void startServiceThreads() throws IOException {
1662    // Start the executor service pools
1663    final int masterOpenRegionPoolSize = conf.getInt(HConstants.MASTER_OPEN_REGION_THREADS,
1664      HConstants.MASTER_OPEN_REGION_THREADS_DEFAULT);
1665    executorService.startExecutorService(executorService.new ExecutorConfig()
1666      .setExecutorType(ExecutorType.MASTER_OPEN_REGION).setCorePoolSize(masterOpenRegionPoolSize));
1667    final int masterCloseRegionPoolSize = conf.getInt(HConstants.MASTER_CLOSE_REGION_THREADS,
1668      HConstants.MASTER_CLOSE_REGION_THREADS_DEFAULT);
1669    executorService.startExecutorService(
1670      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_CLOSE_REGION)
1671        .setCorePoolSize(masterCloseRegionPoolSize));
1672    final int masterServerOpThreads = conf.getInt(HConstants.MASTER_SERVER_OPERATIONS_THREADS,
1673      HConstants.MASTER_SERVER_OPERATIONS_THREADS_DEFAULT);
1674    executorService.startExecutorService(
1675      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SERVER_OPERATIONS)
1676        .setCorePoolSize(masterServerOpThreads));
1677    final int masterServerMetaOpsThreads =
1678      conf.getInt(HConstants.MASTER_META_SERVER_OPERATIONS_THREADS,
1679        HConstants.MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT);
1680    executorService.startExecutorService(executorService.new ExecutorConfig()
1681      .setExecutorType(ExecutorType.MASTER_META_SERVER_OPERATIONS)
1682      .setCorePoolSize(masterServerMetaOpsThreads));
1683    final int masterLogReplayThreads = conf.getInt(HConstants.MASTER_LOG_REPLAY_OPS_THREADS,
1684      HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT);
1685    executorService.startExecutorService(executorService.new ExecutorConfig()
1686      .setExecutorType(ExecutorType.M_LOG_REPLAY_OPS).setCorePoolSize(masterLogReplayThreads));
1687    final int masterSnapshotThreads = conf.getInt(SnapshotManager.SNAPSHOT_POOL_THREADS_KEY,
1688      SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT);
1689    executorService.startExecutorService(
1690      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SNAPSHOT_OPERATIONS)
1691        .setCorePoolSize(masterSnapshotThreads).setAllowCoreThreadTimeout(true));
1692    final int masterMergeDispatchThreads = conf.getInt(HConstants.MASTER_MERGE_DISPATCH_THREADS,
1693      HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT);
1694    executorService.startExecutorService(
1695      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_MERGE_OPERATIONS)
1696        .setCorePoolSize(masterMergeDispatchThreads).setAllowCoreThreadTimeout(true));
1697
    // We depend on there being only one instance of this executor running
    // at a time. To do concurrency, we would need fencing of enable/disable of
    // tables.
    // Any time you change this maxThreads to > 1, please see the comment at
    // AccessController#postCompletedCreateTableAction
1703    executorService.startExecutorService(executorService.new ExecutorConfig()
1704      .setExecutorType(ExecutorType.MASTER_TABLE_OPERATIONS).setCorePoolSize(1));
1705    startProcedureExecutor();
1706
1707    // Create log cleaner thread pool
1708    logCleanerPool = DirScanPool.getLogCleanerScanPool(conf);
1709    Map<String, Object> params = new HashMap<>();
1710    params.put(MASTER, this);
1711    // Start log cleaner thread
1712    int cleanerInterval =
1713      conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL);
1714    this.logCleaner =
1715      new LogCleaner(cleanerInterval, this, conf, getMasterWalManager().getFileSystem(),
1716        getMasterWalManager().getOldLogDir(), logCleanerPool, params);
1717    getChoreService().scheduleChore(logCleaner);
1718
1719    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
1720
1721    // Create custom archive hfile cleaners
1722    String[] paths = conf.getStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS);
    // TODO: handle the overlap issues for the custom paths
1724
1725    if (paths != null && paths.length > 0) {
1726      if (conf.getStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS) == null) {
1727        Set<String> cleanerClasses = new HashSet<>();
1728        String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
1729        if (cleaners != null) {
1730          Collections.addAll(cleanerClasses, cleaners);
1731        }
1732        conf.setStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS,
1733          cleanerClasses.toArray(new String[cleanerClasses.size()]));
1734        LOG.info("Archive custom cleaner paths: {}, plugins: {}", Arrays.asList(paths),
1735          cleanerClasses);
1736      }
1737      // share the hfile cleaner pool in custom paths
1738      sharedHFileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf.get(CUSTOM_POOL_SIZE, "6"));
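      // Each custom path gets its own cleaner chore, but they all share the pool created above.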
1739      for (int i = 0; i < paths.length; i++) {
1740        Path path = new Path(paths[i].trim());
1741        HFileCleaner cleaner =
1742          new HFileCleaner("ArchiveCustomHFileCleaner-" + path.getName(), cleanerInterval, this,
1743            conf, getMasterFileSystem().getFileSystem(), new Path(archiveDir, path),
1744            HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS, sharedHFileCleanerPool, params, null);
1745        hfileCleaners.add(cleaner);
1746        hfileCleanerPaths.add(path);
1747      }
1748    }
1749
1750    // Create the whole archive dir cleaner thread pool
1751    exclusiveHFileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf);
1752    hfileCleaners.add(0,
1753      new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem().getFileSystem(),
1754        archiveDir, exclusiveHFileCleanerPool, params, hfileCleanerPaths));
1755    hfileCleanerPaths.add(0, archiveDir);
1756    // Schedule all the hfile cleaners
1757    for (HFileCleaner hFileCleaner : hfileCleaners) {
1758      getChoreService().scheduleChore(hFileCleaner);
1759    }
1760
    // Reopening regions based on very high storeFileRefCount is considered enabled
    // only if hbase.regions.recovery.store.file.ref.count has a value > 0
1763    final int maxStoreFileRefCount = conf.getInt(HConstants.STORE_FILE_REF_COUNT_THRESHOLD,
1764      HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD);
1765    if (maxStoreFileRefCount > 0) {
1766      this.regionsRecoveryChore = new RegionsRecoveryChore(this, conf, this);
1767      getChoreService().scheduleChore(this.regionsRecoveryChore);
1768    } else {
1769      LOG.info(
1770        "Reopening regions with very high storeFileRefCount is disabled. "
1771          + "Provide threshold value > 0 for {} to enable it.",
1772        HConstants.STORE_FILE_REF_COUNT_THRESHOLD);
1773    }
1774
1775    this.regionsRecoveryConfigManager = new RegionsRecoveryConfigManager(this);
1776
1777    replicationBarrierCleaner =
1778      new ReplicationBarrierCleaner(conf, this, getConnection(), replicationPeerManager);
1779    getChoreService().scheduleChore(replicationBarrierCleaner);
1780
1781    final boolean isSnapshotChoreEnabled = this.snapshotCleanupStateStore.get();
1782    this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager());
1783    if (isSnapshotChoreEnabled) {
1784      getChoreService().scheduleChore(this.snapshotCleanerChore);
1785    } else {
1786      if (LOG.isTraceEnabled()) {
1787        LOG.trace("Snapshot Cleaner Chore is disabled. Not starting up the chore..");
1788      }
1789    }
1790    serviceStarted = true;
1791    if (LOG.isTraceEnabled()) {
1792      LOG.trace("Started service threads");
1793    }
1794  }
1795
1796  protected void stopServiceThreads() {
1797    if (masterJettyServer != null) {
1798      LOG.info("Stopping master jetty server");
1799      try {
1800        masterJettyServer.stop();
1801      } catch (Exception e) {
1802        LOG.error("Failed to stop master jetty server", e);
1803      }
1804    }
1805    stopChoreService();
1806    stopExecutorService();
1807    if (exclusiveHFileCleanerPool != null) {
1808      exclusiveHFileCleanerPool.shutdownNow();
1809      exclusiveHFileCleanerPool = null;
1810    }
1811    if (logCleanerPool != null) {
1812      logCleanerPool.shutdownNow();
1813      logCleanerPool = null;
1814    }
1815    if (sharedHFileCleanerPool != null) {
1816      sharedHFileCleanerPool.shutdownNow();
1817      sharedHFileCleanerPool = null;
1818    }
1819    if (maintenanceRegionServer != null) {
      maintenanceRegionServer.getRegionServer().stop("Stopping maintenance region server");
1821    }
1822
1823    LOG.debug("Stopping service threads");
1824    // stop procedure executor prior to other services such as server manager and assignment
1825    // manager, as these services are important for some running procedures. See HBASE-24117 for
1826    // example.
1827    stopProcedureExecutor();
1828
1829    if (regionNormalizerManager != null) {
1830      regionNormalizerManager.stop();
1831    }
1832    if (this.quotaManager != null) {
1833      this.quotaManager.stop();
1834    }
1835
1836    if (this.activeMasterManager != null) {
1837      this.activeMasterManager.stop();
1838    }
1839    if (this.serverManager != null) {
1840      this.serverManager.stop();
1841    }
1842    if (this.assignmentManager != null) {
1843      this.assignmentManager.stop();
1844    }
1845
1846    if (masterRegion != null) {
1847      masterRegion.close(isAborted());
1848    }
1849    if (this.walManager != null) {
1850      this.walManager.stop();
1851    }
1852    if (this.fileSystemManager != null) {
1853      this.fileSystemManager.stop();
1854    }
1855    if (this.mpmHost != null) {
1856      this.mpmHost.stop("server shutting down.");
1857    }
1858    if (this.regionServerTracker != null) {
1859      this.regionServerTracker.stop();
1860    }
1861  }
1862
1863  private void createProcedureExecutor() throws IOException {
1864    final String procedureDispatcherClassName =
1865      conf.get(HBASE_MASTER_RSPROC_DISPATCHER_CLASS, DEFAULT_HBASE_MASTER_RSPROC_DISPATCHER_CLASS);
1866    final RSProcedureDispatcher procedureDispatcher = ReflectionUtils.instantiateWithCustomCtor(
1867      procedureDispatcherClassName, new Class[] { MasterServices.class }, new Object[] { this });
1868    final MasterProcedureEnv procEnv = new MasterProcedureEnv(this, procedureDispatcher);
1869    procedureStore = new RegionProcedureStore(this, masterRegion,
1870      new MasterProcedureEnv.FsUtilsLeaseRecovery(this));
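    // Abort the master if the procedure store loses its lease; procedures cannot make progress
    // without it.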
1871    procedureStore.registerListener(new ProcedureStoreListener() {
1872
1873      @Override
1874      public void abortProcess() {
1875        abort("The Procedure Store lost the lease", null);
1876      }
1877    });
1878    MasterProcedureScheduler procedureScheduler = procEnv.getProcedureScheduler();
1879    procedureExecutor = new ProcedureExecutor<>(conf, procEnv, procedureStore, procedureScheduler);
1880    configurationManager.registerObserver(procEnv);
1881
1882    int cpus = Runtime.getRuntime().availableProcessors();
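    // Default the worker count to a quarter of the available cores, but never below the
    // configured minimum.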
1883    int defaultNumThreads = Math.max((cpus > 0 ? cpus / 4 : 0),
1884      MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS);
1885    int numThreads =
1886      conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, defaultNumThreads);
1887    if (numThreads <= 0) {
1888      LOG.warn("{} is set to {}, which is invalid, using default value {} instead",
1889        MasterProcedureConstants.MASTER_PROCEDURE_THREADS, numThreads, defaultNumThreads);
1890      numThreads = defaultNumThreads;
1891    }
1892    final boolean abortOnCorruption =
1893      conf.getBoolean(MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
1894        MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
1895    procedureStore.start(numThreads);
1896    // Just initialize it but do not start the workers, we will start the workers later by calling
1897    // startProcedureExecutor. See the javadoc for finishActiveMasterInitialization for more
1898    // details.
1899    procedureExecutor.init(numThreads, abortOnCorruption);
1900    if (!procEnv.getRemoteDispatcher().start()) {
1901      throw new HBaseIOException("Failed start of remote dispatcher");
1902    }
1903  }
1904
  // Will be overridden in unit tests
1906  protected void startProcedureExecutor() throws IOException {
1907    procedureExecutor.startWorkers();
1908  }
1909
1910  /**
1911   * Turn on/off Snapshot Cleanup Chore
1912   * @param on indicates whether Snapshot Cleanup Chore is to be run
1913   */
1914  void switchSnapshotCleanup(final boolean on, final boolean synchronous) throws IOException {
1915    if (synchronous) {
1916      synchronized (this.snapshotCleanerChore) {
1917        switchSnapshotCleanup(on);
1918      }
1919    } else {
1920      switchSnapshotCleanup(on);
1921    }
1922  }
1923
1924  private void switchSnapshotCleanup(final boolean on) throws IOException {
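    // Persist the new state first so it survives a master restart, then schedule or cancel the
    // chore accordingly.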
1925    snapshotCleanupStateStore.set(on);
1926    if (on) {
1927      getChoreService().scheduleChore(this.snapshotCleanerChore);
1928    } else {
1929      this.snapshotCleanerChore.cancel();
1930    }
1931  }
1932
1933  private void stopProcedureExecutor() {
1934    if (procedureExecutor != null) {
1935      configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
1936      procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
1937      procedureExecutor.stop();
1938      procedureExecutor.join();
1939      procedureExecutor = null;
1940    }
1941
1942    if (procedureStore != null) {
1943      procedureStore.stop(isAborted());
1944      procedureStore = null;
1945    }
1946  }
1947
1948  protected void stopChores() {
1949    shutdownChore(mobFileCleanerChore);
1950    shutdownChore(mobFileCompactionChore);
1951    shutdownChore(balancerChore);
1952    if (regionNormalizerManager != null) {
1953      shutdownChore(regionNormalizerManager.getRegionNormalizerChore());
1954    }
1955    shutdownChore(clusterStatusChore);
1956    shutdownChore(catalogJanitorChore);
1957    shutdownChore(clusterStatusPublisherChore);
1958    shutdownChore(snapshotQuotaChore);
1959    shutdownChore(logCleaner);
1960    if (hfileCleaners != null) {
1961      for (ScheduledChore chore : hfileCleaners) {
1962        chore.shutdown();
1963      }
1964      hfileCleaners = null;
1965    }
1966    shutdownChore(replicationBarrierCleaner);
1967    shutdownChore(snapshotCleanerChore);
1968    shutdownChore(hbckChore);
1969    shutdownChore(regionsRecoveryChore);
1970    shutdownChore(rollingUpgradeChore);
1971    shutdownChore(oldWALsDirSizeChore);
1972  }
1973
  /** Returns the remote side's InetAddress. */
1975  InetAddress getRemoteInetAddress(final int port, final long serverStartCode)
1976    throws UnknownHostException {
1977    // Do it out here in its own little method so can fake an address when
1978    // mocking up in tests.
1979    InetAddress ia = RpcServer.getRemoteIp();
1980
1981    // The call could be from the local regionserver,
1982    // in which case, there is no remote address.
1983    if (ia == null && serverStartCode == startcode) {
1984      InetSocketAddress isa = rpcServices.getSocketAddress();
1985      if (isa != null && isa.getPort() == port) {
1986        ia = isa.getAddress();
1987      }
1988    }
1989    return ia;
1990  }
1991
  /** Returns the maximum time we should run the balancer for. */
1993  private int getMaxBalancingTime() {
    // If max balancing time isn't set, default it to the balancer period.
1995    int maxBalancingTime =
1996      getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, getConfiguration()
1997        .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD));
1998    return maxBalancingTime;
1999  }
2000
  /** Returns the maximum number of regions in transition. */
2002  private int getMaxRegionsInTransition() {
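    // Scale the limit with cluster size: maxRitPercent of all assigned regions, but at least 1.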
2003    int numRegions = this.assignmentManager.getRegionStates().getRegionAssignments().size();
2004    return Math.max((int) Math.floor(numRegions * this.maxRitPercent), 1);
2005  }
2006
2007  /**
   * It first sleeps until the next balance plan start time. Meanwhile, it throttles by the max
   * number of regions in transition to protect availability.
2010   * @param nextBalanceStartTime   The next balance plan start time
2011   * @param maxRegionsInTransition max number of regions in transition
2012   * @param cutoffTime             when to exit balancer
2013   */
2014  private void balanceThrottling(long nextBalanceStartTime, int maxRegionsInTransition,
2015    long cutoffTime) {
2016    boolean interrupted = false;
2017
2018    // Sleep to next balance plan start time
    // But if there are zero regions in transition, it can skip sleeping to speed up.
2020    while (
2021      !interrupted && EnvironmentEdgeManager.currentTime() < nextBalanceStartTime
2022        && this.assignmentManager.getRegionTransitScheduledCount() > 0
2023    ) {
2024      try {
2025        Thread.sleep(100);
2026      } catch (InterruptedException ie) {
2027        interrupted = true;
2028      }
2029    }
2030
2031    // Throttling by max number regions in transition
2032    while (
2033      !interrupted && maxRegionsInTransition > 0
2034        && this.assignmentManager.getRegionTransitScheduledCount() >= maxRegionsInTransition
2035        && EnvironmentEdgeManager.currentTime() <= cutoffTime
2036    ) {
2037      try {
2038        // sleep if the number of regions in transition exceeds the limit
2039        Thread.sleep(100);
2040      } catch (InterruptedException ie) {
2041        interrupted = true;
2042      }
2043    }
2044
    if (interrupted) {
      Thread.currentThread().interrupt();
    }
2046  }
2047
2048  public BalanceResponse balance() throws IOException {
2049    return balance(BalanceRequest.defaultInstance());
2050  }
2051
2052  /**
   * Trigger a normal balance; see {@link HMaster#balance()}. If the balance is not executed this
   * time, the metrics related to the balance will still be updated. When the balance is running,
   * related metrics will be updated at the same time. But if some checking logic fails and causes
   * the balancer to exit early, we lose the chance to update the balancer metrics, leaving users
   * without the latest balancer info.
2058   */
2059  public BalanceResponse balanceOrUpdateMetrics() throws IOException {
2060    synchronized (this.balancer) {
2061      BalanceResponse response = balance();
2062      if (!response.isBalancerRan()) {
2063        Map<TableName, Map<ServerName, List<RegionInfo>>> assignments =
2064          this.assignmentManager.getRegionStates().getAssignmentsForBalancer(this.tableStateManager,
2065            this.serverManager.getOnlineServersList());
2066        for (Map<ServerName, List<RegionInfo>> serverMap : assignments.values()) {
2067          serverMap.keySet().removeAll(this.serverManager.getDrainingServersList());
2068        }
2069        this.balancer.updateBalancerLoadInfo(assignments);
2070      }
2071      return response;
2072    }
2073  }
2074
2075  /**
2076   * Checks master state before initiating action over region topology.
2077   * @param action the name of the action under consideration, for logging.
2078   * @return {@code true} when the caller should exit early, {@code false} otherwise.
2079   */
2080  @Override
2081  public boolean skipRegionManagementAction(final String action) {
    // Note: this method could be `default` on MasterServices but for logging.
2083    if (!isInitialized()) {
2084      LOG.debug("Master has not been initialized, don't run {}.", action);
2085      return true;
2086    }
2087    if (this.getServerManager().isClusterShutdown()) {
2088      LOG.info("Cluster is shutting down, don't run {}.", action);
2089      return true;
2090    }
2091    if (isInMaintenanceMode()) {
2092      LOG.info("Master is in maintenance mode, don't run {}.", action);
2093      return true;
2094    }
2095    return false;
2096  }
2097
2098  public BalanceResponse balance(BalanceRequest request) throws IOException {
2099    checkInitialized();
2100
2101    BalanceResponse.Builder responseBuilder = BalanceResponse.newBuilder();
2102
2103    if (loadBalancerStateStore == null || !(loadBalancerStateStore.get() || request.isDryRun())) {
2104      return responseBuilder.build();
2105    }
2106
2107    if (skipRegionManagementAction("balancer")) {
2108      return responseBuilder.build();
2109    }
2110
2111    synchronized (this.balancer) {
      // Only allow one balance run at a time.
2113      if (this.assignmentManager.getRegionTransitScheduledCount() > 0) {
2114        List<RegionStateNode> regionsInTransition = assignmentManager.getRegionsInTransition();
2115        // if hbase:meta region is in transition, result of assignment cannot be recorded
2116        // ignore the force flag in that case
2117        boolean metaInTransition = assignmentManager.isMetaRegionInTransition();
2118        List<RegionStateNode> toPrint = regionsInTransition;
2119        int max = 5;
2120        boolean truncated = false;
2121        if (regionsInTransition.size() > max) {
2122          toPrint = regionsInTransition.subList(0, max);
2123          truncated = true;
2124        }
2125
2126        if (!request.isIgnoreRegionsInTransition() || metaInTransition) {
2127          LOG.info("Not running balancer (ignoreRIT=false" + ", metaRIT=" + metaInTransition
2128            + ") because " + assignmentManager.getRegionTransitScheduledCount()
2129            + " region(s) are scheduled to transit " + toPrint
2130            + (truncated ? "(truncated list)" : ""));
2131          return responseBuilder.build();
2132        }
2133      }
2134      if (this.serverManager.areDeadServersInProgress()) {
2135        LOG.info("Not running balancer because processing dead regionserver(s): "
2136          + this.serverManager.getDeadServers());
2137        return responseBuilder.build();
2138      }
2139
2140      if (this.cpHost != null) {
2141        try {
2142          if (this.cpHost.preBalance(request)) {
2143            LOG.debug("Coprocessor bypassing balancer request");
2144            return responseBuilder.build();
2145          }
2146        } catch (IOException ioe) {
2147          LOG.error("Error invoking master coprocessor preBalance()", ioe);
2148          return responseBuilder.build();
2149        }
2150      }
2151
2152      Map<TableName, Map<ServerName, List<RegionInfo>>> assignments =
2153        this.assignmentManager.getRegionStates().getAssignmentsForBalancer(tableStateManager,
2154          this.serverManager.getOnlineServersList());
2155      for (Map<ServerName, List<RegionInfo>> serverMap : assignments.values()) {
2156        serverMap.keySet().removeAll(this.serverManager.getDrainingServersList());
2157      }
2158
2159      // Give the balancer the current cluster state.
2160      this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
2161
2162      List<RegionPlan> plans = this.balancer.balanceCluster(assignments);
2163
2164      responseBuilder.setBalancerRan(true).setMovesCalculated(plans == null ? 0 : plans.size());
2165
2166      if (skipRegionManagementAction("balancer")) {
2167        // make one last check that the cluster isn't shutting down before proceeding.
2168        return responseBuilder.build();
2169      }
2170
2171      // For dry run we don't actually want to execute the moves, but we do want
2172      // to execute the coprocessor below
2173      List<RegionPlan> sucRPs =
2174        request.isDryRun() ? Collections.emptyList() : executeRegionPlansWithThrottling(plans);
2175
2176      if (this.cpHost != null) {
2177        try {
2178          this.cpHost.postBalance(request, sucRPs);
2179        } catch (IOException ioe) {
2180          // balancing already succeeded so don't change the result
2181          LOG.error("Error invoking master coprocessor postBalance()", ioe);
2182        }
2183      }
2184
2185      responseBuilder.setMovesExecuted(sucRPs.size());
2186    }
2187
    // If the LoadBalancer did not generate any plans, the cluster is already balanced;
    // the response built above reflects that.
2190    return responseBuilder.build();
2191  }
2192
2193  /**
2194   * Execute region plans with throttling
2195   * @param plans to execute
2196   * @return succeeded plans
2197   */
2198  public List<RegionPlan> executeRegionPlansWithThrottling(List<RegionPlan> plans) {
2199    List<RegionPlan> successRegionPlans = new ArrayList<>();
2200    int maxRegionsInTransition = getMaxRegionsInTransition();
2201    long balanceStartTime = EnvironmentEdgeManager.currentTime();
2202    long cutoffTime = balanceStartTime + this.maxBalancingTime;
2203    int rpCount = 0; // number of RegionPlans balanced so far
2204    if (plans != null && !plans.isEmpty()) {
2205      int balanceInterval = this.maxBalancingTime / plans.size();
2206      LOG.info(
2207        "Balancer plans size is " + plans.size() + ", the balance interval is " + balanceInterval
2208          + " ms, and the max number regions in transition is " + maxRegionsInTransition);
2209
2210      for (RegionPlan plan : plans) {
2211        LOG.info("balance " + plan);
2212        // TODO: bulk assign
2213        try {
2214          this.assignmentManager.balance(plan);
2215          this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
2216          this.balancer.throttle(plan);
2217        } catch (HBaseIOException hioe) {
          // Ignore failed plans here to avoid aborting the whole set of balance plans;
          // later calls of balance() can pick up the failed and skipped plans.
2220          LOG.warn("Failed balance plan {}, skipping...", plan, hioe);
2221        } catch (Exception e) {
2222          LOG.warn("Failed throttling assigning a new plan.", e);
2223        }
2224        // rpCount records balance plans processed, does not care if a plan succeeds
2225        rpCount++;
2226        successRegionPlans.add(plan);
2227
2228        if (this.maxBalancingTime > 0) {
2229          balanceThrottling(balanceStartTime + rpCount * balanceInterval, maxRegionsInTransition,
2230            cutoffTime);
2231        }
2232
2233        // if performing next balance exceeds cutoff time, exit the loop
2234        if (
2235          this.maxBalancingTime > 0 && rpCount < plans.size()
2236            && EnvironmentEdgeManager.currentTime() > cutoffTime
2237        ) {
          // TODO: After balance, there should not be a cutoff time (keeping it as
          // a safety net for now)
2240          LOG.debug(
2241            "No more balancing till next balance run; maxBalanceTime=" + this.maxBalancingTime);
2242          break;
2243        }
2244      }
2245    }
2246    LOG.debug("Balancer is going into sleep until next period in {}ms", getConfiguration()
2247      .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD));
2248    return successRegionPlans;
2249  }
2250
2251  @Override
2252  public RegionNormalizerManager getRegionNormalizerManager() {
2253    return regionNormalizerManager;
2254  }
2255
2256  @Override
2257  public boolean normalizeRegions(final NormalizeTableFilterParams ntfp,
2258    final boolean isHighPriority) throws IOException {
2259    if (regionNormalizerManager == null || !regionNormalizerManager.isNormalizerOn()) {
2260      LOG.debug("Region normalization is disabled, don't run region normalizer.");
2261      return false;
2262    }
2263    if (skipRegionManagementAction("region normalizer")) {
2264      return false;
2265    }
2266    if (assignmentManager.getRegionTransitScheduledCount() > 0) {
2267      return false;
2268    }
2269
2270    final Set<TableName> matchingTables = getTableDescriptors(new LinkedList<>(),
2271      ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false).stream()
2272      .map(TableDescriptor::getTableName).collect(Collectors.toSet());
2273    final Set<TableName> allEnabledTables =
2274      tableStateManager.getTablesInStates(TableState.State.ENABLED);
2275    final List<TableName> targetTables =
2276      new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables));
2277    Collections.shuffle(targetTables);
2278    return regionNormalizerManager.normalizeRegions(targetTables, isHighPriority);
2279  }
2280
2281  /** Returns Client info for use as prefix on an audit log string; who did an action */
2282  @Override
2283  public String getClientIdAuditPrefix() {
2284    return "Client=" + RpcServer.getRequestUserName().orElse(null) + "/"
2285      + RpcServer.getRemoteAddress().orElse(null);
2286  }
2287
2288  /**
2289   * Switch for the background CatalogJanitor thread. Used for testing. The thread will continue to
2290   * run. It will just be a noop if disabled.
2291   * @param b If false, the catalog janitor won't do anything.
2292   */
2293  public void setCatalogJanitorEnabled(final boolean b) {
2294    this.catalogJanitorChore.setEnabled(b);
2295  }
2296
2297  @Override
  public long mergeRegions(final RegionInfo[] regionsToMerge, final boolean forcible,
    final long nonceGroup, final long nonce) throws IOException {
2300    checkInitialized();
2301
2302    final String regionNamesToLog = RegionInfo.getShortNameToLog(regionsToMerge);
2303
2304    if (!isSplitOrMergeEnabled(MasterSwitchType.MERGE)) {
2305      LOG.warn("Merge switch is off! skip merge of " + regionNamesToLog);
2306      throw new DoNotRetryIOException(
2307        "Merge of " + regionNamesToLog + " failed because merge switch is off");
2308    }
2309
2310    if (!getTableDescriptors().get(regionsToMerge[0].getTable()).isMergeEnabled()) {
2311      LOG.warn("Merge is disabled for the table! Skipping merge of {}", regionNamesToLog);
2312      throw new DoNotRetryIOException(
2313        "Merge of " + regionNamesToLog + " failed as region merge is disabled for the table");
2314    }
2315
    return MasterProcedureUtil.submitProcedure(new NonceProcedureRunnable(this, nonceGroup, nonce) {
2317      @Override
2318      protected void run() throws IOException {
2319        getMaster().getMasterCoprocessorHost().preMergeRegions(regionsToMerge);
2320        String aid = getClientIdAuditPrefix();
2321        LOG.info("{} merge regions {}", aid, regionNamesToLog);
2322        submitProcedure(new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(),
2323          regionsToMerge, forcible));
2324        getMaster().getMasterCoprocessorHost().postMergeRegions(regionsToMerge);
2325      }
2326
2327      @Override
2328      protected String getDescription() {
2329        return "MergeTableProcedure";
2330      }
2331    });
2332  }
2333
2334  @Override
2335  public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, final long nonceGroup,
2336    final long nonce) throws IOException {
2337    checkInitialized();
2338
2339    if (!isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) {
2340      LOG.warn("Split switch is off! skip split of " + regionInfo);
2341      throw new DoNotRetryIOException(
2342        "Split region " + regionInfo.getRegionNameAsString() + " failed due to split switch off");
2343    }
2344
2345    if (!getTableDescriptors().get(regionInfo.getTable()).isSplitEnabled()) {
2346      LOG.warn("Split is disabled for the table! Skipping split of {}", regionInfo);
2347      throw new DoNotRetryIOException("Split region " + regionInfo.getRegionNameAsString()
2348        + " failed as region split is disabled for the table");
2349    }
2350
2351    return MasterProcedureUtil
2352      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2353        @Override
2354        protected void run() throws IOException {
2355          getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow);
2356          LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString());
2357
2358          // Execute the operation asynchronously
2359          submitProcedure(getAssignmentManager().createSplitProcedure(regionInfo, splitRow));
2360        }
2361
2362        @Override
2363        protected String getDescription() {
2364          return "SplitTableProcedure";
2365        }
2366      });
2367  }
2368
2369  private void warmUpRegion(ServerName server, RegionInfo region) {
2370    FutureUtils.addListener(asyncClusterConnection.getRegionServerAdmin(server)
2371      .warmupRegion(RequestConverter.buildWarmupRegionRequest(region)), (r, e) -> {
2372        if (e != null) {
2373          LOG.warn("Failed to warm up region {} on server {}", region, server, e);
2374        }
2375      });
2376  }
2377
  // Public so it can be accessed by tests. Blocks until the move is done.
  // TODO: Replace with an async implementation from which you can get
  // a success/failure result.
2381  @InterfaceAudience.Private
2382  public void move(final byte[] encodedRegionName, byte[] destServerName) throws IOException {
2383    RegionState regionState =
2384      assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName));
2385
    if (regionState == null) {
      throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
    }
    RegionInfo hri = regionState.getRegion();
2392
2393    ServerName dest;
2394    List<ServerName> exclude = hri.getTable().isSystemTable()
2395      ? assignmentManager.getExcludedServersForSystemTable()
2396      : new ArrayList<>(1);
2397    if (
2398      destServerName != null && exclude.contains(ServerName.valueOf(Bytes.toString(destServerName)))
2399    ) {
      LOG.info(Bytes.toString(encodedRegionName) + " cannot move to "
        + Bytes.toString(destServerName) + " because the server is in the exclude list");
2402      destServerName = null;
2403    }
2404    if (destServerName == null || destServerName.length == 0) {
2405      LOG.info("Passed destination servername is null/empty so " + "choosing a server at random");
2406      exclude.add(regionState.getServerName());
2407      final List<ServerName> destServers = this.serverManager.createDestinationServersList(exclude);
2408      dest = balancer.randomAssignment(hri, destServers);
2409      if (dest == null) {
2410        LOG.debug("Unable to determine a plan to assign " + hri);
2411        return;
2412      }
2413    } else {
2414      ServerName candidate = ServerName.valueOf(Bytes.toString(destServerName));
2415      dest = balancer.randomAssignment(hri, Lists.newArrayList(candidate));
2416      if (dest == null) {
2417        LOG.debug("Unable to determine a plan to assign " + hri);
2418        return;
2419      }
2420      // TODO: deal with table on master for rs group.
2421      if (dest.equals(serverName)) {
        // Don't put user regions on the master, to avoid the balancer
        // unnecessarily moving them off again later.
2424        LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
2425          + " to avoid unnecessary region moving later by load balancer,"
2426          + " because it should not be on master");
2427        return;
2428      }
2429    }
2430
2431    if (dest.equals(regionState.getServerName())) {
2432      LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
2433        + " because region already assigned to the same server " + dest + ".");
2434      return;
2435    }
2436
2437    // Now we can do the move
2438    RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);
2439    assert rp.getDestination() != null : rp.toString() + " " + dest;
2440
2441    try {
2442      checkInitialized();
2443      if (this.cpHost != null) {
2444        this.cpHost.preMove(hri, rp.getSource(), rp.getDestination());
2445      }
2446
2447      TransitRegionStateProcedure proc =
2448        this.assignmentManager.createMoveRegionProcedure(rp.getRegionInfo(), rp.getDestination());
2449      if (conf.getBoolean(WARMUP_BEFORE_MOVE, DEFAULT_WARMUP_BEFORE_MOVE)) {
2450        // Warmup the region on the destination before initiating the move.
2451        // A region server could reject the close request because it either does not
2452        // have the specified region or the region is being split.
2453        LOG.info(getClientIdAuditPrefix() + " move " + rp + ", warming up region on "
2454          + rp.getDestination());
2455        warmUpRegion(rp.getDestination(), hri);
2456      }
2457      LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
2458      Future<byte[]> future = ProcedureSyncWait.submitProcedure(this.procedureExecutor, proc);
2459      try {
        // future.get() will surface a procedure failure as an ExecutionException,
        // which we rethrow below as an HBaseIOException.
        // TODO: Use CompletableFuture rather than this stunted Future.
2462        future.get();
2463      } catch (InterruptedException | ExecutionException e) {
2464        throw new HBaseIOException(e);
2465      }
2466      if (this.cpHost != null) {
2467        this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
2468      }
2469    } catch (IOException ioe) {
2470      if (ioe instanceof HBaseIOException) {
2471        throw (HBaseIOException) ioe;
2472      }
2473      throw new HBaseIOException(ioe);
2474    }
2475  }
2476
2477  @Override
2478  public long createTable(final TableDescriptor tableDescriptor, final byte[][] splitKeys,
2479    final long nonceGroup, final long nonce) throws IOException {
2480    checkInitialized();
2481    TableDescriptor desc = getMasterCoprocessorHost().preCreateTableRegionsInfos(tableDescriptor);
2482    if (desc == null) {
2483      throw new IOException("Creation for " + tableDescriptor + " is canceled by CP");
2484    }
2485    String namespace = desc.getTableName().getNamespaceAsString();
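    // Ensure the namespace exists; this throws if the namespace is unknown.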
2486    this.clusterSchemaService.getNamespace(namespace);
2487
2488    RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(desc, splitKeys);
2489    TableDescriptorChecker.sanityCheck(conf, desc);
2490
2491    return MasterProcedureUtil
2492      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2493        @Override
2494        protected void run() throws IOException {
2495          getMaster().getMasterCoprocessorHost().preCreateTable(desc, newRegions);
2496
2497          LOG.info(getClientIdAuditPrefix() + " create " + desc);
2498
2499          // TODO: We can handle/merge duplicate requests, and differentiate the case of
2500          // TableExistsException by saying if the schema is the same or not.
2501          //
2502          // We need to wait for the procedure to potentially fail due to "prepare" sanity
2503          // checks. This will block only the beginning of the procedure. See HBASE-19953.
2504          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
2505          submitProcedure(
2506            new CreateTableProcedure(procedureExecutor.getEnvironment(), desc, newRegions, latch));
2507          latch.await();
2508
2509          getMaster().getMasterCoprocessorHost().postCreateTable(desc, newRegions);
2510        }
2511
2512        @Override
2513        protected String getDescription() {
2514          return "CreateTableProcedure";
2515        }
2516      });
2517  }
2518
2519  @Override
2520  public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
2521    return createSystemTable(tableDescriptor, false);
2522  }
2523
2524  private long createSystemTable(final TableDescriptor tableDescriptor, final boolean isCritical)
2525    throws IOException {
2526    if (isStopped()) {
2527      throw new MasterNotRunningException();
2528    }
2529
2530    TableName tableName = tableDescriptor.getTableName();
    if (!tableName.isSystemTable()) {
2532      throw new IllegalArgumentException(
2533        "Only system table creation can use this createSystemTable API");
2534    }
2535
2536    RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(tableDescriptor, null);
2537
2538    LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);
2539
    // This special create table is invoked locally within the master, so there is no RPC and
    // hence no need for a nonce to detect duplicate calls.
2542    CreateTableProcedure proc =
2543      new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions);
2544    proc.setCriticalSystemTable(isCritical);
2545    return this.procedureExecutor.submitProcedure(proc);
2546  }
2547
2548  private void startActiveMasterManager(int infoPort) throws KeeperException {
2549    String backupZNode = ZNodePaths.joinZNode(zooKeeper.getZNodePaths().backupMasterAddressesZNode,
2550      serverName.toString());
2551    /*
2552     * Add a ZNode for ourselves in the backup master directory since we may not become the active
2553     * master. If so, we want the actual active master to know we are backup masters, so that it
2554     * won't assign regions to us if so configured. If we become the active master later,
2555     * ActiveMasterManager will delete this node explicitly. If we crash before then, ZooKeeper will
2556     * delete this node for us since it is ephemeral.
2557     */
2558    LOG.info("Adding backup master ZNode " + backupZNode);
2559    if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode, serverName, infoPort)) {
2560      LOG.warn("Failed create of " + backupZNode + " by " + serverName);
2561    }
2562    this.activeMasterManager.setInfoPort(infoPort);
2563    int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
    // If we're a backup master, stall until a primary writes its address
2565    if (conf.getBoolean(HConstants.MASTER_TYPE_BACKUP, HConstants.DEFAULT_MASTER_TYPE_BACKUP)) {
2566      LOG.debug("HMaster started in backup mode. Stalling until master znode is written.");
2567      // This will only be a minute or so while the cluster starts up,
2568      // so don't worry about setting watches on the parent znode
2569      while (!activeMasterManager.hasActiveMaster()) {
2570        LOG.debug("Waiting for master address and cluster state znode to be written.");
2571        Threads.sleep(timeout);
2572      }
2573    }
2574
2575    // Here for the master startup process, we use TaskGroup to monitor the whole progress.
2576    // The UI is similar to how Hadoop designed the startup page for the NameNode.
2577    // See HBASE-21521 for more details.
    // We do not clean up the startupTaskGroup so that the startup progress information
    // remains available in memory.
2580    startupTaskGroup = TaskMonitor.createTaskGroup(true, "Master startup");
2581    try {
2582      if (activeMasterManager.blockUntilBecomingActiveMaster(timeout, startupTaskGroup)) {
2583        finishActiveMasterInitialization();
2584      }
2585    } catch (Throwable t) {
      startupTaskGroup.abort("Failed to become active master due to: " + t.getMessage());
2587      LOG.error(HBaseMarkers.FATAL, "Failed to become active master", t);
2588      // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility
2589      if (
2590        t instanceof NoClassDefFoundError
2591          && t.getMessage().contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction")
2592      ) {
2593        // improved error message for this special case
2594        abort("HBase is having a problem with its Hadoop jars.  You may need to recompile "
2595          + "HBase against Hadoop version " + org.apache.hadoop.util.VersionInfo.getVersion()
2596          + " or change your hadoop jars to start properly", t);
2597      } else {
2598        abort("Unhandled exception. Starting shutdown.", t);
2599      }
2600    }
2601  }
2602
2603  private static boolean isCatalogTable(final TableName tableName) {
2604    return tableName.equals(TableName.META_TABLE_NAME);
2605  }
2606
2607  @Override
2608  public long deleteTable(final TableName tableName, final long nonceGroup, final long nonce)
2609    throws IOException {
2610    checkInitialized();
2611
2612    return MasterProcedureUtil
2613      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2614        @Override
2615        protected void run() throws IOException {
2616          getMaster().getMasterCoprocessorHost().preDeleteTable(tableName);
2617
2618          LOG.info(getClientIdAuditPrefix() + " delete " + tableName);
2619
2620          // TODO: We can handle/merge duplicate request
2621          //
2622          // We need to wait for the procedure to potentially fail due to "prepare" sanity
2623          // checks. This will block only the beginning of the procedure. See HBASE-19953.
2624          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
2625          submitProcedure(
2626            new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, latch));
2627          latch.await();
2628
2629          getMaster().getMasterCoprocessorHost().postDeleteTable(tableName);
2630        }
2631
2632        @Override
2633        protected String getDescription() {
2634          return "DeleteTableProcedure";
2635        }
2636      });
2637  }
2638
2639  @Override
2640  public long truncateTable(final TableName tableName, final boolean preserveSplits,
2641    final long nonceGroup, final long nonce) throws IOException {
2642    checkInitialized();
2643
2644    return MasterProcedureUtil
2645      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2646        @Override
2647        protected void run() throws IOException {
2648          getMaster().getMasterCoprocessorHost().preTruncateTable(tableName);
2649
2650          LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);
2651          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0);
2652          submitProcedure(new TruncateTableProcedure(procedureExecutor.getEnvironment(), tableName,
2653            preserveSplits, latch));
2654          latch.await();
2655
2656          getMaster().getMasterCoprocessorHost().postTruncateTable(tableName);
2657        }
2658
2659        @Override
2660        protected String getDescription() {
2661          return "TruncateTableProcedure";
2662        }
2663      });
2664  }
2665
2666  @Override
2667  public long truncateRegion(final RegionInfo regionInfo, final long nonceGroup, final long nonce)
2668    throws IOException {
2669    checkInitialized();
2670
2671    return MasterProcedureUtil
2672      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2673        @Override
2674        protected void run() throws IOException {
2675          getMaster().getMasterCoprocessorHost().preTruncateRegion(regionInfo);
2676
2677          LOG.info(
2678            getClientIdAuditPrefix() + " truncate region " + regionInfo.getRegionNameAsString());
2679
2680          // Execute the operation asynchronously
2681          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0);
2682          submitProcedure(
2683            new TruncateRegionProcedure(procedureExecutor.getEnvironment(), regionInfo, latch));
2684          latch.await();
2685
2686          getMaster().getMasterCoprocessorHost().postTruncateRegion(regionInfo);
2687        }
2688
2689        @Override
2690        protected String getDescription() {
2691          return "TruncateRegionProcedure";
2692        }
2693      });
2694  }
2695
2696  @Override
2697  public long addColumn(final TableName tableName, final ColumnFamilyDescriptor column,
2698    final long nonceGroup, final long nonce) throws IOException {
2699    checkInitialized();
2700    checkTableExists(tableName);
2701
2702    return modifyTable(tableName, new TableDescriptorGetter() {
2703
2704      @Override
2705      public TableDescriptor get() throws IOException {
2706        TableDescriptor old = getTableDescriptors().get(tableName);
2707        if (old.hasColumnFamily(column.getName())) {
2708          throw new InvalidFamilyOperationException("Column family '" + column.getNameAsString()
2709            + "' in table '" + tableName + "' already exists so cannot be added");
2710        }
2711
2712        return TableDescriptorBuilder.newBuilder(old).setColumnFamily(column).build();
2713      }
2714    }, nonceGroup, nonce, true);
2715  }
2716
2717  /**
2718   * Implement to return TableDescriptor after pre-checks
2719   */
2720  protected interface TableDescriptorGetter {
2721    TableDescriptor get() throws IOException;
2722  }
2723
2724  @Override
2725  public long modifyColumn(final TableName tableName, final ColumnFamilyDescriptor descriptor,
2726    final long nonceGroup, final long nonce) throws IOException {
2727    checkInitialized();
2728    checkTableExists(tableName);
2729    return modifyTable(tableName, new TableDescriptorGetter() {
2730
2731      @Override
2732      public TableDescriptor get() throws IOException {
2733        TableDescriptor old = getTableDescriptors().get(tableName);
2734        if (!old.hasColumnFamily(descriptor.getName())) {
2735          throw new InvalidFamilyOperationException("Family '" + descriptor.getNameAsString()
2736            + "' does not exist, so it cannot be modified");
2737        }
2738
2739        return TableDescriptorBuilder.newBuilder(old).modifyColumnFamily(descriptor).build();
2740      }
2741    }, nonceGroup, nonce, true);
2742  }
2743
2744  @Override
2745  public long modifyColumnStoreFileTracker(TableName tableName, byte[] family, String dstSFT,
2746    long nonceGroup, long nonce) throws IOException {
2747    checkInitialized();
2748    return MasterProcedureUtil
2749      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2750
2751        @Override
2752        protected void run() throws IOException {
2753          String sft = getMaster().getMasterCoprocessorHost()
2754            .preModifyColumnFamilyStoreFileTracker(tableName, family, dstSFT);
2755          LOG.info("{} modify column {} store file tracker of table {} to {}",
2756            getClientIdAuditPrefix(), Bytes.toStringBinary(family), tableName, sft);
2757          submitProcedure(new ModifyColumnFamilyStoreFileTrackerProcedure(
2758            procedureExecutor.getEnvironment(), tableName, family, sft));
2759          getMaster().getMasterCoprocessorHost().postModifyColumnFamilyStoreFileTracker(tableName,
2760            family, dstSFT);
2761        }
2762
2763        @Override
2764        protected String getDescription() {
2765          return "ModifyColumnFamilyStoreFileTrackerProcedure";
2766        }
2767      });
2768  }
2769
2770  @Override
2771  public long deleteColumn(final TableName tableName, final byte[] columnName,
2772    final long nonceGroup, final long nonce) throws IOException {
2773    checkInitialized();
2774    checkTableExists(tableName);
2775
2776    return modifyTable(tableName, new TableDescriptorGetter() {
2777
2778      @Override
2779      public TableDescriptor get() throws IOException {
2780        TableDescriptor old = getTableDescriptors().get(tableName);
2781
2782        if (!old.hasColumnFamily(columnName)) {
2783          throw new InvalidFamilyOperationException(
2784            "Family '" + Bytes.toString(columnName) + "' does not exist, so it cannot be deleted");
2785        }
2786        if (old.getColumnFamilyCount() == 1) {
2787          throw new InvalidFamilyOperationException("Family '" + Bytes.toString(columnName)
2788            + "' is the only column family in the table, so it cannot be deleted");
2789        }
2790        return TableDescriptorBuilder.newBuilder(old).removeColumnFamily(columnName).build();
2791      }
2792    }, nonceGroup, nonce, true);
2793  }
2794
2795  @Override
2796  public long enableTable(final TableName tableName, final long nonceGroup, final long nonce)
2797    throws IOException {
2798    checkInitialized();
2799
2800    return MasterProcedureUtil
2801      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2802        @Override
2803        protected void run() throws IOException {
2804          getMaster().getMasterCoprocessorHost().preEnableTable(tableName);
2805
2806          // Normally, it would make sense for this authorization check to exist inside
2807          // AccessController, but because the authorization check is done based on internal state
2808          // (rather than explicit permissions) we'll do the check here instead of in the
2809          // coprocessor.
2810          MasterQuotaManager quotaManager = getMasterQuotaManager();
2811          if (quotaManager != null) {
2812            if (quotaManager.isQuotaInitialized()) {
2813              // skip checking quotas for system tables, see:
2814              // https://issues.apache.org/jira/browse/HBASE-28183
2815              if (!tableName.isSystemTable()) {
2816                SpaceQuotaSnapshot currSnapshotOfTable =
2817                  QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName);
2818                if (currSnapshotOfTable != null) {
2819                  SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus();
2820                  if (
2821                    quotaStatus.isInViolation()
2822                      && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)
2823                  ) {
2824                    throw new AccessDeniedException("Enabling the table '" + tableName
2825                      + "' is disallowed due to a violated space quota.");
2826                  }
2827                }
2828              }
2829            } else if (LOG.isTraceEnabled()) {
              LOG.trace("Unable to check for space quotas as the MasterQuotaManager is not yet "
                + "initialized");
2832            }
2833          }
2834
2835          LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
2836
          // Execute the operation asynchronously - the client will check the progress of the
          // operation. In case the request is from a <1.1 client, before returning we want to
          // make sure that the table is prepared to be enabled (the table is locked and the
          // table state is set).
          // Note: if the procedure throws an exception, we will catch it and rethrow.
2842          final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch();
2843          submitProcedure(
2844            new EnableTableProcedure(procedureExecutor.getEnvironment(), tableName, prepareLatch));
2845          prepareLatch.await();
2846
2847          getMaster().getMasterCoprocessorHost().postEnableTable(tableName);
2848        }
2849
2850        @Override
2851        protected String getDescription() {
2852          return "EnableTableProcedure";
2853        }
2854      });
2855  }
2856
2857  @Override
2858  public long disableTable(final TableName tableName, final long nonceGroup, final long nonce)
2859    throws IOException {
2860    checkInitialized();
2861
2862    return MasterProcedureUtil
2863      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2864        @Override
2865        protected void run() throws IOException {
2866          getMaster().getMasterCoprocessorHost().preDisableTable(tableName);
2867
2868          LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
2869
          // Execute the operation asynchronously - the client will check the progress of the
          // operation. In case the request is from a <1.1 client, before returning we want to
          // make sure that the table is prepared to be disabled (the table is locked and the
          // table state is set).
          // Note: if the procedure throws an exception, we will catch it and rethrow.
2875          //
2876          // We need to wait for the procedure to potentially fail due to "prepare" sanity
2877          // checks. This will block only the beginning of the procedure. See HBASE-19953.
2878          final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createBlockingLatch();
2879          submitProcedure(new DisableTableProcedure(procedureExecutor.getEnvironment(), tableName,
2880            false, prepareLatch));
2881          prepareLatch.await();
2882
2883          getMaster().getMasterCoprocessorHost().postDisableTable(tableName);
2884        }
2885
2886        @Override
2887        protected String getDescription() {
2888          return "DisableTableProcedure";
2889        }
2890      });
2891  }
2892
2893  private long modifyTable(final TableName tableName,
2894    final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce,
2895    final boolean shouldCheckDescriptor) throws IOException {
2896    return modifyTable(tableName, newDescriptorGetter, nonceGroup, nonce, shouldCheckDescriptor,
2897      true);
2898  }
2899
2900  private long modifyTable(final TableName tableName,
2901    final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce,
2902    final boolean shouldCheckDescriptor, final boolean reopenRegions) throws IOException {
2903    return MasterProcedureUtil
2904      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2905        @Override
2906        protected void run() throws IOException {
2907          TableDescriptor oldDescriptor = getMaster().getTableDescriptors().get(tableName);
2908          TableDescriptor newDescriptor = getMaster().getMasterCoprocessorHost()
2909            .preModifyTable(tableName, oldDescriptor, newDescriptorGetter.get());
2910          TableDescriptorChecker.sanityCheck(conf, newDescriptor);
2911          LOG.info("{} modify table {} from {} to {}", getClientIdAuditPrefix(), tableName,
2912            oldDescriptor, newDescriptor);
2913
          // Execute the operation synchronously - wait for the operation to complete before
          // continuing.
2916          //
2917          // We need to wait for the procedure to potentially fail due to "prepare" sanity
2918          // checks. This will block only the beginning of the procedure. See HBASE-19953.
2919          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
2920          submitProcedure(new ModifyTableProcedure(procedureExecutor.getEnvironment(),
2921            newDescriptor, latch, oldDescriptor, shouldCheckDescriptor, reopenRegions));
2922          latch.await();
2923
2924          getMaster().getMasterCoprocessorHost().postModifyTable(tableName, oldDescriptor,
2925            newDescriptor);
2926        }
2927
2928        @Override
2929        protected String getDescription() {
2930          return "ModifyTableProcedure";
2931        }
2932      });
2933
2934  }
2935
2936  @Override
2937  public long modifyTable(final TableName tableName, final TableDescriptor newDescriptor,
2938    final long nonceGroup, final long nonce, final boolean reopenRegions) throws IOException {
2939    checkInitialized();
2940    return modifyTable(tableName, new TableDescriptorGetter() {
2941      @Override
2942      public TableDescriptor get() throws IOException {
2943        return newDescriptor;
2944      }
    }, nonceGroup, nonce, false, reopenRegions);
  }
2948
2949  @Override
2950  public long modifyTableStoreFileTracker(TableName tableName, String dstSFT, long nonceGroup,
2951    long nonce) throws IOException {
2952    checkInitialized();
2953    return MasterProcedureUtil
2954      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2955
2956        @Override
2957        protected void run() throws IOException {
2958          String sft = getMaster().getMasterCoprocessorHost()
2959            .preModifyTableStoreFileTracker(tableName, dstSFT);
2960          LOG.info("{} modify table store file tracker of table {} to {}", getClientIdAuditPrefix(),
2961            tableName, sft);
2962          submitProcedure(new ModifyTableStoreFileTrackerProcedure(
2963            procedureExecutor.getEnvironment(), tableName, sft));
2964          getMaster().getMasterCoprocessorHost().postModifyTableStoreFileTracker(tableName, sft);
2965        }
2966
2967        @Override
2968        protected String getDescription() {
2969          return "ModifyTableStoreFileTrackerProcedure";
2970        }
2971      });
2972  }
2973
2974  public long restoreSnapshot(final SnapshotDescription snapshotDesc, final long nonceGroup,
2975    final long nonce, final boolean restoreAcl, final String customSFT) throws IOException {
2976    checkInitialized();
2977    getSnapshotManager().checkSnapshotSupport();
2978
    // Ensure the namespace exists; this will throw an exception if the namespace is unknown.
2980    final TableName dstTable = TableName.valueOf(snapshotDesc.getTable());
2981    getClusterSchema().getNamespace(dstTable.getNamespaceAsString());
2982
2983    return MasterProcedureUtil
2984      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2985        @Override
2986        protected void run() throws IOException {
2987          setProcId(getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(),
2988            restoreAcl, customSFT));
2989        }
2990
2991        @Override
2992        protected String getDescription() {
2993          return "RestoreSnapshotProcedure";
2994        }
2995      });
2996  }
2997
2998  private void checkTableExists(final TableName tableName)
2999    throws IOException, TableNotFoundException {
3000    if (!tableDescriptors.exists(tableName)) {
3001      throw new TableNotFoundException(tableName);
3002    }
3003  }
3004
3005  @Override
3006  public void checkTableModifiable(final TableName tableName)
3007    throws IOException, TableNotFoundException, TableNotDisabledException {
3008    if (isCatalogTable(tableName)) {
3009      throw new IOException("Can't modify catalog tables");
3010    }
3011    checkTableExists(tableName);
3012    TableState ts = getTableStateManager().getTableState(tableName);
3013    if (!ts.isDisabled()) {
3014      throw new TableNotDisabledException("Not DISABLED; " + ts);
3015    }
3016  }
3017
3018  public void reloadRegionServerQuotas() {
3019    // multiple reloads are harmless, so no need for NonceProcedureRunnable
3020    getLiveRegionServers()
3021      .forEach(sn -> procedureExecutor.submitProcedure(new ReloadQuotasProcedure(sn)));
3022  }
3023
3024  public ClusterMetrics getClusterMetricsWithoutCoprocessor() throws InterruptedIOException {
3025    return getClusterMetricsWithoutCoprocessor(EnumSet.allOf(Option.class));
3026  }
3027
3028  public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet<Option> options)
3029    throws InterruptedIOException {
3030    ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder();
    // Given that HBase 1.x clients cannot submit the request with Options,
    // we return all information to the client if the set of Options is empty.
3033    if (options.isEmpty()) {
3034      options = EnumSet.allOf(Option.class);
3035    }
3036
3037    // TASKS and/or LIVE_SERVERS will populate this map, which will be given to the builder if
3038    // not null after option processing completes.
3039    Map<ServerName, ServerMetrics> serverMetricsMap = null;
3040
3041    for (Option opt : options) {
3042      switch (opt) {
3043        case HBASE_VERSION:
3044          builder.setHBaseVersion(VersionInfo.getVersion());
3045          break;
3046        case CLUSTER_ID:
3047          builder.setClusterId(getClusterId());
3048          break;
3049        case MASTER:
3050          builder.setMasterName(getServerName());
3051          break;
3052        case BACKUP_MASTERS:
3053          builder.setBackerMasterNames(getBackupMasters());
3054          break;
3055        case TASKS: {
3056          // Master tasks
3057          builder.setMasterTasks(TaskMonitor.get().getTasks().stream()
3058            .map(task -> ServerTaskBuilder.newBuilder().setDescription(task.getDescription())
3059              .setStatus(task.getStatus())
3060              .setState(ServerTask.State.valueOf(task.getState().name()))
3061              .setStartTime(task.getStartTime()).setCompletionTime(task.getCompletionTimestamp())
3062              .build())
3063            .collect(Collectors.toList()));
3064          // TASKS is also synonymous with LIVE_SERVERS for now because task information for
3065          // regionservers is carried in ServerLoad.
3066          // Add entries to serverMetricsMap for all live servers, if we haven't already done so
3067          if (serverMetricsMap == null) {
3068            serverMetricsMap = getOnlineServers();
3069          }
3070          break;
3071        }
3072        case LIVE_SERVERS: {
3073          // Add entries to serverMetricsMap for all live servers, if we haven't already done so
3074          if (serverMetricsMap == null) {
3075            serverMetricsMap = getOnlineServers();
3076          }
3077          break;
3078        }
3079        case DEAD_SERVERS: {
3080          if (serverManager != null) {
3081            builder.setDeadServerNames(
3082              new ArrayList<>(serverManager.getDeadServers().copyServerNames()));
3083          }
3084          break;
3085        }
3086        case UNKNOWN_SERVERS: {
3087          if (serverManager != null) {
3088            builder.setUnknownServerNames(getUnknownServers());
3089          }
3090          break;
3091        }
3092        case MASTER_COPROCESSORS: {
3093          if (cpHost != null) {
3094            builder.setMasterCoprocessorNames(Arrays.asList(getMasterCoprocessors()));
3095          }
3096          break;
3097        }
3098        case REGIONS_IN_TRANSITION: {
3099          if (assignmentManager != null) {
3100            builder.setRegionsInTransition(
3101              new ArrayList<>(assignmentManager.getRegionsStateInTransition()));
3102          }
3103          break;
3104        }
3105        case BALANCER_ON: {
3106          if (loadBalancerStateStore != null) {
3107            builder.setBalancerOn(loadBalancerStateStore.get());
3108          }
3109          break;
3110        }
3111        case MASTER_INFO_PORT: {
3112          if (infoServer != null) {
3113            builder.setMasterInfoPort(infoServer.getPort());
3114          }
3115          break;
3116        }
3117        case SERVERS_NAME: {
3118          if (serverManager != null) {
3119            builder.setServerNames(serverManager.getOnlineServersList());
3120          }
3121          break;
3122        }
3123        case TABLE_TO_REGIONS_COUNT: {
3124          if (isActiveMaster() && isInitialized() && assignmentManager != null) {
3125            try {
3126              Map<TableName, RegionStatesCount> tableRegionStatesCountMap = new HashMap<>();
3127              Map<String, TableDescriptor> tableDescriptorMap = getTableDescriptors().getAll();
3128              for (TableDescriptor tableDescriptor : tableDescriptorMap.values()) {
3129                TableName tableName = tableDescriptor.getTableName();
3130                RegionStatesCount regionStatesCount =
3131                  assignmentManager.getRegionStatesCount(tableName);
3132                tableRegionStatesCountMap.put(tableName, regionStatesCount);
3133              }
3134              builder.setTableRegionStatesCount(tableRegionStatesCountMap);
3135            } catch (IOException e) {
3136              LOG.error("Error while populating TABLE_TO_REGIONS_COUNT for Cluster Metrics..", e);
3137            }
3138          }
3139          break;
3140        }
3141        case DECOMMISSIONED_SERVERS: {
3142          if (serverManager != null) {
3143            builder.setDecommissionedServerNames(serverManager.getDrainingServersList());
3144          }
3145          break;
3146        }
3147      }
3148    }
3149
3150    if (serverMetricsMap != null) {
3151      builder.setLiveServerMetrics(serverMetricsMap);
3152    }
3153
3154    return builder.build();
3155  }
3156
3157  private List<ServerName> getUnknownServers() {
3158    if (serverManager != null) {
3159      final Set<ServerName> serverNames = getAssignmentManager().getRegionStates().getRegionStates()
3160        .stream().map(RegionState::getServerName).collect(Collectors.toSet());
3161      final List<ServerName> unknownServerNames = serverNames.stream()
3162        .filter(sn -> sn != null && serverManager.isServerUnknown(sn)).collect(Collectors.toList());
3163      return unknownServerNames;
3164    }
3165    return null;
3166  }
3167
  private Map<ServerName, ServerMetrics> getOnlineServers() {
    if (serverManager != null) {
      return new HashMap<>(serverManager.getOnlineServers());
    }
    return null;
  }
3176
3177  /** Returns cluster status */
3178  public ClusterMetrics getClusterMetrics() throws IOException {
3179    return getClusterMetrics(EnumSet.allOf(Option.class));
3180  }
3181
3182  public ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException {
3183    if (cpHost != null) {
3184      cpHost.preGetClusterMetrics();
3185    }
3186    ClusterMetrics status = getClusterMetricsWithoutCoprocessor(options);
3187    if (cpHost != null) {
3188      cpHost.postGetClusterMetrics(status);
3189    }
3190    return status;
3191  }
3192
3193  /** Returns info port of active master or 0 if any exception occurs. */
3194  public int getActiveMasterInfoPort() {
3195    return activeMasterManager.getActiveMasterInfoPort();
3196  }
3197
3198  /**
3199   * @param sn is ServerName of the backup master
3200   * @return info port of backup master or 0 if any exception occurs.
3201   */
3202  public int getBackupMasterInfoPort(final ServerName sn) {
3203    return activeMasterManager.getBackupMasterInfoPort(sn);
3204  }
3205
3206  /**
3207   * The set of loaded coprocessors is stored in a static set. Since it's statically allocated, it
3208   * does not require that HMaster's cpHost be initialized prior to accessing it.
3209   * @return a String representation of the set of names of the loaded coprocessors.
3210   */
3211  public static String getLoadedCoprocessors() {
3212    return CoprocessorHost.getLoadedCoprocessors().toString();
3213  }
3214
3215  /** Returns timestamp in millis when HMaster was started. */
3216  public long getMasterStartTime() {
3217    return startcode;
3218  }
3219
3220  /** Returns timestamp in millis when HMaster became the active master. */
3221  @Override
3222  public long getMasterActiveTime() {
3223    return masterActiveTime;
3224  }
3225
3226  /** Returns timestamp in millis when HMaster finished becoming the active master */
3227  public long getMasterFinishedInitializationTime() {
3228    return masterFinishedInitializationTime;
3229  }
3230
3231  public int getNumWALFiles() {
3232    return 0;
3233  }
3234
3235  public ProcedureStore getProcedureStore() {
3236    return procedureStore;
3237  }
3238
3239  public int getRegionServerInfoPort(final ServerName sn) {
3240    int port = this.serverManager.getInfoPort(sn);
3241    return port == 0
3242      ? conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT)
3243      : port;
3244  }
3245
3246  @Override
3247  public String getRegionServerVersion(ServerName sn) {
3248    // Will return "0.0.0" if the server is not online to prevent move system region to unknown
3249    // version RS.
3250    return this.serverManager.getVersion(sn);
3251  }
3252
3253  @Override
3254  public void checkIfShouldMoveSystemRegionAsync() {
3255    assignmentManager.checkIfShouldMoveSystemRegionAsync();
3256  }
3257
3258  /** Returns array of coprocessor SimpleNames. */
3259  public String[] getMasterCoprocessors() {
3260    Set<String> masterCoprocessors = getMasterCoprocessorHost().getCoprocessors();
    return masterCoprocessors.toArray(new String[0]);
3262  }
3263
3264  @Override
3265  public void abort(String reason, Throwable cause) {
3266    if (!setAbortRequested() || isStopped()) {
3267      LOG.debug("Abort called but aborted={}, stopped={}", isAborted(), isStopped());
3268      return;
3269    }
3270    if (cpHost != null) {
3271      // HBASE-4014: dump a list of loaded coprocessors.
3272      LOG.error(HBaseMarkers.FATAL,
3273        "Master server abort: loaded coprocessors are: " + getLoadedCoprocessors());
3274    }
3275    String msg = "***** ABORTING master " + this + ": " + reason + " *****";
3276    if (cause != null) {
3277      LOG.error(HBaseMarkers.FATAL, msg, cause);
3278    } else {
3279      LOG.error(HBaseMarkers.FATAL, msg);
3280    }
3281
3282    try {
3283      stopMaster();
3284    } catch (IOException e) {
3285      LOG.error("Exception occurred while stopping master", e);
3286    }
3287  }
3288
3289  @Override
3290  public MasterCoprocessorHost getMasterCoprocessorHost() {
3291    return cpHost;
3292  }
3293
3294  @Override
3295  public MasterQuotaManager getMasterQuotaManager() {
3296    return quotaManager;
3297  }
3298
3299  @Override
3300  public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
3301    return procedureExecutor;
3302  }
3303
3304  @Override
3305  public ServerName getServerName() {
3306    return this.serverName;
3307  }
3308
3309  @Override
3310  public AssignmentManager getAssignmentManager() {
3311    return this.assignmentManager;
3312  }
3313
3314  @Override
3315  public CatalogJanitor getCatalogJanitor() {
3316    return this.catalogJanitorChore;
3317  }
3318
3319  public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
3320    return rsFatals;
3321  }
3322
3323  public TaskGroup getStartupProgress() {
3324    return startupTaskGroup;
3325  }
3326
3327  /**
3328   * Shutdown the cluster. Master runs a coordinated stop of all RegionServers and then itself.
3329   */
3330  public void shutdown() throws IOException {
3331    TraceUtil.trace(() -> {
3332      if (cpHost != null) {
3333        cpHost.preShutdown();
3334      }
3335
3336      // Tell the servermanager cluster shutdown has been called. This makes it so when Master is
3337      // last running server, it'll stop itself. Next, we broadcast the cluster shutdown by setting
3338      // the cluster status as down. RegionServers will notice this change in state and will start
3339      // shutting themselves down. When last has exited, Master can go down.
3340      if (this.serverManager != null) {
3341        this.serverManager.shutdownCluster();
3342      }
3343      if (this.clusterStatusTracker != null) {
3344        try {
3345          this.clusterStatusTracker.setClusterDown();
3346        } catch (KeeperException e) {
3347          LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
3348        }
3349      }
3350      // Stop the procedure executor. Will stop any ongoing assign, unassign, server crash etc.,
3351      // processing so we can go down.
3352      if (this.procedureExecutor != null) {
3353        this.procedureExecutor.stop();
3354      }
3355      // Shutdown our cluster connection. This will kill any hosted RPCs that might be going on;
3356      // this is what we want especially if the Master is in startup phase doing call outs to
      // hbase:meta, etc. when cluster is down. Without this connection close, we'd have to wait on
3358      // the rpc to timeout.
3359      if (this.asyncClusterConnection != null) {
3360        this.asyncClusterConnection.close();
3361      }
3362    }, "HMaster.shutdown");
3363  }
3364
3365  public void stopMaster() throws IOException {
3366    if (cpHost != null) {
3367      cpHost.preStopMaster();
3368    }
3369    stop("Stopped by " + Thread.currentThread().getName());
3370  }
3371
3372  @Override
3373  public void stop(String msg) {
3374    if (!this.stopped) {
3375      LOG.info("***** STOPPING master '" + this + "' *****");
3376      this.stopped = true;
3377      LOG.info("STOPPED: " + msg);
3378      // Wakes run() if it is sleeping
3379      sleeper.skipSleepCycle();
3380      if (this.activeMasterManager != null) {
3381        this.activeMasterManager.stop();
3382      }
3383    }
3384  }
3385
3386  protected void checkServiceStarted() throws ServerNotRunningYetException {
3387    if (!serviceStarted) {
3388      throw new ServerNotRunningYetException("Server is not running yet");
3389    }
3390  }
3391
3392  void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException,
3393    MasterNotRunningException, MasterStoppedException {
3394    checkServiceStarted();
3395    if (!isInitialized()) {
3396      throw new PleaseHoldException("Master is initializing");
3397    }
3398    if (isStopped()) {
3399      throw new MasterStoppedException();
3400    }
3401  }
3402
3403  /**
3404   * Report whether this master is currently the active master or not. If not active master, we are
3405   * parked on ZK waiting to become active. This method is used for testing.
3406   * @return true if active master, false if not.
3407   */
3408  @Override
3409  public boolean isActiveMaster() {
3410    return activeMaster;
3411  }
3412
3413  /**
   * Report whether this master has completed its initialization and is ready. If ready, the
3415   * master is also the active master. A standby master is never ready. This method is used for
3416   * testing.
3417   * @return true if master is ready to go, false if not.
3418   */
3419  @Override
3420  public boolean isInitialized() {
3421    return initialized.isReady();
3422  }
3423
3424  /**
3425   * Report whether this master is started This method is used for testing.
3426   * @return true if master is ready to go, false if not.
3427   */
3428  public boolean isOnline() {
3429    return serviceStarted;
3430  }
3431
3432  /**
3433   * Report whether this master is in maintenance mode.
3434   * @return true if master is in maintenanceMode
3435   */
3436  @Override
3437  public boolean isInMaintenanceMode() {
3438    return maintenanceMode;
3439  }
3440
3441  public void setInitialized(boolean isInitialized) {
3442    procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
3443  }
3444
3445  /**
   * Mainly used in procedure related tests, where we will restart ProcedureExecutor and
   * AssignmentManager, but we do not want to restart the master (to speed up the test), so we need
   * to disable rpc for a while; otherwise some critical rpc requests such as
   * reportRegionStateTransition could fail and cause the region server to abort.
3450   */
3451  @RestrictedApi(explanation = "Should only be called in tests", link = "",
3452      allowedOnPath = ".*/src/test/.*")
3453  public void setServiceStarted(boolean started) {
3454    this.serviceStarted = started;
3455  }
3456
3457  @Override
3458  public ProcedureEvent<?> getInitializedEvent() {
3459    return initialized;
3460  }
3461
3462  /**
3463   * Compute the average load across all region servers. Currently, this uses a very naive
3464   * computation - just uses the number of regions being served, ignoring stats about number of
3465   * requests.
3466   * @return the average load
3467   */
3468  public double getAverageLoad() {
3469    if (this.assignmentManager == null) {
3470      return 0;
3471    }
3472
3473    RegionStates regionStates = this.assignmentManager.getRegionStates();
3474    if (regionStates == null) {
3475      return 0;
3476    }
3477    return regionStates.getAverageLoad();
3478  }
3479
3480  @Override
3481  public boolean registerService(Service instance) {
3482    /*
3483     * No stacking of instances is allowed for a single service name
3484     */
3485    Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType();
3486    String serviceName = CoprocessorRpcUtils.getServiceName(serviceDesc);
3487    if (coprocessorServiceHandlers.containsKey(serviceName)) {
3488      LOG.error("Coprocessor service " + serviceName
3489        + " already registered, rejecting request from " + instance);
3490      return false;
3491    }
3492
3493    coprocessorServiceHandlers.put(serviceName, instance);
3494    if (LOG.isDebugEnabled()) {
3495      LOG.debug("Registered master coprocessor service: service=" + serviceName);
3496    }
3497    return true;
3498  }
3499
3500  /**
3501   * Utility for constructing an instance of the passed HMaster class.
3502   * @return HMaster instance.
3503   */
3504  public static HMaster constructMaster(Class<? extends HMaster> masterClass,
3505    final Configuration conf) {
3506    try {
3507      Constructor<? extends HMaster> c = masterClass.getConstructor(Configuration.class);
3508      return c.newInstance(conf);
3509    } catch (Exception e) {
3510      Throwable error = e;
3511      if (
3512        e instanceof InvocationTargetException
3513          && ((InvocationTargetException) e).getTargetException() != null
3514      ) {
3515        error = ((InvocationTargetException) e).getTargetException();
3516      }
3517      throw new RuntimeException("Failed construction of Master: " + masterClass.toString() + ". ",
3518        error);
3519    }
3520  }
3521
3522  /**
3523   * @see org.apache.hadoop.hbase.master.HMasterCommandLine
3524   */
3525  public static void main(String[] args) {
3526    LOG.info("STARTING service " + HMaster.class.getSimpleName());
3527    VersionInfo.logVersion();
3528    new HMasterCommandLine(HMaster.class).doMain(args);
3529  }
3530
3531  public HFileCleaner getHFileCleaner() {
3532    return this.hfileCleaners.get(0);
3533  }
3534
3535  public List<HFileCleaner> getHFileCleaners() {
3536    return this.hfileCleaners;
3537  }
3538
3539  public LogCleaner getLogCleaner() {
3540    return this.logCleaner;
3541  }
3542
3543  /** Returns the underlying snapshot manager */
3544  @Override
3545  public SnapshotManager getSnapshotManager() {
3546    return this.snapshotManager;
3547  }
3548
3549  /** Returns the underlying MasterProcedureManagerHost */
3550  @Override
3551  public MasterProcedureManagerHost getMasterProcedureManagerHost() {
3552    return mpmHost;
3553  }
3554
3555  @Override
3556  public ClusterSchema getClusterSchema() {
3557    return this.clusterSchemaService;
3558  }
3559
3560  /**
3561   * Create a new Namespace.
3562   * @param namespaceDescriptor descriptor for new Namespace
3563   * @param nonceGroup          Identifier for the source of the request, a client or process.
3564   * @param nonce               A unique identifier for this operation from the client or process
3565   *                            identified by <code>nonceGroup</code> (the source must ensure each
3566   *                            operation gets a unique id).
3567   * @return procedure id
3568   */
3569  long createNamespace(final NamespaceDescriptor namespaceDescriptor, final long nonceGroup,
3570    final long nonce) throws IOException {
3571    checkInitialized();
3572
3573    TableName.isLegalNamespaceName(Bytes.toBytes(namespaceDescriptor.getName()));
3574
3575    return MasterProcedureUtil
3576      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
3577        @Override
3578        protected void run() throws IOException {
3579          getMaster().getMasterCoprocessorHost().preCreateNamespace(namespaceDescriptor);
3580          // We need to wait for the procedure to potentially fail due to "prepare" sanity
3581          // checks. This will block only the beginning of the procedure. See HBASE-19953.
3582          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
3583          LOG.info(getClientIdAuditPrefix() + " creating " + namespaceDescriptor);
3584          // Execute the operation synchronously - wait for the operation to complete before
3585          // continuing.
3586          setProcId(getClusterSchema().createNamespace(namespaceDescriptor, getNonceKey(), latch));
3587          latch.await();
3588          getMaster().getMasterCoprocessorHost().postCreateNamespace(namespaceDescriptor);
3589        }
3590
3591        @Override
3592        protected String getDescription() {
3593          return "CreateNamespaceProcedure";
3594        }
3595      });
3596  }
3597
3598  /**
3599   * Modify an existing Namespace.
3600   * @param nonceGroup Identifier for the source of the request, a client or process.
3601   * @param nonce      A unique identifier for this operation from the client or process identified
3602   *                   by <code>nonceGroup</code> (the source must ensure each operation gets a
3603   *                   unique id).
3604   * @return procedure id
3605   */
3606  long modifyNamespace(final NamespaceDescriptor newNsDescriptor, final long nonceGroup,
3607    final long nonce) throws IOException {
3608    checkInitialized();
3609
3610    TableName.isLegalNamespaceName(Bytes.toBytes(newNsDescriptor.getName()));
3611
3612    return MasterProcedureUtil
3613      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
3614        @Override
3615        protected void run() throws IOException {
3616          NamespaceDescriptor oldNsDescriptor = getNamespace(newNsDescriptor.getName());
3617          getMaster().getMasterCoprocessorHost().preModifyNamespace(oldNsDescriptor,
3618            newNsDescriptor);
3619          // We need to wait for the procedure to potentially fail due to "prepare" sanity
3620          // checks. This will block only the beginning of the procedure. See HBASE-19953.
3621          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
3622          LOG.info(getClientIdAuditPrefix() + " modify " + newNsDescriptor);
3623          // Execute the operation synchronously - wait for the operation to complete before
3624          // continuing.
3625          setProcId(getClusterSchema().modifyNamespace(newNsDescriptor, getNonceKey(), latch));
3626          latch.await();
3627          getMaster().getMasterCoprocessorHost().postModifyNamespace(oldNsDescriptor,
3628            newNsDescriptor);
3629        }
3630
3631        @Override
3632        protected String getDescription() {
3633          return "ModifyNamespaceProcedure";
3634        }
3635      });
3636  }
3637
3638  /**
3639   * Delete an existing Namespace. Only empty Namespaces (no tables) can be removed.
3640   * @param nonceGroup Identifier for the source of the request, a client or process.
3641   * @param nonce      A unique identifier for this operation from the client or process identified
3642   *                   by <code>nonceGroup</code> (the source must ensure each operation gets a
3643   *                   unique id).
3644   * @return procedure id
3645   */
3646  long deleteNamespace(final String name, final long nonceGroup, final long nonce)
3647    throws IOException {
3648    checkInitialized();
3649
3650    return MasterProcedureUtil
3651      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
3652        @Override
3653        protected void run() throws IOException {
3654          getMaster().getMasterCoprocessorHost().preDeleteNamespace(name);
3655          LOG.info(getClientIdAuditPrefix() + " delete " + name);
3656          // Execute the operation synchronously - wait for the operation to complete before
3657          // continuing.
3658          //
3659          // We need to wait for the procedure to potentially fail due to "prepare" sanity
3660          // checks. This will block only the beginning of the procedure. See HBASE-19953.
3661          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
3662          setProcId(submitProcedure(
3663            new DeleteNamespaceProcedure(procedureExecutor.getEnvironment(), name, latch)));
3664          latch.await();
          // This will not be invoked if the Procedure's execution throws an Exception.
3666          getMaster().getMasterCoprocessorHost().postDeleteNamespace(name);
3667        }
3668
3669        @Override
3670        protected String getDescription() {
3671          return "DeleteNamespaceProcedure";
3672        }
3673      });
3674  }
3675
3676  /**
3677   * Get a Namespace
3678   * @param name Name of the Namespace
3679   * @return Namespace descriptor for <code>name</code>
3680   */
3681  NamespaceDescriptor getNamespace(String name) throws IOException {
3682    checkInitialized();
    if (this.cpHost != null) {
      this.cpHost.preGetNamespaceDescriptor(name);
    }
    NamespaceDescriptor nsd = this.clusterSchemaService.getNamespace(name);
    if (this.cpHost != null) {
      this.cpHost.postGetNamespaceDescriptor(nsd);
    }
3686    return nsd;
3687  }
3688
3689  /**
3690   * Get all Namespaces
3691   * @return All Namespace descriptors
3692   */
3693  List<NamespaceDescriptor> getNamespaces() throws IOException {
3694    checkInitialized();
3695    final List<NamespaceDescriptor> nsds = new ArrayList<>();
3696    if (cpHost != null) {
3697      cpHost.preListNamespaceDescriptors(nsds);
3698    }
3699    nsds.addAll(this.clusterSchemaService.getNamespaces());
3700    if (this.cpHost != null) {
3701      this.cpHost.postListNamespaceDescriptors(nsds);
3702    }
3703    return nsds;
3704  }
3705
3706  /**
3707   * List namespace names
3708   * @return All namespace names
3709   */
3710  public List<String> listNamespaces() throws IOException {
3711    checkInitialized();
3712    List<String> namespaces = new ArrayList<>();
3713    if (cpHost != null) {
3714      cpHost.preListNamespaces(namespaces);
3715    }
3716    for (NamespaceDescriptor namespace : clusterSchemaService.getNamespaces()) {
3717      namespaces.add(namespace.getName());
3718    }
3719    if (cpHost != null) {
3720      cpHost.postListNamespaces(namespaces);
3721    }
3722    return namespaces;
3723  }
3724
3725  @Override
3726  public List<TableName> listTableNamesByNamespace(String name) throws IOException {
3727    checkInitialized();
3728    return listTableNames(name, null, true);
3729  }
3730
3731  @Override
3732  public List<TableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
3733    checkInitialized();
3734    return listTableDescriptors(name, null, null, true);
3735  }
3736
3737  @Override
3738  public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
3739    throws IOException {
3740    if (cpHost != null) {
3741      cpHost.preAbortProcedure(this.procedureExecutor, procId);
3742    }
3743
3744    final boolean result = this.procedureExecutor.abort(procId, mayInterruptIfRunning);
3745
3746    if (cpHost != null) {
3747      cpHost.postAbortProcedure();
3748    }
3749
3750    return result;
3751  }
3752
3753  @Override
3754  public List<Procedure<?>> getProcedures() throws IOException {
3755    if (cpHost != null) {
3756      cpHost.preGetProcedures();
3757    }
3758
3759    @SuppressWarnings({ "unchecked", "rawtypes" })
3760    List<Procedure<?>> procList = (List) this.procedureExecutor.getProcedures();
3761
3762    if (cpHost != null) {
3763      cpHost.postGetProcedures(procList);
3764    }
3765
3766    return procList;
3767  }
3768
3769  @Override
3770  public List<LockedResource> getLocks() throws IOException {
3771    if (cpHost != null) {
3772      cpHost.preGetLocks();
3773    }
3774
3775    MasterProcedureScheduler procedureScheduler =
3776      procedureExecutor.getEnvironment().getProcedureScheduler();
3777
3778    final List<LockedResource> lockedResources = procedureScheduler.getLocks();
3779
3780    if (cpHost != null) {
3781      cpHost.postGetLocks(lockedResources);
3782    }
3783
3784    return lockedResources;
3785  }
3786
3787  /**
3788   * Returns the list of table descriptors that match the specified request
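   * <p>
   * For example (illustrative values), to fetch the descriptors of all user-space tables in a
   * namespace whose names end in {@code _audit}:
   *
   * <pre>{@code
   * List<TableDescriptor> audited =
   *   master.listTableDescriptors("prod", ".*_audit", null, false);
   * }</pre>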
3789   * @param namespace        the namespace to query, or null if querying for all
3790   * @param regex            The regular expression to match against, or null if querying for all
3791   * @param tableNameList    the list of table names, or null if querying for all
3792   * @param includeSysTables False to match only against userspace tables
3793   * @return the list of table descriptors
3794   */
3795  public List<TableDescriptor> listTableDescriptors(final String namespace, final String regex,
3796    final List<TableName> tableNameList, final boolean includeSysTables) throws IOException {
3797    List<TableDescriptor> htds = new ArrayList<>();
3798    if (cpHost != null) {
3799      cpHost.preGetTableDescriptors(tableNameList, htds, regex);
3800    }
3801    htds = getTableDescriptors(htds, namespace, regex, tableNameList, includeSysTables);
3802    if (cpHost != null) {
3803      cpHost.postGetTableDescriptors(tableNameList, htds, regex);
3804    }
3805    return htds;
3806  }
3807
3808  /**
3809   * Returns the list of table names that match the specified request
   * @param namespace        the namespace to query, or null if querying for all
   * @param regex            The regular expression to match against, or null if querying for all
3812   * @param includeSysTables False to match only against userspace tables
3813   * @return the list of table names
3814   */
3815  public List<TableName> listTableNames(final String namespace, final String regex,
3816    final boolean includeSysTables) throws IOException {
3817    List<TableDescriptor> htds = new ArrayList<>();
3818    if (cpHost != null) {
3819      cpHost.preGetTableNames(htds, regex);
3820    }
3821    htds = getTableDescriptors(htds, namespace, regex, null, includeSysTables);
3822    if (cpHost != null) {
3823      cpHost.postGetTableNames(htds, regex);
3824    }
3825    List<TableName> result = new ArrayList<>(htds.size());
    for (TableDescriptor htd : htds) {
      result.add(htd.getTableName());
    }
3828    return result;
3829  }
3830
3831  /**
   * Return a list of table descriptors after applying any provided filter parameters. Note that
   * the user-facing description of this filter logic is presented in the class-level javadoc of
   * {@link NormalizeTableFilterParams}.
3835   */
3836  private List<TableDescriptor> getTableDescriptors(final List<TableDescriptor> htds,
3837    final String namespace, final String regex, final List<TableName> tableNameList,
3838    final boolean includeSysTables) throws IOException {
3839    if (tableNameList == null || tableNameList.isEmpty()) {
3840      // request for all TableDescriptors
3841      Collection<TableDescriptor> allHtds;
3842      if (namespace != null && namespace.length() > 0) {
3843        // Do a check on the namespace existence. Will fail if does not exist.
3844        this.clusterSchemaService.getNamespace(namespace);
3845        allHtds = tableDescriptors.getByNamespace(namespace).values();
3846      } else {
3847        allHtds = tableDescriptors.getAll().values();
3848      }
3849      for (TableDescriptor desc : allHtds) {
3850        if (
3851          tableStateManager.isTablePresent(desc.getTableName())
3852            && (includeSysTables || !desc.getTableName().isSystemTable())
3853        ) {
3854          htds.add(desc);
3855        }
3856      }
3857    } else {
3858      for (TableName s : tableNameList) {
3859        if (tableStateManager.isTablePresent(s)) {
3860          TableDescriptor desc = tableDescriptors.get(s);
3861          if (desc != null) {
3862            htds.add(desc);
3863          }
3864        }
3865      }
3866    }
3867
3868    // Retains only those matched by regular expression.
    if (regex != null) {
      filterTablesByRegex(htds, Pattern.compile(regex));
    }
3870    return htds;
3871  }
3872
3873  /**
3874   * Removes the table descriptors that don't match the pattern.
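   * <p>
   * Tables in the default namespace are matched twice: against the bare table name, and against
   * the {@code default:}-qualified form. For example (hypothetical descriptors):
   *
   * <pre>{@code
   * // Retains a default-namespace table named "t1", since "default:t1" matches the pattern.
   * filterTablesByRegex(descriptors, Pattern.compile("default:t.*"));
   * }</pre>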
3875   * @param descriptors list of table descriptors to filter
3876   * @param pattern     the regex to use
3877   */
3878  private static void filterTablesByRegex(final Collection<TableDescriptor> descriptors,
3879    final Pattern pattern) {
3880    final String defaultNS = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR;
3881    Iterator<TableDescriptor> itr = descriptors.iterator();
3882    while (itr.hasNext()) {
3883      TableDescriptor htd = itr.next();
3884      String tableName = htd.getTableName().getNameAsString();
3885      boolean matched = pattern.matcher(tableName).matches();
3886      if (!matched && htd.getTableName().getNamespaceAsString().equals(defaultNS)) {
3887        matched = pattern.matcher(defaultNS + TableName.NAMESPACE_DELIM + tableName).matches();
3888      }
3889      if (!matched) {
3890        itr.remove();
3891      }
3892    }
3893  }
3894
3895  @Override
3896  public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
3897    return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
3898      .getLastMajorCompactionTimestamp(table);
3899  }
3900
3901  @Override
3902  public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
3903    return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
3904      .getLastMajorCompactionTimestamp(regionName);
3905  }
3906
3907  /**
   * Gets the mob file compaction state for a specific table. Whether all the mob files are selected
   * is only known while the compaction runs, yet the statistic is recorded just before the
   * compaction starts, when the compaction type cannot be determined; a rough statistic is
   * therefore used for mob file compactions. Only two compaction states are reported:
   * CompactionState.MAJOR_AND_MINOR and CompactionState.NONE.
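   * <p>
   * An illustrative check (hypothetical {@code master} handle):
   *
   * <pre>{@code
   * if (master.getMobCompactionState(tableName)
   *     == GetRegionInfoResponse.CompactionState.MAJOR_AND_MINOR) {
   *   // a mob file compaction is currently running for this table
   * }
   * }</pre>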
3913   * @param tableName The current table name.
   * @return Whether the given table is currently under mob file compaction.
3915   */
3916  public GetRegionInfoResponse.CompactionState getMobCompactionState(TableName tableName) {
3917    AtomicInteger compactionsCount = mobCompactionStates.get(tableName);
3918    if (compactionsCount != null && compactionsCount.get() != 0) {
3919      return GetRegionInfoResponse.CompactionState.MAJOR_AND_MINOR;
3920    }
3921    return GetRegionInfoResponse.CompactionState.NONE;
3922  }
3923
3924  public void reportMobCompactionStart(TableName tableName) throws IOException {
3925    IdLock.Entry lockEntry = null;
3926    try {
3927      lockEntry = mobCompactionLock.getLockEntry(tableName.hashCode());
3928      AtomicInteger compactionsCount = mobCompactionStates.get(tableName);
3929      if (compactionsCount == null) {
3930        compactionsCount = new AtomicInteger(0);
3931        mobCompactionStates.put(tableName, compactionsCount);
3932      }
3933      compactionsCount.incrementAndGet();
3934    } finally {
3935      if (lockEntry != null) {
3936        mobCompactionLock.releaseLockEntry(lockEntry);
3937      }
3938    }
3939  }
3940
3941  public void reportMobCompactionEnd(TableName tableName) throws IOException {
3942    IdLock.Entry lockEntry = null;
3943    try {
3944      lockEntry = mobCompactionLock.getLockEntry(tableName.hashCode());
3945      AtomicInteger compactionsCount = mobCompactionStates.get(tableName);
3946      if (compactionsCount != null) {
3947        int count = compactionsCount.decrementAndGet();
3948        // remove the entry if the count is 0.
3949        if (count == 0) {
3950          mobCompactionStates.remove(tableName);
3951        }
3952      }
3953    } finally {
3954      if (lockEntry != null) {
3955        mobCompactionLock.releaseLockEntry(lockEntry);
3956      }
3957    }
3958  }
3959
3960  /**
3961   * Queries the state of the {@link LoadBalancerStateStore}. If the balancer is not initialized,
3962   * false is returned.
3963   * @return The state of the load balancer, or false if the load balancer isn't defined.
3964   */
3965  public boolean isBalancerOn() {
3966    return !isInMaintenanceMode() && loadBalancerStateStore != null && loadBalancerStateStore.get();
3967  }
3968
3969  /**
3970   * Queries the state of the {@link RegionNormalizerStateStore}. If it's not initialized, false is
3971   * returned.
3972   */
3973  public boolean isNormalizerOn() {
3974    return !isInMaintenanceMode() && getRegionNormalizerManager().isNormalizerOn();
3975  }
3976
3977  /**
3978   * Queries the state of the {@link SplitOrMergeStateStore}. If it is not initialized, false is
   * returned. If the given switchType is invalid, false is returned as well.
3980   * @param switchType see {@link org.apache.hadoop.hbase.client.MasterSwitchType}
3981   * @return The state of the switch
3982   */
3983  @Override
3984  public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) {
3985    return !isInMaintenanceMode() && splitOrMergeStateStore != null
3986      && splitOrMergeStateStore.isSplitOrMergeEnabled(switchType);
3987  }
3988
3989  /**
3990   * Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned.
3991   * <p/>
   * Note that the base load balancer is always {@link RSGroupBasedLoadBalancer} now, so
3993   * this method will return the balancer used inside each rs group.
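   * <p>
   * A hedged configuration sketch; the class below is the shipped default balancer, so setting it
   * explicitly only documents intent:
   *
   * <pre>{@code
   * conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
   *   "org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer");
   * }</pre>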
3994   * @return The name of the {@link LoadBalancer} in use.
3995   */
3996  public String getLoadBalancerClassName() {
3997    return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
3998      LoadBalancerFactory.getDefaultLoadBalancerClass().getName());
3999  }
4000
4001  public SplitOrMergeStateStore getSplitOrMergeStateStore() {
4002    return splitOrMergeStateStore;
4003  }
4004
4005  @Override
4006  public RSGroupBasedLoadBalancer getLoadBalancer() {
4007    return balancer;
4008  }
4009
4010  @Override
4011  public FavoredNodesManager getFavoredNodesManager() {
4012    return balancer.getFavoredNodesManager();
4013  }
4014
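  /**
   * Submits a replication peer procedure and waits on its prepare latch, so that failures in the
   * "prepare" step surface to the caller before the procedure id is returned.
   */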
4015  private long executePeerProcedure(AbstractPeerProcedure<?> procedure) throws IOException {
4016    if (!isReplicationPeerModificationEnabled()) {
4017      throw new IOException("Replication peer modification disabled");
4018    }
4019    long procId = procedureExecutor.submitProcedure(procedure);
4020    procedure.getLatch().await();
4021    return procId;
4022  }
4023
4024  @Override
4025  public long addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
4026    throws ReplicationException, IOException {
4027    LOG.info(getClientIdAuditPrefix() + " creating replication peer, id=" + peerId + ", config="
4028      + peerConfig + ", state=" + (enabled ? "ENABLED" : "DISABLED"));
4029    return executePeerProcedure(new AddPeerProcedure(peerId, peerConfig, enabled));
4030  }
4031
4032  @Override
4033  public long removeReplicationPeer(String peerId) throws ReplicationException, IOException {
4034    LOG.info(getClientIdAuditPrefix() + " removing replication peer, id=" + peerId);
4035    return executePeerProcedure(new RemovePeerProcedure(peerId));
4036  }
4037
4038  @Override
4039  public long enableReplicationPeer(String peerId) throws ReplicationException, IOException {
4040    LOG.info(getClientIdAuditPrefix() + " enable replication peer, id=" + peerId);
4041    return executePeerProcedure(new EnablePeerProcedure(peerId));
4042  }
4043
4044  @Override
4045  public long disableReplicationPeer(String peerId) throws ReplicationException, IOException {
4046    LOG.info(getClientIdAuditPrefix() + " disable replication peer, id=" + peerId);
4047    return executePeerProcedure(new DisablePeerProcedure(peerId));
4048  }
4049
4050  @Override
4051  public ReplicationPeerConfig getReplicationPeerConfig(String peerId)
4052    throws ReplicationException, IOException {
4053    if (cpHost != null) {
4054      cpHost.preGetReplicationPeerConfig(peerId);
4055    }
4056    LOG.info(getClientIdAuditPrefix() + " get replication peer config, id=" + peerId);
4057    ReplicationPeerConfig peerConfig = this.replicationPeerManager.getPeerConfig(peerId)
4058      .orElseThrow(() -> new ReplicationPeerNotFoundException(peerId));
4059    if (cpHost != null) {
4060      cpHost.postGetReplicationPeerConfig(peerId);
4061    }
4062    return peerConfig;
4063  }
4064
4065  @Override
4066  public long updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
4067    throws ReplicationException, IOException {
4068    LOG.info(getClientIdAuditPrefix() + " update replication peer config, id=" + peerId
4069      + ", config=" + peerConfig);
4070    return executePeerProcedure(new UpdatePeerConfigProcedure(peerId, peerConfig));
4071  }
4072
4073  @Override
4074  public List<ReplicationPeerDescription> listReplicationPeers(String regex)
4075    throws ReplicationException, IOException {
4076    if (cpHost != null) {
4077      cpHost.preListReplicationPeers(regex);
4078    }
4079    LOG.debug("{} list replication peers, regex={}", getClientIdAuditPrefix(), regex);
4080    Pattern pattern = regex == null ? null : Pattern.compile(regex);
4081    List<ReplicationPeerDescription> peers = this.replicationPeerManager.listPeers(pattern);
4082    if (cpHost != null) {
4083      cpHost.postListReplicationPeers(regex);
4084    }
4085    return peers;
4086  }
4087
4088  @Override
4089  public long transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
4090    throws ReplicationException, IOException {
4091    LOG.info(
4092      getClientIdAuditPrefix()
4093        + " transit current cluster state to {} in a synchronous replication peer id={}",
4094      state, peerId);
4095    return executePeerProcedure(new TransitPeerSyncReplicationStateProcedure(peerId, state));
4096  }
4097
4098  @Override
4099  public boolean replicationPeerModificationSwitch(boolean on) throws IOException {
4100    return replicationPeerModificationStateStore.set(on);
4101  }
4102
4103  @Override
4104  public boolean isReplicationPeerModificationEnabled() {
4105    return replicationPeerModificationStateStore.get();
4106  }
4107
4108  /**
   * Mark region server(s) as decommissioned (previously called 'draining') to prevent additional
   * regions from getting assigned to them. Also unload the regions on the servers asynchronously.
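   * <p>
   * A minimal sketch (hypothetical {@code master} and server handles):
   *
   * <pre>{@code
   * // Decommission two servers and asynchronously move their regions elsewhere.
   * master.decommissionRegionServers(Arrays.asList(serverA, serverB), true);
   * }</pre>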
   * @param servers Region servers to decommission.
   * @param offload True to also move the regions off the decommissioned servers asynchronously.
4112   */
4113  public void decommissionRegionServers(final List<ServerName> servers, final boolean offload)
4114    throws IOException {
4115    List<ServerName> serversAdded = new ArrayList<>(servers.size());
4116    // Place the decommission marker first.
4117    String parentZnode = getZooKeeper().getZNodePaths().drainingZNode;
4118    for (ServerName server : servers) {
4119      try {
4120        String node = ZNodePaths.joinZNode(parentZnode, server.getServerName());
4121        ZKUtil.createAndFailSilent(getZooKeeper(), node);
4122      } catch (KeeperException ke) {
4123        throw new HBaseIOException(
4124          this.zooKeeper.prefix("Unable to decommission '" + server.getServerName() + "'."), ke);
4125      }
4126      if (this.serverManager.addServerToDrainList(server)) {
4127        serversAdded.add(server);
4128      }
4129    }
4130    // Move the regions off the decommissioned servers.
4131    if (offload) {
4132      final List<ServerName> destServers = this.serverManager.createDestinationServersList();
4133      for (ServerName server : serversAdded) {
4134        final List<RegionInfo> regionsOnServer = this.assignmentManager.getRegionsOnServer(server);
4135        for (RegionInfo hri : regionsOnServer) {
4136          ServerName dest = balancer.randomAssignment(hri, destServers);
4137          if (dest == null) {
4138            throw new HBaseIOException("Unable to determine a plan to move " + hri);
4139          }
4140          RegionPlan rp = new RegionPlan(hri, server, dest);
4141          this.assignmentManager.moveAsync(rp);
4142        }
4143      }
4144    }
4145  }
4146
4147  /**
   * List region servers marked as decommissioned (previously called 'draining'); such servers do
   * not get new regions assigned to them.
4150   * @return List of decommissioned servers.
4151   */
4152  public List<ServerName> listDecommissionedRegionServers() {
4153    return this.serverManager.getDrainingServersList();
4154  }
4155
4156  /**
   * Remove decommission marker (previously called 'draining') from a region server to allow region
   * assignments. Load regions onto the server asynchronously if a list of regions is given.
   * @param server             Region server to remove decommission marker from.
   * @param encodedRegionNames Regions to load onto the server, or null/empty to skip region
   *                           loading.
4160   */
4161  public void recommissionRegionServer(final ServerName server,
4162    final List<byte[]> encodedRegionNames) throws IOException {
4163    // Remove the server from decommissioned (draining) server list.
4164    String parentZnode = getZooKeeper().getZNodePaths().drainingZNode;
4165    String node = ZNodePaths.joinZNode(parentZnode, server.getServerName());
4166    try {
4167      ZKUtil.deleteNodeFailSilent(getZooKeeper(), node);
4168    } catch (KeeperException ke) {
4169      throw new HBaseIOException(
4170        this.zooKeeper.prefix("Unable to recommission '" + server.getServerName() + "'."), ke);
4171    }
4172    this.serverManager.removeServerFromDrainList(server);
4173
4174    // Load the regions onto the server if we are given a list of regions.
4175    if (encodedRegionNames == null || encodedRegionNames.isEmpty()) {
4176      return;
4177    }
4178    if (!this.serverManager.isServerOnline(server)) {
4179      return;
4180    }
4181    for (byte[] encodedRegionName : encodedRegionNames) {
4182      RegionState regionState =
4183        assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName));
4184      if (regionState == null) {
4185        LOG.warn("Unknown region " + Bytes.toStringBinary(encodedRegionName));
4186        continue;
4187      }
4188      RegionInfo hri = regionState.getRegion();
4189      if (server.equals(regionState.getServerName())) {
4190        LOG.info("Skipping move of region " + hri.getRegionNameAsString()
4191          + " because region already assigned to the same server " + server + ".");
4192        continue;
4193      }
4194      RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), server);
4195      this.assignmentManager.moveAsync(rp);
4196    }
4197  }
4198
4199  @Override
4200  public LockManager getLockManager() {
4201    return lockManager;
4202  }
4203
4204  public QuotaObserverChore getQuotaObserverChore() {
4205    return this.quotaObserverChore;
4206  }
4207
4208  public SpaceQuotaSnapshotNotifier getSpaceQuotaSnapshotNotifier() {
4209    return this.spaceQuotaSnapshotNotifier;
4210  }
4211
4212  @SuppressWarnings("unchecked")
4213  private RemoteProcedure<MasterProcedureEnv, ?> getRemoteProcedure(long procId) {
4214    Procedure<?> procedure = procedureExecutor.getProcedure(procId);
4215    if (procedure == null) {
4216      return null;
4217    }
4218    assert procedure instanceof RemoteProcedure;
4219    return (RemoteProcedure<MasterProcedureEnv, ?>) procedure;
4220  }
4221
4222  public void remoteProcedureCompleted(long procId, byte[] remoteResultData) {
4223    LOG.debug("Remote procedure done, pid={}", procId);
4224    RemoteProcedure<MasterProcedureEnv, ?> procedure = getRemoteProcedure(procId);
4225    if (procedure != null) {
4226      procedure.remoteOperationCompleted(procedureExecutor.getEnvironment(), remoteResultData);
4227    }
4228  }
4229
4230  public void remoteProcedureFailed(long procId, RemoteProcedureException error) {
4231    LOG.debug("Remote procedure failed, pid={}", procId, error);
4232    RemoteProcedure<MasterProcedureEnv, ?> procedure = getRemoteProcedure(procId);
4233    if (procedure != null) {
4234      procedure.remoteOperationFailed(procedureExecutor.getEnvironment(), error);
4235    }
4236  }
4237
4238  /**
4239   * Reopen regions provided in the argument
4240   * @param tableName   The current table name
4241   * @param regionNames The region names of the regions to reopen
4242   * @param nonceGroup  Identifier for the source of the request, a client or process
4243   * @param nonce       A unique identifier for this operation from the client or process identified
4244   *                    by <code>nonceGroup</code> (the source must ensure each operation gets a
4245   *                    unique id).
4246   * @return procedure Id
   * @throws IOException if reopening a region fails while running the procedure
4248   */
4249  long reopenRegions(final TableName tableName, final List<byte[]> regionNames,
    final long nonceGroup, final long nonce) throws IOException {
    return MasterProcedureUtil
      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
        @Override
        protected void run() throws IOException {
          submitProcedure(new ReopenTableRegionsProcedure(tableName, regionNames));
        }

        @Override
        protected String getDescription() {
          return "ReopenTableRegionsProcedure";
        }
      });
  }
4268
4269  /**
4270   * Reopen regions provided in the argument. Applies throttling to the procedure to avoid
4271   * overwhelming the system. This is used by the reopenTableRegions methods in the Admin API via
4272   * HMaster.
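   * <p>
   * A minimal sketch (hypothetical handles; an empty region list means "all regions of the
   * table"):
   *
   * <pre>{@code
   * long procId =
   *   master.reopenRegionsThrottled(tableName, Collections.emptyList(), nonceGroup, nonce);
   * }</pre>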
4273   * @param tableName   The current table name
4274   * @param regionNames The region names of the regions to reopen
4275   * @param nonceGroup  Identifier for the source of the request, a client or process
4276   * @param nonce       A unique identifier for this operation from the client or process identified
4277   *                    by <code>nonceGroup</code> (the source must ensure each operation gets a
4278   *                    unique id).
4279   * @return procedure Id
   * @throws IOException if reopening a region fails while running the procedure
4281   */
4282  long reopenRegionsThrottled(final TableName tableName, final List<byte[]> regionNames,
    final long nonceGroup, final long nonce) throws IOException {
    checkInitialized();
4286
4287    if (!tableStateManager.isTablePresent(tableName)) {
4288      throw new TableNotFoundException(tableName);
4289    }
4290
4291    return MasterProcedureUtil
4292      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
4293        @Override
4294        protected void run() throws IOException {
4295          ReopenTableRegionsProcedure proc;
4296          if (regionNames.isEmpty()) {
4297            proc = ReopenTableRegionsProcedure.throttled(getConfiguration(),
4298              getTableDescriptors().get(tableName));
4299          } else {
4300            proc = ReopenTableRegionsProcedure.throttled(getConfiguration(),
4301              getTableDescriptors().get(tableName), regionNames);
4302          }
4303
4304          LOG.info("{} throttled reopening {} regions for table {}", getClientIdAuditPrefix(),
4305            regionNames.isEmpty() ? "all" : regionNames.size(), tableName);
4306
4307          submitProcedure(proc);
4308        }
4309
4310        @Override
4311        protected String getDescription() {
4312          return "Throttled ReopenTableRegionsProcedure for " + tableName;
4313        }
4314      });
4315  }
4316
4317  @Override
4318  public ReplicationPeerManager getReplicationPeerManager() {
4319    return replicationPeerManager;
4320  }
4321
4322  @Override
4323  public ReplicationLogCleanerBarrier getReplicationLogCleanerBarrier() {
4324    return replicationLogCleanerBarrier;
4325  }
4326
4327  @Override
4328  public Semaphore getSyncReplicationPeerLock() {
4329    return syncReplicationPeerLock;
4330  }
4331
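  /**
   * Builds a map from replication peer id to the (server name, replication load source) pairs
   * reported by the given servers for that peer, with each list sorted by descending replication
   * lag. Returns null if the peer list cannot be obtained.
   */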
4332  public HashMap<String, List<Pair<ServerName, ReplicationLoadSource>>>
4333    getReplicationLoad(ServerName[] serverNames) {
4334    List<ReplicationPeerDescription> peerList = this.getReplicationPeerManager().listPeers(null);
4335    if (peerList == null) {
4336      return null;
4337    }
4338    HashMap<String, List<Pair<ServerName, ReplicationLoadSource>>> replicationLoadSourceMap =
4339      new HashMap<>(peerList.size());
4340    peerList.stream()
4341      .forEach(peer -> replicationLoadSourceMap.put(peer.getPeerId(), new ArrayList<>()));
4342    for (ServerName serverName : serverNames) {
4343      List<ReplicationLoadSource> replicationLoadSources =
4344        getServerManager().getLoad(serverName).getReplicationLoadSourceList();
4345      for (ReplicationLoadSource replicationLoadSource : replicationLoadSources) {
4346        List<Pair<ServerName, ReplicationLoadSource>> replicationLoadSourceList =
4347          replicationLoadSourceMap.get(replicationLoadSource.getPeerID());
4348        if (replicationLoadSourceList == null) {
4349          LOG.debug("{} does not exist, but it exists "
4350            + "in znode(/hbase/replication/rs). when the rs restarts, peerId is deleted, so "
4351            + "we just need to ignore it", replicationLoadSource.getPeerID());
4352          continue;
4353        }
4354        replicationLoadSourceList.add(new Pair<>(serverName, replicationLoadSource));
4355      }
4356    }
4357    for (List<Pair<ServerName, ReplicationLoadSource>> loads : replicationLoadSourceMap.values()) {
4358      if (loads.size() > 0) {
        // Sort by replication lag, largest lag first.
        loads.sort(Comparator.comparingLong(load -> -load.getSecond().getReplicationLag()));
4360      }
4361    }
4362    return replicationLoadSourceMap;
4363  }
4364
4365  /**
   * This method modifies the master's configuration in order to inject replication-related
   * features.
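   * <p>
   * An illustrative use, e.g. from a test or an embedding process:
   *
   * <pre>{@code
   * Configuration conf = HBaseConfiguration.create();
   * HMaster.decorateMasterConfiguration(conf);
   * // conf now lists ReplicationLogCleaner among the log cleaner plugins
   * }</pre>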
4367   */
4368  @InterfaceAudience.Private
4369  public static void decorateMasterConfiguration(Configuration conf) {
    String plugins = conf.get(HBASE_MASTER_LOGCLEANER_PLUGINS);
    String cleanerClass = ReplicationLogCleaner.class.getCanonicalName();
    if (plugins == null || !plugins.contains(cleanerClass)) {
      // Guard against an unset plugin list; appending to "null" would corrupt the value.
      conf.set(HBASE_MASTER_LOGCLEANER_PLUGINS,
        StringUtils.isEmpty(plugins) ? cleanerClass : plugins + "," + cleanerClass);
    }
    if (ReplicationUtils.isReplicationForBulkLoadDataEnabled(conf)) {
      plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
      cleanerClass = ReplicationHFileCleaner.class.getCanonicalName();
      if (plugins == null || !plugins.contains(cleanerClass)) {
        conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
          StringUtils.isEmpty(plugins) ? cleanerClass : plugins + "," + cleanerClass);
      }
    }
4382  }
4383
4384  public SnapshotQuotaObserverChore getSnapshotQuotaObserverChore() {
4385    return this.snapshotQuotaChore;
4386  }
4387
4388  public ActiveMasterManager getActiveMasterManager() {
4389    return activeMasterManager;
4390  }
4391
4392  @Override
4393  public SyncReplicationReplayWALManager getSyncReplicationReplayWALManager() {
4394    return this.syncReplicationReplayWALManager;
4395  }
4396
4397  @Override
4398  public HbckChore getHbckChore() {
4399    return this.hbckChore;
4400  }
4401
4402  @Override
4403  public void runReplicationBarrierCleaner() {
4404    ReplicationBarrierCleaner rbc = this.replicationBarrierCleaner;
4405    if (rbc != null) {
4406      rbc.chore();
4407    }
4408  }
4409
4410  @Override
4411  public RSGroupInfoManager getRSGroupInfoManager() {
4412    return rsGroupInfoManager;
4413  }
4414
4415  /**
4416   * Get the compaction state of the table
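   * <p>
   * The table-level state aggregates the region-level states: if some regions report MAJOR and
   * others MINOR, the combined state is MAJOR_AND_MINOR; null is returned if the lookup fails.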
4417   * @param tableName The table name
4418   * @return CompactionState Compaction state of the table
4419   */
4420  public CompactionState getCompactionState(final TableName tableName) {
4421    CompactionState compactionState = CompactionState.NONE;
4422    try {
4423      List<RegionInfo> regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName);
4424      for (RegionInfo regionInfo : regions) {
4425        ServerName serverName =
4426          assignmentManager.getRegionStates().getRegionServerOfRegion(regionInfo);
4427        if (serverName == null) {
4428          continue;
4429        }
4430        ServerMetrics sl = serverManager.getLoad(serverName);
4431        if (sl == null) {
4432          continue;
4433        }
4434        RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName());
4435        if (regionMetrics == null) {
4436          LOG.warn("Can not get compaction details for the region: {} , it may be not online.",
4437            regionInfo.getRegionNameAsString());
4438          continue;
4439        }
4440        if (regionMetrics.getCompactionState() == CompactionState.MAJOR) {
4441          if (compactionState == CompactionState.MINOR) {
4442            compactionState = CompactionState.MAJOR_AND_MINOR;
4443          } else {
4444            compactionState = CompactionState.MAJOR;
4445          }
4446        } else if (regionMetrics.getCompactionState() == CompactionState.MINOR) {
4447          if (compactionState == CompactionState.MAJOR) {
4448            compactionState = CompactionState.MAJOR_AND_MINOR;
4449          } else {
4450            compactionState = CompactionState.MINOR;
4451          }
4452        }
4453      }
4454    } catch (Exception e) {
4455      compactionState = null;
4456      LOG.error("Exception when get compaction state for " + tableName.getNameAsString(), e);
4457    }
4458    return compactionState;
4459  }
4460
4461  @Override
4462  public MetaLocationSyncer getMetaLocationSyncer() {
4463    return metaLocationSyncer;
4464  }
4465
4466  @RestrictedApi(explanation = "Should only be called in tests", link = "",
4467      allowedOnPath = ".*/src/test/.*")
4468  public MasterRegion getMasterRegion() {
4469    return masterRegion;
4470  }
4471
4472  @Override
4473  public void onConfigurationChange(Configuration newConf) {
4474    try {
4475      Superusers.initialize(newConf);
4476    } catch (IOException e) {
4477      LOG.warn("Failed to initialize SuperUsers on reloading of the configuration");
4478    }
4479    // append the quotas observer back to the master coprocessor key
4480    setQuotasObserver(newConf);
4481    // update region server coprocessor if the configuration has changed.
4482    if (
4483      CoprocessorConfigurationUtil.checkConfigurationChange(this.cpHost, newConf,
4484        CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY) && !maintenanceMode
4485    ) {
4486      LOG.info("Update the master coprocessor(s) because the configuration has changed");
4487      this.cpHost = new MasterCoprocessorHost(this, newConf);
4488    }
4489  }
4490
4491  @Override
4492  protected NamedQueueRecorder createNamedQueueRecord() {
4493    final boolean isBalancerDecisionRecording =
4494      conf.getBoolean(BaseLoadBalancer.BALANCER_DECISION_BUFFER_ENABLED,
4495        BaseLoadBalancer.DEFAULT_BALANCER_DECISION_BUFFER_ENABLED);
4496    final boolean isBalancerRejectionRecording =
4497      conf.getBoolean(BaseLoadBalancer.BALANCER_REJECTION_BUFFER_ENABLED,
4498        BaseLoadBalancer.DEFAULT_BALANCER_REJECTION_BUFFER_ENABLED);
4499    if (isBalancerDecisionRecording || isBalancerRejectionRecording) {
4500      return NamedQueueRecorder.getInstance(conf);
4501    } else {
4502      return null;
4503    }
4504  }
4505
4506  @Override
4507  protected boolean clusterMode() {
4508    return true;
4509  }
4510
4511  public String getClusterId() {
4512    if (activeMaster) {
4513      return clusterId;
4514    }
4515    return cachedClusterId.getFromCacheOrFetch();
4516  }
4517
4518  public Optional<ServerName> getActiveMaster() {
4519    return activeMasterManager.getActiveMasterServerName();
4520  }
4521
4522  public List<ServerName> getBackupMasters() {
4523    return activeMasterManager.getBackupMasters();
4524  }
4525
4526  @Override
4527  public Iterator<ServerName> getBootstrapNodes() {
4528    return regionServerTracker.getRegionServers().iterator();
4529  }
4530
4531  @Override
4532  public List<HRegionLocation> getMetaLocations() {
4533    return metaRegionLocationCache.getMetaRegionLocations();
4534  }
4535
4536  @Override
4537  public void flushMasterStore() throws IOException {
4538    LOG.info("Force flush master local region.");
4539    if (this.cpHost != null) {
4540      try {
4541        cpHost.preMasterStoreFlush();
4542      } catch (IOException ioe) {
4543        LOG.error("Error invoking master coprocessor preMasterStoreFlush()", ioe);
4544      }
4545    }
4546    masterRegion.flush(true);
4547    if (this.cpHost != null) {
4548      try {
4549        cpHost.postMasterStoreFlush();
4550      } catch (IOException ioe) {
4551        LOG.error("Error invoking master coprocessor postMasterStoreFlush()", ioe);
4552      }
4553    }
4554  }
4555
4556  public Collection<ServerName> getLiveRegionServers() {
4557    return regionServerTracker.getRegionServers();
4558  }
4559
4560  @RestrictedApi(explanation = "Should only be called in tests", link = "",
4561      allowedOnPath = ".*/src/test/.*")
4562  void setLoadBalancer(RSGroupBasedLoadBalancer loadBalancer) {
4563    this.balancer = loadBalancer;
4564  }
4565
4566  @RestrictedApi(explanation = "Should only be called in tests", link = "",
4567      allowedOnPath = ".*/src/test/.*")
4568  void setAssignmentManager(AssignmentManager assignmentManager) {
4569    this.assignmentManager = assignmentManager;
4570  }
4571
4572  @RestrictedApi(explanation = "Should only be called in tests", link = "",
4573      allowedOnPath = ".*/src/test/.*")
4574  static void setDisableBalancerChoreForTest(boolean disable) {
4575    disableBalancerChoreForTest = disable;
4576  }
4577
4578  private void setQuotasObserver(Configuration conf) {
4579    // Add the Observer to delete quotas on table deletion before starting all CPs by
4580    // default with quota support, avoiding if user specifically asks to not load this Observer.
4581    if (QuotaUtil.isQuotaEnabled(conf)) {
4582      updateConfigurationForQuotasObserver(conf);
4583    }
4584  }
4585
4586  @Override
4587  public long flushTable(TableName tableName, List<byte[]> columnFamilies, long nonceGroup,
4588    long nonce) throws IOException {
4589    checkInitialized();
4590
4591    if (
4592      !getConfiguration().getBoolean(MasterFlushTableProcedureManager.FLUSH_PROCEDURE_ENABLED,
4593        MasterFlushTableProcedureManager.FLUSH_PROCEDURE_ENABLED_DEFAULT)
4594    ) {
4595      throw new DoNotRetryIOException("FlushTableProcedureV2 is DISABLED");
4596    }
4597
4598    return MasterProcedureUtil
4599      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
4600        @Override
4601        protected void run() throws IOException {
4602          getMaster().getMasterCoprocessorHost().preTableFlush(tableName);
4603          LOG.info("{} flush {}", getClientIdAuditPrefix(), tableName);
4604          submitProcedure(
4605            new FlushTableProcedure(procedureExecutor.getEnvironment(), tableName, columnFamilies));
4606          getMaster().getMasterCoprocessorHost().postTableFlush(tableName);
4607        }
4608
4609        @Override
4610        protected String getDescription() {
4611          return "FlushTableProcedure";
4612        }
4613      });
4614  }
4615
4616  @Override
4617  public long rollAllWALWriters(long nonceGroup, long nonce) throws IOException {
4618    return MasterProcedureUtil
4619      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
4620        @Override
4621        protected void run() {
4622          LOG.info("{} roll all wal writers", getClientIdAuditPrefix());
4623          submitProcedure(new LogRollProcedure());
4624        }
4625
4626        @Override
4627        protected String getDescription() {
4628          return "RollAllWALWriters";
4629        }
4630      });
4631  }
4632
4633  @RestrictedApi(explanation = "Should only be called in tests", link = "",
4634      allowedOnPath = ".*/src/test/.*")
4635  public MobFileCleanerChore getMobFileCleanerChore() {
4636    return mobFileCleanerChore;
4637  }
4638
4639}