001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.master;
019
020import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK;
021import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS;
022import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK;
023import static org.apache.hadoop.hbase.master.cleaner.HFileCleaner.CUSTOM_POOL_SIZE;
024import static org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY;
025
026import com.google.errorprone.annotations.RestrictedApi;
027import io.opentelemetry.api.trace.Span;
028import io.opentelemetry.api.trace.StatusCode;
029import io.opentelemetry.context.Scope;
030import java.io.IOException;
031import java.io.InterruptedIOException;
032import java.lang.reflect.Constructor;
033import java.lang.reflect.InvocationTargetException;
034import java.net.InetAddress;
035import java.net.InetSocketAddress;
036import java.net.UnknownHostException;
037import java.time.Instant;
038import java.time.ZoneId;
039import java.time.format.DateTimeFormatter;
040import java.util.ArrayList;
041import java.util.Arrays;
042import java.util.Collection;
043import java.util.Collections;
044import java.util.Comparator;
045import java.util.EnumSet;
046import java.util.HashMap;
047import java.util.HashSet;
048import java.util.Iterator;
049import java.util.LinkedList;
050import java.util.List;
051import java.util.Map;
052import java.util.Objects;
053import java.util.Optional;
054import java.util.Set;
055import java.util.concurrent.ExecutionException;
056import java.util.concurrent.Future;
057import java.util.concurrent.Semaphore;
058import java.util.concurrent.TimeUnit;
059import java.util.concurrent.TimeoutException;
060import java.util.concurrent.atomic.AtomicInteger;
061import java.util.regex.Pattern;
062import java.util.stream.Collectors;
063import javax.servlet.http.HttpServlet;
064import org.apache.commons.lang3.StringUtils;
065import org.apache.hadoop.conf.Configuration;
066import org.apache.hadoop.fs.FSDataInputStream;
067import org.apache.hadoop.fs.FSDataOutputStream;
068import org.apache.hadoop.fs.Path;
069import org.apache.hadoop.hbase.CatalogFamilyFormat;
070import org.apache.hadoop.hbase.Cell;
071import org.apache.hadoop.hbase.CellBuilderFactory;
072import org.apache.hadoop.hbase.CellBuilderType;
073import org.apache.hadoop.hbase.ClusterId;
074import org.apache.hadoop.hbase.ClusterMetrics;
075import org.apache.hadoop.hbase.ClusterMetrics.Option;
076import org.apache.hadoop.hbase.ClusterMetricsBuilder;
077import org.apache.hadoop.hbase.DoNotRetryIOException;
078import org.apache.hadoop.hbase.HBaseIOException;
079import org.apache.hadoop.hbase.HBaseInterfaceAudience;
080import org.apache.hadoop.hbase.HBaseServerBase;
081import org.apache.hadoop.hbase.HConstants;
082import org.apache.hadoop.hbase.HRegionLocation;
083import org.apache.hadoop.hbase.InvalidFamilyOperationException;
084import org.apache.hadoop.hbase.MasterNotRunningException;
085import org.apache.hadoop.hbase.MetaTableAccessor;
086import org.apache.hadoop.hbase.NamespaceDescriptor;
087import org.apache.hadoop.hbase.PleaseHoldException;
088import org.apache.hadoop.hbase.PleaseRestartMasterException;
089import org.apache.hadoop.hbase.RegionMetrics;
090import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
091import org.apache.hadoop.hbase.ScheduledChore;
092import org.apache.hadoop.hbase.ServerMetrics;
093import org.apache.hadoop.hbase.ServerName;
094import org.apache.hadoop.hbase.ServerTask;
095import org.apache.hadoop.hbase.ServerTaskBuilder;
096import org.apache.hadoop.hbase.TableName;
097import org.apache.hadoop.hbase.TableNotDisabledException;
098import org.apache.hadoop.hbase.TableNotFoundException;
099import org.apache.hadoop.hbase.UnknownRegionException;
100import org.apache.hadoop.hbase.client.BalanceRequest;
101import org.apache.hadoop.hbase.client.BalanceResponse;
102import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
103import org.apache.hadoop.hbase.client.CompactionState;
104import org.apache.hadoop.hbase.client.MasterSwitchType;
105import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
106import org.apache.hadoop.hbase.client.Put;
107import org.apache.hadoop.hbase.client.RegionInfo;
108import org.apache.hadoop.hbase.client.RegionInfoBuilder;
109import org.apache.hadoop.hbase.client.RegionStatesCount;
110import org.apache.hadoop.hbase.client.ResultScanner;
111import org.apache.hadoop.hbase.client.Scan;
112import org.apache.hadoop.hbase.client.TableDescriptor;
113import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
114import org.apache.hadoop.hbase.client.TableState;
115import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
116import org.apache.hadoop.hbase.exceptions.DeserializationException;
117import org.apache.hadoop.hbase.exceptions.MasterStoppedException;
118import org.apache.hadoop.hbase.executor.ExecutorType;
119import org.apache.hadoop.hbase.favored.FavoredNodesManager;
120import org.apache.hadoop.hbase.http.HttpServer;
121import org.apache.hadoop.hbase.http.InfoServer;
122import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
123import org.apache.hadoop.hbase.ipc.RpcServer;
124import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
125import org.apache.hadoop.hbase.log.HBaseMarkers;
126import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
127import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
128import org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
129import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
130import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
131import org.apache.hadoop.hbase.master.assignment.RegionStates;
132import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
133import org.apache.hadoop.hbase.master.balancer.BalancerChore;
134import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
135import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
136import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
137import org.apache.hadoop.hbase.master.balancer.LoadBalancerStateStore;
138import org.apache.hadoop.hbase.master.balancer.MaintenanceLoadBalancer;
139import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
140import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
141import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
142import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
143import org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore;
144import org.apache.hadoop.hbase.master.hbck.HbckChore;
145import org.apache.hadoop.hbase.master.http.MasterDumpServlet;
146import org.apache.hadoop.hbase.master.http.MasterRedirectServlet;
147import org.apache.hadoop.hbase.master.http.MasterStatusServlet;
148import org.apache.hadoop.hbase.master.http.api_v1.ResourceConfigFactory;
149import org.apache.hadoop.hbase.master.http.hbck.HbckConfigFactory;
150import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
151import org.apache.hadoop.hbase.master.locking.LockManager;
152import org.apache.hadoop.hbase.master.migrate.RollingUpgradeChore;
153import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
154import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager;
155import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerStateStore;
156import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
157import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
158import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
159import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
160import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
161import org.apache.hadoop.hbase.master.procedure.FlushTableProcedure;
162import org.apache.hadoop.hbase.master.procedure.InitMetaProcedure;
163import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
164import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
165import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
166import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
167import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
168import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
169import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
170import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
171import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
172import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure;
173import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
174import org.apache.hadoop.hbase.master.procedure.TruncateRegionProcedure;
175import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
176import org.apache.hadoop.hbase.master.region.MasterRegion;
177import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
178import org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure;
179import org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
180import org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
181import org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
182import org.apache.hadoop.hbase.master.replication.MigrateReplicationQueueFromZkToTableProcedure;
183import org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
184import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
185import org.apache.hadoop.hbase.master.replication.ReplicationPeerModificationStateStore;
186import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager;
187import org.apache.hadoop.hbase.master.replication.TransitPeerSyncReplicationStateProcedure;
188import org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
189import org.apache.hadoop.hbase.master.slowlog.SlowLogMasterService;
190import org.apache.hadoop.hbase.master.snapshot.SnapshotCleanupStateStore;
191import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
192import org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator;
193import org.apache.hadoop.hbase.master.zksyncer.MasterAddressSyncer;
194import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer;
195import org.apache.hadoop.hbase.mob.MobFileCleanerChore;
196import org.apache.hadoop.hbase.mob.MobFileCompactionChore;
197import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
198import org.apache.hadoop.hbase.monitoring.MonitoredTask;
199import org.apache.hadoop.hbase.monitoring.TaskGroup;
200import org.apache.hadoop.hbase.monitoring.TaskMonitor;
201import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
202import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
203import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
204import org.apache.hadoop.hbase.procedure2.LockedResource;
205import org.apache.hadoop.hbase.procedure2.Procedure;
206import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
207import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
208import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
209import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
210import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
211import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
212import org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore;
213import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
214import org.apache.hadoop.hbase.quotas.MasterQuotasObserver;
215import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
216import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
217import org.apache.hadoop.hbase.quotas.QuotaUtil;
218import org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore;
219import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
220import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
221import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
222import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
223import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
224import org.apache.hadoop.hbase.regionserver.HRegionServer;
225import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
226import org.apache.hadoop.hbase.regionserver.storefiletracker.ModifyColumnFamilyStoreFileTrackerProcedure;
227import org.apache.hadoop.hbase.regionserver.storefiletracker.ModifyTableStoreFileTrackerProcedure;
228import org.apache.hadoop.hbase.replication.ReplicationException;
229import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
230import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
231import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
232import org.apache.hadoop.hbase.replication.ReplicationUtils;
233import org.apache.hadoop.hbase.replication.SyncReplicationState;
234import org.apache.hadoop.hbase.replication.ZKReplicationQueueStorageForMigration;
235import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
236import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
237import org.apache.hadoop.hbase.replication.master.ReplicationLogCleanerBarrier;
238import org.apache.hadoop.hbase.replication.master.ReplicationSinkTrackerTableCreator;
239import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp;
240import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp.ReplicationSyncUpToolInfo;
241import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint;
242import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
243import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
244import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
245import org.apache.hadoop.hbase.security.AccessDeniedException;
246import org.apache.hadoop.hbase.security.SecurityConstants;
247import org.apache.hadoop.hbase.security.Superusers;
248import org.apache.hadoop.hbase.security.UserProvider;
249import org.apache.hadoop.hbase.trace.TraceUtil;
250import org.apache.hadoop.hbase.util.Addressing;
251import org.apache.hadoop.hbase.util.Bytes;
252import org.apache.hadoop.hbase.util.CommonFSUtils;
253import org.apache.hadoop.hbase.util.CoprocessorConfigurationUtil;
254import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
255import org.apache.hadoop.hbase.util.FSTableDescriptors;
256import org.apache.hadoop.hbase.util.FutureUtils;
257import org.apache.hadoop.hbase.util.HBaseFsck;
258import org.apache.hadoop.hbase.util.HFileArchiveUtil;
259import org.apache.hadoop.hbase.util.IdLock;
260import org.apache.hadoop.hbase.util.JVMClusterUtil;
261import org.apache.hadoop.hbase.util.JsonMapper;
262import org.apache.hadoop.hbase.util.ModifyRegionUtils;
263import org.apache.hadoop.hbase.util.Pair;
264import org.apache.hadoop.hbase.util.ReflectionUtils;
265import org.apache.hadoop.hbase.util.RetryCounter;
266import org.apache.hadoop.hbase.util.RetryCounterFactory;
267import org.apache.hadoop.hbase.util.TableDescriptorChecker;
268import org.apache.hadoop.hbase.util.Threads;
269import org.apache.hadoop.hbase.util.VersionInfo;
270import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
271import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
272import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
273import org.apache.hadoop.hbase.zookeeper.ZKUtil;
274import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
275import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
276import org.apache.yetus.audience.InterfaceAudience;
277import org.apache.zookeeper.KeeperException;
278import org.slf4j.Logger;
279import org.slf4j.LoggerFactory;
280
281import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
282import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
283import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
284import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams;
285import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
286import org.apache.hbase.thirdparty.com.google.gson.JsonParseException;
287import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors;
288import org.apache.hbase.thirdparty.com.google.protobuf.Service;
289import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server;
290import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector;
291import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder;
292import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext;
293import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig;
294import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer;
295
296import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
297import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
298import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
299
300/**
301 * HMaster is the "master server" for HBase. An HBase cluster has one active master. If many masters
302 * are started, all compete. Whichever wins goes on to run the cluster. All others park themselves
303 * in their constructor until master or cluster shutdown or until the active master loses its lease
304 * in zookeeper. Thereafter, all running master jostle to take over master role.
305 * <p/>
306 * The Master can be asked shutdown the cluster. See {@link #shutdown()}. In this case it will tell
307 * all regionservers to go down and then wait on them all reporting in that they are down. This
308 * master will then shut itself down.
309 * <p/>
310 * You can also shutdown just this master. Call {@link #stopMaster()}.
311 * @see org.apache.zookeeper.Watcher
312 */
313@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
314public class HMaster extends HBaseServerBase<MasterRpcServices> implements MasterServices {
315
316  private static final Logger LOG = LoggerFactory.getLogger(HMaster.class);
317
318  // MASTER is name of the webapp and the attribute name used stuffing this
319  // instance into a web context !! AND OTHER PLACES !!
320  public static final String MASTER = "master";
321
322  // Manager and zk listener for master election
323  private final ActiveMasterManager activeMasterManager;
324  // Region server tracker
325  private final RegionServerTracker regionServerTracker;
326  // Draining region server tracker
327  private DrainingServerTracker drainingServerTracker;
328  // Tracker for load balancer state
329  LoadBalancerStateStore loadBalancerStateStore;
330  // Tracker for meta location, if any client ZK quorum specified
331  private MetaLocationSyncer metaLocationSyncer;
332  // Tracker for active master location, if any client ZK quorum specified
333  @InterfaceAudience.Private
334  MasterAddressSyncer masterAddressSyncer;
335  // Tracker for auto snapshot cleanup state
336  SnapshotCleanupStateStore snapshotCleanupStateStore;
337
338  // Tracker for split and merge state
339  private SplitOrMergeStateStore splitOrMergeStateStore;
340
341  private ClusterSchemaService clusterSchemaService;
342
343  public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS =
344    "hbase.master.wait.on.service.seconds";
345  public static final int DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS = 5 * 60;
346
347  public static final String HBASE_MASTER_CLEANER_INTERVAL = "hbase.master.cleaner.interval";
348
349  public static final int DEFAULT_HBASE_MASTER_CLEANER_INTERVAL = 600 * 1000;
350
351  private String clusterId;
352
353  // Metrics for the HMaster
354  final MetricsMaster metricsMaster;
355  // file system manager for the master FS operations
356  private MasterFileSystem fileSystemManager;
357  private MasterWalManager walManager;
358
359  // manager to manage procedure-based WAL splitting, can be null if current
360  // is zk-based WAL splitting. SplitWALManager will replace SplitLogManager
361  // and MasterWalManager, which means zk-based WAL splitting code will be
362  // useless after we switch to the procedure-based one. our eventual goal
363  // is to remove all the zk-based WAL splitting code.
364  private SplitWALManager splitWALManager;
365
366  // server manager to deal with region server info
367  private volatile ServerManager serverManager;
368
369  // manager of assignment nodes in zookeeper
370  private AssignmentManager assignmentManager;
371
372  private RSGroupInfoManager rsGroupInfoManager;
373
374  private final ReplicationLogCleanerBarrier replicationLogCleanerBarrier =
375    new ReplicationLogCleanerBarrier();
376
377  // Only allow to add one sync replication peer concurrently
378  private final Semaphore syncReplicationPeerLock = new Semaphore(1);
379
380  // manager of replication
381  private ReplicationPeerManager replicationPeerManager;
382
383  private SyncReplicationReplayWALManager syncReplicationReplayWALManager;
384
385  // buffer for "fatal error" notices from region servers
386  // in the cluster. This is only used for assisting
387  // operations/debugging.
388  MemoryBoundedLogMessageBuffer rsFatals;
389
390  // flag set after we become the active master (used for testing)
391  private volatile boolean activeMaster = false;
392
393  // flag set after we complete initialization once active
394  private final ProcedureEvent<?> initialized = new ProcedureEvent<>("master initialized");
395
396  // flag set after master services are started,
397  // initialization may have not completed yet.
398  volatile boolean serviceStarted = false;
399
400  // Maximum time we should run balancer for
401  private final int maxBalancingTime;
402  // Maximum percent of regions in transition when balancing
403  private final double maxRitPercent;
404
405  private final LockManager lockManager = new LockManager(this);
406
407  private RSGroupBasedLoadBalancer balancer;
408  private BalancerChore balancerChore;
409  private static boolean disableBalancerChoreForTest = false;
410  private RegionNormalizerManager regionNormalizerManager;
411  private ClusterStatusChore clusterStatusChore;
412  private ClusterStatusPublisher clusterStatusPublisherChore = null;
413  private SnapshotCleanerChore snapshotCleanerChore = null;
414
415  private HbckChore hbckChore;
416  CatalogJanitor catalogJanitorChore;
417  // Threadpool for scanning the Old logs directory, used by the LogCleaner
418  private DirScanPool logCleanerPool;
419  private LogCleaner logCleaner;
420  // HFile cleaners for the custom hfile archive paths and the default archive path
421  // The archive path cleaner is the first element
422  private List<HFileCleaner> hfileCleaners = new ArrayList<>();
423  // The hfile cleaner paths, including custom paths and the default archive path
424  private List<Path> hfileCleanerPaths = new ArrayList<>();
425  // The shared hfile cleaner pool for the custom archive paths
426  private DirScanPool sharedHFileCleanerPool;
427  // The exclusive hfile cleaner pool for scanning the archive directory
428  private DirScanPool exclusiveHFileCleanerPool;
429  private ReplicationBarrierCleaner replicationBarrierCleaner;
430  private MobFileCleanerChore mobFileCleanerChore;
431  private MobFileCompactionChore mobFileCompactionChore;
432  private RollingUpgradeChore rollingUpgradeChore;
433  // used to synchronize the mobCompactionStates
434  private final IdLock mobCompactionLock = new IdLock();
435  // save the information of mob compactions in tables.
436  // the key is table name, the value is the number of compactions in that table.
437  private Map<TableName, AtomicInteger> mobCompactionStates = Maps.newConcurrentMap();
438
439  volatile MasterCoprocessorHost cpHost;
440
441  private final boolean preLoadTableDescriptors;
442
443  // Time stamps for when a hmaster became active
444  private long masterActiveTime;
445
446  // Time stamp for when HMaster finishes becoming Active Master
447  private long masterFinishedInitializationTime;
448
449  Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();
450
451  // monitor for snapshot of hbase tables
452  SnapshotManager snapshotManager;
453  // monitor for distributed procedures
454  private MasterProcedureManagerHost mpmHost;
455
456  private RegionsRecoveryChore regionsRecoveryChore = null;
457
458  private RegionsRecoveryConfigManager regionsRecoveryConfigManager = null;
459  // it is assigned after 'initialized' guard set to true, so should be volatile
460  private volatile MasterQuotaManager quotaManager;
461  private SpaceQuotaSnapshotNotifier spaceQuotaSnapshotNotifier;
462  private QuotaObserverChore quotaObserverChore;
463  private SnapshotQuotaObserverChore snapshotQuotaChore;
464  private OldWALsDirSizeChore oldWALsDirSizeChore;
465
466  private ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
467  private ProcedureStore procedureStore;
468
469  // the master local storage to store procedure data, meta region locations, etc.
470  private MasterRegion masterRegion;
471
472  private RegionServerList rsListStorage;
473
474  // handle table states
475  private TableStateManager tableStateManager;
476
477  /** jetty server for master to redirect requests to regionserver infoServer */
478  private Server masterJettyServer;
479
480  // Determine if we should do normal startup or minimal "single-user" mode with no region
481  // servers and no user tables. Useful for repair and recovery of hbase:meta
482  private final boolean maintenanceMode;
483  static final String MAINTENANCE_MODE = "hbase.master.maintenance_mode";
484
485  // the in process region server for carry system regions in maintenanceMode
486  private JVMClusterUtil.RegionServerThread maintenanceRegionServer;
487
488  // Cached clusterId on stand by masters to serve clusterID requests from clients.
489  private final CachedClusterId cachedClusterId;
490
491  public static final String WARMUP_BEFORE_MOVE = "hbase.master.warmup.before.move";
492  private static final boolean DEFAULT_WARMUP_BEFORE_MOVE = true;
493
494  /**
495   * Use RSProcedureDispatcher instance to initiate master -> rs remote procedure execution. Use
496   * this config to extend RSProcedureDispatcher (mainly for testing purpose).
497   */
498  public static final String HBASE_MASTER_RSPROC_DISPATCHER_CLASS =
499    "hbase.master.rsproc.dispatcher.class";
500  private static final String DEFAULT_HBASE_MASTER_RSPROC_DISPATCHER_CLASS =
501    RSProcedureDispatcher.class.getName();
502
503  private TaskGroup startupTaskGroup;
504
505  /**
506   * Store whether we allow replication peer modification operations.
507   */
508  private ReplicationPeerModificationStateStore replicationPeerModificationStateStore;
509
510  /**
511   * Initializes the HMaster. The steps are as follows:
512   * <p>
513   * <ol>
514   * <li>Initialize the local HRegionServer
515   * <li>Start the ActiveMasterManager.
516   * </ol>
517   * <p>
518   * Remaining steps of initialization occur in {@link #finishActiveMasterInitialization()} after
519   * the master becomes the active one.
520   */
521  public HMaster(final Configuration conf) throws IOException {
522    super(conf, "Master");
523    final Span span = TraceUtil.createSpan("HMaster.cxtor");
524    try (Scope ignored = span.makeCurrent()) {
525      if (conf.getBoolean(MAINTENANCE_MODE, false)) {
526        LOG.info("Detected {}=true via configuration.", MAINTENANCE_MODE);
527        maintenanceMode = true;
528      } else if (Boolean.getBoolean(MAINTENANCE_MODE)) {
529        LOG.info("Detected {}=true via environment variables.", MAINTENANCE_MODE);
530        maintenanceMode = true;
531      } else {
532        maintenanceMode = false;
533      }
534      this.rsFatals = new MemoryBoundedLogMessageBuffer(
535        conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));
536      LOG.info("hbase.rootdir={}, hbase.cluster.distributed={}",
537        CommonFSUtils.getRootDir(this.conf),
538        this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));
539
540      // Disable usage of meta replicas in the master
541      this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);
542
543      decorateMasterConfiguration(this.conf);
544
545      // Hack! Maps DFSClient => Master for logs. HDFS made this
546      // config param for task trackers, but we can piggyback off of it.
547      if (this.conf.get("mapreduce.task.attempt.id") == null) {
548        this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
549      }
550
551      this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));
552
553      // preload table descriptor at startup
554      this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);
555
556      this.maxBalancingTime = getMaxBalancingTime();
557      this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT,
558        HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT);
559
560      // Do we publish the status?
561      boolean shouldPublish =
562        conf.getBoolean(HConstants.STATUS_PUBLISHED, HConstants.STATUS_PUBLISHED_DEFAULT);
563      Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
564        conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
565          ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
566          ClusterStatusPublisher.Publisher.class);
567
568      if (shouldPublish) {
569        if (publisherClass == null) {
570          LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but "
571            + ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS
572            + " is not set - not publishing status");
573        } else {
574          clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
575          LOG.debug("Created {}", this.clusterStatusPublisherChore);
576          getChoreService().scheduleChore(clusterStatusPublisherChore);
577        }
578      }
579      this.activeMasterManager = createActiveMasterManager(zooKeeper, serverName, this);
580      cachedClusterId = new CachedClusterId(this, conf);
581      this.regionServerTracker = new RegionServerTracker(zooKeeper, this);
582      this.rpcServices.start(zooKeeper);
583      span.setStatus(StatusCode.OK);
584    } catch (Throwable t) {
585      // Make sure we log the exception. HMaster is often started via reflection and the
586      // cause of failed startup is lost.
587      TraceUtil.setError(span, t);
588      LOG.error("Failed construction of Master", t);
589      throw t;
590    } finally {
591      span.end();
592    }
593  }
594
595  /**
596   * Protected to have custom implementations in tests override the default ActiveMaster
597   * implementation.
598   */
599  protected ActiveMasterManager createActiveMasterManager(ZKWatcher zk, ServerName sn,
600    org.apache.hadoop.hbase.Server server) throws InterruptedIOException {
601    return new ActiveMasterManager(zk, sn, server);
602  }
603
604  @Override
605  protected String getUseThisHostnameInstead(Configuration conf) {
606    return conf.get(MASTER_HOSTNAME_KEY);
607  }
608
609  private void registerConfigurationObservers() {
610    configurationManager.registerObserver(this.rpcServices);
611    configurationManager.registerObserver(this);
612  }
613
614  // Main run loop. Calls through to the regionserver run loop AFTER becoming active Master; will
615  // block in here until then.
616  @Override
617  public void run() {
618    try {
619      installShutdownHook();
620      registerConfigurationObservers();
621      Threads.setDaemonThreadRunning(new Thread(TraceUtil.tracedRunnable(() -> {
622        try {
623          int infoPort = putUpJettyServer();
624          startActiveMasterManager(infoPort);
625        } catch (Throwable t) {
626          // Make sure we log the exception.
627          String error = "Failed to become Active Master";
628          LOG.error(error, t);
629          // Abort should have been called already.
630          if (!isAborted()) {
631            abort(error, t);
632          }
633        }
634      }, "HMaster.becomeActiveMaster")), getName() + ":becomeActiveMaster");
635      while (!isStopped() && !isAborted()) {
636        sleeper.sleep();
637      }
638      final Span span = TraceUtil.createSpan("HMaster exiting main loop");
639      try (Scope ignored = span.makeCurrent()) {
640        stopInfoServer();
641        closeClusterConnection();
642        stopServiceThreads();
643        if (this.rpcServices != null) {
644          this.rpcServices.stop();
645        }
646        closeZooKeeper();
647        closeTableDescriptors();
648        span.setStatus(StatusCode.OK);
649      } finally {
650        span.end();
651      }
652    } finally {
653      if (this.clusterSchemaService != null) {
654        // If on way out, then we are no longer active master.
655        this.clusterSchemaService.stopAsync();
656        try {
657          this.clusterSchemaService
658            .awaitTerminated(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,
659              DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);
660        } catch (TimeoutException te) {
661          LOG.warn("Failed shutdown of clusterSchemaService", te);
662        }
663      }
664      this.activeMaster = false;
665    }
666  }
667
668  // return the actual infoPort, -1 means disable info server.
669  private int putUpJettyServer() throws IOException {
670    if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
671      return -1;
672    }
673    final int infoPort =
674      conf.getInt("hbase.master.info.port.orig", HConstants.DEFAULT_MASTER_INFOPORT);
675    // -1 is for disabling info server, so no redirecting
676    if (infoPort < 0 || infoServer == null) {
677      return -1;
678    }
679    if (infoPort == infoServer.getPort()) {
680      // server is already running
681      return infoPort;
682    }
683    final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
684    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
685      String msg = "Failed to start redirecting jetty server. Address " + addr
686        + " does not belong to this host. Correct configuration parameter: "
687        + "hbase.master.info.bindAddress";
688      LOG.error(msg);
689      throw new IOException(msg);
690    }
691
692    // TODO I'm pretty sure we could just add another binding to the InfoServer run by
693    // the RegionServer and have it run the RedirectServlet instead of standing up
694    // a second entire stack here.
695    masterJettyServer = new Server();
696    final ServerConnector connector = new ServerConnector(masterJettyServer);
697    connector.setHost(addr);
698    connector.setPort(infoPort);
699    masterJettyServer.addConnector(connector);
700    masterJettyServer.setStopAtShutdown(true);
701    masterJettyServer.setHandler(HttpServer.buildGzipHandler(masterJettyServer.getHandler()));
702
703    final String redirectHostname =
704      StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead;
705
706    final MasterRedirectServlet redirect = new MasterRedirectServlet(infoServer, redirectHostname);
707    final WebAppContext context =
708      new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);
709    context.addServlet(new ServletHolder(redirect), "/*");
710    context.setServer(masterJettyServer);
711
712    try {
713      masterJettyServer.start();
714    } catch (Exception e) {
715      throw new IOException("Failed to start redirecting jetty server", e);
716    }
717    return connector.getLocalPort();
718  }
719
720  /**
721   * For compatibility, if failed with regionserver credentials, try the master one
722   */
723  @Override
724  protected void login(UserProvider user, String host) throws IOException {
725    try {
726      user.login(SecurityConstants.REGIONSERVER_KRB_KEYTAB_FILE,
727        SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, host);
728    } catch (IOException ie) {
729      user.login(SecurityConstants.MASTER_KRB_KEYTAB_FILE, SecurityConstants.MASTER_KRB_PRINCIPAL,
730        host);
731    }
732  }
733
734  public MasterRpcServices getMasterRpcServices() {
735    return rpcServices;
736  }
737
738  @Override
739  protected MasterCoprocessorHost getCoprocessorHost() {
740    return getMasterCoprocessorHost();
741  }
742
743  public boolean balanceSwitch(final boolean b) throws IOException {
744    return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
745  }
746
747  @Override
748  protected String getProcessName() {
749    return MASTER;
750  }
751
752  @Override
753  protected boolean canCreateBaseZNode() {
754    return true;
755  }
756
757  @Override
758  protected boolean canUpdateTableDescriptor() {
759    return true;
760  }
761
762  @Override
763  protected boolean cacheTableDescriptor() {
764    return true;
765  }
766
767  protected MasterRpcServices createRpcServices() throws IOException {
768    return new MasterRpcServices(this);
769  }
770
771  @Override
772  protected void configureInfoServer(InfoServer infoServer) {
773    infoServer.addUnprivilegedServlet("master-status", "/master-status", MasterStatusServlet.class);
774    infoServer.addUnprivilegedServlet("api_v1", "/api/v1/*", buildApiV1Servlet());
775    infoServer.addUnprivilegedServlet("hbck", "/hbck/*", buildHbckServlet());
776
777    infoServer.setAttribute(MASTER, this);
778  }
779
780  private ServletHolder buildApiV1Servlet() {
781    final ResourceConfig config = ResourceConfigFactory.createResourceConfig(conf, this);
782    return new ServletHolder(new ServletContainer(config));
783  }
784
785  private ServletHolder buildHbckServlet() {
786    final ResourceConfig config = HbckConfigFactory.createResourceConfig(conf, this);
787    return new ServletHolder(new ServletContainer(config));
788  }
789
790  @Override
791  protected Class<? extends HttpServlet> getDumpServlet() {
792    return MasterDumpServlet.class;
793  }
794
795  @Override
796  public MetricsMaster getMasterMetrics() {
797    return metricsMaster;
798  }
799
800  /**
801   * Initialize all ZK based system trackers. But do not include {@link RegionServerTracker}, it
802   * should have already been initialized along with {@link ServerManager}.
803   */
804  private void initializeZKBasedSystemTrackers()
805    throws IOException, KeeperException, ReplicationException, DeserializationException {
806    if (maintenanceMode) {
807      // in maintenance mode, always use MaintenanceLoadBalancer.
808      conf.unset(LoadBalancer.HBASE_RSGROUP_LOADBALANCER_CLASS);
809      conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, MaintenanceLoadBalancer.class,
810        LoadBalancer.class);
811    }
812    this.balancer = new RSGroupBasedLoadBalancer();
813    this.loadBalancerStateStore = new LoadBalancerStateStore(masterRegion, zooKeeper);
814
815    this.regionNormalizerManager =
816      RegionNormalizerFactory.createNormalizerManager(conf, masterRegion, zooKeeper, this);
817    this.configurationManager.registerObserver(regionNormalizerManager);
818    this.regionNormalizerManager.start();
819
820    this.splitOrMergeStateStore = new SplitOrMergeStateStore(masterRegion, zooKeeper, conf);
821
822    // This is for backwards compatible. We do not need the CP for rs group now but if user want to
823    // load it, we need to enable rs group.
824    String[] cpClasses = conf.getStrings(MasterCoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
825    if (cpClasses != null) {
826      for (String cpClass : cpClasses) {
827        if (RSGroupAdminEndpoint.class.getName().equals(cpClass)) {
828          RSGroupUtil.enableRSGroup(conf);
829          break;
830        }
831      }
832    }
833    this.rsGroupInfoManager = RSGroupInfoManager.create(this);
834
835    this.replicationPeerManager = ReplicationPeerManager.create(this, clusterId);
836    this.configurationManager.registerObserver(replicationPeerManager);
837    this.replicationPeerModificationStateStore =
838      new ReplicationPeerModificationStateStore(masterRegion);
839
840    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);
841    this.drainingServerTracker.start();
842
843    this.snapshotCleanupStateStore = new SnapshotCleanupStateStore(masterRegion, zooKeeper);
844
845    String clientQuorumServers = conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM);
846    boolean clientZkObserverMode = conf.getBoolean(HConstants.CLIENT_ZOOKEEPER_OBSERVER_MODE,
847      HConstants.DEFAULT_CLIENT_ZOOKEEPER_OBSERVER_MODE);
848    if (clientQuorumServers != null && !clientZkObserverMode) {
849      // we need to take care of the ZK information synchronization
850      // if given client ZK are not observer nodes
851      ZKWatcher clientZkWatcher = new ZKWatcher(conf,
852        getProcessName() + ":" + rpcServices.getSocketAddress().getPort() + "-clientZK", this,
853        false, true);
854      this.metaLocationSyncer = new MetaLocationSyncer(zooKeeper, clientZkWatcher, this);
855      this.metaLocationSyncer.start();
856      this.masterAddressSyncer = new MasterAddressSyncer(zooKeeper, clientZkWatcher, this);
857      this.masterAddressSyncer.start();
858      // set cluster id is a one-go effort
859      ZKClusterId.setClusterId(clientZkWatcher, fileSystemManager.getClusterId());
860    }
861
862    // Set the cluster as up. If new RSs, they'll be waiting on this before
863    // going ahead with their startup.
864    boolean wasUp = this.clusterStatusTracker.isClusterUp();
865    if (!wasUp) this.clusterStatusTracker.setClusterUp();
866
867    LOG.info("Active/primary master=" + this.serverName + ", sessionid=0x"
868      + Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId())
869      + ", setting cluster-up flag (Was=" + wasUp + ")");
870
871    // create/initialize the snapshot manager and other procedure managers
872    this.snapshotManager = new SnapshotManager();
873    this.mpmHost = new MasterProcedureManagerHost();
874    this.mpmHost.register(this.snapshotManager);
875    this.mpmHost.register(new MasterFlushTableProcedureManager());
876    this.mpmHost.loadProcedures(conf);
877    this.mpmHost.initialize(this, this.metricsMaster);
878  }
879
880  // Will be overriden in test to inject customized AssignmentManager
881  @InterfaceAudience.Private
882  protected AssignmentManager createAssignmentManager(MasterServices master,
883    MasterRegion masterRegion) {
884    return new AssignmentManager(master, masterRegion);
885  }
886
887  private void tryMigrateMetaLocationsFromZooKeeper() throws IOException, KeeperException {
888    // try migrate data from zookeeper
889    try (ResultScanner scanner =
890      masterRegion.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) {
891      if (scanner.next() != null) {
892        // notice that all replicas for a region are in the same row, so the migration can be
893        // done with in a one row put, which means if we have data in catalog family then we can
894        // make sure that the migration is done.
895        LOG.info("The {} family in master local region already has data in it, skip migrating...",
896          HConstants.CATALOG_FAMILY_STR);
897        return;
898      }
899    }
900    // start migrating
901    byte[] row = CatalogFamilyFormat.getMetaKeyForRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
902    Put put = new Put(row);
903    List<String> metaReplicaNodes = zooKeeper.getMetaReplicaNodes();
904    StringBuilder info = new StringBuilder("Migrating meta locations:");
905    for (String metaReplicaNode : metaReplicaNodes) {
906      int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZNode(metaReplicaNode);
907      RegionState state = MetaTableLocator.getMetaRegionState(zooKeeper, replicaId);
908      info.append(" ").append(state);
909      put.setTimestamp(state.getStamp());
910      MetaTableAccessor.addRegionInfo(put, state.getRegion());
911      if (state.getServerName() != null) {
912        MetaTableAccessor.addLocation(put, state.getServerName(), HConstants.NO_SEQNUM, replicaId);
913      }
914      put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
915        .setFamily(HConstants.CATALOG_FAMILY)
916        .setQualifier(RegionStateStore.getStateColumn(replicaId)).setTimestamp(put.getTimestamp())
917        .setType(Cell.Type.Put).setValue(Bytes.toBytes(state.getState().name())).build());
918    }
919    if (!put.isEmpty()) {
920      LOG.info(info.toString());
921      masterRegion.update(r -> r.put(put));
922    } else {
923      LOG.info("No meta location available on zookeeper, skip migrating...");
924    }
925  }
926
927  /**
928   * Finish initialization of HMaster after becoming the primary master.
929   * <p/>
930   * The startup order is a bit complicated but very important, do not change it unless you know
931   * what you are doing.
932   * <ol>
933   * <li>Initialize file system based components - file system manager, wal manager, table
934   * descriptors, etc</li>
935   * <li>Publish cluster id</li>
936   * <li>Here comes the most complicated part - initialize server manager, assignment manager and
937   * region server tracker
938   * <ol type='i'>
939   * <li>Create server manager</li>
940   * <li>Create master local region</li>
941   * <li>Create procedure executor, load the procedures, but do not start workers. We will start it
942   * later after we finish scheduling SCPs to avoid scheduling duplicated SCPs for the same
943   * server</li>
944   * <li>Create assignment manager and start it, load the meta region state, but do not load data
945   * from meta region</li>
946   * <li>Start region server tracker, construct the online servers set and find out dead servers and
947   * schedule SCP for them. The online servers will be constructed by scanning zk, and we will also
948   * scan the wal directory and load from master local region to find out possible live region
949   * servers, and the differences between these two sets are the dead servers</li>
950   * </ol>
951   * </li>
952   * <li>If this is a new deploy, schedule a InitMetaProcedure to initialize meta</li>
953   * <li>Start necessary service threads - balancer, catalog janitor, executor services, and also
954   * the procedure executor, etc. Notice that the balancer must be created first as assignment
955   * manager may use it when assigning regions.</li>
956   * <li>Wait for meta to be initialized if necessary, start table state manager.</li>
957   * <li>Wait for enough region servers to check-in</li>
958   * <li>Let assignment manager load data from meta and construct region states</li>
959   * <li>Start all other things such as chore services, etc</li>
960   * </ol>
961   * <p/>
962   * Notice that now we will not schedule a special procedure to make meta online(unless the first
963   * time where meta has not been created yet), we will rely on SCP to bring meta online.
964   */
965  private void finishActiveMasterInitialization() throws IOException, InterruptedException,
966    KeeperException, ReplicationException, DeserializationException {
967    /*
968     * We are active master now... go initialize components we need to run.
969     */
970    startupTaskGroup.addTask("Initializing Master file system");
971
972    this.masterActiveTime = EnvironmentEdgeManager.currentTime();
973    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
974
975    // always initialize the MemStoreLAB as we use a region to store data in master now, see
976    // localStore.
977    initializeMemStoreChunkCreator(null);
978    this.fileSystemManager = new MasterFileSystem(conf);
979    this.walManager = new MasterWalManager(this);
980
981    // warm-up HTDs cache on master initialization
982    if (preLoadTableDescriptors) {
983      startupTaskGroup.addTask("Pre-loading table descriptors");
984      this.tableDescriptors.getAll();
985    }
986
987    // Publish cluster ID; set it in Master too. The superclass RegionServer does this later but
988    // only after it has checked in with the Master. At least a few tests ask Master for clusterId
989    // before it has called its run method and before RegionServer has done the reportForDuty.
990    ClusterId clusterId = fileSystemManager.getClusterId();
991    startupTaskGroup.addTask("Publishing Cluster ID " + clusterId + " in ZooKeeper");
992    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
993    this.clusterId = clusterId.toString();
994
995    // Precaution. Put in place the old hbck1 lock file to fence out old hbase1s running their
996    // hbck1s against an hbase2 cluster; it could do damage. To skip this behavior, set
997    // hbase.write.hbck1.lock.file to false.
998    if (this.conf.getBoolean("hbase.write.hbck1.lock.file", true)) {
999      Pair<Path, FSDataOutputStream> result = null;
1000      try {
1001        result = HBaseFsck.checkAndMarkRunningHbck(this.conf,
1002          HBaseFsck.createLockRetryCounterFactory(this.conf).create());
1003      } finally {
1004        if (result != null) {
1005          Closeables.close(result.getSecond(), true);
1006        }
1007      }
1008    }
1009
1010    startupTaskGroup.addTask("Initialize ServerManager and schedule SCP for crash servers");
1011    // The below two managers must be created before loading procedures, as they will be used during
1012    // loading.
1013    // initialize master local region
1014    masterRegion = MasterRegionFactory.create(this);
1015    rsListStorage = new MasterRegionServerList(masterRegion, this);
1016
1017    // Initialize the ServerManager and register it as a configuration observer
1018    this.serverManager = createServerManager(this, rsListStorage);
1019    this.configurationManager.registerObserver(this.serverManager);
1020
1021    this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);
1022    if (
1023      !conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)
1024    ) {
1025      this.splitWALManager = new SplitWALManager(this);
1026    }
1027
1028    tryMigrateMetaLocationsFromZooKeeper();
1029
1030    createProcedureExecutor();
1031    Map<Class<?>, List<Procedure<MasterProcedureEnv>>> procsByType = procedureExecutor
1032      .getActiveProceduresNoCopy().stream().collect(Collectors.groupingBy(p -> p.getClass()));
1033
1034    // Create Assignment Manager
1035    this.assignmentManager = createAssignmentManager(this, masterRegion);
1036    this.assignmentManager.start();
1037    // TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as
1038    // completed, it could still be in the procedure list. This is a bit strange but is another
1039    // story, need to verify the implementation for ProcedureExecutor and ProcedureStore.
1040    List<TransitRegionStateProcedure> ritList =
1041      procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()).stream()
1042        .filter(p -> !p.isFinished()).map(p -> (TransitRegionStateProcedure) p)
1043        .collect(Collectors.toList());
1044    this.assignmentManager.setupRIT(ritList);
1045
1046    // Start RegionServerTracker with listing of servers found with exiting SCPs -- these should
1047    // be registered in the deadServers set -- and the servernames loaded from the WAL directory
1048    // and master local region that COULD BE 'alive'(we'll schedule SCPs for each and let SCP figure
1049    // it out).
1050    // We also pass dirs that are already 'splitting'... so we can do some checks down in tracker.
1051    // TODO: Generate the splitting and live Set in one pass instead of two as we currently do.
1052    this.regionServerTracker.upgrade(
1053      procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()
1054        .map(p -> (ServerCrashProcedure) p).map(p -> p.getServerName()).collect(Collectors.toSet()),
1055      Sets.union(rsListStorage.getAll(), walManager.getLiveServersFromWALDir()),
1056      walManager.getSplittingServersFromWALDir());
1057    // This manager must be accessed AFTER hbase:meta is confirmed on line..
1058    this.tableStateManager = new TableStateManager(this);
1059
1060    startupTaskGroup.addTask("Initializing ZK system trackers");
1061    initializeZKBasedSystemTrackers();
1062    startupTaskGroup.addTask("Loading last flushed sequence id of regions");
1063    try {
1064      this.serverManager.loadLastFlushedSequenceIds();
1065    } catch (IOException e) {
1066      LOG.info("Failed to load last flushed sequence id of regions" + " from file system", e);
1067    }
1068    // Set ourselves as active Master now our claim has succeeded up in zk.
1069    this.activeMaster = true;
1070
1071    // Start the Zombie master detector after setting master as active, see HBASE-21535
1072    Thread zombieDetector = new Thread(new MasterInitializationMonitor(this),
1073      "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime());
1074    zombieDetector.setDaemon(true);
1075    zombieDetector.start();
1076
1077    if (!maintenanceMode) {
1078      startupTaskGroup.addTask("Initializing master coprocessors");
1079      setQuotasObserver(conf);
1080      initializeCoprocessorHost(conf);
1081    } else {
1082      // start an in process region server for carrying system regions
1083      maintenanceRegionServer =
1084        JVMClusterUtil.createRegionServerThread(getConfiguration(), HRegionServer.class, 0);
1085      maintenanceRegionServer.start();
1086    }
1087
1088    // Checking if meta needs initializing.
1089    startupTaskGroup.addTask("Initializing meta table if this is a new deploy");
1090    InitMetaProcedure initMetaProc = null;
1091    // Print out state of hbase:meta on startup; helps debugging.
1092    if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) {
1093      Optional<InitMetaProcedure> optProc = procedureExecutor.getProcedures().stream()
1094        .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny();
1095      initMetaProc = optProc.orElseGet(() -> {
1096        // schedule an init meta procedure if meta has not been deployed yet
1097        InitMetaProcedure temp = new InitMetaProcedure();
1098        procedureExecutor.submitProcedure(temp);
1099        return temp;
1100      });
1101    }
1102
1103    // initialize load balancer
1104    this.balancer.setMasterServices(this);
1105    this.balancer.initialize();
1106    this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
1107
1108    // try migrate replication data
1109    ZKReplicationQueueStorageForMigration oldReplicationQueueStorage =
1110      new ZKReplicationQueueStorageForMigration(zooKeeper, conf);
1111    // check whether there are something to migrate and we haven't scheduled a migration procedure
1112    // yet
1113    if (
1114      oldReplicationQueueStorage.hasData() && procedureExecutor.getProcedures().stream()
1115        .allMatch(p -> !(p instanceof MigrateReplicationQueueFromZkToTableProcedure))
1116    ) {
1117      procedureExecutor.submitProcedure(new MigrateReplicationQueueFromZkToTableProcedure());
1118    }
1119    // start up all service threads.
1120    startupTaskGroup.addTask("Initializing master service threads");
1121    startServiceThreads();
1122    // wait meta to be initialized after we start procedure executor
1123    if (initMetaProc != null) {
1124      initMetaProc.await();
1125      if (initMetaProc.isFailed() && initMetaProc.hasException()) {
1126        throw new IOException("Failed to initialize meta table", initMetaProc.getException());
1127      }
1128    }
1129    // Wake up this server to check in
1130    sleeper.skipSleepCycle();
1131
1132    // Wait for region servers to report in.
1133    // With this as part of master initialization, it precludes our being able to start a single
1134    // server that is both Master and RegionServer. Needs more thought. TODO.
1135    String statusStr = "Wait for region servers to report in";
1136    MonitoredTask waitRegionServer = startupTaskGroup.addTask(statusStr);
1137    LOG.info(Objects.toString(waitRegionServer));
1138    waitForRegionServers(waitRegionServer);
1139
1140    // Check if master is shutting down because issue initializing regionservers or balancer.
1141    if (isStopped()) {
1142      return;
1143    }
1144
1145    startupTaskGroup.addTask("Starting assignment manager");
1146    // FIRST HBASE:META READ!!!!
1147    // The below cannot make progress w/o hbase:meta being online.
1148    // This is the FIRST attempt at going to hbase:meta. Meta on-lining is going on in background
1149    // as procedures run -- in particular SCPs for crashed servers... One should put up hbase:meta
1150    // if it is down. It may take a while to come online. So, wait here until meta if for sure
1151    // available. That's what waitForMetaOnline does.
1152    if (!waitForMetaOnline()) {
1153      return;
1154    }
1155
1156    TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME);
1157    final ColumnFamilyDescriptor tableFamilyDesc =
1158      metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY);
1159    final ColumnFamilyDescriptor replBarrierFamilyDesc =
1160      metaDescriptor.getColumnFamily(HConstants.REPLICATION_BARRIER_FAMILY);
1161
1162    this.assignmentManager.joinCluster();
1163    // The below depends on hbase:meta being online.
1164    this.assignmentManager.processOfflineRegions();
1165    // this must be called after the above processOfflineRegions to prevent race
1166    this.assignmentManager.wakeMetaLoadedEvent();
1167
1168    // for migrating from a version without HBASE-25099, and also for honoring the configuration
1169    // first.
1170    if (conf.get(HConstants.META_REPLICAS_NUM) != null) {
1171      int replicasNumInConf =
1172        conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM);
1173      TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME);
1174      int existingReplicasCount =
1175        assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size();
1176
1177      if (metaDesc.getRegionReplication() != replicasNumInConf) {
1178        // it is possible that we already have some replicas before upgrading, so we must set the
1179        // region replication number in meta TableDescriptor directly first, without creating a
1180        // ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas.
1181        LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)"
1182          + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount);
1183        metaDesc = TableDescriptorBuilder.newBuilder(metaDesc)
1184          .setRegionReplication(existingReplicasCount).build();
1185        tableDescriptors.update(metaDesc);
1186      }
1187      // check again, and issue a ModifyTableProcedure if needed
1188      if (
1189        metaDesc.getRegionReplication() != replicasNumInConf
1190          || existingReplicasCount != metaDesc.getRegionReplication()
1191      ) {
1192        LOG.info(
1193          "The {} config is {} while the replica count in TableDescriptor is {},"
1194            + " The number of replicas seen on ZK {} for hbase:meta, altering...",
1195          HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication(),
1196          existingReplicasCount);
1197        procedureExecutor.submitProcedure(new ModifyTableProcedure(
1198          procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc)
1199            .setRegionReplication(replicasNumInConf).build(),
1200          null, metaDesc, false, true));
1201      }
1202    }
1203    // Initialize after meta is up as below scans meta
1204    FavoredNodesManager fnm = getFavoredNodesManager();
1205    if (fnm != null) {
1206      fnm.initializeFromMeta();
1207    }
1208
1209    // set cluster status again after user regions are assigned
1210    this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
1211
1212    // Start balancer and meta catalog janitor after meta and regions have been assigned.
1213    startupTaskGroup.addTask("Starting balancer and catalog janitor");
1214    this.clusterStatusChore = new ClusterStatusChore(this, balancer);
1215    getChoreService().scheduleChore(clusterStatusChore);
1216    this.balancerChore = new BalancerChore(this);
1217    if (!disableBalancerChoreForTest) {
1218      getChoreService().scheduleChore(balancerChore);
1219    }
1220    if (regionNormalizerManager != null) {
1221      getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore());
1222    }
1223    this.catalogJanitorChore = new CatalogJanitor(this);
1224    getChoreService().scheduleChore(catalogJanitorChore);
1225    this.hbckChore = new HbckChore(this);
1226    getChoreService().scheduleChore(hbckChore);
1227    this.serverManager.startChore();
1228
1229    // Only for rolling upgrade, where we need to migrate the data in namespace table to meta table.
1230    if (!waitForNamespaceOnline()) {
1231      return;
1232    }
1233    startupTaskGroup.addTask("Starting cluster schema service");
1234    try {
1235      initClusterSchemaService();
1236    } catch (IllegalStateException e) {
1237      if (
1238        e.getCause() != null && e.getCause() instanceof NoSuchColumnFamilyException
1239          && tableFamilyDesc == null && replBarrierFamilyDesc == null
1240      ) {
1241        LOG.info("ClusterSchema service could not be initialized. This is "
1242          + "expected during HBase 1 to 2 upgrade", e);
1243      } else {
1244        throw e;
1245      }
1246    }
1247
1248    if (this.cpHost != null) {
1249      try {
1250        this.cpHost.preMasterInitialization();
1251      } catch (IOException e) {
1252        LOG.error("Coprocessor preMasterInitialization() hook failed", e);
1253      }
1254    }
1255
1256    LOG.info(String.format("Master has completed initialization %.3fsec",
1257      (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f));
1258    this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime();
1259    configurationManager.registerObserver(this.balancer);
1260    configurationManager.registerObserver(this.logCleanerPool);
1261    configurationManager.registerObserver(this.logCleaner);
1262    configurationManager.registerObserver(this.regionsRecoveryConfigManager);
1263    configurationManager.registerObserver(this.exclusiveHFileCleanerPool);
1264    if (this.sharedHFileCleanerPool != null) {
1265      configurationManager.registerObserver(this.sharedHFileCleanerPool);
1266    }
1267    if (this.hfileCleaners != null) {
1268      for (HFileCleaner cleaner : hfileCleaners) {
1269        configurationManager.registerObserver(cleaner);
1270      }
1271    }
1272    // Set master as 'initialized'.
1273    setInitialized(true);
1274    startupTaskGroup.markComplete("Initialization successful");
1275    MonitoredTask status =
1276      TaskMonitor.get().createStatus("Progress after master initialized", false, true);
1277
1278    if (tableFamilyDesc == null && replBarrierFamilyDesc == null) {
1279      // create missing CFs in meta table after master is set to 'initialized'.
1280      createMissingCFsInMetaDuringUpgrade(metaDescriptor);
1281
1282      // Throwing this Exception to abort active master is painful but this
1283      // seems the only way to add missing CFs in meta while upgrading from
1284      // HBase 1 to 2 (where HBase 2 has HBASE-23055 & HBASE-23782 checked-in).
1285      // So, why do we abort active master after adding missing CFs in meta?
1286      // When we reach here, we would have already bypassed NoSuchColumnFamilyException
1287      // in initClusterSchemaService(), meaning ClusterSchemaService is not
1288      // correctly initialized but we bypassed it. Similarly, we bypassed
1289      // tableStateManager.start() as well. Hence, we should better abort
1290      // current active master because our main task - adding missing CFs
1291      // in meta table is done (possible only after master state is set as
1292      // initialized) at the expense of bypassing few important tasks as part
1293      // of active master init routine. So now we abort active master so that
1294      // next active master init will not face any issues and all mandatory
1295      // services will be started during master init phase.
1296      throw new PleaseRestartMasterException("Aborting active master after missing"
1297        + " CFs are successfully added in meta. Subsequent active master "
1298        + "initialization should be uninterrupted");
1299    }
1300
1301    if (maintenanceMode) {
1302      LOG.info("Detected repair mode, skipping final initialization steps.");
1303      return;
1304    }
1305
1306    assignmentManager.checkIfShouldMoveSystemRegionAsync();
1307    status.setStatus("Starting quota manager");
1308    initQuotaManager();
1309    if (QuotaUtil.isQuotaEnabled(conf)) {
1310      // Create the quota snapshot notifier
1311      spaceQuotaSnapshotNotifier = createQuotaSnapshotNotifier();
1312      spaceQuotaSnapshotNotifier.initialize(getConnection());
1313      this.quotaObserverChore = new QuotaObserverChore(this, getMasterMetrics());
1314      // Start the chore to read the region FS space reports and act on them
1315      getChoreService().scheduleChore(quotaObserverChore);
1316
1317      this.snapshotQuotaChore = new SnapshotQuotaObserverChore(this, getMasterMetrics());
1318      // Start the chore to read snapshots and add their usage to table/NS quotas
1319      getChoreService().scheduleChore(snapshotQuotaChore);
1320    }
1321    final SlowLogMasterService slowLogMasterService = new SlowLogMasterService(conf, this);
1322    slowLogMasterService.init();
1323
1324    WALEventTrackerTableCreator.createIfNeededAndNotExists(conf, this);
1325    // Create REPLICATION.SINK_TRACKER table if needed.
1326    ReplicationSinkTrackerTableCreator.createIfNeededAndNotExists(conf, this);
1327
1328    // clear the dead servers with same host name and port of online server because we are not
1329    // removing dead server with same hostname and port of rs which is trying to check in before
1330    // master initialization. See HBASE-5916.
1331    this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();
1332
1333    // Check and set the znode ACLs if needed in case we are overtaking a non-secure configuration
1334    status.setStatus("Checking ZNode ACLs");
1335    zooKeeper.checkAndSetZNodeAcls();
1336
1337    status.setStatus("Initializing MOB Cleaner");
1338    initMobCleaner();
1339
1340    // delete the stale data for replication sync up tool if necessary
1341    status.setStatus("Cleanup ReplicationSyncUp status if necessary");
1342    Path replicationSyncUpInfoFile =
1343      new Path(new Path(dataRootDir, ReplicationSyncUp.INFO_DIR), ReplicationSyncUp.INFO_FILE);
1344    if (dataFs.exists(replicationSyncUpInfoFile)) {
1345      // info file is available, load the timestamp and use it to clean up stale data in replication
1346      // queue storage.
1347      byte[] data;
1348      try (FSDataInputStream in = dataFs.open(replicationSyncUpInfoFile)) {
1349        data = ByteStreams.toByteArray(in);
1350      }
1351      ReplicationSyncUpToolInfo info = null;
1352      try {
1353        info = JsonMapper.fromJson(Bytes.toString(data), ReplicationSyncUpToolInfo.class);
1354      } catch (JsonParseException e) {
1355        // usually this should be a partial file, which means the ReplicationSyncUp tool did not
1356        // finish properly, so not a problem. Here we do not clean up the status as we do not know
1357        // the reason why the tool did not finish properly, so let users clean the status up
1358        // manually
1359        LOG.warn("failed to parse replication sync up info file, ignore and continue...", e);
1360      }
1361      if (info != null) {
1362        LOG.info("Remove last sequence ids and hfile references which are written before {}({})",
1363          info.getStartTimeMs(), DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.systemDefault())
1364            .format(Instant.ofEpochMilli(info.getStartTimeMs())));
1365        replicationPeerManager.getQueueStorage()
1366          .removeLastSequenceIdsAndHFileRefsBefore(info.getStartTimeMs());
1367        // delete the file after removing the stale data, so next time we do not need to do this
1368        // again.
1369        dataFs.delete(replicationSyncUpInfoFile, false);
1370      }
1371    }
1372    status.setStatus("Calling postStartMaster coprocessors");
1373    if (this.cpHost != null) {
1374      // don't let cp initialization errors kill the master
1375      try {
1376        this.cpHost.postStartMaster();
1377      } catch (IOException ioe) {
1378        LOG.error("Coprocessor postStartMaster() hook failed", ioe);
1379      }
1380    }
1381
1382    zombieDetector.interrupt();
1383
1384    /*
1385     * After master has started up, lets do balancer post startup initialization. Since this runs in
1386     * activeMasterManager thread, it should be fine.
1387     */
1388    long start = EnvironmentEdgeManager.currentTime();
1389    this.balancer.postMasterStartupInitialize();
1390    if (LOG.isDebugEnabled()) {
1391      LOG.debug("Balancer post startup initialization complete, took "
1392        + ((EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds");
1393    }
1394
1395    this.rollingUpgradeChore = new RollingUpgradeChore(this);
1396    getChoreService().scheduleChore(rollingUpgradeChore);
1397
1398    this.oldWALsDirSizeChore = new OldWALsDirSizeChore(this);
1399    getChoreService().scheduleChore(this.oldWALsDirSizeChore);
1400
1401    status.markComplete("Progress after master initialized complete");
1402  }
1403
1404  /**
1405   * Used for testing only to set Mock objects.
1406   * @param hbckChore hbckChore
1407   */
1408  public void setHbckChoreForTesting(HbckChore hbckChore) {
1409    this.hbckChore = hbckChore;
1410  }
1411
1412  /**
1413   * Used for testing only to set Mock objects.
1414   * @param catalogJanitorChore catalogJanitorChore
1415   */
1416  public void setCatalogJanitorChoreForTesting(CatalogJanitor catalogJanitorChore) {
1417    this.catalogJanitorChore = catalogJanitorChore;
1418  }
1419
1420  private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor)
1421    throws IOException {
1422    TableDescriptor newMetaDesc = TableDescriptorBuilder.newBuilder(metaDescriptor)
1423      .setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf))
1424      .setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()).build();
1425    long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, 0, 0, false);
1426    int tries = 30;
1427    while (
1428      !(getMasterProcedureExecutor().isFinished(pid)) && getMasterProcedureExecutor().isRunning()
1429        && tries > 0
1430    ) {
1431      try {
1432        Thread.sleep(1000);
1433      } catch (InterruptedException e) {
1434        throw new IOException("Wait interrupted", e);
1435      }
1436      tries--;
1437    }
1438    if (tries <= 0) {
1439      throw new HBaseIOException(
1440        "Failed to add table and rep_barrier CFs to meta in a given time.");
1441    } else {
1442      Procedure<?> result = getMasterProcedureExecutor().getResult(pid);
1443      if (result != null && result.isFailed()) {
1444        throw new IOException("Failed to add table and rep_barrier CFs to meta. "
1445          + MasterProcedureUtil.unwrapRemoteIOException(result));
1446      }
1447    }
1448  }
1449
1450  /**
1451   * Check hbase:meta is up and ready for reading. For use during Master startup only.
1452   * @return True if meta is UP and online and startup can progress. Otherwise, meta is not online
1453   *         and we will hold here until operator intervention.
1454   */
1455  @InterfaceAudience.Private
1456  public boolean waitForMetaOnline() {
1457    return isRegionOnline(RegionInfoBuilder.FIRST_META_REGIONINFO);
1458  }
1459
1460  /**
1461   * @return True if region is online and scannable else false if an error or shutdown (Otherwise we
1462   *         just block in here holding up all forward-progess).
1463   */
1464  private boolean isRegionOnline(RegionInfo ri) {
1465    RetryCounter rc = null;
1466    while (!isStopped()) {
1467      RegionState rs = this.assignmentManager.getRegionStates().getRegionState(ri);
1468      if (rs != null && rs.isOpened()) {
1469        if (this.getServerManager().isServerOnline(rs.getServerName())) {
1470          return true;
1471        }
1472      }
1473      // Region is not OPEN.
1474      Optional<Procedure<MasterProcedureEnv>> optProc = this.procedureExecutor.getProcedures()
1475        .stream().filter(p -> p instanceof ServerCrashProcedure).findAny();
1476      // TODO: Add a page to refguide on how to do repair. Have this log message point to it.
1477      // Page will talk about loss of edits, how to schedule at least the meta WAL recovery, and
1478      // then how to assign including how to break region lock if one held.
1479      LOG.warn(
1480        "{} is NOT online; state={}; ServerCrashProcedures={}. Master startup cannot "
1481          + "progress, in holding-pattern until region onlined.",
1482        ri.getRegionNameAsString(), rs, optProc.isPresent());
1483      // Check once-a-minute.
1484      if (rc == null) {
1485        rc = new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60_000).create();
1486      }
1487      Threads.sleep(rc.getBackoffTimeAndIncrementAttempts());
1488    }
1489    return false;
1490  }
1491
1492  /**
1493   * Check hbase:namespace table is assigned. If not, startup will hang looking for the ns table
1494   * <p/>
1495   * This is for rolling upgrading, later we will migrate the data in ns table to the ns family of
1496   * meta table. And if this is a new cluster, this method will return immediately as there will be
1497   * no namespace table/region.
1498   * @return True if namespace table is up/online.
1499   */
1500  private boolean waitForNamespaceOnline() throws IOException {
1501    TableState nsTableState =
1502      MetaTableAccessor.getTableState(getConnection(), TableName.NAMESPACE_TABLE_NAME);
1503    if (nsTableState == null || nsTableState.isDisabled()) {
1504      // this means we have already migrated the data and disabled or deleted the namespace table,
1505      // or this is a new deploy which does not have a namespace table from the beginning.
1506      return true;
1507    }
1508    List<RegionInfo> ris =
1509      this.assignmentManager.getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME);
1510    if (ris.isEmpty()) {
1511      // maybe this will not happen any more, but anyway, no harm to add a check here...
1512      return true;
1513    }
1514    // Else there are namespace regions up in meta. Ensure they are assigned before we go on.
1515    for (RegionInfo ri : ris) {
1516      if (!isRegionOnline(ri)) {
1517        return false;
1518      }
1519    }
1520    return true;
1521  }
1522
1523  /**
1524   * Adds the {@code MasterQuotasObserver} to the list of configured Master observers to
1525   * automatically remove quotas for a table when that table is deleted.
1526   */
1527  @InterfaceAudience.Private
1528  public void updateConfigurationForQuotasObserver(Configuration conf) {
1529    // We're configured to not delete quotas on table deletion, so we don't need to add the obs.
1530    if (
1531      !conf.getBoolean(MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE,
1532        MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT)
1533    ) {
1534      return;
1535    }
1536    String[] masterCoprocs = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
1537    final int length = null == masterCoprocs ? 0 : masterCoprocs.length;
1538    String[] updatedCoprocs = new String[length + 1];
1539    if (length > 0) {
1540      System.arraycopy(masterCoprocs, 0, updatedCoprocs, 0, masterCoprocs.length);
1541    }
1542    updatedCoprocs[length] = MasterQuotasObserver.class.getName();
1543    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, updatedCoprocs);
1544  }
1545
1546  private void initMobCleaner() {
1547    this.mobFileCleanerChore = new MobFileCleanerChore(this);
1548    getChoreService().scheduleChore(mobFileCleanerChore);
1549    this.mobFileCompactionChore = new MobFileCompactionChore(this);
1550    getChoreService().scheduleChore(mobFileCompactionChore);
1551  }
1552
1553  /**
1554   * <p>
1555   * Create a {@link ServerManager} instance.
1556   * </p>
1557   * <p>
1558   * Will be overridden in tests.
1559   * </p>
1560   */
1561  @InterfaceAudience.Private
1562  protected ServerManager createServerManager(MasterServices master, RegionServerList storage)
1563    throws IOException {
1564    // We put this out here in a method so can do a Mockito.spy and stub it out
1565    // w/ a mocked up ServerManager.
1566    setupClusterConnection();
1567    return new ServerManager(master, storage);
1568  }
1569
1570  private void waitForRegionServers(final MonitoredTask status)
1571    throws IOException, InterruptedException {
1572    this.serverManager.waitForRegionServers(status);
1573  }
1574
1575  // Will be overridden in tests
1576  @InterfaceAudience.Private
1577  protected void initClusterSchemaService() throws IOException, InterruptedException {
1578    this.clusterSchemaService = new ClusterSchemaServiceImpl(this);
1579    this.clusterSchemaService.startAsync();
1580    try {
1581      this.clusterSchemaService
1582        .awaitRunning(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,
1583          DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);
1584    } catch (TimeoutException toe) {
1585      throw new IOException("Timedout starting ClusterSchemaService", toe);
1586    }
1587  }
1588
1589  private void initQuotaManager() throws IOException {
1590    MasterQuotaManager quotaManager = new MasterQuotaManager(this);
1591    quotaManager.start();
1592    this.quotaManager = quotaManager;
1593  }
1594
1595  private SpaceQuotaSnapshotNotifier createQuotaSnapshotNotifier() {
1596    SpaceQuotaSnapshotNotifier notifier =
1597      SpaceQuotaSnapshotNotifierFactory.getInstance().create(getConfiguration());
1598    return notifier;
1599  }
1600
1601  public boolean isCatalogJanitorEnabled() {
1602    return catalogJanitorChore != null ? catalogJanitorChore.getEnabled() : false;
1603  }
1604
1605  boolean isCleanerChoreEnabled() {
1606    boolean hfileCleanerFlag = true, logCleanerFlag = true;
1607
1608    if (getHFileCleaner() != null) {
1609      hfileCleanerFlag = getHFileCleaner().getEnabled();
1610    }
1611
1612    if (logCleaner != null) {
1613      logCleanerFlag = logCleaner.getEnabled();
1614    }
1615
1616    return (hfileCleanerFlag && logCleanerFlag);
1617  }
1618
1619  @Override
1620  public ServerManager getServerManager() {
1621    return this.serverManager;
1622  }
1623
1624  @Override
1625  public MasterFileSystem getMasterFileSystem() {
1626    return this.fileSystemManager;
1627  }
1628
1629  @Override
1630  public MasterWalManager getMasterWalManager() {
1631    return this.walManager;
1632  }
1633
1634  @Override
1635  public SplitWALManager getSplitWALManager() {
1636    return splitWALManager;
1637  }
1638
1639  @Override
1640  public TableStateManager getTableStateManager() {
1641    return tableStateManager;
1642  }
1643
1644  /*
1645   * Start up all services. If any of these threads gets an unhandled exception then they just die
1646   * with a logged message. This should be fine because in general, we do not expect the master to
1647   * get such unhandled exceptions as OOMEs; it should be lightly loaded. See what HRegionServer
1648   * does if need to install an unexpected exception handler.
1649   */
1650  private void startServiceThreads() throws IOException {
1651    // Start the executor service pools
1652    final int masterOpenRegionPoolSize = conf.getInt(HConstants.MASTER_OPEN_REGION_THREADS,
1653      HConstants.MASTER_OPEN_REGION_THREADS_DEFAULT);
1654    executorService.startExecutorService(executorService.new ExecutorConfig()
1655      .setExecutorType(ExecutorType.MASTER_OPEN_REGION).setCorePoolSize(masterOpenRegionPoolSize));
1656    final int masterCloseRegionPoolSize = conf.getInt(HConstants.MASTER_CLOSE_REGION_THREADS,
1657      HConstants.MASTER_CLOSE_REGION_THREADS_DEFAULT);
1658    executorService.startExecutorService(
1659      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_CLOSE_REGION)
1660        .setCorePoolSize(masterCloseRegionPoolSize));
1661    final int masterServerOpThreads = conf.getInt(HConstants.MASTER_SERVER_OPERATIONS_THREADS,
1662      HConstants.MASTER_SERVER_OPERATIONS_THREADS_DEFAULT);
1663    executorService.startExecutorService(
1664      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SERVER_OPERATIONS)
1665        .setCorePoolSize(masterServerOpThreads));
1666    final int masterServerMetaOpsThreads =
1667      conf.getInt(HConstants.MASTER_META_SERVER_OPERATIONS_THREADS,
1668        HConstants.MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT);
1669    executorService.startExecutorService(executorService.new ExecutorConfig()
1670      .setExecutorType(ExecutorType.MASTER_META_SERVER_OPERATIONS)
1671      .setCorePoolSize(masterServerMetaOpsThreads));
1672    final int masterLogReplayThreads = conf.getInt(HConstants.MASTER_LOG_REPLAY_OPS_THREADS,
1673      HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT);
1674    executorService.startExecutorService(executorService.new ExecutorConfig()
1675      .setExecutorType(ExecutorType.M_LOG_REPLAY_OPS).setCorePoolSize(masterLogReplayThreads));
1676    final int masterSnapshotThreads = conf.getInt(SnapshotManager.SNAPSHOT_POOL_THREADS_KEY,
1677      SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT);
1678    executorService.startExecutorService(
1679      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SNAPSHOT_OPERATIONS)
1680        .setCorePoolSize(masterSnapshotThreads).setAllowCoreThreadTimeout(true));
1681    final int masterMergeDispatchThreads = conf.getInt(HConstants.MASTER_MERGE_DISPATCH_THREADS,
1682      HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT);
1683    executorService.startExecutorService(
1684      executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_MERGE_OPERATIONS)
1685        .setCorePoolSize(masterMergeDispatchThreads).setAllowCoreThreadTimeout(true));
1686
1687    // We depend on there being only one instance of this executor running
1688    // at a time. To do concurrency, would need fencing of enable/disable of
1689    // tables.
1690    // Any time changing this maxThreads to > 1, pls see the comment at
1691    // AccessController#postCompletedCreateTableAction
1692    executorService.startExecutorService(executorService.new ExecutorConfig()
1693      .setExecutorType(ExecutorType.MASTER_TABLE_OPERATIONS).setCorePoolSize(1));
1694    startProcedureExecutor();
1695
1696    // Create log cleaner thread pool
1697    logCleanerPool = DirScanPool.getLogCleanerScanPool(conf);
1698    Map<String, Object> params = new HashMap<>();
1699    params.put(MASTER, this);
1700    // Start log cleaner thread
1701    int cleanerInterval =
1702      conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL);
1703    this.logCleaner =
1704      new LogCleaner(cleanerInterval, this, conf, getMasterWalManager().getFileSystem(),
1705        getMasterWalManager().getOldLogDir(), logCleanerPool, params);
1706    getChoreService().scheduleChore(logCleaner);
1707
1708    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
1709
1710    // Create custom archive hfile cleaners
1711    String[] paths = conf.getStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS);
1712    // todo: handle the overlap issues for the custom paths
1713
1714    if (paths != null && paths.length > 0) {
1715      if (conf.getStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS) == null) {
1716        Set<String> cleanerClasses = new HashSet<>();
1717        String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
1718        if (cleaners != null) {
1719          Collections.addAll(cleanerClasses, cleaners);
1720        }
1721        conf.setStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS,
1722          cleanerClasses.toArray(new String[cleanerClasses.size()]));
1723        LOG.info("Archive custom cleaner paths: {}, plugins: {}", Arrays.asList(paths),
1724          cleanerClasses);
1725      }
1726      // share the hfile cleaner pool in custom paths
1727      sharedHFileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf.get(CUSTOM_POOL_SIZE, "6"));
1728      for (int i = 0; i < paths.length; i++) {
1729        Path path = new Path(paths[i].trim());
1730        HFileCleaner cleaner =
1731          new HFileCleaner("ArchiveCustomHFileCleaner-" + path.getName(), cleanerInterval, this,
1732            conf, getMasterFileSystem().getFileSystem(), new Path(archiveDir, path),
1733            HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS, sharedHFileCleanerPool, params, null);
1734        hfileCleaners.add(cleaner);
1735        hfileCleanerPaths.add(path);
1736      }
1737    }
1738
1739    // Create the whole archive dir cleaner thread pool
1740    exclusiveHFileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf);
1741    hfileCleaners.add(0,
1742      new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem().getFileSystem(),
1743        archiveDir, exclusiveHFileCleanerPool, params, hfileCleanerPaths));
1744    hfileCleanerPaths.add(0, archiveDir);
1745    // Schedule all the hfile cleaners
1746    for (HFileCleaner hFileCleaner : hfileCleaners) {
1747      getChoreService().scheduleChore(hFileCleaner);
1748    }
1749
1750    // Regions Reopen based on very high storeFileRefCount is considered enabled
1751    // only if hbase.regions.recovery.store.file.ref.count has value > 0
1752    final int maxStoreFileRefCount = conf.getInt(HConstants.STORE_FILE_REF_COUNT_THRESHOLD,
1753      HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD);
1754    if (maxStoreFileRefCount > 0) {
1755      this.regionsRecoveryChore = new RegionsRecoveryChore(this, conf, this);
1756      getChoreService().scheduleChore(this.regionsRecoveryChore);
1757    } else {
1758      LOG.info(
1759        "Reopening regions with very high storeFileRefCount is disabled. "
1760          + "Provide threshold value > 0 for {} to enable it.",
1761        HConstants.STORE_FILE_REF_COUNT_THRESHOLD);
1762    }
1763
1764    this.regionsRecoveryConfigManager = new RegionsRecoveryConfigManager(this);
1765
1766    replicationBarrierCleaner =
1767      new ReplicationBarrierCleaner(conf, this, getConnection(), replicationPeerManager);
1768    getChoreService().scheduleChore(replicationBarrierCleaner);
1769
1770    final boolean isSnapshotChoreEnabled = this.snapshotCleanupStateStore.get();
1771    this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager());
1772    if (isSnapshotChoreEnabled) {
1773      getChoreService().scheduleChore(this.snapshotCleanerChore);
1774    } else {
1775      if (LOG.isTraceEnabled()) {
1776        LOG.trace("Snapshot Cleaner Chore is disabled. Not starting up the chore..");
1777      }
1778    }
1779    serviceStarted = true;
1780    if (LOG.isTraceEnabled()) {
1781      LOG.trace("Started service threads");
1782    }
1783  }
1784
1785  protected void stopServiceThreads() {
1786    if (masterJettyServer != null) {
1787      LOG.info("Stopping master jetty server");
1788      try {
1789        masterJettyServer.stop();
1790      } catch (Exception e) {
1791        LOG.error("Failed to stop master jetty server", e);
1792      }
1793    }
1794    stopChoreService();
1795    stopExecutorService();
1796    if (exclusiveHFileCleanerPool != null) {
1797      exclusiveHFileCleanerPool.shutdownNow();
1798      exclusiveHFileCleanerPool = null;
1799    }
1800    if (logCleanerPool != null) {
1801      logCleanerPool.shutdownNow();
1802      logCleanerPool = null;
1803    }
1804    if (sharedHFileCleanerPool != null) {
1805      sharedHFileCleanerPool.shutdownNow();
1806      sharedHFileCleanerPool = null;
1807    }
1808    if (maintenanceRegionServer != null) {
1809      maintenanceRegionServer.getRegionServer().stop(HBASE_MASTER_CLEANER_INTERVAL);
1810    }
1811
1812    LOG.debug("Stopping service threads");
1813    // stop procedure executor prior to other services such as server manager and assignment
1814    // manager, as these services are important for some running procedures. See HBASE-24117 for
1815    // example.
1816    stopProcedureExecutor();
1817
1818    if (regionNormalizerManager != null) {
1819      regionNormalizerManager.stop();
1820    }
1821    if (this.quotaManager != null) {
1822      this.quotaManager.stop();
1823    }
1824
1825    if (this.activeMasterManager != null) {
1826      this.activeMasterManager.stop();
1827    }
1828    if (this.serverManager != null) {
1829      this.serverManager.stop();
1830    }
1831    if (this.assignmentManager != null) {
1832      this.assignmentManager.stop();
1833    }
1834
1835    if (masterRegion != null) {
1836      masterRegion.close(isAborted());
1837    }
1838    if (this.walManager != null) {
1839      this.walManager.stop();
1840    }
1841    if (this.fileSystemManager != null) {
1842      this.fileSystemManager.stop();
1843    }
1844    if (this.mpmHost != null) {
1845      this.mpmHost.stop("server shutting down.");
1846    }
1847    if (this.regionServerTracker != null) {
1848      this.regionServerTracker.stop();
1849    }
1850  }
1851
1852  private void createProcedureExecutor() throws IOException {
1853    final String procedureDispatcherClassName =
1854      conf.get(HBASE_MASTER_RSPROC_DISPATCHER_CLASS, DEFAULT_HBASE_MASTER_RSPROC_DISPATCHER_CLASS);
1855    final RSProcedureDispatcher procedureDispatcher = ReflectionUtils.instantiateWithCustomCtor(
1856      procedureDispatcherClassName, new Class[] { MasterServices.class }, new Object[] { this });
1857    final MasterProcedureEnv procEnv = new MasterProcedureEnv(this, procedureDispatcher);
1858    procedureStore = new RegionProcedureStore(this, masterRegion,
1859      new MasterProcedureEnv.FsUtilsLeaseRecovery(this));
1860    procedureStore.registerListener(new ProcedureStoreListener() {
1861
1862      @Override
1863      public void abortProcess() {
1864        abort("The Procedure Store lost the lease", null);
1865      }
1866    });
1867    MasterProcedureScheduler procedureScheduler = procEnv.getProcedureScheduler();
1868    procedureExecutor = new ProcedureExecutor<>(conf, procEnv, procedureStore, procedureScheduler);
1869    configurationManager.registerObserver(procEnv);
1870
1871    int cpus = Runtime.getRuntime().availableProcessors();
1872    final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, Math.max(
1873      (cpus > 0 ? cpus / 4 : 0), MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
1874    final boolean abortOnCorruption =
1875      conf.getBoolean(MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
1876        MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
1877    procedureStore.start(numThreads);
1878    // Just initialize it but do not start the workers, we will start the workers later by calling
1879    // startProcedureExecutor. See the javadoc for finishActiveMasterInitialization for more
1880    // details.
1881    procedureExecutor.init(numThreads, abortOnCorruption);
1882    if (!procEnv.getRemoteDispatcher().start()) {
1883      throw new HBaseIOException("Failed start of remote dispatcher");
1884    }
1885  }
1886
1887  // will be override in UT
1888  protected void startProcedureExecutor() throws IOException {
1889    procedureExecutor.startWorkers();
1890  }
1891
1892  /**
1893   * Turn on/off Snapshot Cleanup Chore
1894   * @param on indicates whether Snapshot Cleanup Chore is to be run
1895   */
1896  void switchSnapshotCleanup(final boolean on, final boolean synchronous) throws IOException {
1897    if (synchronous) {
1898      synchronized (this.snapshotCleanerChore) {
1899        switchSnapshotCleanup(on);
1900      }
1901    } else {
1902      switchSnapshotCleanup(on);
1903    }
1904  }
1905
1906  private void switchSnapshotCleanup(final boolean on) throws IOException {
1907    snapshotCleanupStateStore.set(on);
1908    if (on) {
1909      getChoreService().scheduleChore(this.snapshotCleanerChore);
1910    } else {
1911      this.snapshotCleanerChore.cancel();
1912    }
1913  }
1914
1915  private void stopProcedureExecutor() {
1916    if (procedureExecutor != null) {
1917      configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
1918      procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
1919      procedureExecutor.stop();
1920      procedureExecutor.join();
1921      procedureExecutor = null;
1922    }
1923
1924    if (procedureStore != null) {
1925      procedureStore.stop(isAborted());
1926      procedureStore = null;
1927    }
1928  }
1929
1930  protected void stopChores() {
1931    shutdownChore(mobFileCleanerChore);
1932    shutdownChore(mobFileCompactionChore);
1933    shutdownChore(balancerChore);
1934    if (regionNormalizerManager != null) {
1935      shutdownChore(regionNormalizerManager.getRegionNormalizerChore());
1936    }
1937    shutdownChore(clusterStatusChore);
1938    shutdownChore(catalogJanitorChore);
1939    shutdownChore(clusterStatusPublisherChore);
1940    shutdownChore(snapshotQuotaChore);
1941    shutdownChore(logCleaner);
1942    if (hfileCleaners != null) {
1943      for (ScheduledChore chore : hfileCleaners) {
1944        chore.shutdown();
1945      }
1946      hfileCleaners = null;
1947    }
1948    shutdownChore(replicationBarrierCleaner);
1949    shutdownChore(snapshotCleanerChore);
1950    shutdownChore(hbckChore);
1951    shutdownChore(regionsRecoveryChore);
1952    shutdownChore(rollingUpgradeChore);
1953    shutdownChore(oldWALsDirSizeChore);
1954  }
1955
1956  /** Returns Get remote side's InetAddress */
1957  InetAddress getRemoteInetAddress(final int port, final long serverStartCode)
1958    throws UnknownHostException {
1959    // Do it out here in its own little method so can fake an address when
1960    // mocking up in tests.
1961    InetAddress ia = RpcServer.getRemoteIp();
1962
1963    // The call could be from the local regionserver,
1964    // in which case, there is no remote address.
1965    if (ia == null && serverStartCode == startcode) {
1966      InetSocketAddress isa = rpcServices.getSocketAddress();
1967      if (isa != null && isa.getPort() == port) {
1968        ia = isa.getAddress();
1969      }
1970    }
1971    return ia;
1972  }
1973
1974  /** Returns Maximum time we should run balancer for */
1975  private int getMaxBalancingTime() {
1976    // if max balancing time isn't set, defaulting it to period time
1977    int maxBalancingTime =
1978      getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, getConfiguration()
1979        .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD));
1980    return maxBalancingTime;
1981  }
1982
1983  /** Returns Maximum number of regions in transition */
1984  private int getMaxRegionsInTransition() {
1985    int numRegions = this.assignmentManager.getRegionStates().getRegionAssignments().size();
1986    return Math.max((int) Math.floor(numRegions * this.maxRitPercent), 1);
1987  }
1988
1989  /**
1990   * It first sleep to the next balance plan start time. Meanwhile, throttling by the max number
1991   * regions in transition to protect availability.
1992   * @param nextBalanceStartTime   The next balance plan start time
1993   * @param maxRegionsInTransition max number of regions in transition
1994   * @param cutoffTime             when to exit balancer
1995   */
1996  private void balanceThrottling(long nextBalanceStartTime, int maxRegionsInTransition,
1997    long cutoffTime) {
1998    boolean interrupted = false;
1999
2000    // Sleep to next balance plan start time
2001    // But if there are zero regions in transition, it can skip sleep to speed up.
2002    while (
2003      !interrupted && EnvironmentEdgeManager.currentTime() < nextBalanceStartTime
2004        && this.assignmentManager.getRegionStates().hasRegionsInTransition()
2005    ) {
2006      try {
2007        Thread.sleep(100);
2008      } catch (InterruptedException ie) {
2009        interrupted = true;
2010      }
2011    }
2012
2013    // Throttling by max number regions in transition
2014    while (
2015      !interrupted && maxRegionsInTransition > 0
2016        && this.assignmentManager.getRegionStates().getRegionsInTransitionCount()
2017            >= maxRegionsInTransition
2018        && EnvironmentEdgeManager.currentTime() <= cutoffTime
2019    ) {
2020      try {
2021        // sleep if the number of regions in transition exceeds the limit
2022        Thread.sleep(100);
2023      } catch (InterruptedException ie) {
2024        interrupted = true;
2025      }
2026    }
2027
2028    if (interrupted) Thread.currentThread().interrupt();
2029  }
2030
2031  public BalanceResponse balance() throws IOException {
2032    return balance(BalanceRequest.defaultInstance());
2033  }
2034
2035  /**
2036   * Trigger a normal balance, see {@link HMaster#balance()} . If the balance is not executed this
2037   * time, the metrics related to the balance will be updated. When balance is running, related
2038   * metrics will be updated at the same time. But if some checking logic failed and cause the
2039   * balancer exit early, we lost the chance to update balancer metrics. This will lead to user
2040   * missing the latest balancer info.
2041   */
2042  public BalanceResponse balanceOrUpdateMetrics() throws IOException {
2043    synchronized (this.balancer) {
2044      BalanceResponse response = balance();
2045      if (!response.isBalancerRan()) {
2046        Map<TableName, Map<ServerName, List<RegionInfo>>> assignments =
2047          this.assignmentManager.getRegionStates().getAssignmentsForBalancer(this.tableStateManager,
2048            this.serverManager.getOnlineServersList());
2049        for (Map<ServerName, List<RegionInfo>> serverMap : assignments.values()) {
2050          serverMap.keySet().removeAll(this.serverManager.getDrainingServersList());
2051        }
2052        this.balancer.updateBalancerLoadInfo(assignments);
2053      }
2054      return response;
2055    }
2056  }
2057
2058  /**
2059   * Checks master state before initiating action over region topology.
2060   * @param action the name of the action under consideration, for logging.
2061   * @return {@code true} when the caller should exit early, {@code false} otherwise.
2062   */
2063  @Override
2064  public boolean skipRegionManagementAction(final String action) {
2065    // Note: this method could be `default` on MasterServices if but for logging.
2066    if (!isInitialized()) {
2067      LOG.debug("Master has not been initialized, don't run {}.", action);
2068      return true;
2069    }
2070    if (this.getServerManager().isClusterShutdown()) {
2071      LOG.info("Cluster is shutting down, don't run {}.", action);
2072      return true;
2073    }
2074    if (isInMaintenanceMode()) {
2075      LOG.info("Master is in maintenance mode, don't run {}.", action);
2076      return true;
2077    }
2078    return false;
2079  }
2080
2081  public BalanceResponse balance(BalanceRequest request) throws IOException {
2082    checkInitialized();
2083
2084    BalanceResponse.Builder responseBuilder = BalanceResponse.newBuilder();
2085
2086    if (loadBalancerStateStore == null || !(loadBalancerStateStore.get() || request.isDryRun())) {
2087      return responseBuilder.build();
2088    }
2089
2090    if (skipRegionManagementAction("balancer")) {
2091      return responseBuilder.build();
2092    }
2093
2094    synchronized (this.balancer) {
2095      // Only allow one balance run at at time.
2096      if (this.assignmentManager.hasRegionsInTransition()) {
2097        List<RegionStateNode> regionsInTransition = assignmentManager.getRegionsInTransition();
2098        // if hbase:meta region is in transition, result of assignment cannot be recorded
2099        // ignore the force flag in that case
2100        boolean metaInTransition = assignmentManager.isMetaRegionInTransition();
2101        List<RegionStateNode> toPrint = regionsInTransition;
2102        int max = 5;
2103        boolean truncated = false;
2104        if (regionsInTransition.size() > max) {
2105          toPrint = regionsInTransition.subList(0, max);
2106          truncated = true;
2107        }
2108
2109        if (!request.isIgnoreRegionsInTransition() || metaInTransition) {
2110          LOG.info("Not running balancer (ignoreRIT=false" + ", metaRIT=" + metaInTransition
2111            + ") because " + regionsInTransition.size() + " region(s) in transition: " + toPrint
2112            + (truncated ? "(truncated list)" : ""));
2113          return responseBuilder.build();
2114        }
2115      }
2116      if (this.serverManager.areDeadServersInProgress()) {
2117        LOG.info("Not running balancer because processing dead regionserver(s): "
2118          + this.serverManager.getDeadServers());
2119        return responseBuilder.build();
2120      }
2121
2122      if (this.cpHost != null) {
2123        try {
2124          if (this.cpHost.preBalance(request)) {
2125            LOG.debug("Coprocessor bypassing balancer request");
2126            return responseBuilder.build();
2127          }
2128        } catch (IOException ioe) {
2129          LOG.error("Error invoking master coprocessor preBalance()", ioe);
2130          return responseBuilder.build();
2131        }
2132      }
2133
2134      Map<TableName, Map<ServerName, List<RegionInfo>>> assignments =
2135        this.assignmentManager.getRegionStates().getAssignmentsForBalancer(tableStateManager,
2136          this.serverManager.getOnlineServersList());
2137      for (Map<ServerName, List<RegionInfo>> serverMap : assignments.values()) {
2138        serverMap.keySet().removeAll(this.serverManager.getDrainingServersList());
2139      }
2140
2141      // Give the balancer the current cluster state.
2142      this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
2143
2144      List<RegionPlan> plans = this.balancer.balanceCluster(assignments);
2145
2146      responseBuilder.setBalancerRan(true).setMovesCalculated(plans == null ? 0 : plans.size());
2147
2148      if (skipRegionManagementAction("balancer")) {
2149        // make one last check that the cluster isn't shutting down before proceeding.
2150        return responseBuilder.build();
2151      }
2152
2153      // For dry run we don't actually want to execute the moves, but we do want
2154      // to execute the coprocessor below
2155      List<RegionPlan> sucRPs =
2156        request.isDryRun() ? Collections.emptyList() : executeRegionPlansWithThrottling(plans);
2157
2158      if (this.cpHost != null) {
2159        try {
2160          this.cpHost.postBalance(request, sucRPs);
2161        } catch (IOException ioe) {
2162          // balancing already succeeded so don't change the result
2163          LOG.error("Error invoking master coprocessor postBalance()", ioe);
2164        }
2165      }
2166
2167      responseBuilder.setMovesExecuted(sucRPs.size());
2168    }
2169
2170    // If LoadBalancer did not generate any plans, it means the cluster is already balanced.
2171    // Return true indicating a success.
2172    return responseBuilder.build();
2173  }
2174
2175  /**
2176   * Execute region plans with throttling
2177   * @param plans to execute
2178   * @return succeeded plans
2179   */
2180  public List<RegionPlan> executeRegionPlansWithThrottling(List<RegionPlan> plans) {
2181    List<RegionPlan> successRegionPlans = new ArrayList<>();
2182    int maxRegionsInTransition = getMaxRegionsInTransition();
2183    long balanceStartTime = EnvironmentEdgeManager.currentTime();
2184    long cutoffTime = balanceStartTime + this.maxBalancingTime;
2185    int rpCount = 0; // number of RegionPlans balanced so far
2186    if (plans != null && !plans.isEmpty()) {
2187      int balanceInterval = this.maxBalancingTime / plans.size();
2188      LOG.info(
2189        "Balancer plans size is " + plans.size() + ", the balance interval is " + balanceInterval
2190          + " ms, and the max number regions in transition is " + maxRegionsInTransition);
2191
2192      for (RegionPlan plan : plans) {
2193        LOG.info("balance " + plan);
2194        // TODO: bulk assign
2195        try {
2196          this.assignmentManager.balance(plan);
2197          this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
2198          this.balancer.throttle(plan);
2199        } catch (HBaseIOException hioe) {
2200          // should ignore failed plans here, avoiding the whole balance plans be aborted
2201          // later calls of balance() can fetch up the failed and skipped plans
2202          LOG.warn("Failed balance plan {}, skipping...", plan, hioe);
2203        } catch (Exception e) {
2204          LOG.warn("Failed throttling assigning a new plan.", e);
2205        }
2206        // rpCount records balance plans processed, does not care if a plan succeeds
2207        rpCount++;
2208        successRegionPlans.add(plan);
2209
2210        if (this.maxBalancingTime > 0) {
2211          balanceThrottling(balanceStartTime + rpCount * balanceInterval, maxRegionsInTransition,
2212            cutoffTime);
2213        }
2214
2215        // if performing next balance exceeds cutoff time, exit the loop
2216        if (
2217          this.maxBalancingTime > 0 && rpCount < plans.size()
2218            && EnvironmentEdgeManager.currentTime() > cutoffTime
2219        ) {
2220          // TODO: After balance, there should not be a cutoff time (keeping it as
2221          // a security net for now)
2222          LOG.debug(
2223            "No more balancing till next balance run; maxBalanceTime=" + this.maxBalancingTime);
2224          break;
2225        }
2226      }
2227    }
2228    LOG.debug("Balancer is going into sleep until next period in {}ms", getConfiguration()
2229      .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD));
2230    return successRegionPlans;
2231  }
2232
2233  @Override
2234  public RegionNormalizerManager getRegionNormalizerManager() {
2235    return regionNormalizerManager;
2236  }
2237
2238  @Override
2239  public boolean normalizeRegions(final NormalizeTableFilterParams ntfp,
2240    final boolean isHighPriority) throws IOException {
2241    if (regionNormalizerManager == null || !regionNormalizerManager.isNormalizerOn()) {
2242      LOG.debug("Region normalization is disabled, don't run region normalizer.");
2243      return false;
2244    }
2245    if (skipRegionManagementAction("region normalizer")) {
2246      return false;
2247    }
2248    if (assignmentManager.hasRegionsInTransition()) {
2249      return false;
2250    }
2251
2252    final Set<TableName> matchingTables = getTableDescriptors(new LinkedList<>(),
2253      ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false).stream()
2254      .map(TableDescriptor::getTableName).collect(Collectors.toSet());
2255    final Set<TableName> allEnabledTables =
2256      tableStateManager.getTablesInStates(TableState.State.ENABLED);
2257    final List<TableName> targetTables =
2258      new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables));
2259    Collections.shuffle(targetTables);
2260    return regionNormalizerManager.normalizeRegions(targetTables, isHighPriority);
2261  }
2262
2263  /** Returns Client info for use as prefix on an audit log string; who did an action */
2264  @Override
2265  public String getClientIdAuditPrefix() {
2266    return "Client=" + RpcServer.getRequestUserName().orElse(null) + "/"
2267      + RpcServer.getRemoteAddress().orElse(null);
2268  }
2269
2270  /**
2271   * Switch for the background CatalogJanitor thread. Used for testing. The thread will continue to
2272   * run. It will just be a noop if disabled.
2273   * @param b If false, the catalog janitor won't do anything.
2274   */
2275  public void setCatalogJanitorEnabled(final boolean b) {
2276    this.catalogJanitorChore.setEnabled(b);
2277  }
2278
2279  @Override
2280  public long mergeRegions(final RegionInfo[] regionsToMerge, final boolean forcible, final long ng,
2281    final long nonce) throws IOException {
2282    checkInitialized();
2283
2284    final String regionNamesToLog = RegionInfo.getShortNameToLog(regionsToMerge);
2285
2286    if (!isSplitOrMergeEnabled(MasterSwitchType.MERGE)) {
2287      LOG.warn("Merge switch is off! skip merge of " + regionNamesToLog);
2288      throw new DoNotRetryIOException(
2289        "Merge of " + regionNamesToLog + " failed because merge switch is off");
2290    }
2291
2292    if (!getTableDescriptors().get(regionsToMerge[0].getTable()).isMergeEnabled()) {
2293      LOG.warn("Merge is disabled for the table! Skipping merge of {}", regionNamesToLog);
2294      throw new DoNotRetryIOException(
2295        "Merge of " + regionNamesToLog + " failed as region merge is disabled for the table");
2296    }
2297
2298    return MasterProcedureUtil.submitProcedure(new NonceProcedureRunnable(this, ng, nonce) {
2299      @Override
2300      protected void run() throws IOException {
2301        getMaster().getMasterCoprocessorHost().preMergeRegions(regionsToMerge);
2302        String aid = getClientIdAuditPrefix();
2303        LOG.info("{} merge regions {}", aid, regionNamesToLog);
2304        submitProcedure(new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(),
2305          regionsToMerge, forcible));
2306        getMaster().getMasterCoprocessorHost().postMergeRegions(regionsToMerge);
2307      }
2308
2309      @Override
2310      protected String getDescription() {
2311        return "MergeTableProcedure";
2312      }
2313    });
2314  }
2315
2316  @Override
2317  public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, final long nonceGroup,
2318    final long nonce) throws IOException {
2319    checkInitialized();
2320
2321    if (!isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) {
2322      LOG.warn("Split switch is off! skip split of " + regionInfo);
2323      throw new DoNotRetryIOException(
2324        "Split region " + regionInfo.getRegionNameAsString() + " failed due to split switch off");
2325    }
2326
2327    if (!getTableDescriptors().get(regionInfo.getTable()).isSplitEnabled()) {
2328      LOG.warn("Split is disabled for the table! Skipping split of {}", regionInfo);
2329      throw new DoNotRetryIOException("Split region " + regionInfo.getRegionNameAsString()
2330        + " failed as region split is disabled for the table");
2331    }
2332
2333    return MasterProcedureUtil
2334      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2335        @Override
2336        protected void run() throws IOException {
2337          getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow);
2338          LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString());
2339
2340          // Execute the operation asynchronously
2341          submitProcedure(getAssignmentManager().createSplitProcedure(regionInfo, splitRow));
2342        }
2343
2344        @Override
2345        protected String getDescription() {
2346          return "SplitTableProcedure";
2347        }
2348      });
2349  }
2350
2351  private void warmUpRegion(ServerName server, RegionInfo region) {
2352    FutureUtils.addListener(asyncClusterConnection.getRegionServerAdmin(server)
2353      .warmupRegion(RequestConverter.buildWarmupRegionRequest(region)), (r, e) -> {
2354        if (e != null) {
2355          LOG.warn("Failed to warm up region {} on server {}", region, server, e);
2356        }
2357      });
2358  }
2359
2360  // Public so can be accessed by tests. Blocks until move is done.
2361  // Replace with an async implementation from which you can get
2362  // a success/failure result.
2363  @InterfaceAudience.Private
2364  public void move(final byte[] encodedRegionName, byte[] destServerName) throws IOException {
2365    RegionState regionState =
2366      assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName));
2367
2368    RegionInfo hri;
2369    if (regionState != null) {
2370      hri = regionState.getRegion();
2371    } else {
2372      throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
2373    }
2374
2375    ServerName dest;
2376    List<ServerName> exclude = hri.getTable().isSystemTable()
2377      ? assignmentManager.getExcludedServersForSystemTable()
2378      : new ArrayList<>(1);
2379    if (
2380      destServerName != null && exclude.contains(ServerName.valueOf(Bytes.toString(destServerName)))
2381    ) {
2382      LOG.info(Bytes.toString(encodedRegionName) + " can not move to "
2383        + Bytes.toString(destServerName) + " because the server is in exclude list");
2384      destServerName = null;
2385    }
2386    if (destServerName == null || destServerName.length == 0) {
2387      LOG.info("Passed destination servername is null/empty so " + "choosing a server at random");
2388      exclude.add(regionState.getServerName());
2389      final List<ServerName> destServers = this.serverManager.createDestinationServersList(exclude);
2390      dest = balancer.randomAssignment(hri, destServers);
2391      if (dest == null) {
2392        LOG.debug("Unable to determine a plan to assign " + hri);
2393        return;
2394      }
2395    } else {
2396      ServerName candidate = ServerName.valueOf(Bytes.toString(destServerName));
2397      dest = balancer.randomAssignment(hri, Lists.newArrayList(candidate));
2398      if (dest == null) {
2399        LOG.debug("Unable to determine a plan to assign " + hri);
2400        return;
2401      }
2402      // TODO: deal with table on master for rs group.
2403      if (dest.equals(serverName)) {
2404        // To avoid unnecessary region moving later by balancer. Don't put user
2405        // regions on master.
2406        LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
2407          + " to avoid unnecessary region moving later by load balancer,"
2408          + " because it should not be on master");
2409        return;
2410      }
2411    }
2412
2413    if (dest.equals(regionState.getServerName())) {
2414      LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
2415        + " because region already assigned to the same server " + dest + ".");
2416      return;
2417    }
2418
2419    // Now we can do the move
2420    RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);
2421    assert rp.getDestination() != null : rp.toString() + " " + dest;
2422
2423    try {
2424      checkInitialized();
2425      if (this.cpHost != null) {
2426        this.cpHost.preMove(hri, rp.getSource(), rp.getDestination());
2427      }
2428
2429      TransitRegionStateProcedure proc =
2430        this.assignmentManager.createMoveRegionProcedure(rp.getRegionInfo(), rp.getDestination());
2431      if (conf.getBoolean(WARMUP_BEFORE_MOVE, DEFAULT_WARMUP_BEFORE_MOVE)) {
2432        // Warmup the region on the destination before initiating the move.
2433        // A region server could reject the close request because it either does not
2434        // have the specified region or the region is being split.
2435        LOG.info(getClientIdAuditPrefix() + " move " + rp + ", warming up region on "
2436          + rp.getDestination());
2437        warmUpRegion(rp.getDestination(), hri);
2438      }
2439      LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
2440      Future<byte[]> future = ProcedureSyncWait.submitProcedure(this.procedureExecutor, proc);
2441      try {
2442        // Is this going to work? Will we throw exception on error?
2443        // TODO: CompletableFuture rather than this stunted Future.
2444        future.get();
2445      } catch (InterruptedException | ExecutionException e) {
2446        throw new HBaseIOException(e);
2447      }
2448      if (this.cpHost != null) {
2449        this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
2450      }
2451    } catch (IOException ioe) {
2452      if (ioe instanceof HBaseIOException) {
2453        throw (HBaseIOException) ioe;
2454      }
2455      throw new HBaseIOException(ioe);
2456    }
2457  }
2458
2459  @Override
2460  public long createTable(final TableDescriptor tableDescriptor, final byte[][] splitKeys,
2461    final long nonceGroup, final long nonce) throws IOException {
2462    checkInitialized();
2463    TableDescriptor desc = getMasterCoprocessorHost().preCreateTableRegionsInfos(tableDescriptor);
2464    if (desc == null) {
2465      throw new IOException("Creation for " + tableDescriptor + " is canceled by CP");
2466    }
2467    String namespace = desc.getTableName().getNamespaceAsString();
2468    this.clusterSchemaService.getNamespace(namespace);
2469
2470    RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(desc, splitKeys);
2471    TableDescriptorChecker.sanityCheck(conf, desc);
2472
2473    return MasterProcedureUtil
2474      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2475        @Override
2476        protected void run() throws IOException {
2477          getMaster().getMasterCoprocessorHost().preCreateTable(desc, newRegions);
2478
2479          LOG.info(getClientIdAuditPrefix() + " create " + desc);
2480
2481          // TODO: We can handle/merge duplicate requests, and differentiate the case of
2482          // TableExistsException by saying if the schema is the same or not.
2483          //
2484          // We need to wait for the procedure to potentially fail due to "prepare" sanity
2485          // checks. This will block only the beginning of the procedure. See HBASE-19953.
2486          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
2487          submitProcedure(
2488            new CreateTableProcedure(procedureExecutor.getEnvironment(), desc, newRegions, latch));
2489          latch.await();
2490
2491          getMaster().getMasterCoprocessorHost().postCreateTable(desc, newRegions);
2492        }
2493
2494        @Override
2495        protected String getDescription() {
2496          return "CreateTableProcedure";
2497        }
2498      });
2499  }
2500
2501  @Override
2502  public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
2503    if (isStopped()) {
2504      throw new MasterNotRunningException();
2505    }
2506
2507    TableName tableName = tableDescriptor.getTableName();
2508    if (!(tableName.isSystemTable())) {
2509      throw new IllegalArgumentException(
2510        "Only system table creation can use this createSystemTable API");
2511    }
2512
2513    RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(tableDescriptor, null);
2514
2515    LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);
2516
2517    // This special create table is called locally to master. Therefore, no RPC means no need
2518    // to use nonce to detect duplicated RPC call.
2519    long procId = this.procedureExecutor.submitProcedure(
2520      new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions));
2521
2522    return procId;
2523  }
2524
2525  private void startActiveMasterManager(int infoPort) throws KeeperException {
2526    String backupZNode = ZNodePaths.joinZNode(zooKeeper.getZNodePaths().backupMasterAddressesZNode,
2527      serverName.toString());
2528    /*
2529     * Add a ZNode for ourselves in the backup master directory since we may not become the active
2530     * master. If so, we want the actual active master to know we are backup masters, so that it
2531     * won't assign regions to us if so configured. If we become the active master later,
2532     * ActiveMasterManager will delete this node explicitly. If we crash before then, ZooKeeper will
2533     * delete this node for us since it is ephemeral.
2534     */
2535    LOG.info("Adding backup master ZNode " + backupZNode);
2536    if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode, serverName, infoPort)) {
2537      LOG.warn("Failed create of " + backupZNode + " by " + serverName);
2538    }
2539    this.activeMasterManager.setInfoPort(infoPort);
2540    int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
2541    // If we're a backup master, stall until a primary to write this address
2542    if (conf.getBoolean(HConstants.MASTER_TYPE_BACKUP, HConstants.DEFAULT_MASTER_TYPE_BACKUP)) {
2543      LOG.debug("HMaster started in backup mode. Stalling until master znode is written.");
2544      // This will only be a minute or so while the cluster starts up,
2545      // so don't worry about setting watches on the parent znode
2546      while (!activeMasterManager.hasActiveMaster()) {
2547        LOG.debug("Waiting for master address and cluster state znode to be written.");
2548        Threads.sleep(timeout);
2549      }
2550    }
2551
2552    // Here for the master startup process, we use TaskGroup to monitor the whole progress.
2553    // The UI is similar to how Hadoop designed the startup page for the NameNode.
2554    // See HBASE-21521 for more details.
2555    // We do not cleanup the startupTaskGroup, let the startup progress information
2556    // be permanent in the MEM.
2557    startupTaskGroup = TaskMonitor.createTaskGroup(true, "Master startup");
2558    try {
2559      if (activeMasterManager.blockUntilBecomingActiveMaster(timeout, startupTaskGroup)) {
2560        finishActiveMasterInitialization();
2561      }
2562    } catch (Throwable t) {
2563      startupTaskGroup.abort("Failed to become active master due to:" + t.getMessage());
2564      LOG.error(HBaseMarkers.FATAL, "Failed to become active master", t);
2565      // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility
2566      if (
2567        t instanceof NoClassDefFoundError
2568          && t.getMessage().contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction")
2569      ) {
2570        // improved error message for this special case
2571        abort("HBase is having a problem with its Hadoop jars.  You may need to recompile "
2572          + "HBase against Hadoop version " + org.apache.hadoop.util.VersionInfo.getVersion()
2573          + " or change your hadoop jars to start properly", t);
2574      } else {
2575        abort("Unhandled exception. Starting shutdown.", t);
2576      }
2577    }
2578  }
2579
2580  private static boolean isCatalogTable(final TableName tableName) {
2581    return tableName.equals(TableName.META_TABLE_NAME);
2582  }
2583
2584  @Override
2585  public long deleteTable(final TableName tableName, final long nonceGroup, final long nonce)
2586    throws IOException {
2587    checkInitialized();
2588
2589    return MasterProcedureUtil
2590      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2591        @Override
2592        protected void run() throws IOException {
2593          getMaster().getMasterCoprocessorHost().preDeleteTable(tableName);
2594
2595          LOG.info(getClientIdAuditPrefix() + " delete " + tableName);
2596
2597          // TODO: We can handle/merge duplicate request
2598          //
2599          // We need to wait for the procedure to potentially fail due to "prepare" sanity
2600          // checks. This will block only the beginning of the procedure. See HBASE-19953.
2601          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
2602          submitProcedure(
2603            new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, latch));
2604          latch.await();
2605
2606          getMaster().getMasterCoprocessorHost().postDeleteTable(tableName);
2607        }
2608
2609        @Override
2610        protected String getDescription() {
2611          return "DeleteTableProcedure";
2612        }
2613      });
2614  }
2615
2616  @Override
2617  public long truncateTable(final TableName tableName, final boolean preserveSplits,
2618    final long nonceGroup, final long nonce) throws IOException {
2619    checkInitialized();
2620
2621    return MasterProcedureUtil
2622      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2623        @Override
2624        protected void run() throws IOException {
2625          getMaster().getMasterCoprocessorHost().preTruncateTable(tableName);
2626
2627          LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);
2628          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0);
2629          submitProcedure(new TruncateTableProcedure(procedureExecutor.getEnvironment(), tableName,
2630            preserveSplits, latch));
2631          latch.await();
2632
2633          getMaster().getMasterCoprocessorHost().postTruncateTable(tableName);
2634        }
2635
2636        @Override
2637        protected String getDescription() {
2638          return "TruncateTableProcedure";
2639        }
2640      });
2641  }
2642
2643  @Override
2644  public long truncateRegion(final RegionInfo regionInfo, final long nonceGroup, final long nonce)
2645    throws IOException {
2646    checkInitialized();
2647
2648    return MasterProcedureUtil
2649      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2650        @Override
2651        protected void run() throws IOException {
2652          getMaster().getMasterCoprocessorHost().preTruncateRegion(regionInfo);
2653
2654          LOG.info(
2655            getClientIdAuditPrefix() + " truncate region " + regionInfo.getRegionNameAsString());
2656
2657          // Execute the operation asynchronously
2658          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0);
2659          submitProcedure(
2660            new TruncateRegionProcedure(procedureExecutor.getEnvironment(), regionInfo, latch));
2661          latch.await();
2662
2663          getMaster().getMasterCoprocessorHost().postTruncateRegion(regionInfo);
2664        }
2665
2666        @Override
2667        protected String getDescription() {
2668          return "TruncateRegionProcedure";
2669        }
2670      });
2671  }
2672
2673  @Override
2674  public long addColumn(final TableName tableName, final ColumnFamilyDescriptor column,
2675    final long nonceGroup, final long nonce) throws IOException {
2676    checkInitialized();
2677    checkTableExists(tableName);
2678
2679    return modifyTable(tableName, new TableDescriptorGetter() {
2680
2681      @Override
2682      public TableDescriptor get() throws IOException {
2683        TableDescriptor old = getTableDescriptors().get(tableName);
2684        if (old.hasColumnFamily(column.getName())) {
2685          throw new InvalidFamilyOperationException("Column family '" + column.getNameAsString()
2686            + "' in table '" + tableName + "' already exists so cannot be added");
2687        }
2688
2689        return TableDescriptorBuilder.newBuilder(old).setColumnFamily(column).build();
2690      }
2691    }, nonceGroup, nonce, true);
2692  }
2693
2694  /**
2695   * Implement to return TableDescriptor after pre-checks
2696   */
2697  protected interface TableDescriptorGetter {
2698    TableDescriptor get() throws IOException;
2699  }
2700
2701  @Override
2702  public long modifyColumn(final TableName tableName, final ColumnFamilyDescriptor descriptor,
2703    final long nonceGroup, final long nonce) throws IOException {
2704    checkInitialized();
2705    checkTableExists(tableName);
2706    return modifyTable(tableName, new TableDescriptorGetter() {
2707
2708      @Override
2709      public TableDescriptor get() throws IOException {
2710        TableDescriptor old = getTableDescriptors().get(tableName);
2711        if (!old.hasColumnFamily(descriptor.getName())) {
2712          throw new InvalidFamilyOperationException("Family '" + descriptor.getNameAsString()
2713            + "' does not exist, so it cannot be modified");
2714        }
2715
2716        return TableDescriptorBuilder.newBuilder(old).modifyColumnFamily(descriptor).build();
2717      }
2718    }, nonceGroup, nonce, true);
2719  }
2720
2721  @Override
2722  public long modifyColumnStoreFileTracker(TableName tableName, byte[] family, String dstSFT,
2723    long nonceGroup, long nonce) throws IOException {
2724    checkInitialized();
2725    return MasterProcedureUtil
2726      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2727
2728        @Override
2729        protected void run() throws IOException {
2730          String sft = getMaster().getMasterCoprocessorHost()
2731            .preModifyColumnFamilyStoreFileTracker(tableName, family, dstSFT);
2732          LOG.info("{} modify column {} store file tracker of table {} to {}",
2733            getClientIdAuditPrefix(), Bytes.toStringBinary(family), tableName, sft);
2734          submitProcedure(new ModifyColumnFamilyStoreFileTrackerProcedure(
2735            procedureExecutor.getEnvironment(), tableName, family, sft));
2736          getMaster().getMasterCoprocessorHost().postModifyColumnFamilyStoreFileTracker(tableName,
2737            family, dstSFT);
2738        }
2739
2740        @Override
2741        protected String getDescription() {
2742          return "ModifyColumnFamilyStoreFileTrackerProcedure";
2743        }
2744      });
2745  }
2746
2747  @Override
2748  public long deleteColumn(final TableName tableName, final byte[] columnName,
2749    final long nonceGroup, final long nonce) throws IOException {
2750    checkInitialized();
2751    checkTableExists(tableName);
2752
2753    return modifyTable(tableName, new TableDescriptorGetter() {
2754
2755      @Override
2756      public TableDescriptor get() throws IOException {
2757        TableDescriptor old = getTableDescriptors().get(tableName);
2758
2759        if (!old.hasColumnFamily(columnName)) {
2760          throw new InvalidFamilyOperationException(
2761            "Family '" + Bytes.toString(columnName) + "' does not exist, so it cannot be deleted");
2762        }
2763        if (old.getColumnFamilyCount() == 1) {
2764          throw new InvalidFamilyOperationException("Family '" + Bytes.toString(columnName)
2765            + "' is the only column family in the table, so it cannot be deleted");
2766        }
2767        return TableDescriptorBuilder.newBuilder(old).removeColumnFamily(columnName).build();
2768      }
2769    }, nonceGroup, nonce, true);
2770  }
2771
2772  @Override
2773  public long enableTable(final TableName tableName, final long nonceGroup, final long nonce)
2774    throws IOException {
2775    checkInitialized();
2776
2777    return MasterProcedureUtil
2778      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2779        @Override
2780        protected void run() throws IOException {
2781          getMaster().getMasterCoprocessorHost().preEnableTable(tableName);
2782
2783          // Normally, it would make sense for this authorization check to exist inside
2784          // AccessController, but because the authorization check is done based on internal state
2785          // (rather than explicit permissions) we'll do the check here instead of in the
2786          // coprocessor.
2787          MasterQuotaManager quotaManager = getMasterQuotaManager();
2788          if (quotaManager != null) {
2789            if (quotaManager.isQuotaInitialized()) {
2790              // skip checking quotas for system tables, see:
2791              // https://issues.apache.org/jira/browse/HBASE-28183
2792              if (!tableName.isSystemTable()) {
2793                SpaceQuotaSnapshot currSnapshotOfTable =
2794                  QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName);
2795                if (currSnapshotOfTable != null) {
2796                  SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus();
2797                  if (
2798                    quotaStatus.isInViolation()
2799                      && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)
2800                  ) {
2801                    throw new AccessDeniedException("Enabling the table '" + tableName
2802                      + "' is disallowed due to a violated space quota.");
2803                  }
2804                }
2805              }
2806            } else if (LOG.isTraceEnabled()) {
2807              LOG
2808                .trace("Unable to check for space quotas as the MasterQuotaManager is not enabled");
2809            }
2810          }
2811
2812          LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
2813
2814          // Execute the operation asynchronously - client will check the progress of the operation
2815          // In case the request is from a <1.1 client before returning,
2816          // we want to make sure that the table is prepared to be
2817          // enabled (the table is locked and the table state is set).
2818          // Note: if the procedure throws exception, we will catch it and rethrow.
2819          final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch();
2820          submitProcedure(
2821            new EnableTableProcedure(procedureExecutor.getEnvironment(), tableName, prepareLatch));
2822          prepareLatch.await();
2823
2824          getMaster().getMasterCoprocessorHost().postEnableTable(tableName);
2825        }
2826
2827        @Override
2828        protected String getDescription() {
2829          return "EnableTableProcedure";
2830        }
2831      });
2832  }
2833
2834  @Override
2835  public long disableTable(final TableName tableName, final long nonceGroup, final long nonce)
2836    throws IOException {
2837    checkInitialized();
2838
2839    return MasterProcedureUtil
2840      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2841        @Override
2842        protected void run() throws IOException {
2843          getMaster().getMasterCoprocessorHost().preDisableTable(tableName);
2844
2845          LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
2846
2847          // Execute the operation asynchronously - client will check the progress of the operation
2848          // In case the request is from a <1.1 client before returning,
2849          // we want to make sure that the table is prepared to be
2850          // enabled (the table is locked and the table state is set).
2851          // Note: if the procedure throws exception, we will catch it and rethrow.
2852          //
2853          // We need to wait for the procedure to potentially fail due to "prepare" sanity
2854          // checks. This will block only the beginning of the procedure. See HBASE-19953.
2855          final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createBlockingLatch();
2856          submitProcedure(new DisableTableProcedure(procedureExecutor.getEnvironment(), tableName,
2857            false, prepareLatch));
2858          prepareLatch.await();
2859
2860          getMaster().getMasterCoprocessorHost().postDisableTable(tableName);
2861        }
2862
2863        @Override
2864        protected String getDescription() {
2865          return "DisableTableProcedure";
2866        }
2867      });
2868  }
2869
2870  private long modifyTable(final TableName tableName,
2871    final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce,
2872    final boolean shouldCheckDescriptor) throws IOException {
2873    return modifyTable(tableName, newDescriptorGetter, nonceGroup, nonce, shouldCheckDescriptor,
2874      true);
2875  }
2876
2877  private long modifyTable(final TableName tableName,
2878    final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce,
2879    final boolean shouldCheckDescriptor, final boolean reopenRegions) throws IOException {
2880    return MasterProcedureUtil
2881      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2882        @Override
2883        protected void run() throws IOException {
2884          TableDescriptor oldDescriptor = getMaster().getTableDescriptors().get(tableName);
2885          TableDescriptor newDescriptor = getMaster().getMasterCoprocessorHost()
2886            .preModifyTable(tableName, oldDescriptor, newDescriptorGetter.get());
2887          TableDescriptorChecker.sanityCheck(conf, newDescriptor);
2888          LOG.info("{} modify table {} from {} to {}", getClientIdAuditPrefix(), tableName,
2889            oldDescriptor, newDescriptor);
2890
2891          // Execute the operation synchronously - wait for the operation completes before
2892          // continuing.
2893          //
2894          // We need to wait for the procedure to potentially fail due to "prepare" sanity
2895          // checks. This will block only the beginning of the procedure. See HBASE-19953.
2896          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
2897          submitProcedure(new ModifyTableProcedure(procedureExecutor.getEnvironment(),
2898            newDescriptor, latch, oldDescriptor, shouldCheckDescriptor, reopenRegions));
2899          latch.await();
2900
2901          getMaster().getMasterCoprocessorHost().postModifyTable(tableName, oldDescriptor,
2902            newDescriptor);
2903        }
2904
2905        @Override
2906        protected String getDescription() {
2907          return "ModifyTableProcedure";
2908        }
2909      });
2910
2911  }
2912
2913  @Override
2914  public long modifyTable(final TableName tableName, final TableDescriptor newDescriptor,
2915    final long nonceGroup, final long nonce, final boolean reopenRegions) throws IOException {
2916    checkInitialized();
2917    return modifyTable(tableName, new TableDescriptorGetter() {
2918      @Override
2919      public TableDescriptor get() throws IOException {
2920        return newDescriptor;
2921      }
2922    }, nonceGroup, nonce, false, reopenRegions);
2923
2924  }
2925
2926  @Override
2927  public long modifyTableStoreFileTracker(TableName tableName, String dstSFT, long nonceGroup,
2928    long nonce) throws IOException {
2929    checkInitialized();
2930    return MasterProcedureUtil
2931      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2932
2933        @Override
2934        protected void run() throws IOException {
2935          String sft = getMaster().getMasterCoprocessorHost()
2936            .preModifyTableStoreFileTracker(tableName, dstSFT);
2937          LOG.info("{} modify table store file tracker of table {} to {}", getClientIdAuditPrefix(),
2938            tableName, sft);
2939          submitProcedure(new ModifyTableStoreFileTrackerProcedure(
2940            procedureExecutor.getEnvironment(), tableName, sft));
2941          getMaster().getMasterCoprocessorHost().postModifyTableStoreFileTracker(tableName, sft);
2942        }
2943
2944        @Override
2945        protected String getDescription() {
2946          return "ModifyTableStoreFileTrackerProcedure";
2947        }
2948      });
2949  }
2950
2951  public long restoreSnapshot(final SnapshotDescription snapshotDesc, final long nonceGroup,
2952    final long nonce, final boolean restoreAcl, final String customSFT) throws IOException {
2953    checkInitialized();
2954    getSnapshotManager().checkSnapshotSupport();
2955
2956    // Ensure namespace exists. Will throw exception if non-known NS.
2957    final TableName dstTable = TableName.valueOf(snapshotDesc.getTable());
2958    getClusterSchema().getNamespace(dstTable.getNamespaceAsString());
2959
2960    return MasterProcedureUtil
2961      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
2962        @Override
2963        protected void run() throws IOException {
2964          setProcId(getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(),
2965            restoreAcl, customSFT));
2966        }
2967
2968        @Override
2969        protected String getDescription() {
2970          return "RestoreSnapshotProcedure";
2971        }
2972      });
2973  }
2974
2975  private void checkTableExists(final TableName tableName)
2976    throws IOException, TableNotFoundException {
2977    if (!tableDescriptors.exists(tableName)) {
2978      throw new TableNotFoundException(tableName);
2979    }
2980  }
2981
2982  @Override
2983  public void checkTableModifiable(final TableName tableName)
2984    throws IOException, TableNotFoundException, TableNotDisabledException {
2985    if (isCatalogTable(tableName)) {
2986      throw new IOException("Can't modify catalog tables");
2987    }
2988    checkTableExists(tableName);
2989    TableState ts = getTableStateManager().getTableState(tableName);
2990    if (!ts.isDisabled()) {
2991      throw new TableNotDisabledException("Not DISABLED; " + ts);
2992    }
2993  }
2994
2995  public ClusterMetrics getClusterMetricsWithoutCoprocessor() throws InterruptedIOException {
2996    return getClusterMetricsWithoutCoprocessor(EnumSet.allOf(Option.class));
2997  }
2998
2999  public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet<Option> options)
3000    throws InterruptedIOException {
3001    ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder();
3002    // given that hbase1 can't submit the request with Option,
3003    // we return all information to client if the list of Option is empty.
3004    if (options.isEmpty()) {
3005      options = EnumSet.allOf(Option.class);
3006    }
3007
3008    // TASKS and/or LIVE_SERVERS will populate this map, which will be given to the builder if
3009    // not null after option processing completes.
3010    Map<ServerName, ServerMetrics> serverMetricsMap = null;
3011
3012    for (Option opt : options) {
3013      switch (opt) {
3014        case HBASE_VERSION:
3015          builder.setHBaseVersion(VersionInfo.getVersion());
3016          break;
3017        case CLUSTER_ID:
3018          builder.setClusterId(getClusterId());
3019          break;
3020        case MASTER:
3021          builder.setMasterName(getServerName());
3022          break;
3023        case BACKUP_MASTERS:
3024          builder.setBackerMasterNames(getBackupMasters());
3025          break;
3026        case TASKS: {
3027          // Master tasks
3028          builder.setMasterTasks(TaskMonitor.get().getTasks().stream()
3029            .map(task -> ServerTaskBuilder.newBuilder().setDescription(task.getDescription())
3030              .setStatus(task.getStatus())
3031              .setState(ServerTask.State.valueOf(task.getState().name()))
3032              .setStartTime(task.getStartTime()).setCompletionTime(task.getCompletionTimestamp())
3033              .build())
3034            .collect(Collectors.toList()));
3035          // TASKS is also synonymous with LIVE_SERVERS for now because task information for
3036          // regionservers is carried in ServerLoad.
3037          // Add entries to serverMetricsMap for all live servers, if we haven't already done so
3038          if (serverMetricsMap == null) {
3039            serverMetricsMap = getOnlineServers();
3040          }
3041          break;
3042        }
3043        case LIVE_SERVERS: {
3044          // Add entries to serverMetricsMap for all live servers, if we haven't already done so
3045          if (serverMetricsMap == null) {
3046            serverMetricsMap = getOnlineServers();
3047          }
3048          break;
3049        }
3050        case DEAD_SERVERS: {
3051          if (serverManager != null) {
3052            builder.setDeadServerNames(
3053              new ArrayList<>(serverManager.getDeadServers().copyServerNames()));
3054          }
3055          break;
3056        }
3057        case UNKNOWN_SERVERS: {
3058          if (serverManager != null) {
3059            builder.setUnknownServerNames(getUnknownServers());
3060          }
3061          break;
3062        }
3063        case MASTER_COPROCESSORS: {
3064          if (cpHost != null) {
3065            builder.setMasterCoprocessorNames(Arrays.asList(getMasterCoprocessors()));
3066          }
3067          break;
3068        }
3069        case REGIONS_IN_TRANSITION: {
3070          if (assignmentManager != null) {
3071            builder.setRegionsInTransition(
3072              assignmentManager.getRegionStates().getRegionsStateInTransition());
3073          }
3074          break;
3075        }
3076        case BALANCER_ON: {
3077          if (loadBalancerStateStore != null) {
3078            builder.setBalancerOn(loadBalancerStateStore.get());
3079          }
3080          break;
3081        }
3082        case MASTER_INFO_PORT: {
3083          if (infoServer != null) {
3084            builder.setMasterInfoPort(infoServer.getPort());
3085          }
3086          break;
3087        }
3088        case SERVERS_NAME: {
3089          if (serverManager != null) {
3090            builder.setServerNames(serverManager.getOnlineServersList());
3091          }
3092          break;
3093        }
3094        case TABLE_TO_REGIONS_COUNT: {
3095          if (isActiveMaster() && isInitialized() && assignmentManager != null) {
3096            try {
3097              Map<TableName, RegionStatesCount> tableRegionStatesCountMap = new HashMap<>();
3098              Map<String, TableDescriptor> tableDescriptorMap = getTableDescriptors().getAll();
3099              for (TableDescriptor tableDescriptor : tableDescriptorMap.values()) {
3100                TableName tableName = tableDescriptor.getTableName();
3101                RegionStatesCount regionStatesCount =
3102                  assignmentManager.getRegionStatesCount(tableName);
3103                tableRegionStatesCountMap.put(tableName, regionStatesCount);
3104              }
3105              builder.setTableRegionStatesCount(tableRegionStatesCountMap);
3106            } catch (IOException e) {
3107              LOG.error("Error while populating TABLE_TO_REGIONS_COUNT for Cluster Metrics..", e);
3108            }
3109          }
3110          break;
3111        }
3112        case DECOMMISSIONED_SERVERS: {
3113          if (serverManager != null) {
3114            builder.setDecommissionedServerNames(serverManager.getDrainingServersList());
3115          }
3116          break;
3117        }
3118      }
3119    }
3120
3121    if (serverMetricsMap != null) {
3122      builder.setLiveServerMetrics(serverMetricsMap);
3123    }
3124
3125    return builder.build();
3126  }
3127
3128  private List<ServerName> getUnknownServers() {
3129    if (serverManager != null) {
3130      final Set<ServerName> serverNames = getAssignmentManager().getRegionStates().getRegionStates()
3131        .stream().map(RegionState::getServerName).collect(Collectors.toSet());
3132      final List<ServerName> unknownServerNames = serverNames.stream()
3133        .filter(sn -> sn != null && serverManager.isServerUnknown(sn)).collect(Collectors.toList());
3134      return unknownServerNames;
3135    }
3136    return null;
3137  }
3138
3139  private Map<ServerName, ServerMetrics> getOnlineServers() {
3140    if (serverManager != null) {
3141      final Map<ServerName, ServerMetrics> map = new HashMap<>();
3142      serverManager.getOnlineServers().entrySet().forEach(e -> map.put(e.getKey(), e.getValue()));
3143      return map;
3144    }
3145    return null;
3146  }
3147
3148  /** Returns cluster status */
3149  public ClusterMetrics getClusterMetrics() throws IOException {
3150    return getClusterMetrics(EnumSet.allOf(Option.class));
3151  }
3152
3153  public ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException {
3154    if (cpHost != null) {
3155      cpHost.preGetClusterMetrics();
3156    }
3157    ClusterMetrics status = getClusterMetricsWithoutCoprocessor(options);
3158    if (cpHost != null) {
3159      cpHost.postGetClusterMetrics(status);
3160    }
3161    return status;
3162  }
3163
3164  /** Returns info port of active master or 0 if any exception occurs. */
3165  public int getActiveMasterInfoPort() {
3166    return activeMasterManager.getActiveMasterInfoPort();
3167  }
3168
3169  /**
3170   * @param sn is ServerName of the backup master
3171   * @return info port of backup master or 0 if any exception occurs.
3172   */
3173  public int getBackupMasterInfoPort(final ServerName sn) {
3174    return activeMasterManager.getBackupMasterInfoPort(sn);
3175  }
3176
3177  /**
3178   * The set of loaded coprocessors is stored in a static set. Since it's statically allocated, it
3179   * does not require that HMaster's cpHost be initialized prior to accessing it.
3180   * @return a String representation of the set of names of the loaded coprocessors.
3181   */
3182  public static String getLoadedCoprocessors() {
3183    return CoprocessorHost.getLoadedCoprocessors().toString();
3184  }
3185
3186  /** Returns timestamp in millis when HMaster was started. */
3187  public long getMasterStartTime() {
3188    return startcode;
3189  }
3190
3191  /** Returns timestamp in millis when HMaster became the active master. */
3192  @Override
3193  public long getMasterActiveTime() {
3194    return masterActiveTime;
3195  }
3196
3197  /** Returns timestamp in millis when HMaster finished becoming the active master */
3198  public long getMasterFinishedInitializationTime() {
3199    return masterFinishedInitializationTime;
3200  }
3201
3202  public int getNumWALFiles() {
3203    return 0;
3204  }
3205
3206  public ProcedureStore getProcedureStore() {
3207    return procedureStore;
3208  }
3209
3210  public int getRegionServerInfoPort(final ServerName sn) {
3211    int port = this.serverManager.getInfoPort(sn);
3212    return port == 0
3213      ? conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT)
3214      : port;
3215  }
3216
3217  @Override
3218  public String getRegionServerVersion(ServerName sn) {
3219    // Will return "0.0.0" if the server is not online to prevent move system region to unknown
3220    // version RS.
3221    return this.serverManager.getVersion(sn);
3222  }
3223
3224  @Override
3225  public void checkIfShouldMoveSystemRegionAsync() {
3226    assignmentManager.checkIfShouldMoveSystemRegionAsync();
3227  }
3228
3229  /** Returns array of coprocessor SimpleNames. */
3230  public String[] getMasterCoprocessors() {
3231    Set<String> masterCoprocessors = getMasterCoprocessorHost().getCoprocessors();
3232    return masterCoprocessors.toArray(new String[masterCoprocessors.size()]);
3233  }
3234
3235  @Override
3236  public void abort(String reason, Throwable cause) {
3237    if (!setAbortRequested() || isStopped()) {
3238      LOG.debug("Abort called but aborted={}, stopped={}", isAborted(), isStopped());
3239      return;
3240    }
3241    if (cpHost != null) {
3242      // HBASE-4014: dump a list of loaded coprocessors.
3243      LOG.error(HBaseMarkers.FATAL,
3244        "Master server abort: loaded coprocessors are: " + getLoadedCoprocessors());
3245    }
3246    String msg = "***** ABORTING master " + this + ": " + reason + " *****";
3247    if (cause != null) {
3248      LOG.error(HBaseMarkers.FATAL, msg, cause);
3249    } else {
3250      LOG.error(HBaseMarkers.FATAL, msg);
3251    }
3252
3253    try {
3254      stopMaster();
3255    } catch (IOException e) {
3256      LOG.error("Exception occurred while stopping master", e);
3257    }
3258  }
3259
3260  @Override
3261  public MasterCoprocessorHost getMasterCoprocessorHost() {
3262    return cpHost;
3263  }
3264
3265  @Override
3266  public MasterQuotaManager getMasterQuotaManager() {
3267    return quotaManager;
3268  }
3269
3270  @Override
3271  public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
3272    return procedureExecutor;
3273  }
3274
3275  @Override
3276  public ServerName getServerName() {
3277    return this.serverName;
3278  }
3279
3280  @Override
3281  public AssignmentManager getAssignmentManager() {
3282    return this.assignmentManager;
3283  }
3284
3285  @Override
3286  public CatalogJanitor getCatalogJanitor() {
3287    return this.catalogJanitorChore;
3288  }
3289
3290  public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
3291    return rsFatals;
3292  }
3293
3294  public TaskGroup getStartupProgress() {
3295    return startupTaskGroup;
3296  }
3297
3298  /**
3299   * Shutdown the cluster. Master runs a coordinated stop of all RegionServers and then itself.
3300   */
3301  public void shutdown() throws IOException {
3302    TraceUtil.trace(() -> {
3303      if (cpHost != null) {
3304        cpHost.preShutdown();
3305      }
3306
3307      // Tell the servermanager cluster shutdown has been called. This makes it so when Master is
3308      // last running server, it'll stop itself. Next, we broadcast the cluster shutdown by setting
3309      // the cluster status as down. RegionServers will notice this change in state and will start
3310      // shutting themselves down. When last has exited, Master can go down.
3311      if (this.serverManager != null) {
3312        this.serverManager.shutdownCluster();
3313      }
3314      if (this.clusterStatusTracker != null) {
3315        try {
3316          this.clusterStatusTracker.setClusterDown();
3317        } catch (KeeperException e) {
3318          LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
3319        }
3320      }
3321      // Stop the procedure executor. Will stop any ongoing assign, unassign, server crash etc.,
3322      // processing so we can go down.
3323      if (this.procedureExecutor != null) {
3324        this.procedureExecutor.stop();
3325      }
3326      // Shutdown our cluster connection. This will kill any hosted RPCs that might be going on;
3327      // this is what we want especially if the Master is in startup phase doing call outs to
3328      // hbase:meta, etc. when cluster is down. Without ths connection close, we'd have to wait on
3329      // the rpc to timeout.
3330      if (this.asyncClusterConnection != null) {
3331        this.asyncClusterConnection.close();
3332      }
3333    }, "HMaster.shutdown");
3334  }
3335
3336  public void stopMaster() throws IOException {
3337    if (cpHost != null) {
3338      cpHost.preStopMaster();
3339    }
3340    stop("Stopped by " + Thread.currentThread().getName());
3341  }
3342
3343  @Override
3344  public void stop(String msg) {
3345    if (!this.stopped) {
3346      LOG.info("***** STOPPING master '" + this + "' *****");
3347      this.stopped = true;
3348      LOG.info("STOPPED: " + msg);
3349      // Wakes run() if it is sleeping
3350      sleeper.skipSleepCycle();
3351      if (this.activeMasterManager != null) {
3352        this.activeMasterManager.stop();
3353      }
3354    }
3355  }
3356
3357  protected void checkServiceStarted() throws ServerNotRunningYetException {
3358    if (!serviceStarted) {
3359      throw new ServerNotRunningYetException("Server is not running yet");
3360    }
3361  }
3362
3363  void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException,
3364    MasterNotRunningException, MasterStoppedException {
3365    checkServiceStarted();
3366    if (!isInitialized()) {
3367      throw new PleaseHoldException("Master is initializing");
3368    }
3369    if (isStopped()) {
3370      throw new MasterStoppedException();
3371    }
3372  }
3373
3374  /**
3375   * Report whether this master is currently the active master or not. If not active master, we are
3376   * parked on ZK waiting to become active. This method is used for testing.
3377   * @return true if active master, false if not.
3378   */
3379  @Override
3380  public boolean isActiveMaster() {
3381    return activeMaster;
3382  }
3383
3384  /**
3385   * Report whether this master has completed with its initialization and is ready. If ready, the
3386   * master is also the active master. A standby master is never ready. This method is used for
3387   * testing.
3388   * @return true if master is ready to go, false if not.
3389   */
3390  @Override
3391  public boolean isInitialized() {
3392    return initialized.isReady();
3393  }
3394
3395  /**
3396   * Report whether this master is started This method is used for testing.
3397   * @return true if master is ready to go, false if not.
3398   */
3399  public boolean isOnline() {
3400    return serviceStarted;
3401  }
3402
3403  /**
3404   * Report whether this master is in maintenance mode.
3405   * @return true if master is in maintenanceMode
3406   */
3407  @Override
3408  public boolean isInMaintenanceMode() {
3409    return maintenanceMode;
3410  }
3411
3412  public void setInitialized(boolean isInitialized) {
3413    procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
3414  }
3415
3416  /**
3417   * Mainly used in procedure related tests, where we will restart ProcedureExecutor and
3418   * AssignmentManager, but we do not want to restart master(to speed up the test), so we need to
3419   * disable rpc for a while otherwise some critical rpc requests such as
3420   * reportRegionStateTransition could fail and cause region server to abort.
3421   */
3422  @RestrictedApi(explanation = "Should only be called in tests", link = "",
3423      allowedOnPath = ".*/src/test/.*")
3424  public void setServiceStarted(boolean started) {
3425    this.serviceStarted = started;
3426  }
3427
3428  @Override
3429  public ProcedureEvent<?> getInitializedEvent() {
3430    return initialized;
3431  }
3432
3433  /**
3434   * Compute the average load across all region servers. Currently, this uses a very naive
3435   * computation - just uses the number of regions being served, ignoring stats about number of
3436   * requests.
3437   * @return the average load
3438   */
3439  public double getAverageLoad() {
3440    if (this.assignmentManager == null) {
3441      return 0;
3442    }
3443
3444    RegionStates regionStates = this.assignmentManager.getRegionStates();
3445    if (regionStates == null) {
3446      return 0;
3447    }
3448    return regionStates.getAverageLoad();
3449  }
3450
3451  @Override
3452  public boolean registerService(Service instance) {
3453    /*
3454     * No stacking of instances is allowed for a single service name
3455     */
3456    Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType();
3457    String serviceName = CoprocessorRpcUtils.getServiceName(serviceDesc);
3458    if (coprocessorServiceHandlers.containsKey(serviceName)) {
3459      LOG.error("Coprocessor service " + serviceName
3460        + " already registered, rejecting request from " + instance);
3461      return false;
3462    }
3463
3464    coprocessorServiceHandlers.put(serviceName, instance);
3465    if (LOG.isDebugEnabled()) {
3466      LOG.debug("Registered master coprocessor service: service=" + serviceName);
3467    }
3468    return true;
3469  }
3470
3471  /**
3472   * Utility for constructing an instance of the passed HMaster class.
3473   * @return HMaster instance.
3474   */
3475  public static HMaster constructMaster(Class<? extends HMaster> masterClass,
3476    final Configuration conf) {
3477    try {
3478      Constructor<? extends HMaster> c = masterClass.getConstructor(Configuration.class);
3479      return c.newInstance(conf);
3480    } catch (Exception e) {
3481      Throwable error = e;
3482      if (
3483        e instanceof InvocationTargetException
3484          && ((InvocationTargetException) e).getTargetException() != null
3485      ) {
3486        error = ((InvocationTargetException) e).getTargetException();
3487      }
3488      throw new RuntimeException("Failed construction of Master: " + masterClass.toString() + ". ",
3489        error);
3490    }
3491  }
3492
3493  /**
3494   * @see org.apache.hadoop.hbase.master.HMasterCommandLine
3495   */
3496  public static void main(String[] args) {
3497    LOG.info("STARTING service " + HMaster.class.getSimpleName());
3498    VersionInfo.logVersion();
3499    new HMasterCommandLine(HMaster.class).doMain(args);
3500  }
3501
3502  public HFileCleaner getHFileCleaner() {
3503    return this.hfileCleaners.get(0);
3504  }
3505
3506  public List<HFileCleaner> getHFileCleaners() {
3507    return this.hfileCleaners;
3508  }
3509
3510  public LogCleaner getLogCleaner() {
3511    return this.logCleaner;
3512  }
3513
3514  /** Returns the underlying snapshot manager */
3515  @Override
3516  public SnapshotManager getSnapshotManager() {
3517    return this.snapshotManager;
3518  }
3519
3520  /** Returns the underlying MasterProcedureManagerHost */
3521  @Override
3522  public MasterProcedureManagerHost getMasterProcedureManagerHost() {
3523    return mpmHost;
3524  }
3525
3526  @Override
3527  public ClusterSchema getClusterSchema() {
3528    return this.clusterSchemaService;
3529  }
3530
3531  /**
3532   * Create a new Namespace.
3533   * @param namespaceDescriptor descriptor for new Namespace
3534   * @param nonceGroup          Identifier for the source of the request, a client or process.
3535   * @param nonce               A unique identifier for this operation from the client or process
3536   *                            identified by <code>nonceGroup</code> (the source must ensure each
3537   *                            operation gets a unique id).
3538   * @return procedure id
3539   */
3540  long createNamespace(final NamespaceDescriptor namespaceDescriptor, final long nonceGroup,
3541    final long nonce) throws IOException {
3542    checkInitialized();
3543
3544    TableName.isLegalNamespaceName(Bytes.toBytes(namespaceDescriptor.getName()));
3545
3546    return MasterProcedureUtil
3547      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
3548        @Override
3549        protected void run() throws IOException {
3550          getMaster().getMasterCoprocessorHost().preCreateNamespace(namespaceDescriptor);
3551          // We need to wait for the procedure to potentially fail due to "prepare" sanity
3552          // checks. This will block only the beginning of the procedure. See HBASE-19953.
3553          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
3554          LOG.info(getClientIdAuditPrefix() + " creating " + namespaceDescriptor);
3555          // Execute the operation synchronously - wait for the operation to complete before
3556          // continuing.
3557          setProcId(getClusterSchema().createNamespace(namespaceDescriptor, getNonceKey(), latch));
3558          latch.await();
3559          getMaster().getMasterCoprocessorHost().postCreateNamespace(namespaceDescriptor);
3560        }
3561
3562        @Override
3563        protected String getDescription() {
3564          return "CreateNamespaceProcedure";
3565        }
3566      });
3567  }
3568
3569  /**
3570   * Modify an existing Namespace.
3571   * @param nonceGroup Identifier for the source of the request, a client or process.
3572   * @param nonce      A unique identifier for this operation from the client or process identified
3573   *                   by <code>nonceGroup</code> (the source must ensure each operation gets a
3574   *                   unique id).
3575   * @return procedure id
3576   */
3577  long modifyNamespace(final NamespaceDescriptor newNsDescriptor, final long nonceGroup,
3578    final long nonce) throws IOException {
3579    checkInitialized();
3580
3581    TableName.isLegalNamespaceName(Bytes.toBytes(newNsDescriptor.getName()));
3582
3583    return MasterProcedureUtil
3584      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
3585        @Override
3586        protected void run() throws IOException {
3587          NamespaceDescriptor oldNsDescriptor = getNamespace(newNsDescriptor.getName());
3588          getMaster().getMasterCoprocessorHost().preModifyNamespace(oldNsDescriptor,
3589            newNsDescriptor);
3590          // We need to wait for the procedure to potentially fail due to "prepare" sanity
3591          // checks. This will block only the beginning of the procedure. See HBASE-19953.
3592          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
3593          LOG.info(getClientIdAuditPrefix() + " modify " + newNsDescriptor);
3594          // Execute the operation synchronously - wait for the operation to complete before
3595          // continuing.
3596          setProcId(getClusterSchema().modifyNamespace(newNsDescriptor, getNonceKey(), latch));
3597          latch.await();
3598          getMaster().getMasterCoprocessorHost().postModifyNamespace(oldNsDescriptor,
3599            newNsDescriptor);
3600        }
3601
3602        @Override
3603        protected String getDescription() {
3604          return "ModifyNamespaceProcedure";
3605        }
3606      });
3607  }
3608
3609  /**
3610   * Delete an existing Namespace. Only empty Namespaces (no tables) can be removed.
3611   * @param nonceGroup Identifier for the source of the request, a client or process.
3612   * @param nonce      A unique identifier for this operation from the client or process identified
3613   *                   by <code>nonceGroup</code> (the source must ensure each operation gets a
3614   *                   unique id).
3615   * @return procedure id
3616   */
3617  long deleteNamespace(final String name, final long nonceGroup, final long nonce)
3618    throws IOException {
3619    checkInitialized();
3620
3621    return MasterProcedureUtil
3622      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
3623        @Override
3624        protected void run() throws IOException {
3625          getMaster().getMasterCoprocessorHost().preDeleteNamespace(name);
3626          LOG.info(getClientIdAuditPrefix() + " delete " + name);
3627          // Execute the operation synchronously - wait for the operation to complete before
3628          // continuing.
3629          //
3630          // We need to wait for the procedure to potentially fail due to "prepare" sanity
3631          // checks. This will block only the beginning of the procedure. See HBASE-19953.
3632          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
3633          setProcId(submitProcedure(
3634            new DeleteNamespaceProcedure(procedureExecutor.getEnvironment(), name, latch)));
3635          latch.await();
3636          // Will not be invoked in the face of Exception thrown by the Procedure's execution
3637          getMaster().getMasterCoprocessorHost().postDeleteNamespace(name);
3638        }
3639
3640        @Override
3641        protected String getDescription() {
3642          return "DeleteNamespaceProcedure";
3643        }
3644      });
3645  }
3646
3647  /**
3648   * Get a Namespace
3649   * @param name Name of the Namespace
3650   * @return Namespace descriptor for <code>name</code>
3651   */
3652  NamespaceDescriptor getNamespace(String name) throws IOException {
3653    checkInitialized();
3654    if (this.cpHost != null) this.cpHost.preGetNamespaceDescriptor(name);
3655    NamespaceDescriptor nsd = this.clusterSchemaService.getNamespace(name);
3656    if (this.cpHost != null) this.cpHost.postGetNamespaceDescriptor(nsd);
3657    return nsd;
3658  }
3659
3660  /**
3661   * Get all Namespaces
3662   * @return All Namespace descriptors
3663   */
3664  List<NamespaceDescriptor> getNamespaces() throws IOException {
3665    checkInitialized();
3666    final List<NamespaceDescriptor> nsds = new ArrayList<>();
3667    if (cpHost != null) {
3668      cpHost.preListNamespaceDescriptors(nsds);
3669    }
3670    nsds.addAll(this.clusterSchemaService.getNamespaces());
3671    if (this.cpHost != null) {
3672      this.cpHost.postListNamespaceDescriptors(nsds);
3673    }
3674    return nsds;
3675  }
3676
3677  /**
3678   * List namespace names
3679   * @return All namespace names
3680   */
3681  public List<String> listNamespaces() throws IOException {
3682    checkInitialized();
3683    List<String> namespaces = new ArrayList<>();
3684    if (cpHost != null) {
3685      cpHost.preListNamespaces(namespaces);
3686    }
3687    for (NamespaceDescriptor namespace : clusterSchemaService.getNamespaces()) {
3688      namespaces.add(namespace.getName());
3689    }
3690    if (cpHost != null) {
3691      cpHost.postListNamespaces(namespaces);
3692    }
3693    return namespaces;
3694  }
3695
3696  @Override
3697  public List<TableName> listTableNamesByNamespace(String name) throws IOException {
3698    checkInitialized();
3699    return listTableNames(name, null, true);
3700  }
3701
3702  @Override
3703  public List<TableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
3704    checkInitialized();
3705    return listTableDescriptors(name, null, null, true);
3706  }
3707
3708  @Override
3709  public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
3710    throws IOException {
3711    if (cpHost != null) {
3712      cpHost.preAbortProcedure(this.procedureExecutor, procId);
3713    }
3714
3715    final boolean result = this.procedureExecutor.abort(procId, mayInterruptIfRunning);
3716
3717    if (cpHost != null) {
3718      cpHost.postAbortProcedure();
3719    }
3720
3721    return result;
3722  }
3723
3724  @Override
3725  public List<Procedure<?>> getProcedures() throws IOException {
3726    if (cpHost != null) {
3727      cpHost.preGetProcedures();
3728    }
3729
3730    @SuppressWarnings({ "unchecked", "rawtypes" })
3731    List<Procedure<?>> procList = (List) this.procedureExecutor.getProcedures();
3732
3733    if (cpHost != null) {
3734      cpHost.postGetProcedures(procList);
3735    }
3736
3737    return procList;
3738  }
3739
3740  @Override
3741  public List<LockedResource> getLocks() throws IOException {
3742    if (cpHost != null) {
3743      cpHost.preGetLocks();
3744    }
3745
3746    MasterProcedureScheduler procedureScheduler =
3747      procedureExecutor.getEnvironment().getProcedureScheduler();
3748
3749    final List<LockedResource> lockedResources = procedureScheduler.getLocks();
3750
3751    if (cpHost != null) {
3752      cpHost.postGetLocks(lockedResources);
3753    }
3754
3755    return lockedResources;
3756  }
3757
3758  /**
3759   * Returns the list of table descriptors that match the specified request
3760   * @param namespace        the namespace to query, or null if querying for all
3761   * @param regex            The regular expression to match against, or null if querying for all
3762   * @param tableNameList    the list of table names, or null if querying for all
3763   * @param includeSysTables False to match only against userspace tables
3764   * @return the list of table descriptors
3765   */
3766  public List<TableDescriptor> listTableDescriptors(final String namespace, final String regex,
3767    final List<TableName> tableNameList, final boolean includeSysTables) throws IOException {
3768    List<TableDescriptor> htds = new ArrayList<>();
3769    if (cpHost != null) {
3770      cpHost.preGetTableDescriptors(tableNameList, htds, regex);
3771    }
3772    htds = getTableDescriptors(htds, namespace, regex, tableNameList, includeSysTables);
3773    if (cpHost != null) {
3774      cpHost.postGetTableDescriptors(tableNameList, htds, regex);
3775    }
3776    return htds;
3777  }
3778
3779  /**
3780   * Returns the list of table names that match the specified request
3781   * @param regex            The regular expression to match against, or null if querying for all
3782   * @param namespace        the namespace to query, or null if querying for all
3783   * @param includeSysTables False to match only against userspace tables
3784   * @return the list of table names
3785   */
3786  public List<TableName> listTableNames(final String namespace, final String regex,
3787    final boolean includeSysTables) throws IOException {
3788    List<TableDescriptor> htds = new ArrayList<>();
3789    if (cpHost != null) {
3790      cpHost.preGetTableNames(htds, regex);
3791    }
3792    htds = getTableDescriptors(htds, namespace, regex, null, includeSysTables);
3793    if (cpHost != null) {
3794      cpHost.postGetTableNames(htds, regex);
3795    }
3796    List<TableName> result = new ArrayList<>(htds.size());
3797    for (TableDescriptor htd : htds)
3798      result.add(htd.getTableName());
3799    return result;
3800  }
3801
3802  /**
3803   * Return a list of table table descriptors after applying any provided filter parameters. Note
3804   * that the user-facing description of this filter logic is presented on the class-level javadoc
3805   * of {@link NormalizeTableFilterParams}.
3806   */
3807  private List<TableDescriptor> getTableDescriptors(final List<TableDescriptor> htds,
3808    final String namespace, final String regex, final List<TableName> tableNameList,
3809    final boolean includeSysTables) throws IOException {
3810    if (tableNameList == null || tableNameList.isEmpty()) {
3811      // request for all TableDescriptors
3812      Collection<TableDescriptor> allHtds;
3813      if (namespace != null && namespace.length() > 0) {
3814        // Do a check on the namespace existence. Will fail if does not exist.
3815        this.clusterSchemaService.getNamespace(namespace);
3816        allHtds = tableDescriptors.getByNamespace(namespace).values();
3817      } else {
3818        allHtds = tableDescriptors.getAll().values();
3819      }
3820      for (TableDescriptor desc : allHtds) {
3821        if (
3822          tableStateManager.isTablePresent(desc.getTableName())
3823            && (includeSysTables || !desc.getTableName().isSystemTable())
3824        ) {
3825          htds.add(desc);
3826        }
3827      }
3828    } else {
3829      for (TableName s : tableNameList) {
3830        if (tableStateManager.isTablePresent(s)) {
3831          TableDescriptor desc = tableDescriptors.get(s);
3832          if (desc != null) {
3833            htds.add(desc);
3834          }
3835        }
3836      }
3837    }
3838
3839    // Retains only those matched by regular expression.
3840    if (regex != null) filterTablesByRegex(htds, Pattern.compile(regex));
3841    return htds;
3842  }
3843
3844  /**
3845   * Removes the table descriptors that don't match the pattern.
3846   * @param descriptors list of table descriptors to filter
3847   * @param pattern     the regex to use
3848   */
3849  private static void filterTablesByRegex(final Collection<TableDescriptor> descriptors,
3850    final Pattern pattern) {
3851    final String defaultNS = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR;
3852    Iterator<TableDescriptor> itr = descriptors.iterator();
3853    while (itr.hasNext()) {
3854      TableDescriptor htd = itr.next();
3855      String tableName = htd.getTableName().getNameAsString();
3856      boolean matched = pattern.matcher(tableName).matches();
3857      if (!matched && htd.getTableName().getNamespaceAsString().equals(defaultNS)) {
3858        matched = pattern.matcher(defaultNS + TableName.NAMESPACE_DELIM + tableName).matches();
3859      }
3860      if (!matched) {
3861        itr.remove();
3862      }
3863    }
3864  }
3865
3866  @Override
3867  public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
3868    return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
3869      .getLastMajorCompactionTimestamp(table);
3870  }
3871
3872  @Override
3873  public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
3874    return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
3875      .getLastMajorCompactionTimestamp(regionName);
3876  }
3877
3878  /**
3879   * Gets the mob file compaction state for a specific table. Whether all the mob files are selected
3880   * is known during the compaction execution, but the statistic is done just before compaction
3881   * starts, it is hard to know the compaction type at that time, so the rough statistics are chosen
3882   * for the mob file compaction. Only two compaction states are available,
3883   * CompactionState.MAJOR_AND_MINOR and CompactionState.NONE.
3884   * @param tableName The current table name.
3885   * @return If a given table is in mob file compaction now.
3886   */
3887  public GetRegionInfoResponse.CompactionState getMobCompactionState(TableName tableName) {
3888    AtomicInteger compactionsCount = mobCompactionStates.get(tableName);
3889    if (compactionsCount != null && compactionsCount.get() != 0) {
3890      return GetRegionInfoResponse.CompactionState.MAJOR_AND_MINOR;
3891    }
3892    return GetRegionInfoResponse.CompactionState.NONE;
3893  }
3894
3895  public void reportMobCompactionStart(TableName tableName) throws IOException {
3896    IdLock.Entry lockEntry = null;
3897    try {
3898      lockEntry = mobCompactionLock.getLockEntry(tableName.hashCode());
3899      AtomicInteger compactionsCount = mobCompactionStates.get(tableName);
3900      if (compactionsCount == null) {
3901        compactionsCount = new AtomicInteger(0);
3902        mobCompactionStates.put(tableName, compactionsCount);
3903      }
3904      compactionsCount.incrementAndGet();
3905    } finally {
3906      if (lockEntry != null) {
3907        mobCompactionLock.releaseLockEntry(lockEntry);
3908      }
3909    }
3910  }
3911
3912  public void reportMobCompactionEnd(TableName tableName) throws IOException {
3913    IdLock.Entry lockEntry = null;
3914    try {
3915      lockEntry = mobCompactionLock.getLockEntry(tableName.hashCode());
3916      AtomicInteger compactionsCount = mobCompactionStates.get(tableName);
3917      if (compactionsCount != null) {
3918        int count = compactionsCount.decrementAndGet();
3919        // remove the entry if the count is 0.
3920        if (count == 0) {
3921          mobCompactionStates.remove(tableName);
3922        }
3923      }
3924    } finally {
3925      if (lockEntry != null) {
3926        mobCompactionLock.releaseLockEntry(lockEntry);
3927      }
3928    }
3929  }
3930
3931  /**
3932   * Queries the state of the {@link LoadBalancerStateStore}. If the balancer is not initialized,
3933   * false is returned.
3934   * @return The state of the load balancer, or false if the load balancer isn't defined.
3935   */
3936  public boolean isBalancerOn() {
3937    return !isInMaintenanceMode() && loadBalancerStateStore != null && loadBalancerStateStore.get();
3938  }
3939
3940  /**
3941   * Queries the state of the {@link RegionNormalizerStateStore}. If it's not initialized, false is
3942   * returned.
3943   */
3944  public boolean isNormalizerOn() {
3945    return !isInMaintenanceMode() && getRegionNormalizerManager().isNormalizerOn();
3946  }
3947
3948  /**
3949   * Queries the state of the {@link SplitOrMergeStateStore}. If it is not initialized, false is
3950   * returned. If switchType is illegal, false will return.
3951   * @param switchType see {@link org.apache.hadoop.hbase.client.MasterSwitchType}
3952   * @return The state of the switch
3953   */
3954  @Override
3955  public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) {
3956    return !isInMaintenanceMode() && splitOrMergeStateStore != null
3957      && splitOrMergeStateStore.isSplitOrMergeEnabled(switchType);
3958  }
3959
3960  /**
3961   * Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned.
3962   * <p/>
3963   * Notice that, the base load balancer will always be {@link RSGroupBasedLoadBalancer} now, so
3964   * this method will return the balancer used inside each rs group.
3965   * @return The name of the {@link LoadBalancer} in use.
3966   */
3967  public String getLoadBalancerClassName() {
3968    return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
3969      LoadBalancerFactory.getDefaultLoadBalancerClass().getName());
3970  }
3971
3972  public SplitOrMergeStateStore getSplitOrMergeStateStore() {
3973    return splitOrMergeStateStore;
3974  }
3975
3976  @Override
3977  public RSGroupBasedLoadBalancer getLoadBalancer() {
3978    return balancer;
3979  }
3980
3981  @Override
3982  public FavoredNodesManager getFavoredNodesManager() {
3983    return balancer.getFavoredNodesManager();
3984  }
3985
3986  private long executePeerProcedure(AbstractPeerProcedure<?> procedure) throws IOException {
3987    if (!isReplicationPeerModificationEnabled()) {
3988      throw new IOException("Replication peer modification disabled");
3989    }
3990    long procId = procedureExecutor.submitProcedure(procedure);
3991    procedure.getLatch().await();
3992    return procId;
3993  }
3994
3995  @Override
3996  public long addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
3997    throws ReplicationException, IOException {
3998    LOG.info(getClientIdAuditPrefix() + " creating replication peer, id=" + peerId + ", config="
3999      + peerConfig + ", state=" + (enabled ? "ENABLED" : "DISABLED"));
4000    return executePeerProcedure(new AddPeerProcedure(peerId, peerConfig, enabled));
4001  }
4002
4003  @Override
4004  public long removeReplicationPeer(String peerId) throws ReplicationException, IOException {
4005    LOG.info(getClientIdAuditPrefix() + " removing replication peer, id=" + peerId);
4006    return executePeerProcedure(new RemovePeerProcedure(peerId));
4007  }
4008
4009  @Override
4010  public long enableReplicationPeer(String peerId) throws ReplicationException, IOException {
4011    LOG.info(getClientIdAuditPrefix() + " enable replication peer, id=" + peerId);
4012    return executePeerProcedure(new EnablePeerProcedure(peerId));
4013  }
4014
4015  @Override
4016  public long disableReplicationPeer(String peerId) throws ReplicationException, IOException {
4017    LOG.info(getClientIdAuditPrefix() + " disable replication peer, id=" + peerId);
4018    return executePeerProcedure(new DisablePeerProcedure(peerId));
4019  }
4020
4021  @Override
4022  public ReplicationPeerConfig getReplicationPeerConfig(String peerId)
4023    throws ReplicationException, IOException {
4024    if (cpHost != null) {
4025      cpHost.preGetReplicationPeerConfig(peerId);
4026    }
4027    LOG.info(getClientIdAuditPrefix() + " get replication peer config, id=" + peerId);
4028    ReplicationPeerConfig peerConfig = this.replicationPeerManager.getPeerConfig(peerId)
4029      .orElseThrow(() -> new ReplicationPeerNotFoundException(peerId));
4030    if (cpHost != null) {
4031      cpHost.postGetReplicationPeerConfig(peerId);
4032    }
4033    return peerConfig;
4034  }
4035
4036  @Override
4037  public long updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
4038    throws ReplicationException, IOException {
4039    LOG.info(getClientIdAuditPrefix() + " update replication peer config, id=" + peerId
4040      + ", config=" + peerConfig);
4041    return executePeerProcedure(new UpdatePeerConfigProcedure(peerId, peerConfig));
4042  }
4043
4044  @Override
4045  public List<ReplicationPeerDescription> listReplicationPeers(String regex)
4046    throws ReplicationException, IOException {
4047    if (cpHost != null) {
4048      cpHost.preListReplicationPeers(regex);
4049    }
4050    LOG.debug("{} list replication peers, regex={}", getClientIdAuditPrefix(), regex);
4051    Pattern pattern = regex == null ? null : Pattern.compile(regex);
4052    List<ReplicationPeerDescription> peers = this.replicationPeerManager.listPeers(pattern);
4053    if (cpHost != null) {
4054      cpHost.postListReplicationPeers(regex);
4055    }
4056    return peers;
4057  }
4058
4059  @Override
4060  public long transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
4061    throws ReplicationException, IOException {
4062    LOG.info(
4063      getClientIdAuditPrefix()
4064        + " transit current cluster state to {} in a synchronous replication peer id={}",
4065      state, peerId);
4066    return executePeerProcedure(new TransitPeerSyncReplicationStateProcedure(peerId, state));
4067  }
4068
4069  @Override
4070  public boolean replicationPeerModificationSwitch(boolean on) throws IOException {
4071    return replicationPeerModificationStateStore.set(on);
4072  }
4073
4074  @Override
4075  public boolean isReplicationPeerModificationEnabled() {
4076    return replicationPeerModificationStateStore.get();
4077  }
4078
4079  /**
4080   * Mark region server(s) as decommissioned (previously called 'draining') to prevent additional
4081   * regions from getting assigned to them. Also unload the regions on the servers asynchronously.0
4082   * @param servers Region servers to decommission.
4083   */
4084  public void decommissionRegionServers(final List<ServerName> servers, final boolean offload)
4085    throws IOException {
4086    List<ServerName> serversAdded = new ArrayList<>(servers.size());
4087    // Place the decommission marker first.
4088    String parentZnode = getZooKeeper().getZNodePaths().drainingZNode;
4089    for (ServerName server : servers) {
4090      try {
4091        String node = ZNodePaths.joinZNode(parentZnode, server.getServerName());
4092        ZKUtil.createAndFailSilent(getZooKeeper(), node);
4093      } catch (KeeperException ke) {
4094        throw new HBaseIOException(
4095          this.zooKeeper.prefix("Unable to decommission '" + server.getServerName() + "'."), ke);
4096      }
4097      if (this.serverManager.addServerToDrainList(server)) {
4098        serversAdded.add(server);
4099      }
4100    }
4101    // Move the regions off the decommissioned servers.
4102    if (offload) {
4103      final List<ServerName> destServers = this.serverManager.createDestinationServersList();
4104      for (ServerName server : serversAdded) {
4105        final List<RegionInfo> regionsOnServer = this.assignmentManager.getRegionsOnServer(server);
4106        for (RegionInfo hri : regionsOnServer) {
4107          ServerName dest = balancer.randomAssignment(hri, destServers);
4108          if (dest == null) {
4109            throw new HBaseIOException("Unable to determine a plan to move " + hri);
4110          }
4111          RegionPlan rp = new RegionPlan(hri, server, dest);
4112          this.assignmentManager.moveAsync(rp);
4113        }
4114      }
4115    }
4116  }
4117
4118  /**
4119   * List region servers marked as decommissioned (previously called 'draining') to not get regions
4120   * assigned to them.
4121   * @return List of decommissioned servers.
4122   */
4123  public List<ServerName> listDecommissionedRegionServers() {
4124    return this.serverManager.getDrainingServersList();
4125  }
4126
4127  /**
4128   * Remove decommission marker (previously called 'draining') from a region server to allow regions
4129   * assignments. Load regions onto the server asynchronously if a list of regions is given
4130   * @param server Region server to remove decommission marker from.
4131   */
4132  public void recommissionRegionServer(final ServerName server,
4133    final List<byte[]> encodedRegionNames) throws IOException {
4134    // Remove the server from decommissioned (draining) server list.
4135    String parentZnode = getZooKeeper().getZNodePaths().drainingZNode;
4136    String node = ZNodePaths.joinZNode(parentZnode, server.getServerName());
4137    try {
4138      ZKUtil.deleteNodeFailSilent(getZooKeeper(), node);
4139    } catch (KeeperException ke) {
4140      throw new HBaseIOException(
4141        this.zooKeeper.prefix("Unable to recommission '" + server.getServerName() + "'."), ke);
4142    }
4143    this.serverManager.removeServerFromDrainList(server);
4144
4145    // Load the regions onto the server if we are given a list of regions.
4146    if (encodedRegionNames == null || encodedRegionNames.isEmpty()) {
4147      return;
4148    }
4149    if (!this.serverManager.isServerOnline(server)) {
4150      return;
4151    }
4152    for (byte[] encodedRegionName : encodedRegionNames) {
4153      RegionState regionState =
4154        assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName));
4155      if (regionState == null) {
4156        LOG.warn("Unknown region " + Bytes.toStringBinary(encodedRegionName));
4157        continue;
4158      }
4159      RegionInfo hri = regionState.getRegion();
4160      if (server.equals(regionState.getServerName())) {
4161        LOG.info("Skipping move of region " + hri.getRegionNameAsString()
4162          + " because region already assigned to the same server " + server + ".");
4163        continue;
4164      }
4165      RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), server);
4166      this.assignmentManager.moveAsync(rp);
4167    }
4168  }
4169
4170  @Override
4171  public LockManager getLockManager() {
4172    return lockManager;
4173  }
4174
4175  public QuotaObserverChore getQuotaObserverChore() {
4176    return this.quotaObserverChore;
4177  }
4178
4179  public SpaceQuotaSnapshotNotifier getSpaceQuotaSnapshotNotifier() {
4180    return this.spaceQuotaSnapshotNotifier;
4181  }
4182
4183  @SuppressWarnings("unchecked")
4184  private RemoteProcedure<MasterProcedureEnv, ?> getRemoteProcedure(long procId) {
4185    Procedure<?> procedure = procedureExecutor.getProcedure(procId);
4186    if (procedure == null) {
4187      return null;
4188    }
4189    assert procedure instanceof RemoteProcedure;
4190    return (RemoteProcedure<MasterProcedureEnv, ?>) procedure;
4191  }
4192
4193  public void remoteProcedureCompleted(long procId) {
4194    LOG.debug("Remote procedure done, pid={}", procId);
4195    RemoteProcedure<MasterProcedureEnv, ?> procedure = getRemoteProcedure(procId);
4196    if (procedure != null) {
4197      procedure.remoteOperationCompleted(procedureExecutor.getEnvironment());
4198    }
4199  }
4200
4201  public void remoteProcedureFailed(long procId, RemoteProcedureException error) {
4202    LOG.debug("Remote procedure failed, pid={}", procId, error);
4203    RemoteProcedure<MasterProcedureEnv, ?> procedure = getRemoteProcedure(procId);
4204    if (procedure != null) {
4205      procedure.remoteOperationFailed(procedureExecutor.getEnvironment(), error);
4206    }
4207  }
4208
4209  /**
4210   * Reopen regions provided in the argument
4211   * @param tableName   The current table name
4212   * @param regionNames The region names of the regions to reopen
4213   * @param nonceGroup  Identifier for the source of the request, a client or process
4214   * @param nonce       A unique identifier for this operation from the client or process identified
4215   *                    by <code>nonceGroup</code> (the source must ensure each operation gets a
4216   *                    unique id).
4217   * @return procedure Id
4218   * @throws IOException if reopening region fails while running procedure
4219   */
4220  long reopenRegions(final TableName tableName, final List<byte[]> regionNames,
4221    final long nonceGroup, final long nonce) throws IOException {
4222
4223    return MasterProcedureUtil
4224      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
4225
4226        @Override
4227        protected void run() throws IOException {
4228          submitProcedure(new ReopenTableRegionsProcedure(tableName, regionNames));
4229        }
4230
4231        @Override
4232        protected String getDescription() {
4233          return "ReopenTableRegionsProcedure";
4234        }
4235
4236      });
4237
4238  }
4239
4240  @Override
4241  public ReplicationPeerManager getReplicationPeerManager() {
4242    return replicationPeerManager;
4243  }
4244
4245  @Override
4246  public ReplicationLogCleanerBarrier getReplicationLogCleanerBarrier() {
4247    return replicationLogCleanerBarrier;
4248  }
4249
4250  @Override
4251  public Semaphore getSyncReplicationPeerLock() {
4252    return syncReplicationPeerLock;
4253  }
4254
4255  public HashMap<String, List<Pair<ServerName, ReplicationLoadSource>>>
4256    getReplicationLoad(ServerName[] serverNames) {
4257    List<ReplicationPeerDescription> peerList = this.getReplicationPeerManager().listPeers(null);
4258    if (peerList == null) {
4259      return null;
4260    }
4261    HashMap<String, List<Pair<ServerName, ReplicationLoadSource>>> replicationLoadSourceMap =
4262      new HashMap<>(peerList.size());
4263    peerList.stream()
4264      .forEach(peer -> replicationLoadSourceMap.put(peer.getPeerId(), new ArrayList<>()));
4265    for (ServerName serverName : serverNames) {
4266      List<ReplicationLoadSource> replicationLoadSources =
4267        getServerManager().getLoad(serverName).getReplicationLoadSourceList();
4268      for (ReplicationLoadSource replicationLoadSource : replicationLoadSources) {
4269        List<Pair<ServerName, ReplicationLoadSource>> replicationLoadSourceList =
4270          replicationLoadSourceMap.get(replicationLoadSource.getPeerID());
4271        if (replicationLoadSourceList == null) {
4272          LOG.debug("{} does not exist, but it exists "
4273            + "in znode(/hbase/replication/rs). when the rs restarts, peerId is deleted, so "
4274            + "we just need to ignore it", replicationLoadSource.getPeerID());
4275          continue;
4276        }
4277        replicationLoadSourceList.add(new Pair<>(serverName, replicationLoadSource));
4278      }
4279    }
4280    for (List<Pair<ServerName, ReplicationLoadSource>> loads : replicationLoadSourceMap.values()) {
4281      if (loads.size() > 0) {
4282        loads.sort(Comparator.comparingLong(load -> (-1) * load.getSecond().getReplicationLag()));
4283      }
4284    }
4285    return replicationLoadSourceMap;
4286  }
4287
4288  /**
4289   * This method modifies the master's configuration in order to inject replication-related features
4290   */
4291  @InterfaceAudience.Private
4292  public static void decorateMasterConfiguration(Configuration conf) {
4293    String plugins = conf.get(HBASE_MASTER_LOGCLEANER_PLUGINS);
4294    String cleanerClass = ReplicationLogCleaner.class.getCanonicalName();
4295    if (plugins == null || !plugins.contains(cleanerClass)) {
4296      conf.set(HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + cleanerClass);
4297    }
4298    if (ReplicationUtils.isReplicationForBulkLoadDataEnabled(conf)) {
4299      plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
4300      cleanerClass = ReplicationHFileCleaner.class.getCanonicalName();
4301      if (!plugins.contains(cleanerClass)) {
4302        conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, plugins + "," + cleanerClass);
4303      }
4304    }
4305  }
4306
4307  public SnapshotQuotaObserverChore getSnapshotQuotaObserverChore() {
4308    return this.snapshotQuotaChore;
4309  }
4310
4311  public ActiveMasterManager getActiveMasterManager() {
4312    return activeMasterManager;
4313  }
4314
4315  @Override
4316  public SyncReplicationReplayWALManager getSyncReplicationReplayWALManager() {
4317    return this.syncReplicationReplayWALManager;
4318  }
4319
4320  @Override
4321  public HbckChore getHbckChore() {
4322    return this.hbckChore;
4323  }
4324
4325  @Override
4326  public void runReplicationBarrierCleaner() {
4327    ReplicationBarrierCleaner rbc = this.replicationBarrierCleaner;
4328    if (rbc != null) {
4329      rbc.chore();
4330    }
4331  }
4332
4333  @Override
4334  public RSGroupInfoManager getRSGroupInfoManager() {
4335    return rsGroupInfoManager;
4336  }
4337
4338  /**
4339   * Get the compaction state of the table
4340   * @param tableName The table name
4341   * @return CompactionState Compaction state of the table
4342   */
4343  public CompactionState getCompactionState(final TableName tableName) {
4344    CompactionState compactionState = CompactionState.NONE;
4345    try {
4346      List<RegionInfo> regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName);
4347      for (RegionInfo regionInfo : regions) {
4348        ServerName serverName =
4349          assignmentManager.getRegionStates().getRegionServerOfRegion(regionInfo);
4350        if (serverName == null) {
4351          continue;
4352        }
4353        ServerMetrics sl = serverManager.getLoad(serverName);
4354        if (sl == null) {
4355          continue;
4356        }
4357        RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName());
4358        if (regionMetrics == null) {
4359          LOG.warn("Can not get compaction details for the region: {} , it may be not online.",
4360            regionInfo.getRegionNameAsString());
4361          continue;
4362        }
4363        if (regionMetrics.getCompactionState() == CompactionState.MAJOR) {
4364          if (compactionState == CompactionState.MINOR) {
4365            compactionState = CompactionState.MAJOR_AND_MINOR;
4366          } else {
4367            compactionState = CompactionState.MAJOR;
4368          }
4369        } else if (regionMetrics.getCompactionState() == CompactionState.MINOR) {
4370          if (compactionState == CompactionState.MAJOR) {
4371            compactionState = CompactionState.MAJOR_AND_MINOR;
4372          } else {
4373            compactionState = CompactionState.MINOR;
4374          }
4375        }
4376      }
4377    } catch (Exception e) {
4378      compactionState = null;
4379      LOG.error("Exception when get compaction state for " + tableName.getNameAsString(), e);
4380    }
4381    return compactionState;
4382  }
4383
4384  @Override
4385  public MetaLocationSyncer getMetaLocationSyncer() {
4386    return metaLocationSyncer;
4387  }
4388
4389  @RestrictedApi(explanation = "Should only be called in tests", link = "",
4390      allowedOnPath = ".*/src/test/.*")
4391  public MasterRegion getMasterRegion() {
4392    return masterRegion;
4393  }
4394
4395  @Override
4396  public void onConfigurationChange(Configuration newConf) {
4397    try {
4398      Superusers.initialize(newConf);
4399    } catch (IOException e) {
4400      LOG.warn("Failed to initialize SuperUsers on reloading of the configuration");
4401    }
4402    // append the quotas observer back to the master coprocessor key
4403    setQuotasObserver(newConf);
4404    // update region server coprocessor if the configuration has changed.
4405    if (
4406      CoprocessorConfigurationUtil.checkConfigurationChange(getConfiguration(), newConf,
4407        CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY) && !maintenanceMode
4408    ) {
4409      LOG.info("Update the master coprocessor(s) because the configuration has changed");
4410      initializeCoprocessorHost(newConf);
4411    }
4412  }
4413
4414  @Override
4415  protected NamedQueueRecorder createNamedQueueRecord() {
4416    final boolean isBalancerDecisionRecording =
4417      conf.getBoolean(BaseLoadBalancer.BALANCER_DECISION_BUFFER_ENABLED,
4418        BaseLoadBalancer.DEFAULT_BALANCER_DECISION_BUFFER_ENABLED);
4419    final boolean isBalancerRejectionRecording =
4420      conf.getBoolean(BaseLoadBalancer.BALANCER_REJECTION_BUFFER_ENABLED,
4421        BaseLoadBalancer.DEFAULT_BALANCER_REJECTION_BUFFER_ENABLED);
4422    if (isBalancerDecisionRecording || isBalancerRejectionRecording) {
4423      return NamedQueueRecorder.getInstance(conf);
4424    } else {
4425      return null;
4426    }
4427  }
4428
4429  @Override
4430  protected boolean clusterMode() {
4431    return true;
4432  }
4433
4434  public String getClusterId() {
4435    if (activeMaster) {
4436      return clusterId;
4437    }
4438    return cachedClusterId.getFromCacheOrFetch();
4439  }
4440
4441  public Optional<ServerName> getActiveMaster() {
4442    return activeMasterManager.getActiveMasterServerName();
4443  }
4444
4445  public List<ServerName> getBackupMasters() {
4446    return activeMasterManager.getBackupMasters();
4447  }
4448
4449  @Override
4450  public Iterator<ServerName> getBootstrapNodes() {
4451    return regionServerTracker.getRegionServers().iterator();
4452  }
4453
4454  @Override
4455  public List<HRegionLocation> getMetaLocations() {
4456    return metaRegionLocationCache.getMetaRegionLocations();
4457  }
4458
4459  @Override
4460  public void flushMasterStore() throws IOException {
4461    LOG.info("Force flush master local region.");
4462    if (this.cpHost != null) {
4463      try {
4464        cpHost.preMasterStoreFlush();
4465      } catch (IOException ioe) {
4466        LOG.error("Error invoking master coprocessor preMasterStoreFlush()", ioe);
4467      }
4468    }
4469    masterRegion.flush(true);
4470    if (this.cpHost != null) {
4471      try {
4472        cpHost.postMasterStoreFlush();
4473      } catch (IOException ioe) {
4474        LOG.error("Error invoking master coprocessor postMasterStoreFlush()", ioe);
4475      }
4476    }
4477  }
4478
4479  public Collection<ServerName> getLiveRegionServers() {
4480    return regionServerTracker.getRegionServers();
4481  }
4482
4483  @RestrictedApi(explanation = "Should only be called in tests", link = "",
4484      allowedOnPath = ".*/src/test/.*")
4485  void setLoadBalancer(RSGroupBasedLoadBalancer loadBalancer) {
4486    this.balancer = loadBalancer;
4487  }
4488
4489  @RestrictedApi(explanation = "Should only be called in tests", link = "",
4490      allowedOnPath = ".*/src/test/.*")
4491  void setAssignmentManager(AssignmentManager assignmentManager) {
4492    this.assignmentManager = assignmentManager;
4493  }
4494
4495  @RestrictedApi(explanation = "Should only be called in tests", link = "",
4496      allowedOnPath = ".*/src/test/.*")
4497  static void setDisableBalancerChoreForTest(boolean disable) {
4498    disableBalancerChoreForTest = disable;
4499  }
4500
4501  private void setQuotasObserver(Configuration conf) {
4502    // Add the Observer to delete quotas on table deletion before starting all CPs by
4503    // default with quota support, avoiding if user specifically asks to not load this Observer.
4504    if (QuotaUtil.isQuotaEnabled(conf)) {
4505      updateConfigurationForQuotasObserver(conf);
4506    }
4507  }
4508
4509  private void initializeCoprocessorHost(Configuration conf) {
4510    // initialize master side coprocessors before we start handling requests
4511    this.cpHost = new MasterCoprocessorHost(this, conf);
4512  }
4513
4514  @Override
4515  public long flushTable(TableName tableName, List<byte[]> columnFamilies, long nonceGroup,
4516    long nonce) throws IOException {
4517    checkInitialized();
4518
4519    if (
4520      !getConfiguration().getBoolean(MasterFlushTableProcedureManager.FLUSH_PROCEDURE_ENABLED,
4521        MasterFlushTableProcedureManager.FLUSH_PROCEDURE_ENABLED_DEFAULT)
4522    ) {
4523      throw new DoNotRetryIOException("FlushTableProcedureV2 is DISABLED");
4524    }
4525
4526    return MasterProcedureUtil
4527      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
4528        @Override
4529        protected void run() throws IOException {
4530          getMaster().getMasterCoprocessorHost().preTableFlush(tableName);
4531          LOG.info(getClientIdAuditPrefix() + " flush " + tableName);
4532          submitProcedure(
4533            new FlushTableProcedure(procedureExecutor.getEnvironment(), tableName, columnFamilies));
4534          getMaster().getMasterCoprocessorHost().postTableFlush(tableName);
4535        }
4536
4537        @Override
4538        protected String getDescription() {
4539          return "FlushTableProcedure";
4540        }
4541      });
4542  }
4543}