/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseRpcServicesBase;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.client.VersionInfoUtil;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.PriorityFunction;
import org.apache.hadoop.hbase.ipc.QosPriority;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.hbck.HbckChore;
import org.apache.hadoop.hbase.master.janitor.MetaFixer;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
import org.apache.hadoop.hbase.master.procedure.RestoreBackupSystemTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.master.replication.AbstractPeerNoLockProcedure;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.namequeues.BalancerDecisionDetails;
import org.apache.hadoop.hbase.namequeues.BalancerRejectionDetails;
import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest;
import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
import org.apache.hadoop.hbase.procedure2.LockType;
import org.apache.hadoop.hbase.procedure2.LockedResource;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.AccessChecker.InputUser;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.security.access.PermissionStorage;
import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.security.visibility.VisibilityController;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.DNS;
import org.apache.hadoop.hbase.util.DNS.ServerType;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.ServiceDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
import org.apache.hbase.thirdparty.com.google.protobuf.Service;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.NamespaceQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.TableQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetConfiguredNamespacesAndTablesInRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetConfiguredNamespacesAndTablesInRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListTablesInRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListTablesInRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RenameRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RenameRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RecentLogs;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLiveRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLiveRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ClientMetaService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerModificationProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerModificationProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.IsReplicationPeerModificationEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.IsReplicationPeerModificationEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerModificationSwitchRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerModificationSwitchResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;

/**
 * Implements the RPC services exposed by the active master: the MasterService admin API, the
 * RegionServerStatusService used by region servers to report in, the LockService and HbckService
 * endpoints, plus the shared admin and client-meta services registered in {@link #getServices()}.
 */
@InterfaceAudience.Private
public class MasterRpcServices extends HBaseRpcServicesBase<HMaster>
  implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
  LockService.BlockingInterface, HbckService.BlockingInterface {

  private static final Logger LOG = LoggerFactory.getLogger(MasterRpcServices.class.getName());
  private static final Logger AUDITLOG =
    LoggerFactory.getLogger("SecurityLogger." + MasterRpcServices.class.getName());

  /** RPC scheduler to use for the master. */
  public static final String MASTER_RPC_SCHEDULER_FACTORY_CLASS =
    "hbase.master.rpc.scheduler.factory.class";

  /**
   * @return Subset of configuration to pass to initializing regionservers: e.g. the filesystem
   *         and root directory to use.
   */
  private RegionServerStartupResponse.Builder createConfigurationSubset() {
    RegionServerStartupResponse.Builder resp =
      addConfig(RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR);
    resp = addConfig(resp, "fs.defaultFS");
    return addConfig(resp, "hbase.master.info.port");
  }

  private RegionServerStartupResponse.Builder
    addConfig(final RegionServerStartupResponse.Builder resp, final String key) {
    NameStringPair.Builder entry =
      NameStringPair.newBuilder().setName(key).setValue(server.getConfiguration().get(key));
    resp.addMapEntries(entry.build());
    return resp;
  }

  public MasterRpcServices(HMaster m) throws IOException {
    super(m, m.getProcessName());
  }

  @Override
  protected boolean defaultReservoirEnabled() {
    return false;
  }

  @Override
  protected ServerType getDNSServerType() {
    return DNS.ServerType.MASTER;
  }

  @Override
  protected String getHostname(Configuration conf, String defaultHostname) {
    return conf.get("hbase.master.ipc.address", defaultHostname);
  }

  @Override
  protected String getPortConfigName() {
    return HConstants.MASTER_PORT;
  }

  @Override
  protected int getDefaultPort() {
    return HConstants.DEFAULT_MASTER_PORT;
  }

  @Override
  protected Class<?> getRpcSchedulerFactoryClass(Configuration conf) {
    return conf.getClass(MASTER_RPC_SCHEDULER_FACTORY_CLASS, SimpleRpcSchedulerFactory.class);
  }

  @Override
  protected PriorityFunction createPriority() {
    return new MasterAnnotationReadingPriorityFunction(this);
  }

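  /**
   * Constructs the RPC services for the given master; the superclass sets up the RPC server using
   * the master's process name.
   */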
  /**
   * Performs the following pre-checks, in order:
   * <ol>
   * <li>Master is initialized</li>
   * <li>RPC caller has admin permissions</li>
   * </ol>
   * @param requestName name of the RPC request; used to provide context when reporting failures.
   * @throws ServiceException if any of the above pre-checks fails.
   */
  private void rpcPreCheck(String requestName) throws ServiceException {
    try {
      server.checkInitialized();
      requirePermission(requestName, Permission.Action.ADMIN);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  enum BalanceSwitchMode {
    SYNC,
    ASYNC
  }

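  /**
   * Controls how {@link #switchBalancer(boolean, BalanceSwitchMode)} persists a new balancer
   * state: SYNC takes the load balancer's monitor before writing the state store, ASYNC writes it
   * without synchronizing.
   */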
  /**
   * Sets the balancer switch on or off, applying the change according to the given
   * {@link BalanceSwitchMode}.
   * @param b    new balancer switch value
   * @param mode whether to apply the change synchronously or asynchronously
   * @return previous balancer switch value
   */
  boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException {
    boolean oldValue = server.loadBalancerStateStore.get();
    boolean newValue = b;
    try {
      if (server.cpHost != null) {
        server.cpHost.preBalanceSwitch(newValue);
      }
      if (mode == BalanceSwitchMode.SYNC) {
        synchronized (server.getLoadBalancer()) {
          server.loadBalancerStateStore.set(newValue);
        }
      } else {
        server.loadBalancerStateStore.set(newValue);
      }
      LOG.info(server.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue);
      if (server.cpHost != null) {
        server.cpHost.postBalanceSwitch(oldValue, newValue);
      }
      server.getLoadBalancer().updateBalancerStatus(newValue);
    } catch (IOException ioe) {
      LOG.warn("Error flipping balance switch", ioe);
    }
    return oldValue;
  }

  boolean synchronousBalanceSwitch(final boolean b) throws IOException {
    return switchBalancer(b, BalanceSwitchMode.SYNC);
  }

  /** Returns list of blocking services and their security info classes that this server supports */
  @Override
  protected List<BlockingServiceAndInterface> getServices() {
    List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
    bssi.add(new BlockingServiceAndInterface(MasterService.newReflectiveBlockingService(this),
      MasterService.BlockingInterface.class));
    bssi.add(
      new BlockingServiceAndInterface(RegionServerStatusService.newReflectiveBlockingService(this),
        RegionServerStatusService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this),
      LockService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(HbckService.newReflectiveBlockingService(this),
      HbckService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this),
      ClientMetaService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(AdminService.newReflectiveBlockingService(this),
      AdminService.BlockingInterface.class));
    return bssi;
  }

  void start(ZKWatcher zkWatcher) {
    internalStart(zkWatcher);
  }

  void stop() {
    internalStop();
  }

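  /** Starts the RPC services, handing the master's ZooKeeper watcher to the shared base class. */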
  @Override
  // The priority for all RegionServerStatusProtos RPCs is set to HIGH_QOS in
  // MasterAnnotationReadingPriorityFunction itself.
  public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller,
    GetLastFlushedSequenceIdRequest request) throws ServiceException {
    try {
      server.checkServiceStarted();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    byte[] encodedRegionName = request.getRegionName().toByteArray();
    RegionStoreSequenceIds ids =
      server.getServerManager().getLastFlushedSequenceId(encodedRegionName);
    return ResponseConverter.buildGetLastFlushedSequenceIdResponse(ids);
  }

  @Override
  public RegionServerReportResponse regionServerReport(RpcController controller,
    RegionServerReportRequest request) throws ServiceException {
    try {
      server.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      ClusterStatusProtos.ServerLoad sl = request.getLoad();
      ServerName serverName = ProtobufUtil.toServerName(request.getServer());
      ServerMetrics oldLoad = server.getServerManager().getLoad(serverName);
      ServerMetrics newLoad =
        ServerMetricsBuilder.toServerMetrics(serverName, versionNumber, version, sl);
      server.getServerManager().regionServerReport(serverName, newLoad);
      server.getAssignmentManager().reportOnlineRegions(serverName,
        newLoad.getRegionMetrics().keySet());
      if (sl != null && server.metricsMaster != null) {
        // Up our metrics.
        server.metricsMaster.incrementRequests(
          sl.getTotalNumberOfRequests() - (oldLoad != null ? oldLoad.getRequestCount() : 0));
        server.metricsMaster.incrementReadRequests(
          sl.getReadRequestsCount() - (oldLoad != null ? oldLoad.getReadRequestsCount() : 0));
        server.metricsMaster.incrementWriteRequests(
          sl.getWriteRequestsCount() - (oldLoad != null ? oldLoad.getWriteRequestsCount() : 0));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return RegionServerReportResponse.newBuilder().build();
  }

  @Override
  public RegionServerStartupResponse regionServerStartup(RpcController controller,
    RegionServerStartupRequest request) throws ServiceException {
    // Register with server manager
    try {
      server.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      InetAddress ia = server.getRemoteInetAddress(request.getPort(), request.getServerStartCode());
      // if regionserver passed hostname to use,
      // then use it instead of doing a reverse DNS lookup
      ServerName rs =
        server.getServerManager().regionServerStartup(request, versionNumber, version, ia);

      // Send back some config info
      RegionServerStartupResponse.Builder resp = createConfigurationSubset();
      NameStringPair.Builder entry = NameStringPair.newBuilder()
        .setName(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER).setValue(rs.getHostname());
      resp.addMapEntries(entry.build());

      return resp.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public ReportRSFatalErrorResponse reportRSFatalError(RpcController controller,
    ReportRSFatalErrorRequest request) throws ServiceException {
    String errorText = request.getErrorMessage();
    ServerName sn = ProtobufUtil.toServerName(request.getServer());
    String msg = sn + " reported a fatal error:\n" + errorText;
    LOG.warn(msg);
    server.rsFatals.add(msg);
    return ReportRSFatalErrorResponse.newBuilder().build();
  }

  @Override
  public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req)
    throws ServiceException {
    try {
      long procId = server.addColumn(ProtobufUtil.toTableName(req.getTableName()),
        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(),
        req.getNonce());
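  /**
   * Handles the periodic load report from a region server: updates the ServerManager's metrics
   * for that server, tells the AssignmentManager which regions the server is carrying, and
   * increments the master's request counters by the delta from the previous report.
   */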
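  /**
   * Registers a starting region server with the ServerManager and returns the slice of master
   * configuration it needs (hbase.rootdir, fs.defaultFS, the master info port) along with the
   * hostname the master saw for the connection.
   */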
      if (procId == -1) {
        // This means the operation was not performed by the server, so do not set any procId
        return AddColumnResponse.newBuilder().build();
      } else {
        return AddColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public AssignRegionResponse assignRegion(RpcController controller, AssignRegionRequest req)
    throws ServiceException {
    try {
      server.checkInitialized();

      final RegionSpecifierType type = req.getRegion().getType();
      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("assignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
          + " actual: " + type);
      }

      final byte[] regionName = req.getRegion().getValue().toByteArray();
      final RegionInfo regionInfo = server.getAssignmentManager().getRegionInfo(regionName);
      if (regionInfo == null) {
        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
      }

      final AssignRegionResponse arr = AssignRegionResponse.newBuilder().build();
      if (server.cpHost != null) {
        server.cpHost.preAssign(regionInfo);
      }
      LOG.info(server.getClientIdAuditPrefix() + " assign " + regionInfo.getRegionNameAsString());
      server.getAssignmentManager().assign(regionInfo);
      if (server.cpHost != null) {
        server.cpHost.postAssign(regionInfo);
      }
      return arr;
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public MasterProtos.BalanceResponse balance(RpcController controller,
    MasterProtos.BalanceRequest request) throws ServiceException {
    try {
      return ProtobufUtil.toBalanceResponse(server.balance(ProtobufUtil.toBalanceRequest(request)));
    } catch (IOException ex) {
      throw new ServiceException(ex);
    }
  }

  @Override
  public CreateNamespaceResponse createNamespace(RpcController controller,
    CreateNamespaceRequest request) throws ServiceException {
    try {
      long procId =
        server.createNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
          request.getNonceGroup(), request.getNonce());
      return CreateNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
    throws ServiceException {
    TableDescriptor tableDescriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema());
    byte[][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
    try {
      long procId =
        server.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
      LOG.info(server.getClientIdAuditPrefix() + " procedure request for creating table: "
        + req.getTableSchema().getTableName() + " procId is: " + procId);
      return CreateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req)
    throws ServiceException {
    try {
      long procId = server.deleteColumn(ProtobufUtil.toTableName(req.getTableName()),
        req.getColumnName().toByteArray(), req.getNonceGroup(), req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed by the server, so do not set any procId
804        return DeleteColumnResponse.newBuilder().build();
805      } else {
806        return DeleteColumnResponse.newBuilder().setProcId(procId).build();
807      }
808    } catch (IOException ioe) {
809      throw new ServiceException(ioe);
810    }
811  }
812
813  @Override
814  public DeleteNamespaceResponse deleteNamespace(RpcController controller,
815    DeleteNamespaceRequest request) throws ServiceException {
816    try {
817      long procId = server.deleteNamespace(request.getNamespaceName(), request.getNonceGroup(),
818        request.getNonce());
819      return DeleteNamespaceResponse.newBuilder().setProcId(procId).build();
820    } catch (IOException e) {
821      throw new ServiceException(e);
822    }
823  }
824
825  /**
826   * Execute Delete Snapshot operation.
827   * @return DeleteSnapshotResponse (a protobuf wrapped void) if the snapshot existed and was
828   *         deleted properly.
829   * @throws ServiceException wrapping SnapshotDoesNotExistException if specified snapshot did not
830   *                          exist.
831   */
832  @Override
833  public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
834    DeleteSnapshotRequest request) throws ServiceException {
835    try {
836      server.checkInitialized();
837      server.snapshotManager.checkSnapshotSupport();
838
839      LOG.info(server.getClientIdAuditPrefix() + " delete " + request.getSnapshot());
840      server.snapshotManager.deleteSnapshot(request.getSnapshot());
841      return DeleteSnapshotResponse.newBuilder().build();
842    } catch (IOException e) {
843      throw new ServiceException(e);
844    }
845  }
846
847  @Override
848  public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request)
849    throws ServiceException {
850    try {
851      long procId = server.deleteTable(ProtobufUtil.toTableName(request.getTableName()),
852        request.getNonceGroup(), request.getNonce());
853      return DeleteTableResponse.newBuilder().setProcId(procId).build();
854    } catch (IOException ioe) {
855      throw new ServiceException(ioe);
856    }
857  }
858
859  @Override
860  public TruncateTableResponse truncateTable(RpcController controller, TruncateTableRequest request)
861    throws ServiceException {
862    try {
863      long procId = server.truncateTable(ProtobufUtil.toTableName(request.getTableName()),
864        request.getPreserveSplits(), request.getNonceGroup(), request.getNonce());
865      return TruncateTableResponse.newBuilder().setProcId(procId).build();
866    } catch (IOException ioe) {
867      throw new ServiceException(ioe);
868    }
869  }
870
871  @Override
872  public DisableTableResponse disableTable(RpcController controller, DisableTableRequest request)
873    throws ServiceException {
874    try {
875      long procId = server.disableTable(ProtobufUtil.toTableName(request.getTableName()),
876        request.getNonceGroup(), request.getNonce());
877      return DisableTableResponse.newBuilder().setProcId(procId).build();
878    } catch (IOException ioe) {
879      throw new ServiceException(ioe);
880    }
881  }
882
883  @Override
884  public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
885    EnableCatalogJanitorRequest req) throws ServiceException {
886    rpcPreCheck("enableCatalogJanitor");
887    return EnableCatalogJanitorResponse.newBuilder()
888      .setPrevValue(server.catalogJanitorChore.setEnabled(req.getEnable())).build();
889  }
890
891  @Override
892  public SetCleanerChoreRunningResponse setCleanerChoreRunning(RpcController c,
893    SetCleanerChoreRunningRequest req) throws ServiceException {
894    rpcPreCheck("setCleanerChoreRunning");
895
896    boolean prevValue =
897      server.getLogCleaner().getEnabled() && server.getHFileCleaner().getEnabled();
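    // Apply the requested state to the log cleaner and to every configured HFile cleaner chore.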
898    server.getLogCleaner().setEnabled(req.getOn());
899    for (HFileCleaner hFileCleaner : server.getHFileCleaners()) {
900      hFileCleaner.setEnabled(req.getOn());
901    }
902    return SetCleanerChoreRunningResponse.newBuilder().setPrevValue(prevValue).build();
903  }
904
905  @Override
906  public EnableTableResponse enableTable(RpcController controller, EnableTableRequest request)
907    throws ServiceException {
908    try {
909      long procId = server.enableTable(ProtobufUtil.toTableName(request.getTableName()),
910        request.getNonceGroup(), request.getNonce());
911      return EnableTableResponse.newBuilder().setProcId(procId).build();
912    } catch (IOException ioe) {
913      throw new ServiceException(ioe);
914    }
915  }
916
917  @Override
918  public MergeTableRegionsResponse mergeTableRegions(RpcController c,
919    MergeTableRegionsRequest request) throws ServiceException {
920    try {
921      server.checkInitialized();
922    } catch (IOException ioe) {
923      throw new ServiceException(ioe);
924    }
925
926    RegionStates regionStates = server.getAssignmentManager().getRegionStates();
927
928    RegionInfo[] regionsToMerge = new RegionInfo[request.getRegionCount()];
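    // Resolve each encoded region name to its RegionInfo via the master's in-memory region
    // states; an unexpected specifier type is only logged as a warning and the request proceeds.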
929    for (int i = 0; i < request.getRegionCount(); i++) {
930      final byte[] encodedNameOfRegion = request.getRegion(i).getValue().toByteArray();
931      if (request.getRegion(i).getType() != RegionSpecifierType.ENCODED_REGION_NAME) {
        LOG.warn("MergeRegions specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
          + " actual: region " + i + " = " + request.getRegion(i).getType());
934      }
935      RegionState regionState = regionStates.getRegionState(Bytes.toString(encodedNameOfRegion));
936      if (regionState == null) {
937        throw new ServiceException(
938          new UnknownRegionException(Bytes.toStringBinary(encodedNameOfRegion)));
939      }
940      regionsToMerge[i] = regionState.getRegion();
941    }
942
943    try {
944      long procId = server.mergeRegions(regionsToMerge, request.getForcible(),
945        request.getNonceGroup(), request.getNonce());
946      return MergeTableRegionsResponse.newBuilder().setProcId(procId).build();
947    } catch (IOException ioe) {
948      throw new ServiceException(ioe);
949    }
950  }
951
952  @Override
953  public SplitTableRegionResponse splitRegion(final RpcController controller,
954    final SplitTableRegionRequest request) throws ServiceException {
955    try {
956      long procId = server.splitRegion(ProtobufUtil.toRegionInfo(request.getRegionInfo()),
957        request.hasSplitRow() ? request.getSplitRow().toByteArray() : null, request.getNonceGroup(),
958        request.getNonce());
959      return SplitTableRegionResponse.newBuilder().setProcId(procId).build();
960    } catch (IOException ie) {
961      throw new ServiceException(ie);
962    }
963  }
964
965  @Override
966  public MasterProtos.TruncateRegionResponse truncateRegion(RpcController controller,
967    final MasterProtos.TruncateRegionRequest request) throws ServiceException {
968    try {
969      long procId = server.truncateRegion(ProtobufUtil.toRegionInfo(request.getRegionInfo()),
970        request.getNonceGroup(), request.getNonce());
971      return MasterProtos.TruncateRegionResponse.newBuilder().setProcId(procId).build();
972    } catch (IOException ie) {
973      throw new ServiceException(ie);
974    }
975  }
976
977  @Override
978  public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller,
979    final ClientProtos.CoprocessorServiceRequest request) throws ServiceException {
980    rpcPreCheck("execMasterService");
981    try {
982      ServerRpcController execController = new ServerRpcController();
983      ClientProtos.CoprocessorServiceCall call = request.getCall();
984      String serviceName = call.getServiceName();
985      String methodName = call.getMethodName();
986      if (!server.coprocessorServiceHandlers.containsKey(serviceName)) {
987        throw new UnknownProtocolException(null,
988          "No registered Master Coprocessor Endpoint found for " + serviceName
989            + ". Has it been enabled?");
990      }
991
992      Service service = server.coprocessorServiceHandlers.get(serviceName);
993      ServiceDescriptor serviceDesc = service.getDescriptorForType();
994      MethodDescriptor methodDesc =
995        CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc);
996
997      Message execRequest = CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest());
998      final Message.Builder responseBuilder =
999        service.getResponsePrototype(methodDesc).newBuilderForType();
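      // The endpoint is expected to complete synchronously; the callback merges its response
      // (if any) into the builder.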
1000      service.callMethod(methodDesc, execController, execRequest, (message) -> {
1001        if (message != null) {
1002          responseBuilder.mergeFrom(message);
1003        }
1004      });
1005      Message execResult = responseBuilder.build();
1006      if (execController.getFailedOn() != null) {
1007        throw execController.getFailedOn();
1008      }
1009
1010      String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
1011      User caller = RpcServer.getRequestUser().orElse(null);
1012      AUDITLOG.info("User {} (remote address: {}) master service request for {}.{}", caller,
1013        remoteAddress, serviceName, methodName);
1014
1015      return CoprocessorRpcUtils.getResponse(execResult, HConstants.EMPTY_BYTE_ARRAY);
1016    } catch (IOException ie) {
1017      throw new ServiceException(ie);
1018    }
1019  }
1020
1021  /**
1022   * Triggers an asynchronous attempt to run a distributed procedure. {@inheritDoc}
1023   */
1024  @Override
1025  public ExecProcedureResponse execProcedure(RpcController controller, ExecProcedureRequest request)
1026    throws ServiceException {
1027    try {
1028      server.checkInitialized();
1029      ProcedureDescription desc = request.getProcedure();
1030      MasterProcedureManager mpm =
1031        server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
1032      if (mpm == null) {
1033        throw new ServiceException(
1034          new DoNotRetryIOException("The procedure is not registered: " + desc.getSignature()));
1035      }
1036      LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
1037      mpm.checkPermissions(desc, getAccessChecker(), RpcServer.getRequestUser().orElse(null));
1038      mpm.execProcedure(desc);
1039      // send back the max amount of time the client should wait for the procedure
1040      // to complete
1041      long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME;
1042      return ExecProcedureResponse.newBuilder().setExpectedTimeout(waitTime).build();
1043    } catch (ForeignException e) {
1044      throw new ServiceException(e.getCause());
1045    } catch (IOException e) {
1046      throw new ServiceException(e);
1047    }
1048  }
1049
1050  /**
1051   * Triggers a synchronous attempt to run a distributed procedure and sets return data in response.
1052   * {@inheritDoc}
1053   */
1054  @Override
1055  public ExecProcedureResponse execProcedureWithRet(RpcController controller,
1056    ExecProcedureRequest request) throws ServiceException {
1057    rpcPreCheck("execProcedureWithRet");
1058    try {
1059      ProcedureDescription desc = request.getProcedure();
1060      MasterProcedureManager mpm =
1061        server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
1062      if (mpm == null) {
1063        throw new ServiceException("The procedure is not registered: " + desc.getSignature());
1064      }
1065      LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
1066      byte[] data = mpm.execProcedureWithRet(desc);
1067      ExecProcedureResponse.Builder builder = ExecProcedureResponse.newBuilder();
1068      // set return data if available
1069      if (data != null) {
1070        builder.setReturnData(UnsafeByteOperations.unsafeWrap(data));
1071      }
1072      return builder.build();
1073    } catch (IOException e) {
1074      throw new ServiceException(e);
1075    }
1076  }
1077
1078  @Override
1079  public GetClusterStatusResponse getClusterStatus(RpcController controller,
1080    GetClusterStatusRequest req) throws ServiceException {
1081    GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
1082    try {
1083      // We used to check if Master was up at this point but let this call proceed even if
1084      // Master is initializing... else we shut out stuff like hbck2 tool from making progress
1085      // since it queries this method to figure cluster version. hbck2 wants to be able to work
1086      // against Master even if it is 'initializing' so it can do fixup.
1087      response.setClusterStatus(ClusterMetricsBuilder.toClusterStatus(
1088        server.getClusterMetrics(ClusterMetricsBuilder.toOptions(req.getOptionsList()))));
1089    } catch (IOException e) {
1090      throw new ServiceException(e);
1091    }
1092    return response.build();
1093  }
1094
  /**
   * List the currently available/stored snapshots. Any in-progress snapshots are ignored.
   */
1098  @Override
1099  public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller,
1100    GetCompletedSnapshotsRequest request) throws ServiceException {
1101    try {
1102      server.checkInitialized();
1103      GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder();
1104      List<SnapshotDescription> snapshots = server.snapshotManager.getCompletedSnapshots();
1105
1106      // convert to protobuf
1107      for (SnapshotDescription snapshot : snapshots) {
1108        builder.addSnapshots(snapshot);
1109      }
1110      return builder.build();
1111    } catch (IOException e) {
1112      throw new ServiceException(e);
1113    }
1114  }
1115
1116  @Override
1117  public ListNamespacesResponse listNamespaces(RpcController controller,
1118    ListNamespacesRequest request) throws ServiceException {
1119    try {
1120      return ListNamespacesResponse.newBuilder().addAllNamespaceName(server.listNamespaces())
1121        .build();
1122    } catch (IOException e) {
1123      throw new ServiceException(e);
1124    }
1125  }
1126
1127  @Override
1128  public GetNamespaceDescriptorResponse getNamespaceDescriptor(RpcController controller,
1129    GetNamespaceDescriptorRequest request) throws ServiceException {
1130    try {
1131      return GetNamespaceDescriptorResponse.newBuilder()
1132        .setNamespaceDescriptor(
1133          ProtobufUtil.toProtoNamespaceDescriptor(server.getNamespace(request.getNamespaceName())))
1134        .build();
1135    } catch (IOException e) {
1136      throw new ServiceException(e);
1137    }
1138  }
1139
  /**
   * Get the number of regions of the table that have been updated by the alter.
   * @return Pair indicating the number of regions updated: Pair.getFirst() is the number of
   *         regions yet to be updated, Pair.getSecond() is the total number of regions of the
   *         table.
   */
1145  @Override
1146  public GetSchemaAlterStatusResponse getSchemaAlterStatus(RpcController controller,
1147    GetSchemaAlterStatusRequest req) throws ServiceException {
1148    // TODO: currently, we query using the table name on the client side. this
1149    // may overlap with other table operations or the table operation may
1150    // have completed before querying this API. We need to refactor to a
1151    // transaction system in the future to avoid these ambiguities.
1152    TableName tableName = ProtobufUtil.toTableName(req.getTableName());
1153
1154    try {
1155      server.checkInitialized();
1156      Pair<Integer, Integer> pair = server.getAssignmentManager().getReopenStatus(tableName);
1157      GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder();
1158      ret.setYetToUpdateRegions(pair.getFirst());
1159      ret.setTotalRegions(pair.getSecond());
1160      return ret.build();
1161    } catch (IOException ioe) {
1162      throw new ServiceException(ioe);
1163    }
1164  }
1165
  /**
   * Get the list of TableDescriptors for the requested tables.
   * @param c   Unused (set to null).
   * @param req GetTableDescriptorsRequest containing the requested table names; if empty, all
   *            tables are requested.
   */
1172  @Override
1173  public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
1174    GetTableDescriptorsRequest req) throws ServiceException {
1175    try {
1176      server.checkInitialized();
1177
1178      final String regex = req.hasRegex() ? req.getRegex() : null;
1179      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
1180      List<TableName> tableNameList = null;
1181      if (req.getTableNamesCount() > 0) {
1182        tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
1183        for (HBaseProtos.TableName tableNamePB : req.getTableNamesList()) {
1184          tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
1185        }
1186      }
1187
1188      List<TableDescriptor> descriptors =
1189        server.listTableDescriptors(namespace, regex, tableNameList, req.getIncludeSysTables());
1190
1191      GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
1192      if (descriptors != null && descriptors.size() > 0) {
1193        // Add the table descriptors to the response
1194        for (TableDescriptor htd : descriptors) {
1195          builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
1196        }
1197      }
1198      return builder.build();
1199    } catch (IOException ioe) {
1200      throw new ServiceException(ioe);
1201    }
1202  }
1203
1204  @Override
1205  public ListTableDescriptorsByStateResponse listTableDescriptorsByState(RpcController controller,
1206    ListTableDescriptorsByStateRequest request) throws ServiceException {
1207    try {
1208      server.checkInitialized();
1209      List<TableDescriptor> descriptors = server.listTableDescriptors(null, null, null, false);
1210
1211      ListTableDescriptorsByStateResponse.Builder builder =
1212        ListTableDescriptorsByStateResponse.newBuilder();
1213      if (descriptors != null && descriptors.size() > 0) {
        // Add the table descriptors matching the requested state to the response
1215        TableState.State state =
1216          request.getIsEnabled() ? TableState.State.ENABLED : TableState.State.DISABLED;
1217        for (TableDescriptor htd : descriptors) {
1218          if (server.getTableStateManager().isTableState(htd.getTableName(), state)) {
1219            builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
1220          }
1221        }
1222      }
1223      return builder.build();
1224    } catch (IOException ioe) {
1225      throw new ServiceException(ioe);
1226    }
1227  }
1228
  /**
   * Get the list of userspace table names.
   * @param controller Unused (set to null).
   * @param req        GetTableNamesRequest
   */
1234  @Override
1235  public GetTableNamesResponse getTableNames(RpcController controller, GetTableNamesRequest req)
1236    throws ServiceException {
1237    try {
1238      server.checkServiceStarted();
1239
1240      final String regex = req.hasRegex() ? req.getRegex() : null;
1241      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
1242      List<TableName> tableNames =
1243        server.listTableNames(namespace, regex, req.getIncludeSysTables());
1244
1245      GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
1246      if (tableNames != null && tableNames.size() > 0) {
1247        // Add the table names to the response
1248        for (TableName table : tableNames) {
1249          builder.addTableNames(ProtobufUtil.toProtoTableName(table));
1250        }
1251      }
1252      return builder.build();
1253    } catch (IOException e) {
1254      throw new ServiceException(e);
1255    }
1256  }
1257
1258  @Override
1259  public ListTableNamesByStateResponse listTableNamesByState(RpcController controller,
1260    ListTableNamesByStateRequest request) throws ServiceException {
1261    try {
1262      server.checkServiceStarted();
1263      List<TableName> tableNames = server.listTableNames(null, null, false);
1264      ListTableNamesByStateResponse.Builder builder = ListTableNamesByStateResponse.newBuilder();
1265      if (tableNames != null && tableNames.size() > 0) {
        // Add the table names matching the requested state to the response
1267        TableState.State state =
1268          request.getIsEnabled() ? TableState.State.ENABLED : TableState.State.DISABLED;
1269        for (TableName table : tableNames) {
1270          if (server.getTableStateManager().isTableState(table, state)) {
1271            builder.addTableNames(ProtobufUtil.toProtoTableName(table));
1272          }
1273        }
1274      }
1275      return builder.build();
1276    } catch (IOException e) {
1277      throw new ServiceException(e);
1278    }
1279  }
1280
1281  @Override
1282  public GetTableStateResponse getTableState(RpcController controller, GetTableStateRequest request)
1283    throws ServiceException {
1284    try {
1285      server.checkServiceStarted();
1286      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
1287      TableState ts = server.getTableStateManager().getTableState(tableName);
1288      GetTableStateResponse.Builder builder = GetTableStateResponse.newBuilder();
1289      builder.setTableState(ts.convert());
1290      return builder.build();
1291    } catch (IOException e) {
1292      throw new ServiceException(e);
1293    }
1294  }
1295
1296  @Override
1297  public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
1298    IsCatalogJanitorEnabledRequest req) throws ServiceException {
1299    return IsCatalogJanitorEnabledResponse.newBuilder().setValue(server.isCatalogJanitorEnabled())
1300      .build();
1301  }
1302
1303  @Override
1304  public IsCleanerChoreEnabledResponse isCleanerChoreEnabled(RpcController c,
1305    IsCleanerChoreEnabledRequest req) throws ServiceException {
1306    return IsCleanerChoreEnabledResponse.newBuilder().setValue(server.isCleanerChoreEnabled())
1307      .build();
1308  }
1309
1310  @Override
1311  public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req)
1312    throws ServiceException {
1313    try {
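      // The master reports itself as running once services have started and until it is stopped,
      // even while it is still initializing.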
1314      server.checkServiceStarted();
1315      return IsMasterRunningResponse.newBuilder().setIsMasterRunning(!server.isStopped()).build();
1316    } catch (IOException e) {
1317      throw new ServiceException(e);
1318    }
1319  }
1320
  /**
   * Checks if the specified procedure is done.
   * @return true if the procedure is done, false if the procedure is still in the process of
   *         completing
   * @throws ServiceException if the procedure is not registered, or wrapping the failure reason of
   *                          a failed procedure.
   */
1326  @Override
1327  public IsProcedureDoneResponse isProcedureDone(RpcController controller,
1328    IsProcedureDoneRequest request) throws ServiceException {
1329    try {
1330      server.checkInitialized();
1331      ProcedureDescription desc = request.getProcedure();
1332      MasterProcedureManager mpm =
1333        server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
1334      if (mpm == null) {
1335        throw new ServiceException("The procedure is not registered: " + desc.getSignature());
1336      }
1337      LOG.debug("Checking to see if procedure from request:" + desc.getSignature() + " is done");
1338
1339      IsProcedureDoneResponse.Builder builder = IsProcedureDoneResponse.newBuilder();
1340      boolean done = mpm.isProcedureDone(desc);
1341      builder.setDone(done);
1342      return builder.build();
1343    } catch (ForeignException e) {
1344      throw new ServiceException(e.getCause());
1345    } catch (IOException e) {
1346      throw new ServiceException(e);
1347    }
1348  }
1349
  /**
   * Checks if the specified snapshot is done.
   * @return true if the snapshot is in the file system and ready to use, false if the snapshot is
   *         in the process of completing
   * @throws ServiceException wrapping UnknownSnapshotException if the snapshot is invalid, or a
   *                          wrapped HBaseSnapshotException with the progress failure reason.
   */
1357  @Override
1358  public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
1359    IsSnapshotDoneRequest request) throws ServiceException {
1360    LOG.debug("Checking to see if snapshot from request:"
1361      + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()) + " is done");
1362    try {
1363      server.checkInitialized();
1364      IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
1365      boolean done = server.snapshotManager.isSnapshotDone(request.getSnapshot());
1366      builder.setDone(done);
1367      return builder.build();
1368    } catch (ForeignException e) {
1369      throw new ServiceException(e.getCause());
1370    } catch (IOException e) {
1371      throw new ServiceException(e);
1372    }
1373  }
1374
1375  @Override
1376  public GetProcedureResultResponse getProcedureResult(RpcController controller,
1377    GetProcedureResultRequest request) throws ServiceException {
1378    LOG.debug("Checking to see if procedure is done pid={}", request.getProcId());
1379    try {
1380      server.checkInitialized();
1381      GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder();
1382      long procId = request.getProcId();
1383      ProcedureExecutor<?> executor = server.getMasterProcedureExecutor();
1384      Procedure<?> result = executor.getResultOrProcedure(procId);
1385      if (result != null) {
1386        builder.setSubmittedTime(result.getSubmittedTime());
1387        builder.setLastUpdate(result.getLastUpdate());
1388        if (executor.isFinished(procId)) {
1389          builder.setState(GetProcedureResultResponse.State.FINISHED);
1390          if (result.isFailed()) {
1391            IOException exception = MasterProcedureUtil.unwrapRemoteIOException(result);
1392            builder.setException(ForeignExceptionUtil.toProtoForeignException(exception));
1393          }
1394          byte[] resultData = result.getResult();
1395          if (resultData != null) {
1396            builder.setResult(UnsafeByteOperations.unsafeWrap(resultData));
1397          }
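          // The completed result is removed once it has been served; a subsequent call for the
          // same procId will report NOT_FOUND.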
1398          server.getMasterProcedureExecutor().removeResult(request.getProcId());
1399        } else {
1400          builder.setState(GetProcedureResultResponse.State.RUNNING);
1401        }
1402      } else {
1403        builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
1404      }
1405      return builder.build();
1406    } catch (IOException e) {
1407      throw new ServiceException(e);
1408    }
1409  }
1410
1411  @Override
1412  public AbortProcedureResponse abortProcedure(RpcController rpcController,
1413    AbortProcedureRequest request) throws ServiceException {
1414    try {
1415      AbortProcedureResponse.Builder response = AbortProcedureResponse.newBuilder();
1416      boolean abortResult =
1417        server.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning());
1418      response.setIsProcedureAborted(abortResult);
1419      return response.build();
1420    } catch (IOException e) {
1421      throw new ServiceException(e);
1422    }
1423  }
1424
1425  @Override
1426  public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c,
1427    ListNamespaceDescriptorsRequest request) throws ServiceException {
1428    try {
1429      ListNamespaceDescriptorsResponse.Builder response =
1430        ListNamespaceDescriptorsResponse.newBuilder();
1431      for (NamespaceDescriptor ns : server.getNamespaces()) {
1432        response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns));
1433      }
1434      return response.build();
1435    } catch (IOException e) {
1436      throw new ServiceException(e);
1437    }
1438  }
1439
1440  @Override
1441  public GetProceduresResponse getProcedures(RpcController rpcController,
1442    GetProceduresRequest request) throws ServiceException {
1443    try {
1444      final GetProceduresResponse.Builder response = GetProceduresResponse.newBuilder();
1445      for (Procedure<?> p : server.getProcedures()) {
1446        response.addProcedure(ProcedureUtil.convertToProtoProcedure(p));
1447      }
1448      return response.build();
1449    } catch (IOException e) {
1450      throw new ServiceException(e);
1451    }
1452  }
1453
1454  @Override
1455  public GetLocksResponse getLocks(RpcController controller, GetLocksRequest request)
1456    throws ServiceException {
1457    try {
1458      final GetLocksResponse.Builder builder = GetLocksResponse.newBuilder();
1459
1460      for (LockedResource lockedResource : server.getLocks()) {
1461        builder.addLock(ProcedureUtil.convertToProtoLockedResource(lockedResource));
1462      }
1463
1464      return builder.build();
1465    } catch (IOException e) {
1466      throw new ServiceException(e);
1467    }
1468  }
1469
1470  @Override
1471  public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
1472    ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
1473    try {
1474      ListTableDescriptorsByNamespaceResponse.Builder b =
1475        ListTableDescriptorsByNamespaceResponse.newBuilder();
1476      for (TableDescriptor htd : server
1477        .listTableDescriptorsByNamespace(request.getNamespaceName())) {
1478        b.addTableSchema(ProtobufUtil.toTableSchema(htd));
1479      }
1480      return b.build();
1481    } catch (IOException e) {
1482      throw new ServiceException(e);
1483    }
1484  }
1485
1486  @Override
1487  public ListTableNamesByNamespaceResponse listTableNamesByNamespace(RpcController c,
1488    ListTableNamesByNamespaceRequest request) throws ServiceException {
1489    try {
1490      ListTableNamesByNamespaceResponse.Builder b = ListTableNamesByNamespaceResponse.newBuilder();
1491      for (TableName tableName : server.listTableNamesByNamespace(request.getNamespaceName())) {
1492        b.addTableName(ProtobufUtil.toProtoTableName(tableName));
1493      }
1494      return b.build();
1495    } catch (IOException e) {
1496      throw new ServiceException(e);
1497    }
1498  }
1499
1500  @Override
1501  public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req)
1502    throws ServiceException {
1503    try {
1504      long procId = server.modifyColumn(ProtobufUtil.toTableName(req.getTableName()),
1505        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(),
1506        req.getNonce());
1507      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId
1509        return ModifyColumnResponse.newBuilder().build();
1510      } else {
1511        return ModifyColumnResponse.newBuilder().setProcId(procId).build();
1512      }
1513    } catch (IOException ioe) {
1514      throw new ServiceException(ioe);
1515    }
1516  }
1517
1518  @Override
1519  public ModifyColumnStoreFileTrackerResponse modifyColumnStoreFileTracker(RpcController controller,
1520    ModifyColumnStoreFileTrackerRequest req) throws ServiceException {
1521    try {
1522      long procId =
1523        server.modifyColumnStoreFileTracker(ProtobufUtil.toTableName(req.getTableName()),
1524          req.getFamily().toByteArray(), req.getDstSft(), req.getNonceGroup(), req.getNonce());
1525      return ModifyColumnStoreFileTrackerResponse.newBuilder().setProcId(procId).build();
1526    } catch (IOException ioe) {
1527      throw new ServiceException(ioe);
1528    }
1529  }
1530
1531  @Override
1532  public ModifyNamespaceResponse modifyNamespace(RpcController controller,
1533    ModifyNamespaceRequest request) throws ServiceException {
1534    try {
1535      long procId =
1536        server.modifyNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
1537          request.getNonceGroup(), request.getNonce());
1538      return ModifyNamespaceResponse.newBuilder().setProcId(procId).build();
1539    } catch (IOException e) {
1540      throw new ServiceException(e);
1541    }
1542  }
1543
1544  @Override
1545  public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req)
1546    throws ServiceException {
1547    try {
1548      long procId = server.modifyTable(ProtobufUtil.toTableName(req.getTableName()),
1549        ProtobufUtil.toTableDescriptor(req.getTableSchema()), req.getNonceGroup(), req.getNonce(),
1550        req.getReopenRegions());
1551      return ModifyTableResponse.newBuilder().setProcId(procId).build();
1552    } catch (IOException ioe) {
1553      throw new ServiceException(ioe);
1554    }
1555  }
1556
1557  @Override
1558  public ModifyTableStoreFileTrackerResponse modifyTableStoreFileTracker(RpcController controller,
1559    ModifyTableStoreFileTrackerRequest req) throws ServiceException {
1560    try {
1561      long procId = server.modifyTableStoreFileTracker(ProtobufUtil.toTableName(req.getTableName()),
1562        req.getDstSft(), req.getNonceGroup(), req.getNonce());
1563      return ModifyTableStoreFileTrackerResponse.newBuilder().setProcId(procId).build();
1564    } catch (IOException ioe) {
1565      throw new ServiceException(ioe);
1566    }
1567  }
1568
1569  @Override
1570  public MoveRegionResponse moveRegion(RpcController controller, MoveRegionRequest req)
1571    throws ServiceException {
1572    final byte[] encodedRegionName = req.getRegion().getValue().toByteArray();
1573    RegionSpecifierType type = req.getRegion().getType();
1574    final byte[] destServerName = (req.hasDestServerName())
1575      ? Bytes.toBytes(ProtobufUtil.toServerName(req.getDestServerName()).getServerName())
1576      : null;
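    // A null destination server name lets the master choose a target server for the region.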
1577    MoveRegionResponse mrr = MoveRegionResponse.newBuilder().build();
1578
1579    if (type != RegionSpecifierType.ENCODED_REGION_NAME) {
1580      LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
1581        + " actual: " + type);
1582    }
1583
1584    try {
1585      server.checkInitialized();
1586      server.move(encodedRegionName, destServerName);
1587    } catch (IOException ioe) {
1588      throw new ServiceException(ioe);
1589    }
1590    return mrr;
1591  }
1592
  /**
   * Offline the specified region from the master's in-memory state. It will not attempt to
   * reassign the region as in unassign. This is a special method that should be used by experts
   * or hbck.
   */
1597  @Override
1598  public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request)
1599    throws ServiceException {
1600    try {
1601      server.checkInitialized();
1602
1603      final RegionSpecifierType type = request.getRegion().getType();
1604      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("offlineRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
          + " actual: " + type);
1607      }
1608
1609      final byte[] regionName = request.getRegion().getValue().toByteArray();
1610      final RegionInfo hri = server.getAssignmentManager().getRegionInfo(regionName);
1611      if (hri == null) {
1612        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
1613      }
1614
1615      if (server.cpHost != null) {
1616        server.cpHost.preRegionOffline(hri);
1617      }
1618      LOG.info(server.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString());
1619      server.getAssignmentManager().offlineRegion(hri);
1620      if (server.cpHost != null) {
1621        server.cpHost.postRegionOffline(hri);
1622      }
1623    } catch (IOException ioe) {
1624      throw new ServiceException(ioe);
1625    }
1626    return OfflineRegionResponse.newBuilder().build();
1627  }
1628
1629  /**
1630   * Execute Restore/Clone snapshot operation.
1631   * <p>
1632   * If the specified table exists a "Restore" is executed, replacing the table schema and directory
   * data with the content of the snapshot. The table must be disabled, or an
   * UnsupportedOperationException will be thrown.
1635   * <p>
1636   * If the table doesn't exist a "Clone" is executed, a new table is created using the schema at
1637   * the time of the snapshot, and the content of the snapshot.
1638   * <p>
1639   * The restore/clone operation does not require copying HFiles. Since HFiles are immutable the
1640   * table can point to and use the same files as the original one.
1641   */
1642  @Override
1643  public RestoreSnapshotResponse restoreSnapshot(RpcController controller,
1644    RestoreSnapshotRequest request) throws ServiceException {
1645    try {
1646      long procId = server.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(),
1647        request.getNonce(), request.getRestoreACL(), request.getCustomSFT());
1648      return RestoreSnapshotResponse.newBuilder().setProcId(procId).build();
1649    } catch (ForeignException e) {
1650      throw new ServiceException(e.getCause());
1651    } catch (IOException e) {
1652      throw new ServiceException(e);
1653    }
1654  }
1655
1656  @Override
1657  public SetSnapshotCleanupResponse switchSnapshotCleanup(RpcController controller,
1658    SetSnapshotCleanupRequest request) throws ServiceException {
1659    try {
1660      server.checkInitialized();
1661      final boolean enabled = request.getEnabled();
1662      final boolean isSynchronous = request.hasSynchronous() && request.getSynchronous();
1663      final boolean prevSnapshotCleanupRunning = this.switchSnapshotCleanup(enabled, isSynchronous);
1664      return SetSnapshotCleanupResponse.newBuilder()
1665        .setPrevSnapshotCleanup(prevSnapshotCleanupRunning).build();
1666    } catch (IOException e) {
1667      throw new ServiceException(e);
1668    }
1669  }
1670
1671  @Override
1672  public IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(RpcController controller,
1673    IsSnapshotCleanupEnabledRequest request) throws ServiceException {
1674    try {
1675      server.checkInitialized();
1676      final boolean isSnapshotCleanupEnabled = server.snapshotCleanupStateStore.get();
1677      return IsSnapshotCleanupEnabledResponse.newBuilder().setEnabled(isSnapshotCleanupEnabled)
1678        .build();
1679    } catch (IOException e) {
1680      throw new ServiceException(e);
1681    }
1682  }
1683
  /**
   * Turn on/off snapshot auto-cleanup based on TTL.
   * @param enabledNewVal Set to <code>true</code> to enable, <code>false</code> to disable
   * @param synchronous   If <code>true</code>, wait until any outstanding snapshot cleanup
   *                      completes before returning
   * @return previous snapshot auto-cleanup mode
   */
1691  private synchronized boolean switchSnapshotCleanup(final boolean enabledNewVal,
1692    final boolean synchronous) throws IOException {
1693    final boolean oldValue = server.snapshotCleanupStateStore.get();
1694    server.switchSnapshotCleanup(enabledNewVal, synchronous);
1695    LOG.info("{} Successfully set snapshot cleanup to {}", server.getClientIdAuditPrefix(),
1696      enabledNewVal);
1697    return oldValue;
1698  }
1699
1700  @Override
1701  public RunCatalogScanResponse runCatalogScan(RpcController c, RunCatalogScanRequest req)
1702    throws ServiceException {
1703    rpcPreCheck("runCatalogScan");
1704    try {
1705      return ResponseConverter.buildRunCatalogScanResponse(this.server.catalogJanitorChore.scan());
1706    } catch (IOException ioe) {
1707      throw new ServiceException(ioe);
1708    }
1709  }
1710
1711  @Override
1712  public RunCleanerChoreResponse runCleanerChore(RpcController c, RunCleanerChoreRequest req)
1713    throws ServiceException {
1714    rpcPreCheck("runCleanerChore");
1715    try {
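      // Trigger both the HFile cleaner and the log cleaner, then wait for both to finish;
      // success is reported only if both chores ran.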
1716      CompletableFuture<Boolean> fileCleanerFuture = server.getHFileCleaner().triggerCleanerNow();
1717      CompletableFuture<Boolean> logCleanerFuture = server.getLogCleaner().triggerCleanerNow();
1718      boolean result = fileCleanerFuture.get() && logCleanerFuture.get();
1719      return ResponseConverter.buildRunCleanerChoreResponse(result);
1720    } catch (InterruptedException e) {
1721      throw new ServiceException(e);
1722    } catch (ExecutionException e) {
1723      throw new ServiceException(e.getCause());
1724    }
1725  }
1726
1727  @Override
1728  public SetBalancerRunningResponse setBalancerRunning(RpcController c,
1729    SetBalancerRunningRequest req) throws ServiceException {
1730    try {
1731      server.checkInitialized();
1732      boolean prevValue = (req.getSynchronous())
1733        ? synchronousBalanceSwitch(req.getOn())
1734        : server.balanceSwitch(req.getOn());
1735      return SetBalancerRunningResponse.newBuilder().setPrevBalanceValue(prevValue).build();
1736    } catch (IOException ioe) {
1737      throw new ServiceException(ioe);
1738    }
1739  }
1740
1741  @Override
1742  public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request)
1743    throws ServiceException {
1744    LOG.info(server.getClientIdAuditPrefix() + " shutdown");
1745    try {
1746      server.shutdown();
1747    } catch (IOException e) {
1748      LOG.error("Exception occurred in HMaster.shutdown()", e);
1749      throw new ServiceException(e);
1750    }
1751    return ShutdownResponse.newBuilder().build();
1752  }
1753
1754  /**
1755   * Triggers an asynchronous attempt to take a snapshot. {@inheritDoc}
1756   */
1757  @Override
1758  public SnapshotResponse snapshot(RpcController controller, SnapshotRequest request)
1759    throws ServiceException {
1760    try {
1761      server.checkInitialized();
1762      server.snapshotManager.checkSnapshotSupport();
1763
1764      LOG.info(server.getClientIdAuditPrefix() + " snapshot request for:"
1765        + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
1766      // get the snapshot information
1767      SnapshotDescription snapshot =
1768        SnapshotDescriptionUtils.validate(request.getSnapshot(), server.getConfiguration());
1769      // send back the max amount of time the client should wait for the snapshot to complete
1770      long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(server.getConfiguration(),
1771        snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
1772
1773      SnapshotResponse.Builder builder = SnapshotResponse.newBuilder().setExpectedTimeout(waitTime);
1774
      // If the snapshot request carries a nonce group and nonce, the client can handle the
      // snapshot procedure's procId. If the snapshot procedure is enabled, we do the snapshot
      // work with proc-v2; otherwise we fall back to the zk-based procedure.
1778      if (
1779        request.hasNonceGroup() && request.hasNonce()
1780          && server.snapshotManager.snapshotProcedureEnabled()
1781      ) {
1782        long nonceGroup = request.getNonceGroup();
1783        long nonce = request.getNonce();
1784        long procId = server.snapshotManager.takeSnapshot(snapshot, nonceGroup, nonce);
1785        return builder.setProcId(procId).build();
1786      } else {
1787        server.snapshotManager.takeSnapshot(snapshot);
1788        return builder.build();
1789      }
1790    } catch (ForeignException e) {
1791      throw new ServiceException(e.getCause());
1792    } catch (IOException e) {
1793      throw new ServiceException(e);
1794    }
1795  }
1796
1797  @Override
1798  public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request)
1799    throws ServiceException {
1800    LOG.info(server.getClientIdAuditPrefix() + " stop");
1801    try {
1802      server.stopMaster();
1803    } catch (IOException e) {
1804      LOG.error("Exception occurred while stopping master", e);
1805      throw new ServiceException(e);
1806    }
1807    return StopMasterResponse.newBuilder().build();
1808  }
1809
1810  @Override
1811  public IsInMaintenanceModeResponse isMasterInMaintenanceMode(final RpcController controller,
1812    final IsInMaintenanceModeRequest request) throws ServiceException {
1813    IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder();
1814    response.setInMaintenanceMode(server.isInMaintenanceMode());
1815    return response.build();
1816  }
1817
1818  @Override
1819  public UnassignRegionResponse unassignRegion(RpcController controller, UnassignRegionRequest req)
1820    throws ServiceException {
1821    try {
1822      final byte[] regionName = req.getRegion().getValue().toByteArray();
1823      RegionSpecifierType type = req.getRegion().getType();
1824      UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build();
1825
1826      server.checkInitialized();
1827      if (type != RegionSpecifierType.REGION_NAME) {
1828        LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
1829          + " actual: " + type);
1830      }
1831      RegionStateNode rsn =
1832        server.getAssignmentManager().getRegionStates().getRegionStateNodeFromName(regionName);
1833      if (rsn == null) {
1834        throw new UnknownRegionException(Bytes.toString(regionName));
1835      }
1836
1837      RegionInfo hri = rsn.getRegionInfo();
1838      if (server.cpHost != null) {
1839        server.cpHost.preUnassign(hri);
1840      }
1841      LOG.debug(server.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString()
1842        + " in current location if it is online");
1843      server.getAssignmentManager().unassign(hri);
1844      if (server.cpHost != null) {
1845        server.cpHost.postUnassign(hri);
1846      }
1847
1848      return urr;
1849    } catch (IOException ioe) {
1850      throw new ServiceException(ioe);
1851    }
1852  }
1853
1854  @Override
1855  public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c,
1856    ReportRegionStateTransitionRequest req) throws ServiceException {
1857    try {
1858      server.checkServiceStarted();
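      // Reject transitions that were initiated against an older master instance before handing
      // the report to the AssignmentManager.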
1859      for (RegionServerStatusProtos.RegionStateTransition transition : req.getTransitionList()) {
1860        long procId =
1861          transition.getProcIdCount() > 0 ? transition.getProcId(0) : Procedure.NO_PROC_ID;
1862        // -1 is less than any possible MasterActiveCode
1863        long initiatingMasterActiveTime = transition.hasInitiatingMasterActiveTime()
1864          ? transition.getInitiatingMasterActiveTime()
1865          : -1;
1866        throwOnOldMaster(procId, initiatingMasterActiveTime);
1867      }
1868      return server.getAssignmentManager().reportRegionStateTransition(req);
1869    } catch (IOException ioe) {
1870      throw new ServiceException(ioe);
1871    }
1872  }
1873
1874  @Override
1875  public SetQuotaResponse setQuota(RpcController c, SetQuotaRequest req) throws ServiceException {
1876    try {
1877      server.checkInitialized();
1878      SetQuotaResponse response = server.getMasterQuotaManager().setQuota(req);
1879      server.reloadRegionServerQuotas();
1880
1881      return response;
1882    } catch (Exception e) {
1883      throw new ServiceException(e);
1884    }
1885  }
1886
1887  @Override
1888  public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
1889    MajorCompactionTimestampRequest request) throws ServiceException {
1890    MajorCompactionTimestampResponse.Builder response =
1891      MajorCompactionTimestampResponse.newBuilder();
1892    try {
1893      server.checkInitialized();
1894      response.setCompactionTimestamp(
1895        server.getLastMajorCompactionTimestamp(ProtobufUtil.toTableName(request.getTableName())));
1896    } catch (IOException e) {
1897      throw new ServiceException(e);
1898    }
1899    return response.build();
1900  }
1901
1902  @Override
1903  public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
1904    RpcController controller, MajorCompactionTimestampForRegionRequest request)
1905    throws ServiceException {
1906    MajorCompactionTimestampResponse.Builder response =
1907      MajorCompactionTimestampResponse.newBuilder();
1908    try {
1909      server.checkInitialized();
1910      response.setCompactionTimestamp(server
1911        .getLastMajorCompactionTimestampForRegion(request.getRegion().getValue().toByteArray()));
1912    } catch (IOException e) {
1913      throw new ServiceException(e);
1914    }
1915    return response.build();
1916  }
1917
1918  @Override
1919  public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller,
1920    IsBalancerEnabledRequest request) throws ServiceException {
1921    IsBalancerEnabledResponse.Builder response = IsBalancerEnabledResponse.newBuilder();
1922    response.setEnabled(server.isBalancerOn());
1923    return response.build();
1924  }
1925
1926  @Override
1927  public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller,
1928    SetSplitOrMergeEnabledRequest request) throws ServiceException {
1929    SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder();
1930    try {
1931      server.checkInitialized();
1932      boolean newValue = request.getEnabled();
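      // Apply the new value to each requested switch type (SPLIT and/or MERGE), recording the
      // previous value and invoking the coprocessor hooks around each update.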
1933      for (MasterProtos.MasterSwitchType masterSwitchType : request.getSwitchTypesList()) {
1934        MasterSwitchType switchType = convert(masterSwitchType);
1935        boolean oldValue = server.isSplitOrMergeEnabled(switchType);
1936        response.addPrevValue(oldValue);
1937        if (server.cpHost != null) {
1938          server.cpHost.preSetSplitOrMergeEnabled(newValue, switchType);
1939        }
1940        server.getSplitOrMergeStateStore().setSplitOrMergeEnabled(newValue, switchType);
1941        if (server.cpHost != null) {
1942          server.cpHost.postSetSplitOrMergeEnabled(newValue, switchType);
1943        }
1944      }
1945    } catch (IOException e) {
1946      throw new ServiceException(e);
1947    }
1948    return response.build();
1949  }
1950
1951  @Override
1952  public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller,
1953    IsSplitOrMergeEnabledRequest request) throws ServiceException {
1954    IsSplitOrMergeEnabledResponse.Builder response = IsSplitOrMergeEnabledResponse.newBuilder();
1955    response.setEnabled(server.isSplitOrMergeEnabled(convert(request.getSwitchType())));
1956    return response.build();
1957  }
1958
1959  @Override
1960  public NormalizeResponse normalize(RpcController controller, NormalizeRequest request)
1961    throws ServiceException {
1962    rpcPreCheck("normalize");
1963    try {
1964      final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
1965        .tableNames(ProtobufUtil.toTableNameList(request.getTableNamesList()))
1966        .regex(request.hasRegex() ? request.getRegex() : null)
1967        .namespace(request.hasNamespace() ? request.getNamespace() : null).build();
1968      return NormalizeResponse.newBuilder()
1969        // all API requests are considered priority requests.
1970        .setNormalizerRan(server.normalizeRegions(ntfp, true)).build();
1971    } catch (IOException ex) {
1972      throw new ServiceException(ex);
1973    }
1974  }
1975
1976  @Override
1977  public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller,
1978    SetNormalizerRunningRequest request) throws ServiceException {
1979    rpcPreCheck("setNormalizerRunning");
1980
1981    // Sets normalizer on/off flag in ZK.
1982    // TODO: this method is totally broken in terms of atomicity of actions and values read.
1983    // 1. The contract has this RPC returning the previous value. There isn't a ZKUtil method
1984    // that lets us retrieve the previous value as part of setting a new value, so we simply
1985    // perform a read before issuing the update. Thus we have a data race opportunity, between
1986    // when the `prevValue` is read and whatever is actually overwritten.
1987    // 2. Down in `setNormalizerOn`, the call to `createAndWatch` inside of the catch clause can
1988    // itself fail in the event that the znode already exists. Thus, another data race, between
1989    // when the initial `setData` call is notified of the absence of the target znode and the
1990    // subsequent `createAndWatch`, with another client creating said node.
1991    // That said, there's supposed to be only one active master and thus there's supposed to be
1992    // only one process with the authority to modify the value.
1993    final boolean prevValue = server.getRegionNormalizerManager().isNormalizerOn();
1994    final boolean newValue = request.getOn();
1995    try {
1996      server.getRegionNormalizerManager().setNormalizerOn(newValue);
1997    } catch (IOException e) {
1998      throw new ServiceException(e);
1999    }
2000    LOG.info("{} set normalizerSwitch={}", server.getClientIdAuditPrefix(), newValue);
2001    return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build();
2002  }
2003
2004  @Override
2005  public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller,
2006    IsNormalizerEnabledRequest request) {
2007    IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder();
2008    response.setEnabled(server.isNormalizerOn());
2009    return response.build();
2010  }
2011
2012  /**
2013   * Returns the security capabilities in effect on the cluster
2014   */
2015  @Override
2016  public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller,
2017    SecurityCapabilitiesRequest request) throws ServiceException {
2018    SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder();
2019    try {
2020      server.checkInitialized();
2021      Set<SecurityCapabilitiesResponse.Capability> capabilities = new HashSet<>();
2022      // Authentication
2023      if (User.isHBaseSecurityEnabled(server.getConfiguration())) {
2024        capabilities.add(SecurityCapabilitiesResponse.Capability.SECURE_AUTHENTICATION);
2025      } else {
2026        capabilities.add(SecurityCapabilitiesResponse.Capability.SIMPLE_AUTHENTICATION);
2027      }
2028      // A coprocessor that implements AccessControlService can provide AUTHORIZATION and
2029      // CELL_AUTHORIZATION
2030      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
2031        if (AccessChecker.isAuthorizationSupported(server.getConfiguration())) {
2032          capabilities.add(SecurityCapabilitiesResponse.Capability.AUTHORIZATION);
2033        }
2034        if (AccessController.isCellAuthorizationSupported(server.getConfiguration())) {
2035          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_AUTHORIZATION);
2036        }
2037      }
2038      // A coprocessor that implements VisibilityLabelsService can provide CELL_VISIBILITY.
2039      if (server.cpHost != null && hasVisibilityLabelsServiceCoprocessor(server.cpHost)) {
2040        if (VisibilityController.isCellAuthorizationSupported(server.getConfiguration())) {
2041          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_VISIBILITY);
2042        }
2043      }
2044      response.addAllCapabilities(capabilities);
2045    } catch (IOException e) {
2046      throw new ServiceException(e);
2047    }
2048    return response.build();
2049  }
2050
2051  /**
2052   * Determines if there is a MasterCoprocessor deployed which implements
2053   * {@link AccessControlService.Interface}.
2054   */
2055  boolean hasAccessControlServiceCoprocessor(MasterCoprocessorHost cpHost) {
2056    return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class),
2057      AccessControlService.Interface.class);
2058  }
2059
2060  /**
2061   * Determines if there is a MasterCoprocessor deployed which implements
2062   * {@link VisibilityLabelsService.Interface}.
2063   */
2064  boolean hasVisibilityLabelsServiceCoprocessor(MasterCoprocessorHost cpHost) {
2065    return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class),
2066      VisibilityLabelsService.Interface.class);
2067  }
2068
2069  /**
2070   * Determines if there is a coprocessor implementation in the provided argument which extends or
2071   * implements the provided {@code service}.
2072   */
2073  boolean checkCoprocessorWithService(List<MasterCoprocessor> coprocessorsToCheck,
2074    Class<?> service) {
2075    if (coprocessorsToCheck == null || coprocessorsToCheck.isEmpty()) {
2076      return false;
2077    }
2078    for (MasterCoprocessor cp : coprocessorsToCheck) {
2079      if (service.isAssignableFrom(cp.getClass())) {
2080        return true;
2081      }
2082    }
2083    return false;
2084  }
2085
2086  private MasterSwitchType convert(MasterProtos.MasterSwitchType switchType) {
2087    switch (switchType) {
2088      case SPLIT:
2089        return MasterSwitchType.SPLIT;
2090      case MERGE:
2091        return MasterSwitchType.MERGE;
2092      default:
2093        break;
2094    }
2095    return null;
2096  }
2097
2098  @Override
2099  public AddReplicationPeerResponse addReplicationPeer(RpcController controller,
2100    AddReplicationPeerRequest request) throws ServiceException {
2101    try {
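      // The peer's initial enabled/disabled state is taken from the request.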
2102      long procId = server.addReplicationPeer(request.getPeerId(),
2103        ReplicationPeerConfigUtil.convert(request.getPeerConfig()),
2104        request.getPeerState().getState().equals(ReplicationState.State.ENABLED));
2105      return AddReplicationPeerResponse.newBuilder().setProcId(procId).build();
2106    } catch (ReplicationException | IOException e) {
2107      throw new ServiceException(e);
2108    }
2109  }
2110
2111  @Override
2112  public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller,
2113    RemoveReplicationPeerRequest request) throws ServiceException {
2114    try {
2115      long procId = server.removeReplicationPeer(request.getPeerId());
2116      return RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build();
2117    } catch (ReplicationException | IOException e) {
2118      throw new ServiceException(e);
2119    }
2120  }
2121
2122  @Override
2123  public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller,
2124    EnableReplicationPeerRequest request) throws ServiceException {
2125    try {
2126      long procId = server.enableReplicationPeer(request.getPeerId());
2127      return EnableReplicationPeerResponse.newBuilder().setProcId(procId).build();
2128    } catch (ReplicationException | IOException e) {
2129      throw new ServiceException(e);
2130    }
2131  }
2132
2133  @Override
2134  public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller,
2135    DisableReplicationPeerRequest request) throws ServiceException {
2136    try {
2137      long procId = server.disableReplicationPeer(request.getPeerId());
2138      return DisableReplicationPeerResponse.newBuilder().setProcId(procId).build();
2139    } catch (ReplicationException | IOException e) {
2140      throw new ServiceException(e);
2141    }
2142  }
2143
2144  @Override
2145  public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller,
2146    GetReplicationPeerConfigRequest request) throws ServiceException {
2147    GetReplicationPeerConfigResponse.Builder response =
2148      GetReplicationPeerConfigResponse.newBuilder();
2149    try {
2150      String peerId = request.getPeerId();
2151      ReplicationPeerConfig peerConfig = server.getReplicationPeerConfig(peerId);
2152      response.setPeerId(peerId);
2153      response.setPeerConfig(ReplicationPeerConfigUtil.convert(peerConfig));
2154    } catch (ReplicationException | IOException e) {
2155      throw new ServiceException(e);
2156    }
2157    return response.build();
2158  }
2159
2160  @Override
2161  public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(RpcController controller,
2162    UpdateReplicationPeerConfigRequest request) throws ServiceException {
2163    try {
2164      long procId = server.updateReplicationPeerConfig(request.getPeerId(),
2165        ReplicationPeerConfigUtil.convert(request.getPeerConfig()));
2166      return UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build();
2167    } catch (ReplicationException | IOException e) {
2168      throw new ServiceException(e);
2169    }
2170  }
2171
2172  @Override
2173  public TransitReplicationPeerSyncReplicationStateResponse
2174    transitReplicationPeerSyncReplicationState(RpcController controller,
2175      TransitReplicationPeerSyncReplicationStateRequest request) throws ServiceException {
2176    try {
2177      long procId = server.transitReplicationPeerSyncReplicationState(request.getPeerId(),
2178        ReplicationPeerConfigUtil.toSyncReplicationState(request.getSyncReplicationState()));
2179      return TransitReplicationPeerSyncReplicationStateResponse.newBuilder().setProcId(procId)
2180        .build();
2181    } catch (ReplicationException | IOException e) {
2182      throw new ServiceException(e);
2183    }
2184  }
2185
2186  @Override
2187  public ListReplicationPeersResponse listReplicationPeers(RpcController controller,
2188    ListReplicationPeersRequest request) throws ServiceException {
2189    ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder();
2190    try {
2191      List<ReplicationPeerDescription> peers =
2192        server.listReplicationPeers(request.hasRegex() ? request.getRegex() : null);
2193      for (ReplicationPeerDescription peer : peers) {
2194        response.addPeerDesc(ReplicationPeerConfigUtil.toProtoReplicationPeerDescription(peer));
2195      }
2196    } catch (ReplicationException | IOException e) {
2197      throw new ServiceException(e);
2198    }
2199    return response.build();
2200  }
2201
2202  @Override
2203  public GetReplicationPeerStateResponse isReplicationPeerEnabled(RpcController controller,
2204    GetReplicationPeerStateRequest request) throws ServiceException {
2205    boolean isEnabled;
2206    try {
2207      isEnabled = server.getReplicationPeerManager().getPeerState(request.getPeerId());
2208    } catch (ReplicationException e) {
2209      throw new ServiceException(e);
2210    }
2211    return GetReplicationPeerStateResponse.newBuilder().setIsEnabled(isEnabled).build();
2212  }
2213
2214  @Override
2215  public ReplicationPeerModificationSwitchResponse replicationPeerModificationSwitch(
2216    RpcController controller, ReplicationPeerModificationSwitchRequest request)
2217    throws ServiceException {
2218    try {
2219      server.checkInitialized();
2220      boolean prevValue = server.replicationPeerModificationSwitch(request.getOn());
2221      return ReplicationPeerModificationSwitchResponse.newBuilder().setPreviousValue(prevValue)
2222        .build();
2223    } catch (IOException ioe) {
2224      throw new ServiceException(ioe);
2225    }
2226  }
2227
2228  @Override
2229  public GetReplicationPeerModificationProceduresResponse getReplicationPeerModificationProcedures(
2230    RpcController controller, GetReplicationPeerModificationProceduresRequest request)
2231    throws ServiceException {
2232    try {
2233      server.checkInitialized();
2234      GetReplicationPeerModificationProceduresResponse.Builder builder =
2235        GetReplicationPeerModificationProceduresResponse.newBuilder();
2236      for (Procedure<?> proc : server.getProcedures()) {
2237        if (proc.isFinished()) {
2238          continue;
2239        }
2240        if (!(proc instanceof AbstractPeerNoLockProcedure)) {
2241          continue;
2242        }
2243        builder.addProcedure(ProcedureUtil.convertToProtoProcedure(proc));
2244      }
2245      return builder.build();
2246    } catch (IOException ioe) {
2247      throw new ServiceException(ioe);
2248    }
2249  }
2250
2251  @Override
2252  public IsReplicationPeerModificationEnabledResponse isReplicationPeerModificationEnabled(
2253    RpcController controller, IsReplicationPeerModificationEnabledRequest request)
2254    throws ServiceException {
2255    try {
2256      server.checkInitialized();
2257      return IsReplicationPeerModificationEnabledResponse.newBuilder()
2258        .setEnabled(server.isReplicationPeerModificationEnabled()).build();
2259    } catch (IOException ioe) {
2260      throw new ServiceException(ioe);
2261    }
2262  }
2263
2264  @Override
2265  public ListDecommissionedRegionServersResponse listDecommissionedRegionServers(
2266    RpcController controller, ListDecommissionedRegionServersRequest request)
2267    throws ServiceException {
2268    ListDecommissionedRegionServersResponse.Builder response =
2269      ListDecommissionedRegionServersResponse.newBuilder();
2270    try {
2271      server.checkInitialized();
2272      if (server.cpHost != null) {
2273        server.cpHost.preListDecommissionedRegionServers();
2274      }
2275      List<ServerName> servers = server.listDecommissionedRegionServers();
2276      response.addAllServerName(
2277        servers.stream().map(ProtobufUtil::toServerName).collect(Collectors.toList()));
2278      if (server.cpHost != null) {
2279        server.cpHost.postListDecommissionedRegionServers();
2280      }
2281    } catch (IOException io) {
2282      throw new ServiceException(io);
2283    }
2284
2285    return response.build();
2286  }
2287
2288  @Override
2289  public DecommissionRegionServersResponse decommissionRegionServers(RpcController controller,
2290    DecommissionRegionServersRequest request) throws ServiceException {
2291    try {
2292      server.checkInitialized();
2293      List<ServerName> servers = request.getServerNameList().stream()
2294        .map(pbServer -> ProtobufUtil.toServerName(pbServer)).collect(Collectors.toList());
2295      boolean offload = request.getOffload();
2296      if (server.cpHost != null) {
2297        server.cpHost.preDecommissionRegionServers(servers, offload);
2298      }
2299      server.decommissionRegionServers(servers, offload);
2300      if (server.cpHost != null) {
2301        server.cpHost.postDecommissionRegionServers(servers, offload);
2302      }
2303    } catch (IOException io) {
2304      throw new ServiceException(io);
2305    }
2306
2307    return DecommissionRegionServersResponse.newBuilder().build();
2308  }
2309
2310  @Override
2311  public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller,
2312    RecommissionRegionServerRequest request) throws ServiceException {
2313    try {
2314      server.checkInitialized();
2315      ServerName sn = ProtobufUtil.toServerName(request.getServerName());
2316      List<byte[]> encodedRegionNames = request.getRegionList().stream()
2317        .map(regionSpecifier -> regionSpecifier.getValue().toByteArray())
2318        .collect(Collectors.toList());
2319      if (server.cpHost != null) {
2320        server.cpHost.preRecommissionRegionServer(sn, encodedRegionNames);
2321      }
2322      server.recommissionRegionServer(sn, encodedRegionNames);
2323      if (server.cpHost != null) {
2324        server.cpHost.postRecommissionRegionServer(sn, encodedRegionNames);
2325      }
2326    } catch (IOException io) {
2327      throw new ServiceException(io);
2328    }
2329
2330    return RecommissionRegionServerResponse.newBuilder().build();
2331  }
2332
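  /**
   * Handles a lock request for a set of regions, a table, or a namespace; exactly one of the three
   * must be specified. The lock is acquired by submitting a procedure through the LockManager and
   * the resulting procedure id is returned to the client (see lockHeartbeat).
   */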
2333  @Override
2334  public LockResponse requestLock(RpcController controller, final LockRequest request)
2335    throws ServiceException {
2336    try {
2337      if (request.getDescription().isEmpty()) {
2338        throw new IllegalArgumentException("Empty description");
2339      }
2340      NonceProcedureRunnable npr;
2341      LockType type = LockType.valueOf(request.getLockType().name());
2342      if (request.getRegionInfoCount() > 0) {
2343        final RegionInfo[] regionInfos = new RegionInfo[request.getRegionInfoCount()];
2344        for (int i = 0; i < request.getRegionInfoCount(); ++i) {
2345          regionInfos[i] = ProtobufUtil.toRegionInfo(request.getRegionInfo(i));
2346        }
2347        npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) {
2348          @Override
2349          protected void run() throws IOException {
2350            setProcId(server.getLockManager().remoteLocks().requestRegionsLock(regionInfos,
2351              request.getDescription(), getNonceKey()));
2352          }
2353
2354          @Override
2355          protected String getDescription() {
2356            return "RequestLock";
2357          }
2358        };
2359      } else if (request.hasTableName()) {
2360        final TableName tableName = ProtobufUtil.toTableName(request.getTableName());
2361        npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) {
2362          @Override
2363          protected void run() throws IOException {
2364            setProcId(server.getLockManager().remoteLocks().requestTableLock(tableName, type,
2365              request.getDescription(), getNonceKey()));
2366          }
2367
2368          @Override
2369          protected String getDescription() {
2370            return "RequestLock";
2371          }
2372        };
2373      } else if (request.hasNamespace()) {
2374        npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) {
2375          @Override
2376          protected void run() throws IOException {
2377            setProcId(server.getLockManager().remoteLocks().requestNamespaceLock(
2378              request.getNamespace(), type, request.getDescription(), getNonceKey()));
2379          }
2380
2381          @Override
2382          protected String getDescription() {
2383            return "RequestLock";
2384          }
2385        };
2386      } else {
2387        throw new IllegalArgumentException("one of table/namespace/region should be specified");
2388      }
2389      long procId = MasterProcedureUtil.submitProcedure(npr);
2390      return LockResponse.newBuilder().setProcId(procId).build();
2391    } catch (IllegalArgumentException e) {
2392      LOG.warn("Exception when queuing lock", e);
2393      throw new ServiceException(new DoNotRetryIOException(e));
2394    } catch (IOException e) {
2395      LOG.warn("Exception when queuing lock", e);
2396      throw new ServiceException(e);
2397    }
2398  }
2399
2400  /**
2401   * @return LOCKED, if the procedure is found and it holds the lock; else UNLOCKED.
2402   * @throws ServiceException if the given proc id is found but it is not a LockProcedure.
2403   */
2404  @Override
2405  public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request)
2406    throws ServiceException {
2407    try {
2408      if (
2409        server.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(),
2410          request.getKeepAlive())
2411      ) {
2412        return LockHeartbeatResponse.newBuilder()
2413          .setTimeoutMs(server.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF,
2414            LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS))
2415          .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).build();
2416      } else {
2417        return LockHeartbeatResponse.newBuilder()
2418          .setLockStatus(LockHeartbeatResponse.LockStatus.UNLOCKED).build();
2419      }
2420    } catch (IOException e) {
2421      throw new ServiceException(e);
2422    }
2423  }
2424
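  /**
   * Receives region space usage reports from RegionServers and records them in the
   * {@link MasterQuotaManager}. A no-op when space quotas are disabled or the quota manager is not
   * yet available.
   */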
2425  @Override
2426  public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller,
2427    RegionSpaceUseReportRequest request) throws ServiceException {
2428    try {
2429      server.checkInitialized();
2430      if (!QuotaUtil.isQuotaEnabled(server.getConfiguration())) {
2431        return RegionSpaceUseReportResponse.newBuilder().build();
2432      }
2433      MasterQuotaManager quotaManager = this.server.getMasterQuotaManager();
2434      if (quotaManager != null) {
2435        final long now = EnvironmentEdgeManager.currentTime();
2436        for (RegionSpaceUse report : request.getSpaceUseList()) {
2437          quotaManager.addRegionSize(ProtobufUtil.toRegionInfo(report.getRegionInfo()),
2438            report.getRegionSize(), now);
2439        }
2440      } else {
2441        LOG.debug("Received region space usage report but HMaster is not ready to process it, "
2442          + "skipping");
2443      }
2444      return RegionSpaceUseReportResponse.newBuilder().build();
2445    } catch (Exception e) {
2446      throw new ServiceException(e);
2447    }
2448  }
2449
2450  @Override
2451  public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(RpcController controller,
2452    GetSpaceQuotaRegionSizesRequest request) throws ServiceException {
2453    try {
2454      server.checkInitialized();
2455      MasterQuotaManager quotaManager = this.server.getMasterQuotaManager();
2456      GetSpaceQuotaRegionSizesResponse.Builder builder =
2457        GetSpaceQuotaRegionSizesResponse.newBuilder();
2458      if (quotaManager != null) {
2459        Map<RegionInfo, Long> regionSizes = quotaManager.snapshotRegionSizes();
2460        Map<TableName, Long> regionSizesByTable = new HashMap<>();
2461        // Translate RegionInfo+size -> TableName+size
2462        for (Entry<RegionInfo, Long> entry : regionSizes.entrySet()) {
2463          final TableName tableName = entry.getKey().getTable();
2464          Long prevSize = regionSizesByTable.get(tableName);
2465          if (prevSize == null) {
2466            prevSize = 0L;
2467          }
2468          regionSizesByTable.put(tableName, prevSize + entry.getValue());
2469        }
2470        // Serialize them into the protobuf
2471        for (Entry<TableName, Long> tableSize : regionSizesByTable.entrySet()) {
2472          builder.addSizes(
2473            RegionSizes.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableSize.getKey()))
2474              .setSize(tableSize.getValue()).build());
2475        }
2476        return builder.build();
2477      } else {
2478        LOG.debug("Received space quota region size report but HMaster is not ready to process it,"
2479          + "skipping");
2480      }
2481      return builder.build();
2482    } catch (Exception e) {
2483      throw new ServiceException(e);
2484    }
2485  }
2486
2487  @Override
2488  public GetQuotaStatesResponse getQuotaStates(RpcController controller,
2489    GetQuotaStatesRequest request) throws ServiceException {
2490    try {
2491      server.checkInitialized();
2492      QuotaObserverChore quotaChore = this.server.getQuotaObserverChore();
2493      GetQuotaStatesResponse.Builder builder = GetQuotaStatesResponse.newBuilder();
2494      if (quotaChore != null) {
2495        // The "current" view of all tables with quotas
2496        Map<TableName, SpaceQuotaSnapshot> tableSnapshots = quotaChore.getTableQuotaSnapshots();
2497        for (Entry<TableName, SpaceQuotaSnapshot> entry : tableSnapshots.entrySet()) {
2498          builder.addTableSnapshots(TableQuotaSnapshot.newBuilder()
2499            .setTableName(ProtobufUtil.toProtoTableName(entry.getKey()))
2500            .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
2501        }
2502        // The "current" view of all namespaces with quotas
2503        Map<String, SpaceQuotaSnapshot> nsSnapshots = quotaChore.getNamespaceQuotaSnapshots();
2504        for (Entry<String, SpaceQuotaSnapshot> entry : nsSnapshots.entrySet()) {
2505          builder.addNsSnapshots(NamespaceQuotaSnapshot.newBuilder().setNamespace(entry.getKey())
2506            .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
2507        }
2508        return builder.build();
2509      }
2510      return builder.build();
2511    } catch (Exception e) {
2512      throw new ServiceException(e);
2513    }
2514  }
2515
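  /**
   * Clears the requested servers from the Master's dead server list and removes the cleared
   * servers from their RSGroup. A server that still has a ServerCrashProcedure cannot be cleared,
   * and nothing is cleared while dead servers are still being processed.
   */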
2516  @Override
2517  public ClearDeadServersResponse clearDeadServers(RpcController controller,
2518    ClearDeadServersRequest request) throws ServiceException {
2519    LOG.debug(server.getClientIdAuditPrefix() + " clear dead region servers.");
2520    ClearDeadServersResponse.Builder response = ClearDeadServersResponse.newBuilder();
2521    try {
2522      server.checkInitialized();
2523      if (server.cpHost != null) {
2524        server.cpHost.preClearDeadServers();
2525      }
2526
2527      if (server.getServerManager().areDeadServersInProgress()) {
2528        LOG.debug("Some dead server is still under processing, won't clear the dead server list");
2529        response.addAllServerName(request.getServerNameList());
2530      } else {
2531        DeadServer deadServer = server.getServerManager().getDeadServers();
2532        Set<Address> clearedServers = new HashSet<>();
2533        for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
2534          ServerName serverName = ProtobufUtil.toServerName(pbServer);
2535          final boolean deadInProcess =
2536            server.getProcedures().stream().anyMatch(p -> (p instanceof ServerCrashProcedure)
2537              && ((ServerCrashProcedure) p).getServerName().equals(serverName));
2538          if (deadInProcess) {
2539            throw new ServiceException(String.format(
2540              "Dead server '%s' has an associated ServerCrashProcedure, cannot clear it", serverName));
2541          }
2542
2543          if (!deadServer.removeDeadServer(serverName)) {
2544            response.addServerName(pbServer);
2545          } else {
2546            clearedServers.add(serverName.getAddress());
2547          }
2548        }
2549        server.getRSGroupInfoManager().removeServers(clearedServers);
2550        LOG.info("Remove decommissioned servers {} from RSGroup done", clearedServers);
2551      }
2552
2553      if (server.cpHost != null) {
2554        server.cpHost.postClearDeadServers(
2555          ProtobufUtil.toServerNameList(request.getServerNameList()),
2556          ProtobufUtil.toServerNameList(response.getServerNameList()));
2557      }
2558    } catch (IOException io) {
2559      throw new ServiceException(io);
2560    }
2561    return response.build();
2562  }
2563
2564  @Override
2565  public ReportProcedureDoneResponse reportProcedureDone(RpcController controller,
2566    ReportProcedureDoneRequest request) throws ServiceException {
2567    // Check the Master is up and ready for duty before progressing. Remote side will keep trying.
2568    try {
2569      this.server.checkServiceStarted();
2570      for (RemoteProcedureResult result : request.getResultList()) {
2571        // -1 is less than any possible master active time
2572        long initiatingMasterActiveTime =
2573          result.hasInitiatingMasterActiveTime() ? result.getInitiatingMasterActiveTime() : -1;
2574        throwOnOldMaster(result.getProcId(), initiatingMasterActiveTime);
2575      }
2576    } catch (IOException ioe) {
2577      throw new ServiceException(ioe);
2578    }
2579    request.getResultList().forEach(result -> {
2580      if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) {
2581        byte[] remoteResultData =
2582          result.hasProcResultData() ? result.getProcResultData().toByteArray() : null;
2583        server.remoteProcedureCompleted(result.getProcId(), remoteResultData);
2584      } else {
2585        server.remoteProcedureFailed(result.getProcId(),
2586          RemoteProcedureException.fromProto(result.getError()));
2587      }
2588    });
2589    return ReportProcedureDoneResponse.getDefaultInstance();
2590  }
2591
2592  private void throwOnOldMaster(long procId, long initiatingMasterActiveTime)
2593    throws MasterNotRunningException {
2594    if (initiatingMasterActiveTime > server.getMasterActiveTime()) {
2595      // The procedure was initiated by a new active master, but the report was received on a
2596      // master with an older active time
2597      LOG.warn(
2598        "Report for procId: {} and initiatingMasterAT {} received on master with activeTime {}",
2599        procId, initiatingMasterActiveTime, server.getMasterActiveTime());
2600      throw new MasterNotRunningException("Another master is active");
2601    }
2602  }
2603
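  /**
   * Receives file archival notifications from RegionServers and hands them to the
   * {@link MasterQuotaManager} for processing. A no-op when quotas are disabled.
   */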
2604  @Override
2605  public FileArchiveNotificationResponse reportFileArchival(RpcController controller,
2606    FileArchiveNotificationRequest request) throws ServiceException {
2607    try {
2608      server.checkInitialized();
2609      if (!QuotaUtil.isQuotaEnabled(server.getConfiguration())) {
2610        return FileArchiveNotificationResponse.newBuilder().build();
2611      }
2612      server.getMasterQuotaManager().processFileArchivals(request, server.getConnection(),
2613        server.getConfiguration(), server.getFileSystem());
2614      return FileArchiveNotificationResponse.newBuilder().build();
2615    } catch (Exception e) {
2616      throw new ServiceException(e);
2617    }
2618  }
2619
2620  // HBCK Services
2621
2622  @Override
2623  public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest req)
2624    throws ServiceException {
2625    rpcPreCheck("runHbckChore");
2626    LOG.info("{} request HBCK chore to run", server.getClientIdAuditPrefix());
2627    HbckChore hbckChore = server.getHbckChore();
2628    boolean ran = hbckChore.runChore();
2629    return RunHbckChoreResponse.newBuilder().setRan(ran).build();
2630  }
2631
2632  /**
2633   * Update the state of the table in meta only. This is required by hbck in some situations to
2634   * clean up stuck assign/unassign region procedures for the table.
2635   * @return previous state of the table
2636   */
2637  @Override
2638  public GetTableStateResponse setTableStateInMeta(RpcController controller,
2639    SetTableStateInMetaRequest request) throws ServiceException {
2640    rpcPreCheck("setTableStateInMeta");
2641    TableName tn = ProtobufUtil.toTableName(request.getTableName());
2642    try {
2643      TableState prevState = this.server.getTableStateManager().getTableState(tn);
2644      TableState newState = TableState.convert(tn, request.getTableState());
2645      LOG.info("{} set table={} state from {} to {}", server.getClientIdAuditPrefix(), tn,
2646        prevState.getState(), newState.getState());
2647      this.server.getTableStateManager().setTableState(tn, newState.getState());
2648      return GetTableStateResponse.newBuilder().setTableState(prevState.convert()).build();
2649    } catch (Exception e) {
2650      throw new ServiceException(e);
2651    }
2652  }
2653
2654  /**
2655   * Update the state of the region in meta only. This is required by hbck in some situations to
2656   * clean up stuck assign/unassign region procedures for the table.
2657   * @return previous states of the regions
2658   */
2659  @Override
2660  public SetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller,
2661    SetRegionStateInMetaRequest request) throws ServiceException {
2662    rpcPreCheck("setRegionStateInMeta");
2663    SetRegionStateInMetaResponse.Builder builder = SetRegionStateInMetaResponse.newBuilder();
2664    final AssignmentManager am = server.getAssignmentManager();
2665    try {
2666      for (RegionSpecifierAndState s : request.getStatesList()) {
2667        final RegionSpecifier spec = s.getRegionSpecifier();
2668        final RegionInfo targetRegionInfo = getRegionInfo(spec);
2669        final RegionState.State targetState = RegionState.State.convert(s.getState());
2670        final RegionState.State currentState = Optional.ofNullable(targetRegionInfo)
2671          .map(info -> am.getRegionStates().getRegionState(info)).map(RegionState::getState)
2672          .orElseThrow(
2673            () -> new ServiceException("No existing state known for region '" + spec + "'."));
2674        LOG.info("{} set region={} state from {} to {}", server.getClientIdAuditPrefix(),
2675          targetRegionInfo, currentState, targetState);
2676        if (currentState == targetState) {
2677          LOG.debug("Proposed state matches current state. {}, {}", targetRegionInfo, currentState);
2678          continue;
2679        }
2680        MetaTableAccessor.updateRegionState(server.getConnection(), targetRegionInfo, targetState);
2681        // Loads from meta again to refresh AM cache with the new region state
2682        am.populateRegionStatesFromMeta(targetRegionInfo);
2683        builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec)
2684          .setState(currentState.convert()));
2685      }
2686    } catch (IOException e) {
2687      throw new ServiceException(e);
2688    }
2689    return builder.build();
2690  }
2691
2692  /**
2693   * Get {@link RegionInfo} from Master using content of {@link RegionSpecifier} as key.
2694   * @return {@link RegionInfo} found by decoding {@code rs} or {@code null} if {@code rs} is
2695   *         unknown to the master.
2696   * @throws ServiceException If some error occurs while querying META or parsing results.
2697   */
2698  private RegionInfo getRegionInfo(HBaseProtos.RegionSpecifier rs) throws ServiceException {
2699    // TODO: this doesn't handle MOB regions. Should it? See the public method #getRegionInfo
2700    final AssignmentManager am = server.getAssignmentManager();
2701    final String encodedRegionName;
2702    final RegionInfo info;
2703    // first try resolving from the AM's caches.
2704    switch (rs.getType()) {
2705      case REGION_NAME:
2706        final byte[] regionName = rs.getValue().toByteArray();
2707        encodedRegionName = RegionInfo.encodeRegionName(regionName);
2708        info = am.getRegionInfo(regionName);
2709        break;
2710      case ENCODED_REGION_NAME:
2711        encodedRegionName = rs.getValue().toStringUtf8();
2712        info = am.getRegionInfo(encodedRegionName);
2713        break;
2714      default:
2715        throw new IllegalArgumentException("Unrecognized RegionSpecifierType " + rs.getType());
2716    }
2717    if (info != null) {
2718      return info;
2719    }
2720    // fall back to a meta scan and check the cache again.
2721    try {
2722      am.populateRegionStatesFromMeta(encodedRegionName);
2723    } catch (IOException e) {
2724      throw new ServiceException(e);
2725    }
2726    return am.getRegionInfo(encodedRegionName);
2727  }
2728
2729  /**
2730   * @throws ServiceException If no MasterProcedureExecutor
2731   */
2732  private void checkMasterProcedureExecutor() throws ServiceException {
2733    if (this.server.getMasterProcedureExecutor() == null) {
2734      throw new ServiceException("Master's ProcedureExecutor not initialized; retry later");
2735    }
2736  }
2737
2738  /**
2739   * A 'raw' version of assign that operates in bulk and can skirt Master state checks if override
2740   * is set; i.e. assigns can be forced during Master startup or if the RegionState is unclean. Used by HBCK2.
2741   */
2742  @Override
2743  public MasterProtos.AssignsResponse assigns(RpcController controller,
2744    MasterProtos.AssignsRequest request) throws ServiceException {
2745    checkMasterProcedureExecutor();
2746    final ProcedureExecutor<MasterProcedureEnv> pe = server.getMasterProcedureExecutor();
2747    final AssignmentManager am = server.getAssignmentManager();
2748    MasterProtos.AssignsResponse.Builder responseBuilder =
2749      MasterProtos.AssignsResponse.newBuilder();
2750    final boolean override = request.getOverride();
2751    final boolean force = request.getForce();
2752    LOG.info("{} assigns, override={}", server.getClientIdAuditPrefix(), override);
2753    for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
2754      final RegionInfo info = getRegionInfo(rs);
2755      if (info == null) {
2756        LOG.info("Unknown region {}", rs);
2757        continue;
2758      }
2759      responseBuilder.addPid(Optional.ofNullable(am.createOneAssignProcedure(info, override, force))
2760        .map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID));
2761    }
2762    return responseBuilder.build();
2763  }
2764
2765  /**
2766   * A 'raw' version of unassign that operates in bulk and can skirt Master state checks if override
2767   * is set; i.e. unassigns can be forced during Master startup or if the RegionState is unclean.
2768   * Used by HBCK2.
2769   */
2770  @Override
2771  public MasterProtos.UnassignsResponse unassigns(RpcController controller,
2772    MasterProtos.UnassignsRequest request) throws ServiceException {
2773    checkMasterProcedureExecutor();
2774    final ProcedureExecutor<MasterProcedureEnv> pe = server.getMasterProcedureExecutor();
2775    final AssignmentManager am = server.getAssignmentManager();
2776    MasterProtos.UnassignsResponse.Builder responseBuilder =
2777      MasterProtos.UnassignsResponse.newBuilder();
2778    final boolean override = request.getOverride();
2779    final boolean force = request.getForce();
2780    LOG.info("{} unassigns, override={}", server.getClientIdAuditPrefix(), override);
2781    for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
2782      final RegionInfo info = getRegionInfo(rs);
2783      if (info == null) {
2784        LOG.info("Unknown region {}", rs);
2785        continue;
2786      }
2787      responseBuilder
2788        .addPid(Optional.ofNullable(am.createOneUnassignProcedure(info, override, force))
2789          .map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID));
2790    }
2791    return responseBuilder.build();
2792  }
2793
2794  /**
2795   * Bypass the specified procedures to completion. Each procedure is marked completed but no actual
2796   * work is done from its current state/step onwards. Parents of the procedure are also marked for
2797   * bypass. NOTE: this is a dangerous operation intended to unstick buggy procedures; it may leave
2798   * the system in an incoherent state and may need to be followed by cleanup steps or actions by
2799   * the operator.
2800   * @return BypassProcedureResponse indicating, for each procedure, whether the bypass succeeded
2801   */
2802  @Override
2803  public MasterProtos.BypassProcedureResponse bypassProcedure(RpcController controller,
2804    MasterProtos.BypassProcedureRequest request) throws ServiceException {
2805    try {
2806      LOG.info("{} bypass procedures={}, waitTime={}, override={}, recursive={}",
2807        server.getClientIdAuditPrefix(), request.getProcIdList(), request.getWaitTime(),
2808        request.getOverride(), request.getRecursive());
2809      List<Boolean> ret =
2810        server.getMasterProcedureExecutor().bypassProcedure(request.getProcIdList(),
2811          request.getWaitTime(), request.getOverride(), request.getRecursive());
2812      return MasterProtos.BypassProcedureResponse.newBuilder().addAllBypassed(ret).build();
2813    } catch (IOException e) {
2814      throw new ServiceException(e);
2815    }
2816  }
2817
2818  @Override
2819  public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProcedure(
2820    RpcController controller, MasterProtos.ScheduleServerCrashProcedureRequest request)
2821    throws ServiceException {
2822    List<Long> pids = new ArrayList<>();
2823    for (HBaseProtos.ServerName sn : request.getServerNameList()) {
2824      ServerName serverName = ProtobufUtil.toServerName(sn);
2825      LOG.info("{} schedule ServerCrashProcedure for {}", this.server.getClientIdAuditPrefix(),
2826        serverName);
2827      if (shouldSubmitSCP(serverName)) {
2828        pids.add(this.server.getServerManager().expireServer(serverName, true));
2829      } else {
2830        pids.add(Procedure.NO_PROC_ID);
2831      }
2832    }
2833    return MasterProtos.ScheduleServerCrashProcedureResponse.newBuilder().addAllPid(pids).build();
2834  }
2835
2836  @Override
2837  public MasterProtos.ScheduleSCPsForUnknownServersResponse scheduleSCPsForUnknownServers(
2838    RpcController controller, MasterProtos.ScheduleSCPsForUnknownServersRequest request)
2839    throws ServiceException {
2840    List<Long> pids = new ArrayList<>();
2841    final Set<ServerName> serverNames = server.getAssignmentManager().getRegionStates()
2842      .getRegionStates().stream().map(RegionState::getServerName).collect(Collectors.toSet());
2843
2844    final Set<ServerName> unknownServerNames = serverNames.stream()
2845      .filter(sn -> server.getServerManager().isServerUnknown(sn)).collect(Collectors.toSet());
2846
2847    for (ServerName sn : unknownServerNames) {
2848      LOG.info("{} schedule ServerCrashProcedure for unknown {}",
2849        this.server.getClientIdAuditPrefix(), sn);
2850      if (shouldSubmitSCP(sn)) {
2851        pids.add(this.server.getServerManager().expireServer(sn, true));
2852      } else {
2853        pids.add(Procedure.NO_PROC_ID);
2854      }
2855    }
2856    return MasterProtos.ScheduleSCPsForUnknownServersResponse.newBuilder().addAllPid(pids).build();
2857  }
2858
2859  @Override
2860  public FixMetaResponse fixMeta(RpcController controller, FixMetaRequest request)
2861    throws ServiceException {
2862    rpcPreCheck("fixMeta");
2863    try {
2864      MetaFixer mf = new MetaFixer(this.server);
2865      mf.fix();
2866      return FixMetaResponse.newBuilder().build();
2867    } catch (IOException ioe) {
2868      throw new ServiceException(ioe);
2869    }
2870  }
2871
2872  @Override
2873  public SwitchRpcThrottleResponse switchRpcThrottle(RpcController controller,
2874    SwitchRpcThrottleRequest request) throws ServiceException {
2875    try {
2876      server.checkInitialized();
2877      return server.getMasterQuotaManager().switchRpcThrottle(request);
2878    } catch (Exception e) {
2879      throw new ServiceException(e);
2880    }
2881  }
2882
2883  @Override
2884  public MasterProtos.IsRpcThrottleEnabledResponse isRpcThrottleEnabled(RpcController controller,
2885    MasterProtos.IsRpcThrottleEnabledRequest request) throws ServiceException {
2886    try {
2887      server.checkInitialized();
2888      return server.getMasterQuotaManager().isRpcThrottleEnabled(request);
2889    } catch (Exception e) {
2890      throw new ServiceException(e);
2891    }
2892  }
2893
2894  @Override
2895  public SwitchExceedThrottleQuotaResponse switchExceedThrottleQuota(RpcController controller,
2896    SwitchExceedThrottleQuotaRequest request) throws ServiceException {
2897    try {
2898      server.checkInitialized();
2899      return server.getMasterQuotaManager().switchExceedThrottleQuota(request);
2900    } catch (Exception e) {
2901      throw new ServiceException(e);
2902    }
2903  }
2904
2905  @Override
2906  public GrantResponse grant(RpcController controller, GrantRequest request)
2907    throws ServiceException {
2908    try {
2909      server.checkInitialized();
2910      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
2911        final UserPermission perm =
2912          ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
2913        boolean mergeExistingPermissions = request.getMergeExistingPermissions();
2914        server.cpHost.preGrant(perm, mergeExistingPermissions);
2915        try (Table table = server.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
2916          PermissionStorage.addUserPermission(getConfiguration(), perm, table,
2917            mergeExistingPermissions);
2918        }
2919        server.cpHost.postGrant(perm, mergeExistingPermissions);
2920        return GrantResponse.getDefaultInstance();
2921      } else {
2922        throw new DoNotRetryIOException(
2923          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2924      }
2925    } catch (IOException ioe) {
2926      throw new ServiceException(ioe);
2927    }
2928  }
2929
2930  @Override
2931  public RevokeResponse revoke(RpcController controller, RevokeRequest request)
2932    throws ServiceException {
2933    try {
2934      server.checkInitialized();
2935      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
2936        final UserPermission userPermission =
2937          ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
2938        server.cpHost.preRevoke(userPermission);
2939        try (Table table = server.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
2940          PermissionStorage.removeUserPermission(server.getConfiguration(), userPermission, table);
2941        }
2942        server.cpHost.postRevoke(userPermission);
2943        return RevokeResponse.getDefaultInstance();
2944      } else {
2945        throw new DoNotRetryIOException(
2946          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2947      }
2948    } catch (IOException ioe) {
2949      throw new ServiceException(ioe);
2950    }
2951  }
2952
2953  @Override
2954  public GetUserPermissionsResponse getUserPermissions(RpcController controller,
2955    GetUserPermissionsRequest request) throws ServiceException {
2956    try {
2957      server.checkInitialized();
2958      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
2959        final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null;
2960        String namespace =
2961          request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null;
2962        TableName table =
2963          request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
2964        byte[] cf = request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null;
2965        byte[] cq =
2966          request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null;
2967        Type permissionType = request.hasType() ? request.getType() : null;
2968        server.getMasterCoprocessorHost().preGetUserPermissions(userName, namespace, table, cf, cq);
2969
2970        List<UserPermission> perms = null;
2971        if (permissionType == Type.Table) {
2972          boolean filter = cf != null || userName != null;
2973          perms = PermissionStorage.getUserTablePermissions(server.getConfiguration(), table, cf,
2974            cq, userName, filter);
2975        } else if (permissionType == Type.Namespace) {
2976          perms = PermissionStorage.getUserNamespacePermissions(server.getConfiguration(),
2977            namespace, userName, userName != null);
2978        } else {
2979          perms = PermissionStorage.getUserPermissions(server.getConfiguration(), null, null, null,
2980            userName, userName != null);
2981          // Skip super users when filter user is specified
2982          if (userName == null) {
2983            // Add superusers explicitly to the result set since PermissionStorage does not store
2984            // them. Also use acl as the table name to stay in line with the results of global admin
2985            // and to help avoid any leakage of information about who the superusers are.
2986            for (String user : Superusers.getSuperUsers()) {
2987              perms.add(new UserPermission(user,
2988                Permission.newBuilder().withActions(Action.values()).build()));
2989            }
2990          }
2991        }
2992
2993        server.getMasterCoprocessorHost().postGetUserPermissions(userName, namespace, table, cf,
2994          cq);
2995        AccessControlProtos.GetUserPermissionsResponse response =
2996          ShadedAccessControlUtil.buildGetUserPermissionsResponse(perms);
2997        return response;
2998      } else {
2999        throw new DoNotRetryIOException(
3000          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
3001      }
3002    } catch (IOException ioe) {
3003      throw new ServiceException(ioe);
3004    }
3005  }
3006
3007  @Override
3008  public HasUserPermissionsResponse hasUserPermissions(RpcController controller,
3009    HasUserPermissionsRequest request) throws ServiceException {
3010    try {
3011      server.checkInitialized();
3012      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
3013        User caller = RpcServer.getRequestUser().orElse(null);
3014        String userName =
3015          request.hasUserName() ? request.getUserName().toStringUtf8() : caller.getShortName();
3016        List<Permission> permissions = new ArrayList<>();
3017        for (int i = 0; i < request.getPermissionCount(); i++) {
3018          permissions.add(ShadedAccessControlUtil.toPermission(request.getPermission(i)));
3019        }
3020        server.getMasterCoprocessorHost().preHasUserPermissions(userName, permissions);
3021        if (!caller.getShortName().equals(userName)) {
3022          List<String> groups = AccessChecker.getUserGroups(userName);
3023          caller = new InputUser(userName, groups.toArray(new String[groups.size()]));
3024        }
3025        List<Boolean> hasUserPermissions = new ArrayList<>();
3026        if (getAccessChecker() != null) {
3027          for (Permission permission : permissions) {
3028            boolean hasUserPermission =
3029              getAccessChecker().hasUserPermission(caller, "hasUserPermissions", permission);
3030            hasUserPermissions.add(hasUserPermission);
3031          }
3032        } else {
3033          for (int i = 0; i < permissions.size(); i++) {
3034            hasUserPermissions.add(true);
3035          }
3036        }
3037        server.getMasterCoprocessorHost().postHasUserPermissions(userName, permissions);
3038        HasUserPermissionsResponse.Builder builder =
3039          HasUserPermissionsResponse.newBuilder().addAllHasUserPermission(hasUserPermissions);
3040        return builder.build();
3041      } else {
3042        throw new DoNotRetryIOException(
3043          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
3044      }
3045    } catch (IOException ioe) {
3046      throw new ServiceException(ioe);
3047    }
3048  }
3049
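  /**
   * Returns false if there is already an unfinished ServerCrashProcedure for the given server, so
   * callers can avoid scheduling a duplicate SCP; returns true otherwise.
   */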
3050  private boolean shouldSubmitSCP(ServerName serverName) {
3051    // check if there is already a SCP of this server running
3052    List<Procedure<MasterProcedureEnv>> procedures =
3053      server.getMasterProcedureExecutor().getProcedures();
3054    for (Procedure<MasterProcedureEnv> procedure : procedures) {
3055      if (procedure instanceof ServerCrashProcedure) {
3056        if (
3057          serverName.compareTo(((ServerCrashProcedure) procedure).getServerName()) == 0
3058            && !procedure.isFinished()
3059        ) {
3060          LOG.info("there is already a SCP of this server {} running, pid {}", serverName,
3061            procedure.getProcId());
3062          return false;
3063        }
3064      }
3065    }
3066    return true;
3067  }
3068
3069  @Override
3070  public GetRSGroupInfoResponse getRSGroupInfo(RpcController controller,
3071    GetRSGroupInfoRequest request) throws ServiceException {
3072    String groupName = request.getRSGroupName();
3073    LOG.info(
3074      server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" + groupName);
3075    try {
3076      if (server.getMasterCoprocessorHost() != null) {
3077        server.getMasterCoprocessorHost().preGetRSGroupInfo(groupName);
3078      }
3079      RSGroupInfo rsGroupInfo = server.getRSGroupInfoManager().getRSGroup(groupName);
3080      GetRSGroupInfoResponse resp;
3081      if (rsGroupInfo != null) {
3082        resp = GetRSGroupInfoResponse.newBuilder()
3083          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
3084      } else {
3085        resp = GetRSGroupInfoResponse.getDefaultInstance();
3086      }
3087      if (server.getMasterCoprocessorHost() != null) {
3088        server.getMasterCoprocessorHost().postGetRSGroupInfo(groupName);
3089      }
3090      return resp;
3091    } catch (IOException e) {
3092      throw new ServiceException(e);
3093    }
3094  }
3095
3096  @Override
3097  public GetRSGroupInfoOfTableResponse getRSGroupInfoOfTable(RpcController controller,
3098    GetRSGroupInfoOfTableRequest request) throws ServiceException {
3099    TableName tableName = ProtobufUtil.toTableName(request.getTableName());
3100    LOG.info(
3101      server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName);
3102    try {
3103      if (server.getMasterCoprocessorHost() != null) {
3104        server.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName);
3105      }
3106      GetRSGroupInfoOfTableResponse resp;
3107      TableDescriptor td = server.getTableDescriptors().get(tableName);
3108      if (td == null) {
3109        resp = GetRSGroupInfoOfTableResponse.getDefaultInstance();
3110      } else {
3111        RSGroupInfo rsGroupInfo =
3112          RSGroupUtil.getRSGroupInfo(server, server.getRSGroupInfoManager(), tableName)
3113            .orElse(server.getRSGroupInfoManager().getRSGroup(RSGroupInfo.DEFAULT_GROUP));
3114        resp = GetRSGroupInfoOfTableResponse.newBuilder()
3115          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
3116      }
3117      if (server.getMasterCoprocessorHost() != null) {
3118        server.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName);
3119      }
3120      return resp;
3121    } catch (IOException e) {
3122      throw new ServiceException(e);
3123    }
3124  }
3125
3126  @Override
3127  public GetRSGroupInfoOfServerResponse getRSGroupInfoOfServer(RpcController controller,
3128    GetRSGroupInfoOfServerRequest request) throws ServiceException {
3129    Address hp =
3130      Address.fromParts(request.getServer().getHostName(), request.getServer().getPort());
3131    LOG.info(server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp);
3132    try {
3133      if (server.getMasterCoprocessorHost() != null) {
3134        server.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp);
3135      }
3136      RSGroupInfo rsGroupInfo = server.getRSGroupInfoManager().getRSGroupOfServer(hp);
3137      GetRSGroupInfoOfServerResponse resp;
3138      if (rsGroupInfo != null) {
3139        resp = GetRSGroupInfoOfServerResponse.newBuilder()
3140          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
3141      } else {
3142        resp = GetRSGroupInfoOfServerResponse.getDefaultInstance();
3143      }
3144      if (server.getMasterCoprocessorHost() != null) {
3145        server.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp);
3146      }
3147      return resp;
3148    } catch (IOException e) {
3149      throw new ServiceException(e);
3150    }
3151  }
3152
3153  @Override
3154  public MoveServersResponse moveServers(RpcController controller, MoveServersRequest request)
3155    throws ServiceException {
3156    Set<Address> hostPorts = Sets.newHashSet();
3157    MoveServersResponse.Builder builder = MoveServersResponse.newBuilder();
3158    for (HBaseProtos.ServerName el : request.getServersList()) {
3159      hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
3160    }
3161    LOG.info(server.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup "
3162      + request.getTargetGroup());
3163    try {
3164      if (server.getMasterCoprocessorHost() != null) {
3165        server.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup());
3166      }
3167      server.getRSGroupInfoManager().moveServers(hostPorts, request.getTargetGroup());
3168      if (server.getMasterCoprocessorHost() != null) {
3169        server.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup());
3170      }
3171    } catch (IOException e) {
3172      throw new ServiceException(e);
3173    }
3174    return builder.build();
3175  }
3176
3177  @Override
3178  public AddRSGroupResponse addRSGroup(RpcController controller, AddRSGroupRequest request)
3179    throws ServiceException {
3180    AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder();
3181    LOG.info(server.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName());
3182    try {
3183      if (server.getMasterCoprocessorHost() != null) {
3184        server.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName());
3185      }
3186      server.getRSGroupInfoManager().addRSGroup(new RSGroupInfo(request.getRSGroupName()));
3187      if (server.getMasterCoprocessorHost() != null) {
3188        server.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName());
3189      }
3190    } catch (IOException e) {
3191      throw new ServiceException(e);
3192    }
3193    return builder.build();
3194  }
3195
3196  @Override
3197  public RemoveRSGroupResponse removeRSGroup(RpcController controller, RemoveRSGroupRequest request)
3198    throws ServiceException {
3199    RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder();
3200    LOG.info(server.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName());
3201    try {
3202      if (server.getMasterCoprocessorHost() != null) {
3203        server.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName());
3204      }
3205      server.getRSGroupInfoManager().removeRSGroup(request.getRSGroupName());
3206      if (server.getMasterCoprocessorHost() != null) {
3207        server.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName());
3208      }
3209    } catch (IOException e) {
3210      throw new ServiceException(e);
3211    }
3212    return builder.build();
3213  }
3214
3215  @Override
3216  public BalanceRSGroupResponse balanceRSGroup(RpcController controller,
3217    BalanceRSGroupRequest request) throws ServiceException {
3218    BalanceRequest balanceRequest = ProtobufUtil.toBalanceRequest(request);
3219
3220    BalanceRSGroupResponse.Builder builder =
3221      BalanceRSGroupResponse.newBuilder().setBalanceRan(false);
3222
3223    LOG.info(
3224      server.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName());
3225    try {
3226      if (server.getMasterCoprocessorHost() != null) {
3227        server.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName(),
3228          balanceRequest);
3229      }
3230      BalanceResponse response =
3231        server.getRSGroupInfoManager().balanceRSGroup(request.getRSGroupName(), balanceRequest);
3232      ProtobufUtil.populateBalanceRSGroupResponse(builder, response);
3233      if (server.getMasterCoprocessorHost() != null) {
3234        server.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(),
3235          balanceRequest, response);
3236      }
3237    } catch (IOException e) {
3238      throw new ServiceException(e);
3239    }
3240    return builder.build();
3241  }
3242
3243  @Override
3244  public ListRSGroupInfosResponse listRSGroupInfos(RpcController controller,
3245    ListRSGroupInfosRequest request) throws ServiceException {
3246    ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder();
3247    LOG.info(server.getClientIdAuditPrefix() + " list rsgroup");
3248    try {
3249      if (server.getMasterCoprocessorHost() != null) {
3250        server.getMasterCoprocessorHost().preListRSGroups();
3251      }
3252      List<RSGroupInfo> rsGroupInfos = server.getRSGroupInfoManager().listRSGroups().stream()
3253        .map(RSGroupInfo::new).collect(Collectors.toList());
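      // A table may be placed in a group either via the group's own table list or via the table
      // descriptor's region server group; fill in tables from descriptors that are not already
      // listed by a group before building the response.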
3254      Map<String, RSGroupInfo> name2Info = new HashMap<>();
3255      List<TableDescriptor> needToFill =
3256        new ArrayList<>(server.getTableDescriptors().getAll().values());
3257      for (RSGroupInfo rsGroupInfo : rsGroupInfos) {
3258        name2Info.put(rsGroupInfo.getName(), rsGroupInfo);
3259        for (TableDescriptor td : server.getTableDescriptors().getAll().values()) {
3260          if (rsGroupInfo.containsTable(td.getTableName())) {
3261            needToFill.remove(td);
3262          }
3263        }
3264      }
3265      for (TableDescriptor td : needToFill) {
3266        String groupName = td.getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP);
3267        RSGroupInfo rsGroupInfo = name2Info.get(groupName);
3268        if (rsGroupInfo != null) {
3269          rsGroupInfo.addTable(td.getTableName());
3270        }
3271      }
3272      for (RSGroupInfo rsGroupInfo : rsGroupInfos) {
3273        // TODO: this can be done once outside this loop; we do not need to scan everything every time.
3274        builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo));
3275      }
3276      if (server.getMasterCoprocessorHost() != null) {
3277        server.getMasterCoprocessorHost().postListRSGroups();
3278      }
3279    } catch (IOException e) {
3280      throw new ServiceException(e);
3281    }
3282    return builder.build();
3283  }
3284
3285  @Override
3286  public RemoveServersResponse removeServers(RpcController controller, RemoveServersRequest request)
3287    throws ServiceException {
3288    RemoveServersResponse.Builder builder = RemoveServersResponse.newBuilder();
3289    Set<Address> servers = Sets.newHashSet();
3290    for (HBaseProtos.ServerName el : request.getServersList()) {
3291      servers.add(Address.fromParts(el.getHostName(), el.getPort()));
3292    }
3293    LOG.info(
3294      server.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " + servers);
3295    try {
3296      if (server.getMasterCoprocessorHost() != null) {
3297        server.getMasterCoprocessorHost().preRemoveServers(servers);
3298      }
3299      server.getRSGroupInfoManager().removeServers(servers);
3300      if (server.getMasterCoprocessorHost() != null) {
3301        server.getMasterCoprocessorHost().postRemoveServers(servers);
3302      }
3303    } catch (IOException e) {
3304      throw new ServiceException(e);
3305    }
3306    return builder.build();
3307  }
3308
3309  @Override
3310  public ListTablesInRSGroupResponse listTablesInRSGroup(RpcController controller,
3311    ListTablesInRSGroupRequest request) throws ServiceException {
3312    ListTablesInRSGroupResponse.Builder builder = ListTablesInRSGroupResponse.newBuilder();
3313    String groupName = request.getGroupName();
3314    LOG.info(server.getClientIdAuditPrefix() + " list tables in rsgroup " + groupName);
3315    try {
3316      if (server.getMasterCoprocessorHost() != null) {
3317        server.getMasterCoprocessorHost().preListTablesInRSGroup(groupName);
3318      }
3319      RSGroupUtil.listTablesInRSGroup(server, groupName).stream()
3320        .map(ProtobufUtil::toProtoTableName).forEach(builder::addTableName);
3321      if (server.getMasterCoprocessorHost() != null) {
3322        server.getMasterCoprocessorHost().postListTablesInRSGroup(groupName);
3323      }
3324    } catch (IOException e) {
3325      throw new ServiceException(e);
3326    }
3327    return builder.build();
3328  }
3329
3330  @Override
3331  public GetConfiguredNamespacesAndTablesInRSGroupResponse
3332    getConfiguredNamespacesAndTablesInRSGroup(RpcController controller,
3333      GetConfiguredNamespacesAndTablesInRSGroupRequest request) throws ServiceException {
3334    GetConfiguredNamespacesAndTablesInRSGroupResponse.Builder builder =
3335      GetConfiguredNamespacesAndTablesInRSGroupResponse.newBuilder();
3336    String groupName = request.getGroupName();
3337    LOG.info(server.getClientIdAuditPrefix() + " get configured namespaces and tables in rsgroup "
3338      + groupName);
3339    try {
3340      if (server.getMasterCoprocessorHost() != null) {
3341        server.getMasterCoprocessorHost().preGetConfiguredNamespacesAndTablesInRSGroup(groupName);
3342      }
3343      for (NamespaceDescriptor nd : server.getClusterSchema().getNamespaces()) {
3344        if (groupName.equals(nd.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP))) {
3345          builder.addNamespace(nd.getName());
3346        }
3347      }
3348      for (TableDescriptor td : server.getTableDescriptors().getAll().values()) {
3349        if (td.getRegionServerGroup().map(g -> g.equals(groupName)).orElse(false)) {
3350          builder.addTableName(ProtobufUtil.toProtoTableName(td.getTableName()));
3351        }
3352      }
3353      if (server.getMasterCoprocessorHost() != null) {
3354        server.getMasterCoprocessorHost().postGetConfiguredNamespacesAndTablesInRSGroup(groupName);
3355      }
3356    } catch (IOException e) {
3357      throw new ServiceException(e);
3358    }
3359    return builder.build();
3360  }
3361
  @Override
  public RenameRSGroupResponse renameRSGroup(RpcController controller, RenameRSGroupRequest request)
    throws ServiceException {
    RenameRSGroupResponse.Builder builder = RenameRSGroupResponse.newBuilder();
    String oldRSGroup = request.getOldRsgroupName();
    String newRSGroup = request.getNewRsgroupName();
    LOG.info("{} rename rsgroup from {} to {}", server.getClientIdAuditPrefix(), oldRSGroup,
      newRSGroup);
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preRenameRSGroup(oldRSGroup, newRSGroup);
      }
      server.getRSGroupInfoManager().renameRSGroup(oldRSGroup, newRSGroup);
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postRenameRSGroup(oldRSGroup, newRSGroup);
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return builder.build();
  }

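  /**
   * Updates the configuration of a RegionServer group with the key/value pairs carried in the
   * request, wrapping the update in the master coprocessor pre/post hooks.
   */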
  @Override
  public UpdateRSGroupConfigResponse updateRSGroupConfig(RpcController controller,
    UpdateRSGroupConfigRequest request) throws ServiceException {
    UpdateRSGroupConfigResponse.Builder builder = UpdateRSGroupConfigResponse.newBuilder();
    String groupName = request.getGroupName();
    Map<String, String> configuration = new HashMap<>();
    request.getConfigurationList().forEach(p -> configuration.put(p.getName(), p.getValue()));
    LOG.info("{} update rsgroup {} configuration {}", server.getClientIdAuditPrefix(), groupName,
      configuration);
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preUpdateRSGroupConfig(groupName, configuration);
      }
      server.getRSGroupInfoManager().updateRSGroupConfig(groupName, configuration);
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postUpdateRSGroupConfig(groupName, configuration);
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return builder.build();
  }

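  /**
   * Returns recent master log records of the requested type. The request names a protobuf log
   * class whose payload is parsed reflectively; only balancer decision and balancer rejection
   * records are served here, anything else is rejected.
   */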
  @Override
  public HBaseProtos.LogEntry getLogEntries(RpcController controller,
    HBaseProtos.LogRequest request) throws ServiceException {
    try {
      final String logClassName = request.getLogClassName();
      Class<?> logClass = Class.forName(logClassName).asSubclass(Message.class);
      Method method = logClass.getMethod("parseFrom", ByteString.class);
      if (logClassName.contains("BalancerDecisionsRequest")) {
        MasterProtos.BalancerDecisionsRequest balancerDecisionsRequest =
          (MasterProtos.BalancerDecisionsRequest) method.invoke(null, request.getLogMessage());
        MasterProtos.BalancerDecisionsResponse balancerDecisionsResponse =
          getBalancerDecisions(balancerDecisionsRequest);
        return HBaseProtos.LogEntry.newBuilder()
          .setLogClassName(balancerDecisionsResponse.getClass().getName())
          .setLogMessage(balancerDecisionsResponse.toByteString()).build();
      } else if (logClassName.contains("BalancerRejectionsRequest")) {
        MasterProtos.BalancerRejectionsRequest balancerRejectionsRequest =
          (MasterProtos.BalancerRejectionsRequest) method.invoke(null, request.getLogMessage());
        MasterProtos.BalancerRejectionsResponse balancerRejectionsResponse =
          getBalancerRejections(balancerRejectionsRequest);
        return HBaseProtos.LogEntry.newBuilder()
          .setLogClassName(balancerRejectionsResponse.getClass().getName())
          .setLogMessage(balancerRejectionsResponse.toByteString()).build();
      }
    } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException
      | InvocationTargetException e) {
      LOG.error("Error while retrieving log entries.", e);
      throw new ServiceException(e);
    }
    throw new ServiceException(
      "Invalid request params: unsupported log class name " + request.getLogClassName());
  }

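  /**
   * Fetches recent balancer decisions from the in-memory named queue recorder, returning an
   * empty response when the recorder is not enabled.
   */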
  private MasterProtos.BalancerDecisionsResponse
    getBalancerDecisions(MasterProtos.BalancerDecisionsRequest request) {
    final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder();
    if (namedQueueRecorder == null) {
      return MasterProtos.BalancerDecisionsResponse.newBuilder()
        .addAllBalancerDecision(Collections.emptyList()).build();
    }
    final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest();
    namedQueueGetRequest.setNamedQueueEvent(BalancerDecisionDetails.BALANCER_DECISION_EVENT);
    namedQueueGetRequest.setBalancerDecisionsRequest(request);
    NamedQueueGetResponse namedQueueGetResponse =
      namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest);
    List<RecentLogs.BalancerDecision> balancerDecisions = namedQueueGetResponse != null
      ? namedQueueGetResponse.getBalancerDecisions()
      : Collections.emptyList();
    return MasterProtos.BalancerDecisionsResponse.newBuilder()
      .addAllBalancerDecision(balancerDecisions).build();
  }

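  /**
   * Fetches recent balancer rejections from the in-memory named queue recorder, returning an
   * empty response when the recorder is not enabled.
   */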
  private MasterProtos.BalancerRejectionsResponse
    getBalancerRejections(MasterProtos.BalancerRejectionsRequest request) {
    final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder();
    if (namedQueueRecorder == null) {
      return MasterProtos.BalancerRejectionsResponse.newBuilder()
        .addAllBalancerRejection(Collections.emptyList()).build();
    }
    final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest();
    namedQueueGetRequest.setNamedQueueEvent(BalancerRejectionDetails.BALANCER_REJECTION_EVENT);
    namedQueueGetRequest.setBalancerRejectionsRequest(request);
    NamedQueueGetResponse namedQueueGetResponse =
      namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest);
    List<RecentLogs.BalancerRejection> balancerRejections = namedQueueGetResponse != null
      ? namedQueueGetResponse.getBalancerRejections()
      : Collections.emptyList();
    return MasterProtos.BalancerRejectionsResponse.newBuilder()
      .addAllBalancerRejection(balancerRejections).build();
  }

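  /**
   * Looks up the region info for the region named in the request. A MOB region name does not map
   * to a real region, so a synthetic MOB region info (plus the MOB compaction state, if asked
   * for) is returned instead; any other unknown name results in an UnknownRegionException.
   */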
  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public GetRegionInfoResponse getRegionInfo(final RpcController controller,
    final GetRegionInfoRequest request) throws ServiceException {
    final GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
    final RegionInfo info = getRegionInfo(request.getRegion());
    if (info != null) {
      builder.setRegionInfo(ProtobufUtil.toRegionInfo(info));
    } else {
      // Is it a MOB region name? These are handled differently.
      byte[] regionName = request.getRegion().getValue().toByteArray();
      TableName tableName = RegionInfo.getTable(regionName);
      if (MobUtils.isMobRegionName(tableName, regionName)) {
        // Return a dummy MOB region info; it carries the compaction state if requested.
        RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName);
        builder.setRegionInfo(ProtobufUtil.toRegionInfo(mobRegionInfo));
        if (request.hasCompactionState() && request.getCompactionState()) {
          builder.setCompactionState(server.getMobCompactionState(tableName));
        }
      } else {
        // Neither a known region nor a MOB region name, so report it as unknown.
        throw new ServiceException(new UnknownRegionException(Bytes.toString(regionName)));
      }
    }
    return builder.build();
  }

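  // The AdminService region methods below are only meaningful on a RegionServer; the master
  // rejects them with a DoNotRetryIOException so that clients fail fast instead of retrying.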
  @Override
  public GetStoreFileResponse getStoreFile(RpcController controller, GetStoreFileRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetOnlineRegionResponse getOnlineRegion(RpcController controller,
    GetOnlineRegionRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public OpenRegionResponse openRegion(RpcController controller, OpenRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public WarmupRegionResponse warmupRegion(RpcController controller, WarmupRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public CloseRegionResponse closeRegion(RpcController controller, CloseRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public FlushRegionResponse flushRegion(RpcController controller, FlushRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public CompactionSwitchResponse compactionSwitch(RpcController controller,
    CompactionSwitchRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public CompactRegionResponse compactRegion(RpcController controller, CompactRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ReplicateWALEntryResponse replicateWALEntry(RpcController controller,
    ReplicateWALEntryRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ReplicateWALEntryResponse replay(RpcController controller,
    ReplicateWALEntryRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public RollWALWriterResponse rollWALWriter(RpcController controller, RollWALWriterRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetServerInfoResponse getServerInfo(RpcController controller, GetServerInfoRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public StopServerResponse stopServer(RpcController controller, StopServerRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller,
    UpdateFavoredNodesRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetRegionLoadResponse getRegionLoad(RpcController controller, GetRegionLoadRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller,
    ClearCompactionQueuesRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller,
    ClearRegionBlockCacheRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(RpcController controller,
    GetSpaceQuotaSnapshotsRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ExecuteProceduresResponse executeProcedures(RpcController controller,
    ExecuteProceduresRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetCachedFilesListResponse getCachedFilesList(RpcController controller,
    GetCachedFilesListRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

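  /**
   * Returns up to {@code request.getCount()} currently live RegionServers, drawn from a shuffled
   * copy of the live server list, together with the total number of live servers.
   */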
  @Override
  public GetLiveRegionServersResponse getLiveRegionServers(RpcController controller,
    GetLiveRegionServersRequest request) throws ServiceException {
    List<ServerName> regionServers = new ArrayList<>(server.getLiveRegionServers());
    Collections.shuffle(regionServers, ThreadLocalRandom.current());
    GetLiveRegionServersResponse.Builder builder =
      GetLiveRegionServersResponse.newBuilder().setTotal(regionServers.size());
    regionServers.stream().limit(request.getCount()).map(ProtobufUtil::toServerName)
      .forEach(builder::addServer);
    return builder.build();
  }

  @Override
  public ReplicateWALEntryResponse replicateToReplica(RpcController controller,
    ReplicateWALEntryRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

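  /**
   * Flushes the master store (the master's local region) after the standard RPC pre-check.
   */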
  @Override
  public FlushMasterStoreResponse flushMasterStore(RpcController controller,
    FlushMasterStoreRequest request) throws ServiceException {
    rpcPreCheck("flushMasterStore");
    try {
      server.flushMasterStore();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return FlushMasterStoreResponse.newBuilder().build();
  }

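  /**
   * Submits a flush procedure for the given table and returns its procedure id. An empty column
   * family list in the request means all column families are flushed; otherwise only the named
   * families are flushed.
   */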
  @Override
  public FlushTableResponse flushTable(RpcController controller, FlushTableRequest req)
    throws ServiceException {
    TableName tableName = ProtobufUtil.toTableName(req.getTableName());
    List<byte[]> columnFamilies = req.getColumnFamilyCount() > 0
      ? req.getColumnFamilyList().stream().filter(cf -> !cf.isEmpty()).map(ByteString::toByteArray)
        .collect(Collectors.toList())
      : null;
    try {
      long procId =
        server.flushTable(tableName, columnFamilies, req.getNonceGroup(), req.getNonce());
      return FlushTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

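  /**
   * Restores the backup system table from the completed snapshot named in the request by
   * submitting a RestoreBackupSystemTableProcedure; fails if no completed snapshot with that name
   * exists.
   */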
  @Override
  public MasterProtos.RestoreBackupSystemTableResponse restoreBackupSystemTable(
    RpcController rpcController,
    MasterProtos.RestoreBackupSystemTableRequest restoreBackupSystemTableRequest)
    throws ServiceException {
    try {
      String snapshotName = restoreBackupSystemTableRequest.getSnapshotName();
      SnapshotDescription snapshot = server.snapshotManager.getCompletedSnapshots().stream()
        .filter(s -> s.getName().equals(snapshotName)).findFirst()
        .orElseThrow(() -> new ServiceException("Snapshot %s not found".formatted(snapshotName)));
      long pid = server.getMasterProcedureExecutor()
        .submitProcedure(new RestoreBackupSystemTableProcedure(snapshot));
      return MasterProtos.RestoreBackupSystemTableResponse.newBuilder().setProcId(pid).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

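  /**
   * Submits a procedure that rolls the WAL writers across the cluster and returns its procedure
   * id.
   */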
  @Override
  public RollAllWALWritersResponse rollAllWALWriters(RpcController rpcController,
    RollAllWALWritersRequest request) throws ServiceException {
    try {
      long procId = server.rollAllWALWriters(request.getNonceGroup(), request.getNonce());
      return RollAllWALWritersResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }
}