/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseRpcServicesBase;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.client.VersionInfoUtil;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.PriorityFunction;
import org.apache.hadoop.hbase.ipc.QosPriority;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.hbck.HbckChore;
import org.apache.hadoop.hbase.master.janitor.MetaFixer;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.namequeues.BalancerDecisionDetails;
import org.apache.hadoop.hbase.namequeues.BalancerRejectionDetails;
import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest;
import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
import org.apache.hadoop.hbase.procedure2.LockType;
import org.apache.hadoop.hbase.procedure2.LockedResource;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.AccessChecker.InputUser;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.security.access.PermissionStorage;
import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.security.visibility.VisibilityController;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.DNS;
import org.apache.hadoop.hbase.util.DNS.ServerType;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.ServiceDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
import org.apache.hbase.thirdparty.com.google.protobuf.Service;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.NamespaceQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.TableQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetConfiguredNamespacesAndTablesInRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetConfiguredNamespacesAndTablesInRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListTablesInRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListTablesInRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RenameRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RenameRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RecentLogs;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLiveRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLiveRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ClientMetaService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;

/**
 * Implements the master RPC services.
 */
@InterfaceAudience.Private
public class MasterRpcServices extends HBaseRpcServicesBase<HMaster>
  implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
  LockService.BlockingInterface, HbckService.BlockingInterface {

  private static final Logger LOG = LoggerFactory.getLogger(MasterRpcServices.class.getName());
  private static final Logger AUDITLOG =
    LoggerFactory.getLogger("SecurityLogger." + MasterRpcServices.class.getName());

  /** RPC scheduler to use for the master. */
  public static final String MASTER_RPC_SCHEDULER_FACTORY_CLASS =
    "hbase.master.rpc.scheduler.factory.class";

  /**
   * @return Subset of configuration to pass to initializing regionservers: e.g. the filesystem
   *         and root directory to use.
   */
  private RegionServerStartupResponse.Builder createConfigurationSubset() {
    RegionServerStartupResponse.Builder resp =
      addConfig(RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR);
    resp = addConfig(resp, "fs.defaultFS");
    return addConfig(resp, "hbase.master.info.port");
  }

  private RegionServerStartupResponse.Builder
    addConfig(final RegionServerStartupResponse.Builder resp, final String key) {
    NameStringPair.Builder entry =
      NameStringPair.newBuilder().setName(key).setValue(server.getConfiguration().get(key));
    resp.addMapEntries(entry.build());
    return resp;
  }

  public MasterRpcServices(HMaster m) throws IOException {
    super(m, m.getProcessName());
  }

  @Override
  protected boolean defaultReservoirEnabled() {
    return false;
  }

  @Override
  protected ServerType getDNSServerType() {
    return DNS.ServerType.MASTER;
  }

  @Override
  protected String getHostname(Configuration conf, String defaultHostname) {
    return conf.get("hbase.master.ipc.address", defaultHostname);
  }

  @Override
  protected String getPortConfigName() {
    return HConstants.MASTER_PORT;
  }

  @Override
  protected int getDefaultPort() {
    return HConstants.DEFAULT_MASTER_PORT;
  }

  @Override
  protected Class<?> getRpcSchedulerFactoryClass(Configuration conf) {
    return conf.getClass(MASTER_RPC_SCHEDULER_FACTORY_CLASS, SimpleRpcSchedulerFactory.class);
  }

  @Override
  protected PriorityFunction createPriority() {
    return new MasterAnnotationReadingPriorityFunction(this);
  }

  /**
   * Performs the following pre-checks, in order:
   * <ol>
   * <li>Master is initialized</li>
   * <li>Rpc caller has admin permissions</li>
   * </ol>
   * @param requestName name of the rpc request. Used in reporting failures to provide context.
   * @throws ServiceException if any of the above pre-checks fails.
   */
  private void rpcPreCheck(String requestName) throws ServiceException {
    try {
      server.checkInitialized();
      requirePermission(requestName, Permission.Action.ADMIN);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

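  /**
   * How {@link #switchBalancer(boolean, BalanceSwitchMode)} applies a switch flip: SYNC updates
   * the balancer state store while synchronized on the load balancer instance, ASYNC updates it
   * without taking that lock.
   */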
  enum BalanceSwitchMode {
    SYNC,
    ASYNC
  }

  /**
   * Sets the balancer switch to the given value, according to the given BalanceSwitchMode.
   * @param b    the new balancer switch value
   * @param mode the BalanceSwitchMode to use
   * @return the previous balancer switch value
   */
  boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException {
    boolean oldValue = server.loadBalancerStateStore.get();
    boolean newValue = b;
    try {
      if (server.cpHost != null) {
        server.cpHost.preBalanceSwitch(newValue);
      }
      if (mode == BalanceSwitchMode.SYNC) {
        synchronized (server.getLoadBalancer()) {
          server.loadBalancerStateStore.set(newValue);
        }
      } else {
        server.loadBalancerStateStore.set(newValue);
      }
      LOG.info(server.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue);
      if (server.cpHost != null) {
        server.cpHost.postBalanceSwitch(oldValue, newValue);
      }
      server.getLoadBalancer().updateBalancerStatus(newValue);
    } catch (IOException ioe) {
      LOG.warn("Error flipping balance switch", ioe);
    }
    return oldValue;
  }

  boolean synchronousBalanceSwitch(final boolean b) throws IOException {
    return switchBalancer(b, BalanceSwitchMode.SYNC);
  }
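
  // Illustrative sketch, not part of this class's logic: the switch above is typically driven
  // from the client side through the Admin API, assuming a standard connection setup, e.g.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //        Admin admin = conn.getAdmin()) {
  //     boolean previous = admin.balancerSwitch(false, true); // 'true' requests the synchronous flip
  //   }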

  /** Returns list of blocking services and their security info classes that this server supports */
  @Override
  protected List<BlockingServiceAndInterface> getServices() {
    List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
    bssi.add(new BlockingServiceAndInterface(MasterService.newReflectiveBlockingService(this),
      MasterService.BlockingInterface.class));
    bssi.add(
      new BlockingServiceAndInterface(RegionServerStatusService.newReflectiveBlockingService(this),
        RegionServerStatusService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this),
      LockService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(HbckService.newReflectiveBlockingService(this),
      HbckService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this),
      ClientMetaService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(AdminService.newReflectiveBlockingService(this),
      AdminService.BlockingInterface.class));
    return bssi;
  }
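
  // Note on the list above: besides the master-specific services, the master also exports
  // ClientMetaService (the endpoint used by the client connection registry) and AdminService,
  // so clients and tools can issue those RPCs directly against the master process.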

  void start(ZKWatcher zkWatcher) {
    internalStart(zkWatcher);
  }

  void stop() {
    internalStop();
  }

  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller,
    GetLastFlushedSequenceIdRequest request) throws ServiceException {
    try {
      server.checkServiceStarted();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    byte[] encodedRegionName = request.getRegionName().toByteArray();
    RegionStoreSequenceIds ids =
      server.getServerManager().getLastFlushedSequenceId(encodedRegionName);
    return ResponseConverter.buildGetLastFlushedSequenceIdResponse(ids);
  }

  @Override
  public RegionServerReportResponse regionServerReport(RpcController controller,
    RegionServerReportRequest request) throws ServiceException {
    try {
      server.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      ClusterStatusProtos.ServerLoad sl = request.getLoad();
      ServerName serverName = ProtobufUtil.toServerName(request.getServer());
      ServerMetrics oldLoad = server.getServerManager().getLoad(serverName);
      ServerMetrics newLoad =
        ServerMetricsBuilder.toServerMetrics(serverName, versionNumber, version, sl);
      server.getServerManager().regionServerReport(serverName, newLoad);
      server.getAssignmentManager().reportOnlineRegions(serverName,
        newLoad.getRegionMetrics().keySet());
      if (sl != null && server.metricsMaster != null) {
        // Up our metrics.
        server.metricsMaster.incrementRequests(
          sl.getTotalNumberOfRequests() - (oldLoad != null ? oldLoad.getRequestCount() : 0));
        server.metricsMaster.incrementReadRequests(
          sl.getReadRequestsCount() - (oldLoad != null ? oldLoad.getReadRequestsCount() : 0));
        server.metricsMaster.incrementWriteRequests(
          sl.getWriteRequestsCount() - (oldLoad != null ? oldLoad.getWriteRequestsCount() : 0));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return RegionServerReportResponse.newBuilder().build();
  }

  @Override
  public RegionServerStartupResponse regionServerStartup(RpcController controller,
    RegionServerStartupRequest request) throws ServiceException {
    // Register with server manager
    try {
      server.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      InetAddress ia = server.getRemoteInetAddress(request.getPort(), request.getServerStartCode());
      // If the regionserver passed a hostname to use,
      // then use it instead of doing a reverse DNS lookup
      ServerName rs =
        server.getServerManager().regionServerStartup(request, versionNumber, version, ia);

      // Send back some config info
      RegionServerStartupResponse.Builder resp = createConfigurationSubset();
      NameStringPair.Builder entry = NameStringPair.newBuilder()
        .setName(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER).setValue(rs.getHostname());
      resp.addMapEntries(entry.build());

      return resp.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public ReportRSFatalErrorResponse reportRSFatalError(RpcController controller,
    ReportRSFatalErrorRequest request) throws ServiceException {
    String errorText = request.getErrorMessage();
    ServerName sn = ProtobufUtil.toServerName(request.getServer());
    String msg = sn + " reported a fatal error:\n" + errorText;
    LOG.warn(msg);
    server.rsFatals.add(msg);
    return ReportRSFatalErrorResponse.newBuilder().build();
  }

  @Override
  public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req)
    throws ServiceException {
    try {
      long procId = server.addColumn(ProtobufUtil.toTableName(req.getTableName()),
        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(),
        req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId
        return AddColumnResponse.newBuilder().build();
      } else {
        return AddColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public AssignRegionResponse assignRegion(RpcController controller, AssignRegionRequest req)
    throws ServiceException {
    try {
      server.checkInitialized();

      final RegionSpecifierType type = req.getRegion().getType();
      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("assignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
          + " actual: " + type);
      }

      final byte[] regionName = req.getRegion().getValue().toByteArray();
      final RegionInfo regionInfo = server.getAssignmentManager().getRegionInfo(regionName);
      if (regionInfo == null) {
        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
      }

      final AssignRegionResponse arr = AssignRegionResponse.newBuilder().build();
      if (server.cpHost != null) {
        server.cpHost.preAssign(regionInfo);
      }
      LOG.info(server.getClientIdAuditPrefix() + " assign " + regionInfo.getRegionNameAsString());
      server.getAssignmentManager().assign(regionInfo);
      if (server.cpHost != null) {
        server.cpHost.postAssign(regionInfo);
      }
      return arr;
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public MasterProtos.BalanceResponse balance(RpcController controller,
    MasterProtos.BalanceRequest request) throws ServiceException {
    try {
      return ProtobufUtil.toBalanceResponse(server.balance(ProtobufUtil.toBalanceRequest(request)));
    } catch (IOException ex) {
      throw new ServiceException(ex);
    }
  }

  @Override
  public CreateNamespaceResponse createNamespace(RpcController controller,
    CreateNamespaceRequest request) throws ServiceException {
    try {
      long procId =
        server.createNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
          request.getNonceGroup(), request.getNonce());
      return CreateNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
    throws ServiceException {
    TableDescriptor tableDescriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema());
    byte[][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
    try {
      long procId =
        server.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
      LOG.info(server.getClientIdAuditPrefix() + " procedure request for creating table: "
        + req.getTableSchema().getTableName() + " procId is: " + procId);
      return CreateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req)
    throws ServiceException {
    try {
      long procId = server.deleteColumn(ProtobufUtil.toTableName(req.getTableName()),
        req.getColumnName().toByteArray(), req.getNonceGroup(), req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId
787        return DeleteColumnResponse.newBuilder().build();
788      } else {
789        return DeleteColumnResponse.newBuilder().setProcId(procId).build();
790      }
791    } catch (IOException ioe) {
792      throw new ServiceException(ioe);
793    }
794  }
795
796  @Override
797  public DeleteNamespaceResponse deleteNamespace(RpcController controller,
798    DeleteNamespaceRequest request) throws ServiceException {
799    try {
800      long procId = server.deleteNamespace(request.getNamespaceName(), request.getNonceGroup(),
801        request.getNonce());
802      return DeleteNamespaceResponse.newBuilder().setProcId(procId).build();
803    } catch (IOException e) {
804      throw new ServiceException(e);
805    }
806  }
807
808  /**
809   * Execute Delete Snapshot operation.
810   * @return DeleteSnapshotResponse (a protobuf wrapped void) if the snapshot existed and was
811   *         deleted properly.
812   * @throws ServiceException wrapping SnapshotDoesNotExistException if specified snapshot did not
813   *                          exist.
814   */
815  @Override
816  public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
817    DeleteSnapshotRequest request) throws ServiceException {
818    try {
819      server.checkInitialized();
820      server.snapshotManager.checkSnapshotSupport();
821
822      LOG.info(server.getClientIdAuditPrefix() + " delete " + request.getSnapshot());
823      server.snapshotManager.deleteSnapshot(request.getSnapshot());
824      return DeleteSnapshotResponse.newBuilder().build();
825    } catch (IOException e) {
826      throw new ServiceException(e);
827    }
828  }
829
830  @Override
831  public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request)
832    throws ServiceException {
833    try {
834      long procId = server.deleteTable(ProtobufUtil.toTableName(request.getTableName()),
835        request.getNonceGroup(), request.getNonce());
836      return DeleteTableResponse.newBuilder().setProcId(procId).build();
837    } catch (IOException ioe) {
838      throw new ServiceException(ioe);
839    }
840  }
841
842  @Override
843  public TruncateTableResponse truncateTable(RpcController controller, TruncateTableRequest request)
844    throws ServiceException {
845    try {
846      long procId = server.truncateTable(ProtobufUtil.toTableName(request.getTableName()),
847        request.getPreserveSplits(), request.getNonceGroup(), request.getNonce());
848      return TruncateTableResponse.newBuilder().setProcId(procId).build();
849    } catch (IOException ioe) {
850      throw new ServiceException(ioe);
851    }
852  }
853
854  @Override
855  public DisableTableResponse disableTable(RpcController controller, DisableTableRequest request)
856    throws ServiceException {
857    try {
858      long procId = server.disableTable(ProtobufUtil.toTableName(request.getTableName()),
859        request.getNonceGroup(), request.getNonce());
860      return DisableTableResponse.newBuilder().setProcId(procId).build();
861    } catch (IOException ioe) {
862      throw new ServiceException(ioe);
863    }
864  }
865
866  @Override
867  public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
868    EnableCatalogJanitorRequest req) throws ServiceException {
869    rpcPreCheck("enableCatalogJanitor");
870    return EnableCatalogJanitorResponse.newBuilder()
871      .setPrevValue(server.catalogJanitorChore.setEnabled(req.getEnable())).build();
872  }
873
874  @Override
875  public SetCleanerChoreRunningResponse setCleanerChoreRunning(RpcController c,
876    SetCleanerChoreRunningRequest req) throws ServiceException {
877    rpcPreCheck("setCleanerChoreRunning");
878
879    boolean prevValue =
880      server.getLogCleaner().getEnabled() && server.getHFileCleaner().getEnabled();
881    server.getLogCleaner().setEnabled(req.getOn());
882    for (HFileCleaner hFileCleaner : server.getHFileCleaners()) {
883      hFileCleaner.setEnabled(req.getOn());
884    }
885    return SetCleanerChoreRunningResponse.newBuilder().setPrevValue(prevValue).build();
886  }
887
888  @Override
889  public EnableTableResponse enableTable(RpcController controller, EnableTableRequest request)
890    throws ServiceException {
891    try {
892      long procId = server.enableTable(ProtobufUtil.toTableName(request.getTableName()),
893        request.getNonceGroup(), request.getNonce());
894      return EnableTableResponse.newBuilder().setProcId(procId).build();
895    } catch (IOException ioe) {
896      throw new ServiceException(ioe);
897    }
898  }
899
900  @Override
901  public MergeTableRegionsResponse mergeTableRegions(RpcController c,
902    MergeTableRegionsRequest request) throws ServiceException {
903    try {
904      server.checkInitialized();
905    } catch (IOException ioe) {
906      throw new ServiceException(ioe);
907    }
908
909    RegionStates regionStates = server.getAssignmentManager().getRegionStates();
910
911    RegionInfo[] regionsToMerge = new RegionInfo[request.getRegionCount()];
912    for (int i = 0; i < request.getRegionCount(); i++) {
913      final byte[] encodedNameOfRegion = request.getRegion(i).getValue().toByteArray();
914      if (request.getRegion(i).getType() != RegionSpecifierType.ENCODED_REGION_NAME) {
915        LOG.warn("MergeRegions specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
916          + " actual: region " + i + " = " + request.getRegion(i).getType());
917      }
918      RegionState regionState = regionStates.getRegionState(Bytes.toString(encodedNameOfRegion));
919      if (regionState == null) {
920        throw new ServiceException(
921          new UnknownRegionException(Bytes.toStringBinary(encodedNameOfRegion)));
922      }
923      regionsToMerge[i] = regionState.getRegion();
924    }
925
926    try {
927      long procId = server.mergeRegions(regionsToMerge, request.getForcible(),
928        request.getNonceGroup(), request.getNonce());
929      return MergeTableRegionsResponse.newBuilder().setProcId(procId).build();
930    } catch (IOException ioe) {
931      throw new ServiceException(ioe);
932    }
933  }
934
935  @Override
936  public SplitTableRegionResponse splitRegion(final RpcController controller,
937    final SplitTableRegionRequest request) throws ServiceException {
938    try {
939      long procId = server.splitRegion(ProtobufUtil.toRegionInfo(request.getRegionInfo()),
940        request.hasSplitRow() ? request.getSplitRow().toByteArray() : null, request.getNonceGroup(),
941        request.getNonce());
942      return SplitTableRegionResponse.newBuilder().setProcId(procId).build();
943    } catch (IOException ie) {
944      throw new ServiceException(ie);
945    }
946  }
947
948  @Override
949  public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller,
950    final ClientProtos.CoprocessorServiceRequest request) throws ServiceException {
951    rpcPreCheck("execMasterService");
952    try {
953      ServerRpcController execController = new ServerRpcController();
954      ClientProtos.CoprocessorServiceCall call = request.getCall();
955      String serviceName = call.getServiceName();
956      String methodName = call.getMethodName();
957      if (!server.coprocessorServiceHandlers.containsKey(serviceName)) {
958        throw new UnknownProtocolException(null,
959          "No registered Master Coprocessor Endpoint found for " + serviceName
960            + ". Has it been enabled?");
961      }
962
963      Service service = server.coprocessorServiceHandlers.get(serviceName);
964      ServiceDescriptor serviceDesc = service.getDescriptorForType();
965      MethodDescriptor methodDesc =
966        CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc);
967
968      Message execRequest = CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest());
969      final Message.Builder responseBuilder =
970        service.getResponsePrototype(methodDesc).newBuilderForType();
971      service.callMethod(methodDesc, execController, execRequest, (message) -> {
972        if (message != null) {
973          responseBuilder.mergeFrom(message);
974        }
975      });
976      Message execResult = responseBuilder.build();
977      if (execController.getFailedOn() != null) {
978        throw execController.getFailedOn();
979      }
980
981      String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
982      User caller = RpcServer.getRequestUser().orElse(null);
983      AUDITLOG.info("User {} (remote address: {}) master service request for {}.{}", caller,
984        remoteAddress, serviceName, methodName);
985
986      return CoprocessorRpcUtils.getResponse(execResult, HConstants.EMPTY_BYTE_ARRAY);
987    } catch (IOException ie) {
988      throw new ServiceException(ie);
989    }
990  }
991
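  // A minimal client-side sketch of driving the execProcedure RPC below through the public Admin
  // API. The procedure signature ("flush-table-proc"), instance name and properties are assumed
  // example values; which signatures exist depends on the MasterProcedureManagers installed.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //       Admin admin = conn.getAdmin()) {
  //     admin.execProcedure("flush-table-proc", "myTable", new HashMap<>());
  //   }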
992  /**
993   * Triggers an asynchronous attempt to run a distributed procedure. {@inheritDoc}
994   */
995  @Override
996  public ExecProcedureResponse execProcedure(RpcController controller, ExecProcedureRequest request)
997    throws ServiceException {
998    try {
999      server.checkInitialized();
1000      ProcedureDescription desc = request.getProcedure();
1001      MasterProcedureManager mpm =
1002        server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
1003      if (mpm == null) {
1004        throw new ServiceException(
1005          new DoNotRetryIOException("The procedure is not registered: " + desc.getSignature()));
1006      }
1007      LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
1008      mpm.checkPermissions(desc, getAccessChecker(), RpcServer.getRequestUser().orElse(null));
1009      mpm.execProcedure(desc);
1010      // send back the max amount of time the client should wait for the procedure
1011      // to complete
1012      long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME;
1013      return ExecProcedureResponse.newBuilder().setExpectedTimeout(waitTime).build();
1014    } catch (ForeignException e) {
1015      throw new ServiceException(e.getCause());
1016    } catch (IOException e) {
1017      throw new ServiceException(e);
1018    }
1019  }
1020
1021  /**
1022   * Triggers a synchronous attempt to run a distributed procedure and sets return data in response.
1023   * {@inheritDoc}
1024   */
1025  @Override
1026  public ExecProcedureResponse execProcedureWithRet(RpcController controller,
1027    ExecProcedureRequest request) throws ServiceException {
1028    rpcPreCheck("execProcedureWithRet");
1029    try {
1030      ProcedureDescription desc = request.getProcedure();
1031      MasterProcedureManager mpm =
1032        server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
1033      if (mpm == null) {
1034        throw new ServiceException("The procedure is not registered: " + desc.getSignature());
1035      }
1036      LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
1037      byte[] data = mpm.execProcedureWithRet(desc);
1038      ExecProcedureResponse.Builder builder = ExecProcedureResponse.newBuilder();
1039      // set return data if available
1040      if (data != null) {
1041        builder.setReturnData(UnsafeByteOperations.unsafeWrap(data));
1042      }
1043      return builder.build();
1044    } catch (IOException e) {
1045      throw new ServiceException(e);
1046    }
1047  }
1048
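  // Illustrative (non-authoritative) client-side counterpart of the getClusterStatus RPC below,
  // using the public Admin API and requesting only the metrics it needs:
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //       Admin admin = conn.getAdmin()) {
  //     ClusterMetrics metrics =
  //       admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS));
  //     metrics.getLiveServerMetrics().keySet().forEach(sn -> System.out.println("live: " + sn));
  //   }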
1049  @Override
1050  public GetClusterStatusResponse getClusterStatus(RpcController controller,
1051    GetClusterStatusRequest req) throws ServiceException {
1052    GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
1053    try {
1054      // We used to check if Master was up at this point, but now we let this call proceed even
1055      // if Master is still initializing... otherwise we shut out tools like hbck2 from making
1056      // progress, since hbck2 queries this method to figure out the cluster version. hbck2 needs
1057      // to be able to work against a Master that is still 'initializing' so it can do fixup.
1058      response.setClusterStatus(ClusterMetricsBuilder.toClusterStatus(
1059        server.getClusterMetrics(ClusterMetricsBuilder.toOptions(req.getOptionsList()))));
1060    } catch (IOException e) {
1061      throw new ServiceException(e);
1062    }
1063    return response.build();
1064  }
1065
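  // A small usage sketch for the getCompletedSnapshots RPC below; Admin#listSnapshots() surfaces
  // only completed snapshots, mirroring this handler:
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //       Admin admin = conn.getAdmin()) {
  //     for (org.apache.hadoop.hbase.client.SnapshotDescription sd : admin.listSnapshots()) {
  //       System.out.println(sd.getName() + " -> " + sd.getTableName());
  //     }
  //   }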
1066  /**
1067   * List the currently available/stored snapshots. Any in-progress snapshots are ignored.
1068   */
1069  @Override
1070  public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller,
1071    GetCompletedSnapshotsRequest request) throws ServiceException {
1072    try {
1073      server.checkInitialized();
1074      GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder();
1075      List<SnapshotDescription> snapshots = server.snapshotManager.getCompletedSnapshots();
1076
1077      // convert to protobuf
1078      for (SnapshotDescription snapshot : snapshots) {
1079        builder.addSnapshots(snapshot);
1080      }
1081      return builder.build();
1082    } catch (IOException e) {
1083      throw new ServiceException(e);
1084    }
1085  }
1086
1087  @Override
1088  public ListNamespacesResponse listNamespaces(RpcController controller,
1089    ListNamespacesRequest request) throws ServiceException {
1090    try {
1091      return ListNamespacesResponse.newBuilder().addAllNamespaceName(server.listNamespaces())
1092        .build();
1093    } catch (IOException e) {
1094      throw new ServiceException(e);
1095    }
1096  }
1097
1098  @Override
1099  public GetNamespaceDescriptorResponse getNamespaceDescriptor(RpcController controller,
1100    GetNamespaceDescriptorRequest request) throws ServiceException {
1101    try {
1102      return GetNamespaceDescriptorResponse.newBuilder()
1103        .setNamespaceDescriptor(
1104          ProtobufUtil.toProtoNamespaceDescriptor(server.getNamespace(request.getNamespaceName())))
1105        .build();
1106    } catch (IOException e) {
1107      throw new ServiceException(e);
1108    }
1109  }
1110
1111  /**
1112   * Get the number of regions of the table that have been updated by the alter.
1113   * @return Pair describing the progress of the alter: Pair.getFirst() is the number of regions
1114   *         yet to be updated; Pair.getSecond() is the total number of regions of the table.
1115   */
1116  @Override
1117  public GetSchemaAlterStatusResponse getSchemaAlterStatus(RpcController controller,
1118    GetSchemaAlterStatusRequest req) throws ServiceException {
1119    // TODO: currently, we query using the table name on the client side. This
1120    // may overlap with other table operations or the table operation may
1121    // have completed before querying this API. We need to refactor to a
1122    // transaction system in the future to avoid these ambiguities.
1123    TableName tableName = ProtobufUtil.toTableName(req.getTableName());
1124
1125    try {
1126      server.checkInitialized();
1127      Pair<Integer, Integer> pair = server.getAssignmentManager().getReopenStatus(tableName);
1128      GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder();
1129      ret.setYetToUpdateRegions(pair.getFirst());
1130      ret.setTotalRegions(pair.getSecond());
1131      return ret.build();
1132    } catch (IOException ioe) {
1133      throw new ServiceException(ioe);
1134    }
1135  }
1136
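  // Illustrative client-side counterpart of the getTableDescriptors RPC below (a sketch only; the
  // regex and the includeSysTables flag are example inputs):
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //       Admin admin = conn.getAdmin()) {
  //     List<TableDescriptor> tds = admin.listTableDescriptors(Pattern.compile("web.*"), false);
  //   }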
1137  /**
1138   * Get list of TableDescriptors for requested tables.
1139   * @param c   Unused (set to null).
1140   * @param req GetTableDescriptorsRequest that contains the requested table names; if the list
1141   *            is empty, all tables are requested.
1142   */
1143  @Override
1144  public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
1145    GetTableDescriptorsRequest req) throws ServiceException {
1146    try {
1147      server.checkInitialized();
1148
1149      final String regex = req.hasRegex() ? req.getRegex() : null;
1150      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
1151      List<TableName> tableNameList = null;
1152      if (req.getTableNamesCount() > 0) {
1153        tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
1154        for (HBaseProtos.TableName tableNamePB : req.getTableNamesList()) {
1155          tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
1156        }
1157      }
1158
1159      List<TableDescriptor> descriptors =
1160        server.listTableDescriptors(namespace, regex, tableNameList, req.getIncludeSysTables());
1161
1162      GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
1163      if (descriptors != null && descriptors.size() > 0) {
1164        // Add the table descriptors to the response
1165        for (TableDescriptor htd : descriptors) {
1166          builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
1167        }
1168      }
1169      return builder.build();
1170    } catch (IOException ioe) {
1171      throw new ServiceException(ioe);
1172    }
1173  }
1174
1175  @Override
1176  public ListTableDescriptorsByStateResponse listTableDescriptorsByState(RpcController controller,
1177    ListTableDescriptorsByStateRequest request) throws ServiceException {
1178    try {
1179      server.checkInitialized();
1180      List<TableDescriptor> descriptors = server.listTableDescriptors(null, null, null, false);
1181
1182      ListTableDescriptorsByStateResponse.Builder builder =
1183        ListTableDescriptorsByStateResponse.newBuilder();
1184      if (descriptors != null && descriptors.size() > 0) {
1185        // Add the table descriptors matching the requested state to the response
1186        TableState.State state =
1187          request.getIsEnabled() ? TableState.State.ENABLED : TableState.State.DISABLED;
1188        for (TableDescriptor htd : descriptors) {
1189          if (server.getTableStateManager().isTableState(htd.getTableName(), state)) {
1190            builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
1191          }
1192        }
1193      }
1194      return builder.build();
1195    } catch (IOException ioe) {
1196      throw new ServiceException(ioe);
1197    }
1198  }
1199
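  // A usage sketch for the getTableNames RPC below via the public Admin API (the regex and flag are
  // example values, not prescriptions):
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //       Admin admin = conn.getAdmin()) {
  //     TableName[] names = admin.listTableNames(Pattern.compile("my_ns:.*"), false);
  //   }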
1200  /**
1201   * Get the list of userspace table names.
1202   * @param controller Unused (set to null).
1203   * @param req        GetTableNamesRequest
1204   */
1205  @Override
1206  public GetTableNamesResponse getTableNames(RpcController controller, GetTableNamesRequest req)
1207    throws ServiceException {
1208    try {
1209      server.checkServiceStarted();
1210
1211      final String regex = req.hasRegex() ? req.getRegex() : null;
1212      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
1213      List<TableName> tableNames =
1214        server.listTableNames(namespace, regex, req.getIncludeSysTables());
1215
1216      GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
1217      if (tableNames != null && tableNames.size() > 0) {
1218        // Add the table names to the response
1219        for (TableName table : tableNames) {
1220          builder.addTableNames(ProtobufUtil.toProtoTableName(table));
1221        }
1222      }
1223      return builder.build();
1224    } catch (IOException e) {
1225      throw new ServiceException(e);
1226    }
1227  }
1228
1229  @Override
1230  public ListTableNamesByStateResponse listTableNamesByState(RpcController controller,
1231    ListTableNamesByStateRequest request) throws ServiceException {
1232    try {
1233      server.checkServiceStarted();
1234      List<TableName> tableNames = server.listTableNames(null, null, false);
1235      ListTableNamesByStateResponse.Builder builder = ListTableNamesByStateResponse.newBuilder();
1236      if (tableNames != null && tableNames.size() > 0) {
1237        // Add the table names matching the requested state to the response
1238        TableState.State state =
1239          request.getIsEnabled() ? TableState.State.ENABLED : TableState.State.DISABLED;
1240        for (TableName table : tableNames) {
1241          if (server.getTableStateManager().isTableState(table, state)) {
1242            builder.addTableNames(ProtobufUtil.toProtoTableName(table));
1243          }
1244        }
1245      }
1246      return builder.build();
1247    } catch (IOException e) {
1248      throw new ServiceException(e);
1249    }
1250  }
1251
1252  @Override
1253  public GetTableStateResponse getTableState(RpcController controller, GetTableStateRequest request)
1254    throws ServiceException {
1255    try {
1256      server.checkServiceStarted();
1257      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
1258      TableState ts = server.getTableStateManager().getTableState(tableName);
1259      GetTableStateResponse.Builder builder = GetTableStateResponse.newBuilder();
1260      builder.setTableState(ts.convert());
1261      return builder.build();
1262    } catch (IOException e) {
1263      throw new ServiceException(e);
1264    }
1265  }
1266
1267  @Override
1268  public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
1269    IsCatalogJanitorEnabledRequest req) throws ServiceException {
1270    return IsCatalogJanitorEnabledResponse.newBuilder().setValue(server.isCatalogJanitorEnabled())
1271      .build();
1272  }
1273
1274  @Override
1275  public IsCleanerChoreEnabledResponse isCleanerChoreEnabled(RpcController c,
1276    IsCleanerChoreEnabledRequest req) throws ServiceException {
1277    return IsCleanerChoreEnabledResponse.newBuilder().setValue(server.isCleanerChoreEnabled())
1278      .build();
1279  }
1280
1281  @Override
1282  public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req)
1283    throws ServiceException {
1284    try {
1285      server.checkServiceStarted();
1286      return IsMasterRunningResponse.newBuilder().setIsMasterRunning(!server.isStopped()).build();
1287    } catch (IOException e) {
1288      throw new ServiceException(e);
1289    }
1290  }
1291
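  // Sketch of how a client typically pairs execProcedure with the isProcedureDone RPC below, given
  // an org.apache.hadoop.hbase.client.Admin handle; signature/instance strings are assumed examples:
  //
  //   Map<String, String> props = new HashMap<>();
  //   admin.execProcedure("flush-table-proc", "myTable", props);
  //   while (!admin.isProcedureFinished("flush-table-proc", "myTable", props)) {
  //     Thread.sleep(1000);
  //   }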
1292  /**
1293   * Checks if the specified procedure is done.
1294   * @return true if the procedure is done, false if the procedure is in the process of completing
1295   * @throws ServiceException if the procedure is invalid, or if it failed, with the failure reason.
1296   */
1297  @Override
1298  public IsProcedureDoneResponse isProcedureDone(RpcController controller,
1299    IsProcedureDoneRequest request) throws ServiceException {
1300    try {
1301      server.checkInitialized();
1302      ProcedureDescription desc = request.getProcedure();
1303      MasterProcedureManager mpm =
1304        server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
1305      if (mpm == null) {
1306        throw new ServiceException("The procedure is not registered: " + desc.getSignature());
1307      }
1308      LOG.debug("Checking to see if procedure from request: " + desc.getSignature() + " is done");
1309
1310      IsProcedureDoneResponse.Builder builder = IsProcedureDoneResponse.newBuilder();
1311      boolean done = mpm.isProcedureDone(desc);
1312      builder.setDone(done);
1313      return builder.build();
1314    } catch (ForeignException e) {
1315      throw new ServiceException(e.getCause());
1316    } catch (IOException e) {
1317      throw new ServiceException(e);
1318    }
1319  }
1320
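  // Client-side polling sketch for the isSnapshotDone RPC below, assuming the async-snapshot plus
  // poll pattern of the Admin API (snapshot and table names are examples):
  //
  //   SnapshotDescription desc = new SnapshotDescription("snap_1", TableName.valueOf("myTable"));
  //   admin.snapshotAsync(desc);
  //   while (!admin.isSnapshotFinished(desc)) {
  //     Thread.sleep(1000);
  //   }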
1321  /**
1322   * Checks if the specified snapshot is done.
1323   * @return true if the snapshot is in the file system and ready to use, false if the snapshot
1324   *         is still in the process of completing
1325   * @throws ServiceException wrapping an UnknownSnapshotException if the snapshot is unknown, or
1326   *                          a wrapped HBaseSnapshotException carrying the failure reason.
1327   */
1328  @Override
1329  public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
1330    IsSnapshotDoneRequest request) throws ServiceException {
1331    LOG.debug("Checking to see if snapshot from request: "
1332      + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()) + " is done");
1333    try {
1334      server.checkInitialized();
1335      IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
1336      boolean done = server.snapshotManager.isSnapshotDone(request.getSnapshot());
1337      builder.setDone(done);
1338      return builder.build();
1339    } catch (ForeignException e) {
1340      throw new ServiceException(e.getCause());
1341    } catch (IOException e) {
1342      throw new ServiceException(e);
1343    }
1344  }
1345
1346  @Override
1347  public GetProcedureResultResponse getProcedureResult(RpcController controller,
1348    GetProcedureResultRequest request) throws ServiceException {
1349    LOG.debug("Checking to see if procedure is done pid=" + request.getProcId());
1350    try {
1351      server.checkInitialized();
1352      GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder();
1353      long procId = request.getProcId();
1354      ProcedureExecutor<?> executor = server.getMasterProcedureExecutor();
1355      Procedure<?> result = executor.getResultOrProcedure(procId);
1356      if (result != null) {
1357        builder.setSubmittedTime(result.getSubmittedTime());
1358        builder.setLastUpdate(result.getLastUpdate());
1359        if (executor.isFinished(procId)) {
1360          builder.setState(GetProcedureResultResponse.State.FINISHED);
1361          if (result.isFailed()) {
1362            IOException exception = MasterProcedureUtil.unwrapRemoteIOException(result);
1363            builder.setException(ForeignExceptionUtil.toProtoForeignException(exception));
1364          }
1365          byte[] resultData = result.getResult();
1366          if (resultData != null) {
1367            builder.setResult(UnsafeByteOperations.unsafeWrap(resultData));
1368          }
1369          server.getMasterProcedureExecutor().removeResult(request.getProcId());
1370        } else {
1371          builder.setState(GetProcedureResultResponse.State.RUNNING);
1372        }
1373      } else {
1374        builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
1375      }
1376      return builder.build();
1377    } catch (IOException e) {
1378      throw new ServiceException(e);
1379    }
1380  }
1381
1382  @Override
1383  public AbortProcedureResponse abortProcedure(RpcController rpcController,
1384    AbortProcedureRequest request) throws ServiceException {
1385    try {
1386      AbortProcedureResponse.Builder response = AbortProcedureResponse.newBuilder();
1387      boolean abortResult =
1388        server.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning());
1389      response.setIsProcedureAborted(abortResult);
1390      return response.build();
1391    } catch (IOException e) {
1392      throw new ServiceException(e);
1393    }
1394  }
1395
1396  @Override
1397  public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c,
1398    ListNamespaceDescriptorsRequest request) throws ServiceException {
1399    try {
1400      ListNamespaceDescriptorsResponse.Builder response =
1401        ListNamespaceDescriptorsResponse.newBuilder();
1402      for (NamespaceDescriptor ns : server.getNamespaces()) {
1403        response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns));
1404      }
1405      return response.build();
1406    } catch (IOException e) {
1407      throw new ServiceException(e);
1408    }
1409  }
1410
1411  @Override
1412  public GetProceduresResponse getProcedures(RpcController rpcController,
1413    GetProceduresRequest request) throws ServiceException {
1414    try {
1415      final GetProceduresResponse.Builder response = GetProceduresResponse.newBuilder();
1416      for (Procedure<?> p : server.getProcedures()) {
1417        response.addProcedure(ProcedureUtil.convertToProtoProcedure(p));
1418      }
1419      return response.build();
1420    } catch (IOException e) {
1421      throw new ServiceException(e);
1422    }
1423  }
1424
1425  @Override
1426  public GetLocksResponse getLocks(RpcController controller, GetLocksRequest request)
1427    throws ServiceException {
1428    try {
1429      final GetLocksResponse.Builder builder = GetLocksResponse.newBuilder();
1430
1431      for (LockedResource lockedResource : server.getLocks()) {
1432        builder.addLock(ProcedureUtil.convertToProtoLockedResource(lockedResource));
1433      }
1434
1435      return builder.build();
1436    } catch (IOException e) {
1437      throw new ServiceException(e);
1438    }
1439  }
1440
1441  @Override
1442  public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
1443    ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
1444    try {
1445      ListTableDescriptorsByNamespaceResponse.Builder b =
1446        ListTableDescriptorsByNamespaceResponse.newBuilder();
1447      for (TableDescriptor htd : server
1448        .listTableDescriptorsByNamespace(request.getNamespaceName())) {
1449        b.addTableSchema(ProtobufUtil.toTableSchema(htd));
1450      }
1451      return b.build();
1452    } catch (IOException e) {
1453      throw new ServiceException(e);
1454    }
1455  }
1456
1457  @Override
1458  public ListTableNamesByNamespaceResponse listTableNamesByNamespace(RpcController c,
1459    ListTableNamesByNamespaceRequest request) throws ServiceException {
1460    try {
1461      ListTableNamesByNamespaceResponse.Builder b = ListTableNamesByNamespaceResponse.newBuilder();
1462      for (TableName tableName : server.listTableNamesByNamespace(request.getNamespaceName())) {
1463        b.addTableName(ProtobufUtil.toProtoTableName(tableName));
1464      }
1465      return b.build();
1466    } catch (IOException e) {
1467      throw new ServiceException(e);
1468    }
1469  }
1470
1471  @Override
1472  public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req)
1473    throws ServiceException {
1474    try {
1475      long procId = server.modifyColumn(ProtobufUtil.toTableName(req.getTableName()),
1476        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(),
1477        req.getNonce());
1478      if (procId == -1) {
1479        // This means the operation was not performed on the server, so do not set any procId
1480        return ModifyColumnResponse.newBuilder().build();
1481      } else {
1482        return ModifyColumnResponse.newBuilder().setProcId(procId).build();
1483      }
1484    } catch (IOException ioe) {
1485      throw new ServiceException(ioe);
1486    }
1487  }
1488
1489  @Override
1490  public ModifyColumnStoreFileTrackerResponse modifyColumnStoreFileTracker(RpcController controller,
1491    ModifyColumnStoreFileTrackerRequest req) throws ServiceException {
1492    try {
1493      long procId =
1494        server.modifyColumnStoreFileTracker(ProtobufUtil.toTableName(req.getTableName()),
1495          req.getFamily().toByteArray(), req.getDstSft(), req.getNonceGroup(), req.getNonce());
1496      return ModifyColumnStoreFileTrackerResponse.newBuilder().setProcId(procId).build();
1497    } catch (IOException ioe) {
1498      throw new ServiceException(ioe);
1499    }
1500  }
1501
1502  @Override
1503  public ModifyNamespaceResponse modifyNamespace(RpcController controller,
1504    ModifyNamespaceRequest request) throws ServiceException {
1505    try {
1506      long procId =
1507        server.modifyNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
1508          request.getNonceGroup(), request.getNonce());
1509      return ModifyNamespaceResponse.newBuilder().setProcId(procId).build();
1510    } catch (IOException e) {
1511      throw new ServiceException(e);
1512    }
1513  }
1514
1515  @Override
1516  public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req)
1517    throws ServiceException {
1518    try {
1519      long procId = server.modifyTable(ProtobufUtil.toTableName(req.getTableName()),
1520        ProtobufUtil.toTableDescriptor(req.getTableSchema()), req.getNonceGroup(), req.getNonce());
1521      return ModifyTableResponse.newBuilder().setProcId(procId).build();
1522    } catch (IOException ioe) {
1523      throw new ServiceException(ioe);
1524    }
1525  }
1526
1527  @Override
1528  public ModifyTableStoreFileTrackerResponse modifyTableStoreFileTracker(RpcController controller,
1529    ModifyTableStoreFileTrackerRequest req) throws ServiceException {
1530    try {
1531      long procId = server.modifyTableStoreFileTracker(ProtobufUtil.toTableName(req.getTableName()),
1532        req.getDstSft(), req.getNonceGroup(), req.getNonce());
1533      return ModifyTableStoreFileTrackerResponse.newBuilder().setProcId(procId).build();
1534    } catch (IOException ioe) {
1535      throw new ServiceException(ioe);
1536    }
1537  }
1538
1539  @Override
1540  public MoveRegionResponse moveRegion(RpcController controller, MoveRegionRequest req)
1541    throws ServiceException {
1542    final byte[] encodedRegionName = req.getRegion().getValue().toByteArray();
1543    RegionSpecifierType type = req.getRegion().getType();
1544    final byte[] destServerName = (req.hasDestServerName())
1545      ? Bytes.toBytes(ProtobufUtil.toServerName(req.getDestServerName()).getServerName())
1546      : null;
1547    MoveRegionResponse mrr = MoveRegionResponse.newBuilder().build();
1548
1549    if (type != RegionSpecifierType.ENCODED_REGION_NAME) {
1550      LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
1551        + " actual: " + type);
1552    }
1553
1554    try {
1555      server.checkInitialized();
1556      server.move(encodedRegionName, destServerName);
1557    } catch (IOException ioe) {
1558      throw new ServiceException(ioe);
1559    }
1560    return mrr;
1561  }
1562
1563  /**
1564   * Offline the specified region from the master's in-memory state. It will not attempt to
1565   * reassign the region as unassign does. This is a special method intended only for experts or hbck.
1566   */
1567  @Override
1568  public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request)
1569    throws ServiceException {
1570    try {
1571      server.checkInitialized();
1572
1573      final RegionSpecifierType type = request.getRegion().getType();
1574      if (type != RegionSpecifierType.REGION_NAME) {
1575        LOG.warn("offlineRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
1576          + " actual: " + type);
1577      }
1578
1579      final byte[] regionName = request.getRegion().getValue().toByteArray();
1580      final RegionInfo hri = server.getAssignmentManager().getRegionInfo(regionName);
1581      if (hri == null) {
1582        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
1583      }
1584
1585      if (server.cpHost != null) {
1586        server.cpHost.preRegionOffline(hri);
1587      }
1588      LOG.info(server.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString());
1589      server.getAssignmentManager().offlineRegion(hri);
1590      if (server.cpHost != null) {
1591        server.cpHost.postRegionOffline(hri);
1592      }
1593    } catch (IOException ioe) {
1594      throw new ServiceException(ioe);
1595    }
1596    return OfflineRegionResponse.newBuilder().build();
1597  }
1598
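  // Illustrative Admin-level usage of the restore/clone semantics documented below, given an Admin
  // handle (snapshot and table names are examples):
  //
  //   // Restore: the table exists and must be disabled first
  //   admin.disableTable(TableName.valueOf("myTable"));
  //   admin.restoreSnapshot("myTable_snap");
  //
  //   // Clone: the target table does not exist yet
  //   admin.cloneSnapshot("myTable_snap", TableName.valueOf("myTable_copy"));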
1599  /**
1600   * Execute Restore/Clone snapshot operation.
1601   * <p>
1602   * If the specified table exists, a "Restore" is executed, replacing the table schema and
1603   * directory data with the content of the snapshot. The table must be disabled, or an
1604   * UnsupportedOperationException will be thrown.
1605   * <p>
1606   * If the table doesn't exist, a "Clone" is executed: a new table is created using the schema
1607   * and the content captured at the time of the snapshot.
1608   * <p>
1609   * The restore/clone operation does not require copying HFiles. Since HFiles are immutable, the
1610   * table can point to and use the same files as the original one.
1611   */
1612  @Override
1613  public RestoreSnapshotResponse restoreSnapshot(RpcController controller,
1614    RestoreSnapshotRequest request) throws ServiceException {
1615    try {
1616      long procId = server.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(),
1617        request.getNonce(), request.getRestoreACL(), request.getCustomSFT());
1618      return RestoreSnapshotResponse.newBuilder().setProcId(procId).build();
1619    } catch (ForeignException e) {
1620      throw new ServiceException(e.getCause());
1621    } catch (IOException e) {
1622      throw new ServiceException(e);
1623    }
1624  }
1625
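  // A sketch of toggling TTL-based snapshot cleanup from a client, assuming a recent Admin API that
  // exposes snapshotCleanupSwitch; like this RPC, it returns the previous setting:
  //
  //   boolean previouslyEnabled = admin.snapshotCleanupSwitch(true, /* synchronous = */ true);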
1626  @Override
1627  public SetSnapshotCleanupResponse switchSnapshotCleanup(RpcController controller,
1628    SetSnapshotCleanupRequest request) throws ServiceException {
1629    try {
1630      server.checkInitialized();
1631      final boolean enabled = request.getEnabled();
1632      final boolean isSynchronous = request.hasSynchronous() && request.getSynchronous();
1633      final boolean prevSnapshotCleanupRunning = this.switchSnapshotCleanup(enabled, isSynchronous);
1634      return SetSnapshotCleanupResponse.newBuilder()
1635        .setPrevSnapshotCleanup(prevSnapshotCleanupRunning).build();
1636    } catch (IOException e) {
1637      throw new ServiceException(e);
1638    }
1639  }
1640
1641  @Override
1642  public IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(RpcController controller,
1643    IsSnapshotCleanupEnabledRequest request) throws ServiceException {
1644    try {
1645      server.checkInitialized();
1646      final boolean isSnapshotCleanupEnabled = server.snapshotCleanupStateStore.get();
1647      return IsSnapshotCleanupEnabledResponse.newBuilder().setEnabled(isSnapshotCleanupEnabled)
1648        .build();
1649    } catch (IOException e) {
1650      throw new ServiceException(e);
1651    }
1652  }
1653
1654  /**
1655   * Turn on/off snapshot auto-cleanup based on TTL
1656   * @param enabledNewVal Set to <code>true</code> to enable, <code>false</code> to disable
1657   * @param synchronous   If <code>true</code>, it waits until current snapshot cleanup is
1658   *                      completed, if outstanding
1659   * @return previous snapshot auto-cleanup mode
1660   */
1661  private synchronized boolean switchSnapshotCleanup(final boolean enabledNewVal,
1662    final boolean synchronous) throws IOException {
1663    final boolean oldValue = server.snapshotCleanupStateStore.get();
1664    server.switchSnapshotCleanup(enabledNewVal, synchronous);
1665    LOG.info("{} Successfully set snapshot cleanup to {}", server.getClientIdAuditPrefix(),
1666      enabledNewVal);
1667    return oldValue;
1668  }
1669
1670  @Override
1671  public RunCatalogScanResponse runCatalogScan(RpcController c, RunCatalogScanRequest req)
1672    throws ServiceException {
1673    rpcPreCheck("runCatalogScan");
1674    try {
1675      return ResponseConverter.buildRunCatalogScanResponse(this.server.catalogJanitorChore.scan());
1676    } catch (IOException ioe) {
1677      throw new ServiceException(ioe);
1678    }
1679  }
1680
1681  @Override
1682  public RunCleanerChoreResponse runCleanerChore(RpcController c, RunCleanerChoreRequest req)
1683    throws ServiceException {
1684    rpcPreCheck("runCleanerChore");
1685    try {
1686      CompletableFuture<Boolean> fileCleanerFuture = server.getHFileCleaner().triggerCleanerNow();
1687      CompletableFuture<Boolean> logCleanerFuture = server.getLogCleaner().triggerCleanerNow();
1688      boolean result = fileCleanerFuture.get() && logCleanerFuture.get();
1689      return ResponseConverter.buildRunCleanerChoreResponse(result);
1690    } catch (InterruptedException e) {
1691      throw new ServiceException(e);
1692    } catch (ExecutionException e) {
1693      throw new ServiceException(e.getCause());
1694    }
1695  }
1696
1697  @Override
1698  public SetBalancerRunningResponse setBalancerRunning(RpcController c,
1699    SetBalancerRunningRequest req) throws ServiceException {
1700    try {
1701      server.checkInitialized();
1702      boolean prevValue = (req.getSynchronous())
1703        ? synchronousBalanceSwitch(req.getOn())
1704        : server.balanceSwitch(req.getOn());
1705      return SetBalancerRunningResponse.newBuilder().setPrevBalanceValue(prevValue).build();
1706    } catch (IOException ioe) {
1707      throw new ServiceException(ioe);
1708    }
1709  }
1710
1711  @Override
1712  public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request)
1713    throws ServiceException {
1714    LOG.info(server.getClientIdAuditPrefix() + " shutdown");
1715    try {
1716      server.shutdown();
1717    } catch (IOException e) {
1718      LOG.error("Exception occurred in HMaster.shutdown()", e);
1719      throw new ServiceException(e);
1720    }
1721    return ShutdownResponse.newBuilder().build();
1722  }
1723
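  // Minimal client-side sketch of the snapshot RPC below (snapshot and table names are examples;
  // Admin#snapshot blocks until the snapshot completes or times out):
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //       Admin admin = conn.getAdmin()) {
  //     admin.snapshot("myTable_snap_1", TableName.valueOf("myTable"));
  //   }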
1724  /**
1725   * Triggers an asynchronous attempt to take a snapshot. {@inheritDoc}
1726   */
1727  @Override
1728  public SnapshotResponse snapshot(RpcController controller, SnapshotRequest request)
1729    throws ServiceException {
1730    try {
1731      server.checkInitialized();
1732      server.snapshotManager.checkSnapshotSupport();
1733
1734      LOG.info(server.getClientIdAuditPrefix() + " snapshot request for: "
1735        + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
1736      // get the snapshot information
1737      SnapshotDescription snapshot =
1738        SnapshotDescriptionUtils.validate(request.getSnapshot(), server.getConfiguration());
1739      // send back the max amount of time the client should wait for the snapshot to complete
1740      long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(server.getConfiguration(),
1741        snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
1742
1743      SnapshotResponse.Builder builder = SnapshotResponse.newBuilder().setExpectedTimeout(waitTime);
1744
1745      // If the snapshot request carries a nonce group and a nonce, the client can handle the
1746      // snapshot procedure's procId. If the snapshot procedure is enabled, we do the snapshot
1747      // work with proc-v2; otherwise we fall back to the zk-based procedure.
1748      if (
1749        request.hasNonceGroup() && request.hasNonce()
1750          && server.snapshotManager.snapshotProcedureEnabled()
1751      ) {
1752        long nonceGroup = request.getNonceGroup();
1753        long nonce = request.getNonce();
1754        long procId = server.snapshotManager.takeSnapshot(snapshot, nonceGroup, nonce);
1755        return builder.setProcId(procId).build();
1756      } else {
1757        server.snapshotManager.takeSnapshot(snapshot);
1758        return builder.build();
1759      }
1760    } catch (ForeignException e) {
1761      throw new ServiceException(e.getCause());
1762    } catch (IOException e) {
1763      throw new ServiceException(e);
1764    }
1765  }
1766
1767  @Override
1768  public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request)
1769    throws ServiceException {
1770    LOG.info(server.getClientIdAuditPrefix() + " stop");
1771    try {
1772      server.stopMaster();
1773    } catch (IOException e) {
1774      LOG.error("Exception occurred while stopping master", e);
1775      throw new ServiceException(e);
1776    }
1777    return StopMasterResponse.newBuilder().build();
1778  }
1779
1780  @Override
1781  public IsInMaintenanceModeResponse isMasterInMaintenanceMode(final RpcController controller,
1782    final IsInMaintenanceModeRequest request) throws ServiceException {
1783    IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder();
1784    response.setInMaintenanceMode(server.isInMaintenanceMode());
1785    return response.build();
1786  }
1787
1788  @Override
1789  public UnassignRegionResponse unassignRegion(RpcController controller, UnassignRegionRequest req)
1790    throws ServiceException {
1791    try {
1792      final byte[] regionName = req.getRegion().getValue().toByteArray();
1793      RegionSpecifierType type = req.getRegion().getType();
1794      UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build();
1795
1796      server.checkInitialized();
1797      if (type != RegionSpecifierType.REGION_NAME) {
1798        LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
1799          + " actual: " + type);
1800      }
1801      RegionStateNode rsn =
1802        server.getAssignmentManager().getRegionStates().getRegionStateNodeFromName(regionName);
1803      if (rsn == null) {
1804        throw new UnknownRegionException(Bytes.toString(regionName));
1805      }
1806
1807      RegionInfo hri = rsn.getRegionInfo();
1808      if (server.cpHost != null) {
1809        server.cpHost.preUnassign(hri);
1810      }
1811      LOG.debug(server.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString()
1812        + " in current location if it is online");
1813      server.getAssignmentManager().unassign(hri);
1814      if (server.cpHost != null) {
1815        server.cpHost.postUnassign(hri);
1816      }
1817
1818      return urr;
1819    } catch (IOException ioe) {
1820      throw new ServiceException(ioe);
1821    }
1822  }
1823
1824  @Override
1825  public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c,
1826    ReportRegionStateTransitionRequest req) throws ServiceException {
1827    try {
1828      server.checkServiceStarted();
1829      return server.getAssignmentManager().reportRegionStateTransition(req);
1830    } catch (IOException ioe) {
1831      throw new ServiceException(ioe);
1832    }
1833  }
1834
1835  @Override
1836  public SetQuotaResponse setQuota(RpcController c, SetQuotaRequest req) throws ServiceException {
1837    try {
1838      server.checkInitialized();
1839      return server.getMasterQuotaManager().setQuota(req);
1840    } catch (Exception e) {
1841      throw new ServiceException(e);
1842    }
1843  }
1844
1845  @Override
1846  public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
1847    MajorCompactionTimestampRequest request) throws ServiceException {
1848    MajorCompactionTimestampResponse.Builder response =
1849      MajorCompactionTimestampResponse.newBuilder();
1850    try {
1851      server.checkInitialized();
1852      response.setCompactionTimestamp(
1853        server.getLastMajorCompactionTimestamp(ProtobufUtil.toTableName(request.getTableName())));
1854    } catch (IOException e) {
1855      throw new ServiceException(e);
1856    }
1857    return response.build();
1858  }
1859
1860  @Override
1861  public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
1862    RpcController controller, MajorCompactionTimestampForRegionRequest request)
1863    throws ServiceException {
1864    MajorCompactionTimestampResponse.Builder response =
1865      MajorCompactionTimestampResponse.newBuilder();
1866    try {
1867      server.checkInitialized();
1868      response.setCompactionTimestamp(server
1869        .getLastMajorCompactionTimestampForRegion(request.getRegion().getValue().toByteArray()));
1870    } catch (IOException e) {
1871      throw new ServiceException(e);
1872    }
1873    return response.build();
1874  }
1875
1876  @Override
1877  public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller,
1878    IsBalancerEnabledRequest request) throws ServiceException {
1879    IsBalancerEnabledResponse.Builder response = IsBalancerEnabledResponse.newBuilder();
1880    response.setEnabled(server.isBalancerOn());
1881    return response.build();
1882  }
1883
1884  @Override
1885  public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller,
1886    SetSplitOrMergeEnabledRequest request) throws ServiceException {
1887    SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder();
1888    try {
1889      server.checkInitialized();
1890      boolean newValue = request.getEnabled();
1891      for (MasterProtos.MasterSwitchType masterSwitchType : request.getSwitchTypesList()) {
1892        MasterSwitchType switchType = convert(masterSwitchType);
1893        boolean oldValue = server.isSplitOrMergeEnabled(switchType);
1894        response.addPrevValue(oldValue);
1895        if (server.cpHost != null) {
1896          server.cpHost.preSetSplitOrMergeEnabled(newValue, switchType);
1897        }
1898        server.getSplitOrMergeStateStore().setSplitOrMergeEnabled(newValue, switchType);
1899        if (server.cpHost != null) {
1900          server.cpHost.postSetSplitOrMergeEnabled(newValue, switchType);
1901        }
1902      }
1903    } catch (IOException e) {
1904      throw new ServiceException(e);
1905    }
1906    return response.build();
1907  }
1908
1909  @Override
1910  public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller,
1911    IsSplitOrMergeEnabledRequest request) throws ServiceException {
1912    IsSplitOrMergeEnabledResponse.Builder response = IsSplitOrMergeEnabledResponse.newBuilder();
1913    response.setEnabled(server.isSplitOrMergeEnabled(convert(request.getSwitchType())));
1914    return response.build();
1915  }
1916
1917  @Override
1918  public NormalizeResponse normalize(RpcController controller, NormalizeRequest request)
1919    throws ServiceException {
1920    rpcPreCheck("normalize");
1921    try {
1922      final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
1923        .tableNames(ProtobufUtil.toTableNameList(request.getTableNamesList()))
1924        .regex(request.hasRegex() ? request.getRegex() : null)
1925        .namespace(request.hasNamespace() ? request.getNamespace() : null).build();
1926      return NormalizeResponse.newBuilder()
1927        // all API requests are considered priority requests.
1928        .setNormalizerRan(server.normalizeRegions(ntfp, true)).build();
1929    } catch (IOException ex) {
1930      throw new ServiceException(ex);
1931    }
1932  }
1933
1934  @Override
1935  public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller,
1936    SetNormalizerRunningRequest request) throws ServiceException {
1937    rpcPreCheck("setNormalizerRunning");
1938
1939    // Sets normalizer on/off flag in ZK.
1940    // TODO: this method is totally broken in terms of atomicity of actions and values read.
1941    // 1. The contract has this RPC returning the previous value. There isn't a ZKUtil method
1942    // that lets us retrieve the previous value as part of setting a new value, so we simply
1943    // perform a read before issuing the update. Thus we have a data race opportunity, between
1944    // when the `prevValue` is read and whatever is actually overwritten.
1945    // 2. Down in `setNormalizerOn`, the call to `createAndWatch` inside of the catch clause can
1946    // itself fail in the event that the znode already exists. Thus, another data race, between
1947    // when the initial `setData` call is notified of the absence of the target znode and the
1948    // subsequent `createAndWatch`, with another client creating said node.
1949    // That said, there's supposed to be only one active master and thus there's supposed to be
1950    // only one process with the authority to modify the value.
1951    final boolean prevValue = server.getRegionNormalizerManager().isNormalizerOn();
1952    final boolean newValue = request.getOn();
1953    try {
1954      server.getRegionNormalizerManager().setNormalizerOn(newValue);
1955    } catch (IOException e) {
1956      throw new ServiceException(e);
1957    }
1958    LOG.info("{} set normalizerSwitch={}", server.getClientIdAuditPrefix(), newValue);
1959    return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build();
1960  }
1961
1962  @Override
1963  public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller,
1964    IsNormalizerEnabledRequest request) {
1965    IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder();
1966    response.setEnabled(server.isNormalizerOn());
1967    return response.build();
1968  }
1969
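  // Client-side sketch of consuming the getSecurityCapabilities RPC below, given an Admin handle
  // (illustrative only):
  //
  //   List<SecurityCapability> caps = admin.getSecurityCapabilities();
  //   if (caps.contains(SecurityCapability.SECURE_AUTHENTICATION)) {
  //     // the cluster is running with secure (Kerberos) authentication
  //   }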
1970  /**
1971   * Returns the security capabilities in effect on the cluster.
1972   */
1973  @Override
1974  public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller,
1975    SecurityCapabilitiesRequest request) throws ServiceException {
1976    SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder();
1977    try {
1978      server.checkInitialized();
1979      Set<SecurityCapabilitiesResponse.Capability> capabilities = new HashSet<>();
1980      // Authentication
1981      if (User.isHBaseSecurityEnabled(server.getConfiguration())) {
1982        capabilities.add(SecurityCapabilitiesResponse.Capability.SECURE_AUTHENTICATION);
1983      } else {
1984        capabilities.add(SecurityCapabilitiesResponse.Capability.SIMPLE_AUTHENTICATION);
1985      }
1986      // A coprocessor that implements AccessControlService can provide AUTHORIZATION and
1987      // CELL_AUTHORIZATION
1988      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
1989        if (AccessChecker.isAuthorizationSupported(server.getConfiguration())) {
1990          capabilities.add(SecurityCapabilitiesResponse.Capability.AUTHORIZATION);
1991        }
1992        if (AccessController.isCellAuthorizationSupported(server.getConfiguration())) {
1993          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_AUTHORIZATION);
1994        }
1995      }
1996      // A coprocessor that implements VisibilityLabelsService can provide CELL_VISIBILITY.
1997      if (server.cpHost != null && hasVisibilityLabelsServiceCoprocessor(server.cpHost)) {
1998        if (VisibilityController.isCellAuthorizationSupported(server.getConfiguration())) {
1999          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_VISIBILITY);
2000        }
2001      }
2002      response.addAllCapabilities(capabilities);
2003    } catch (IOException e) {
2004      throw new ServiceException(e);
2005    }
2006    return response.build();
2007  }
2008
2009  /**
2010   * Determines if there is a MasterCoprocessor deployed which implements
2011   * {@link AccessControlService.Interface}.
2012   */
2013  boolean hasAccessControlServiceCoprocessor(MasterCoprocessorHost cpHost) {
2014    return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class),
2015      AccessControlService.Interface.class);
2016  }
2017
2018  /**
2019   * Determines if there is a MasterCoprocessor deployed which implements
2020   * {@link VisibilityLabelsService.Interface}.
2021   */
2022  boolean hasVisibilityLabelsServiceCoprocessor(MasterCoprocessorHost cpHost) {
2023    return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class),
2024      VisibilityLabelsService.Interface.class);
2025  }
2026
2027  /**
2028   * Determines if there is a coprocessor implementation in the provided argument which extends or
2029   * implements the provided {@code service}.
2030   */
2031  boolean checkCoprocessorWithService(List<MasterCoprocessor> coprocessorsToCheck,
2032    Class<?> service) {
2033    if (coprocessorsToCheck == null || coprocessorsToCheck.isEmpty()) {
2034      return false;
2035    }
2036    for (MasterCoprocessor cp : coprocessorsToCheck) {
2037      if (service.isAssignableFrom(cp.getClass())) {
2038        return true;
2039      }
2040    }
2041    return false;
2042  }
2043
2044  private MasterSwitchType convert(MasterProtos.MasterSwitchType switchType) {
2045    switch (switchType) {
2046      case SPLIT:
2047        return MasterSwitchType.SPLIT;
2048      case MERGE:
2049        return MasterSwitchType.MERGE;
2050      default:
2051        break;
2052    }
2053    return null;
2054  }
2055
2056  @Override
2057  public AddReplicationPeerResponse addReplicationPeer(RpcController controller,
2058    AddReplicationPeerRequest request) throws ServiceException {
2059    try {
2060      long procId = server.addReplicationPeer(request.getPeerId(),
2061        ReplicationPeerConfigUtil.convert(request.getPeerConfig()),
2062        request.getPeerState().getState().equals(ReplicationState.State.ENABLED));
2063      return AddReplicationPeerResponse.newBuilder().setProcId(procId).build();
2064    } catch (ReplicationException | IOException e) {
2065      throw new ServiceException(e);
2066    }
2067  }
2068
2069  @Override
2070  public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller,
2071    RemoveReplicationPeerRequest request) throws ServiceException {
2072    try {
2073      long procId = server.removeReplicationPeer(request.getPeerId());
2074      return RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build();
2075    } catch (ReplicationException | IOException e) {
2076      throw new ServiceException(e);
2077    }
2078  }
2079
2080  @Override
2081  public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller,
2082    EnableReplicationPeerRequest request) throws ServiceException {
2083    try {
2084      long procId = server.enableReplicationPeer(request.getPeerId());
2085      return EnableReplicationPeerResponse.newBuilder().setProcId(procId).build();
2086    } catch (ReplicationException | IOException e) {
2087      throw new ServiceException(e);
2088    }
2089  }
2090
2091  @Override
2092  public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller,
2093    DisableReplicationPeerRequest request) throws ServiceException {
2094    try {
2095      long procId = server.disableReplicationPeer(request.getPeerId());
2096      return DisableReplicationPeerResponse.newBuilder().setProcId(procId).build();
2097    } catch (ReplicationException | IOException e) {
2098      throw new ServiceException(e);
2099    }
2100  }
2101
2102  @Override
2103  public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller,
2104    GetReplicationPeerConfigRequest request) throws ServiceException {
2105    GetReplicationPeerConfigResponse.Builder response =
2106      GetReplicationPeerConfigResponse.newBuilder();
2107    try {
2108      String peerId = request.getPeerId();
2109      ReplicationPeerConfig peerConfig = server.getReplicationPeerConfig(peerId);
2110      response.setPeerId(peerId);
2111      response.setPeerConfig(ReplicationPeerConfigUtil.convert(peerConfig));
2112    } catch (ReplicationException | IOException e) {
2113      throw new ServiceException(e);
2114    }
2115    return response.build();
2116  }
2117
2118  @Override
2119  public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(RpcController controller,
2120    UpdateReplicationPeerConfigRequest request) throws ServiceException {
2121    try {
2122      long procId = server.updateReplicationPeerConfig(request.getPeerId(),
2123        ReplicationPeerConfigUtil.convert(request.getPeerConfig()));
2124      return UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build();
2125    } catch (ReplicationException | IOException e) {
2126      throw new ServiceException(e);
2127    }
2128  }
2129
2130  @Override
2131  public TransitReplicationPeerSyncReplicationStateResponse
2132    transitReplicationPeerSyncReplicationState(RpcController controller,
2133      TransitReplicationPeerSyncReplicationStateRequest request) throws ServiceException {
2134    try {
2135      long procId = server.transitReplicationPeerSyncReplicationState(request.getPeerId(),
2136        ReplicationPeerConfigUtil.toSyncReplicationState(request.getSyncReplicationState()));
2137      return TransitReplicationPeerSyncReplicationStateResponse.newBuilder().setProcId(procId)
2138        .build();
2139    } catch (ReplicationException | IOException e) {
2140      throw new ServiceException(e);
2141    }
2142  }
2143
2144  @Override
2145  public ListReplicationPeersResponse listReplicationPeers(RpcController controller,
2146    ListReplicationPeersRequest request) throws ServiceException {
2147    ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder();
2148    try {
2149      List<ReplicationPeerDescription> peers =
2150        server.listReplicationPeers(request.hasRegex() ? request.getRegex() : null);
2151      for (ReplicationPeerDescription peer : peers) {
2152        response.addPeerDesc(ReplicationPeerConfigUtil.toProtoReplicationPeerDescription(peer));
2153      }
2154    } catch (ReplicationException | IOException e) {
2155      throw new ServiceException(e);
2156    }
2157    return response.build();
2158  }
2159
2160  @Override
2161  public GetReplicationPeerStateResponse isReplicationPeerEnabled(RpcController controller,
2162    GetReplicationPeerStateRequest request) throws ServiceException {
2163    boolean isEnabled;
2164    try {
2165      isEnabled = server.getReplicationPeerManager().getPeerState(request.getPeerId());
2166    } catch (ReplicationException ioe) {
2167      throw new ServiceException(ioe);
2168    }
2169    return GetReplicationPeerStateResponse.newBuilder().setIsEnabled(isEnabled).build();
2170  }
2171
2172  @Override
2173  public ListDecommissionedRegionServersResponse listDecommissionedRegionServers(
2174    RpcController controller, ListDecommissionedRegionServersRequest request)
2175    throws ServiceException {
2176    ListDecommissionedRegionServersResponse.Builder response =
2177      ListDecommissionedRegionServersResponse.newBuilder();
2178    try {
2179      server.checkInitialized();
2180      if (server.cpHost != null) {
2181        server.cpHost.preListDecommissionedRegionServers();
2182      }
2183      List<ServerName> servers = server.listDecommissionedRegionServers();
      response.addAllServerName(
        servers.stream().map(ProtobufUtil::toServerName).collect(Collectors.toList()));
2186      if (server.cpHost != null) {
2187        server.cpHost.postListDecommissionedRegionServers();
2188      }
2189    } catch (IOException io) {
2190      throw new ServiceException(io);
2191    }
2192
2193    return response.build();
2194  }
2195
2196  @Override
2197  public DecommissionRegionServersResponse decommissionRegionServers(RpcController controller,
2198    DecommissionRegionServersRequest request) throws ServiceException {
2199    try {
2200      server.checkInitialized();
2201      List<ServerName> servers = request.getServerNameList().stream()
2202        .map(pbServer -> ProtobufUtil.toServerName(pbServer)).collect(Collectors.toList());
2203      boolean offload = request.getOffload();
2204      if (server.cpHost != null) {
2205        server.cpHost.preDecommissionRegionServers(servers, offload);
2206      }
2207      server.decommissionRegionServers(servers, offload);
2208      if (server.cpHost != null) {
2209        server.cpHost.postDecommissionRegionServers(servers, offload);
2210      }
2211    } catch (IOException io) {
2212      throw new ServiceException(io);
2213    }
2214
2215    return DecommissionRegionServersResponse.newBuilder().build();
2216  }
2217
2218  @Override
2219  public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller,
2220    RecommissionRegionServerRequest request) throws ServiceException {
2221    try {
2222      server.checkInitialized();
2223      ServerName sn = ProtobufUtil.toServerName(request.getServerName());
2224      List<byte[]> encodedRegionNames = request.getRegionList().stream()
2225        .map(regionSpecifier -> regionSpecifier.getValue().toByteArray())
2226        .collect(Collectors.toList());
2227      if (server.cpHost != null) {
2228        server.cpHost.preRecommissionRegionServer(sn, encodedRegionNames);
2229      }
2230      server.recommissionRegionServer(sn, encodedRegionNames);
2231      if (server.cpHost != null) {
2232        server.cpHost.postRecommissionRegionServer(sn, encodedRegionNames);
2233      }
2234    } catch (IOException io) {
2235      throw new ServiceException(io);
2236    }
2237
2238    return RecommissionRegionServerResponse.newBuilder().build();
2239  }
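
  /*
   * Illustrative sketch only, not used by this class: the decommission/recommission RPCs above are
   * normally driven through the Admin API. Method names are the editor's reading of
   * org.apache.hadoop.hbase.client.Admin; verify against that interface. Assumes a Configuration
   * named conf is in scope.
   *
   *   try (Connection conn = ConnectionFactory.createConnection(conf);
   *       Admin admin = conn.getAdmin()) {
   *     ServerName rs = ServerName.valueOf("host1.example.com,16020,1600000000000");
   *     // Drain the server of regions (offload=true) and keep new regions off it.
   *     admin.decommissionRegionServers(Collections.singletonList(rs), true);
   *     System.out.println(admin.listDecommissionedRegionServers());
   *     // Bring the server back into rotation.
   *     admin.recommissionRegionServer(rs, Collections.emptyList());
   *   }
   */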
2240
2241  @Override
2242  public LockResponse requestLock(RpcController controller, final LockRequest request)
2243    throws ServiceException {
2244    try {
2245      if (request.getDescription().isEmpty()) {
2246        throw new IllegalArgumentException("Empty description");
2247      }
2248      NonceProcedureRunnable npr;
2249      LockType type = LockType.valueOf(request.getLockType().name());
2250      if (request.getRegionInfoCount() > 0) {
2251        final RegionInfo[] regionInfos = new RegionInfo[request.getRegionInfoCount()];
2252        for (int i = 0; i < request.getRegionInfoCount(); ++i) {
2253          regionInfos[i] = ProtobufUtil.toRegionInfo(request.getRegionInfo(i));
2254        }
2255        npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) {
2256          @Override
2257          protected void run() throws IOException {
2258            setProcId(server.getLockManager().remoteLocks().requestRegionsLock(regionInfos,
2259              request.getDescription(), getNonceKey()));
2260          }
2261
2262          @Override
2263          protected String getDescription() {
2264            return "RequestLock";
2265          }
2266        };
2267      } else if (request.hasTableName()) {
2268        final TableName tableName = ProtobufUtil.toTableName(request.getTableName());
2269        npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) {
2270          @Override
2271          protected void run() throws IOException {
2272            setProcId(server.getLockManager().remoteLocks().requestTableLock(tableName, type,
2273              request.getDescription(), getNonceKey()));
2274          }
2275
2276          @Override
2277          protected String getDescription() {
2278            return "RequestLock";
2279          }
2280        };
2281      } else if (request.hasNamespace()) {
2282        npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) {
2283          @Override
2284          protected void run() throws IOException {
2285            setProcId(server.getLockManager().remoteLocks().requestNamespaceLock(
2286              request.getNamespace(), type, request.getDescription(), getNonceKey()));
2287          }
2288
2289          @Override
2290          protected String getDescription() {
2291            return "RequestLock";
2292          }
2293        };
2294      } else {
2295        throw new IllegalArgumentException("one of table/namespace/region should be specified");
2296      }
2297      long procId = MasterProcedureUtil.submitProcedure(npr);
2298      return LockResponse.newBuilder().setProcId(procId).build();
2299    } catch (IllegalArgumentException e) {
2300      LOG.warn("Exception when queuing lock", e);
2301      throw new ServiceException(new DoNotRetryIOException(e));
2302    } catch (IOException e) {
2303      LOG.warn("Exception when queuing lock", e);
2304      throw new ServiceException(e);
2305    }
2306  }
2307
  /**
   * @return LOCKED if the procedure with the given proc id is found and currently holds the lock,
   *         else UNLOCKED.
   * @throws ServiceException if the given proc id is found but it is not a LockProcedure.
   */
2312  @Override
2313  public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request)
2314    throws ServiceException {
2315    try {
2316      if (
2317        server.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(),
2318          request.getKeepAlive())
2319      ) {
2320        return LockHeartbeatResponse.newBuilder()
2321          .setTimeoutMs(server.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF,
2322            LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS))
2323          .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).build();
2324      } else {
2325        return LockHeartbeatResponse.newBuilder()
2326          .setLockStatus(LockHeartbeatResponse.LockStatus.UNLOCKED).build();
2327      }
2328    } catch (IOException e) {
2329      throw new ServiceException(e);
2330    }
2331  }
2332
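  /**
   * Handles region space usage reports sent up by region servers. When space quotas are enabled,
   * the reported sizes are recorded in the {@link MasterQuotaManager}; otherwise the report is
   * ignored.
   */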
2333  @Override
2334  public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller,
2335    RegionSpaceUseReportRequest request) throws ServiceException {
2336    try {
2337      server.checkInitialized();
2338      if (!QuotaUtil.isQuotaEnabled(server.getConfiguration())) {
2339        return RegionSpaceUseReportResponse.newBuilder().build();
2340      }
2341      MasterQuotaManager quotaManager = this.server.getMasterQuotaManager();
2342      if (quotaManager != null) {
2343        final long now = EnvironmentEdgeManager.currentTime();
2344        for (RegionSpaceUse report : request.getSpaceUseList()) {
2345          quotaManager.addRegionSize(ProtobufUtil.toRegionInfo(report.getRegionInfo()),
2346            report.getRegionSize(), now);
2347        }
2348      } else {
2349        LOG.debug("Received region space usage report but HMaster is not ready to process it, "
2350          + "skipping");
2351      }
2352      return RegionSpaceUseReportResponse.newBuilder().build();
2353    } catch (Exception e) {
2354      throw new ServiceException(e);
2355    }
2356  }
2357
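  /**
   * Returns the most recently snapshotted region sizes known to the {@link MasterQuotaManager},
   * aggregated per table. Returns an empty response if the quota manager is not yet available.
   */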
2358  @Override
2359  public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(RpcController controller,
2360    GetSpaceQuotaRegionSizesRequest request) throws ServiceException {
2361    try {
2362      server.checkInitialized();
2363      MasterQuotaManager quotaManager = this.server.getMasterQuotaManager();
2364      GetSpaceQuotaRegionSizesResponse.Builder builder =
2365        GetSpaceQuotaRegionSizesResponse.newBuilder();
2366      if (quotaManager != null) {
2367        Map<RegionInfo, Long> regionSizes = quotaManager.snapshotRegionSizes();
2368        Map<TableName, Long> regionSizesByTable = new HashMap<>();
        // Aggregate per-region sizes into per-table sizes
        for (Entry<RegionInfo, Long> entry : regionSizes.entrySet()) {
          regionSizesByTable.merge(entry.getKey().getTable(), entry.getValue(), Long::sum);
        }
2378        // Serialize them into the protobuf
2379        for (Entry<TableName, Long> tableSize : regionSizesByTable.entrySet()) {
2380          builder.addSizes(
2381            RegionSizes.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableSize.getKey()))
2382              .setSize(tableSize.getValue()).build());
2383        }
2384        return builder.build();
2385      } else {
        LOG.debug("Received space quota region size report but HMaster is not ready to process it,"
          + " skipping");
2388      }
2389      return builder.build();
2390    } catch (Exception e) {
2391      throw new ServiceException(e);
2392    }
2393  }
2394
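  /**
   * Returns the current space quota snapshots for all tables and namespaces with quotas, as seen
   * by the {@link QuotaObserverChore}. Returns an empty response if the chore is not yet running.
   */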
2395  @Override
2396  public GetQuotaStatesResponse getQuotaStates(RpcController controller,
2397    GetQuotaStatesRequest request) throws ServiceException {
2398    try {
2399      server.checkInitialized();
2400      QuotaObserverChore quotaChore = this.server.getQuotaObserverChore();
2401      GetQuotaStatesResponse.Builder builder = GetQuotaStatesResponse.newBuilder();
2402      if (quotaChore != null) {
2403        // The "current" view of all tables with quotas
2404        Map<TableName, SpaceQuotaSnapshot> tableSnapshots = quotaChore.getTableQuotaSnapshots();
2405        for (Entry<TableName, SpaceQuotaSnapshot> entry : tableSnapshots.entrySet()) {
2406          builder.addTableSnapshots(TableQuotaSnapshot.newBuilder()
2407            .setTableName(ProtobufUtil.toProtoTableName(entry.getKey()))
2408            .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
2409        }
2410        // The "current" view of all namespaces with quotas
2411        Map<String, SpaceQuotaSnapshot> nsSnapshots = quotaChore.getNamespaceQuotaSnapshots();
2412        for (Entry<String, SpaceQuotaSnapshot> entry : nsSnapshots.entrySet()) {
2413          builder.addNsSnapshots(NamespaceQuotaSnapshot.newBuilder().setNamespace(entry.getKey())
2414            .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
2415        }
2416        return builder.build();
2417      }
2418      return builder.build();
2419    } catch (Exception e) {
2420      throw new ServiceException(e);
2421    }
2422  }
2423
2424  @Override
2425  public ClearDeadServersResponse clearDeadServers(RpcController controller,
2426    ClearDeadServersRequest request) throws ServiceException {
2427    LOG.debug(server.getClientIdAuditPrefix() + " clear dead region servers.");
2428    ClearDeadServersResponse.Builder response = ClearDeadServersResponse.newBuilder();
2429    try {
2430      server.checkInitialized();
2431      if (server.cpHost != null) {
2432        server.cpHost.preClearDeadServers();
2433      }
2434
2435      if (server.getServerManager().areDeadServersInProgress()) {
        LOG.debug("Some dead servers are still being processed, won't clear the dead server list");
2437        response.addAllServerName(request.getServerNameList());
2438      } else {
2439        DeadServer deadServer = server.getServerManager().getDeadServers();
2440        Set<Address> clearedServers = new HashSet<>();
2441        for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
2442          ServerName serverName = ProtobufUtil.toServerName(pbServer);
2443          final boolean deadInProcess =
2444            server.getProcedures().stream().anyMatch(p -> (p instanceof ServerCrashProcedure)
2445              && ((ServerCrashProcedure) p).getServerName().equals(serverName));
2446          if (deadInProcess) {
            throw new ServiceException(String.format(
              "Server '%s' still has a ServerCrashProcedure scheduled or running; "
                + "refusing to clear it from the dead server list", serverName));
2449          }
2450
2451          if (!deadServer.removeDeadServer(serverName)) {
2452            response.addServerName(pbServer);
2453          } else {
2454            clearedServers.add(serverName.getAddress());
2455          }
2456        }
2457        server.getRSGroupInfoManager().removeServers(clearedServers);
        LOG.info("Removed cleared dead servers {} from RSGroups", clearedServers);
2459      }
2460
2461      if (server.cpHost != null) {
2462        server.cpHost.postClearDeadServers(
2463          ProtobufUtil.toServerNameList(request.getServerNameList()),
2464          ProtobufUtil.toServerNameList(response.getServerNameList()));
2465      }
2466    } catch (IOException io) {
2467      throw new ServiceException(io);
2468    }
2469    return response.build();
2470  }
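
  /*
   * Illustrative sketch only, not used by this class: operators typically call the RPC above via
   * Admin#clearDeadServers. The exact Admin signatures are the editor's assumption; verify against
   * org.apache.hadoop.hbase.client.Admin. The returned list contains the servers that could not be
   * cleared. Assumes a Configuration named conf is in scope.
   *
   *   try (Connection conn = ConnectionFactory.createConnection(conf);
   *       Admin admin = conn.getAdmin()) {
   *     List<ServerName> notCleared =
   *       admin.clearDeadServers(admin.getClusterMetrics().getDeadServerNames());
   *   }
   */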
2471
2472  @Override
2473  public ReportProcedureDoneResponse reportProcedureDone(RpcController controller,
2474    ReportProcedureDoneRequest request) throws ServiceException {
    // Check the Master is up and ready for duty before progressing. Remote side will keep trying.
2476    try {
2477      this.server.checkServiceStarted();
2478    } catch (ServerNotRunningYetException snrye) {
2479      throw new ServiceException(snrye);
2480    }
2481    request.getResultList().forEach(result -> {
2482      if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) {
2483        server.remoteProcedureCompleted(result.getProcId());
2484      } else {
2485        server.remoteProcedureFailed(result.getProcId(),
2486          RemoteProcedureException.fromProto(result.getError()));
2487      }
2488    });
2489    return ReportProcedureDoneResponse.getDefaultInstance();
2490  }
2491
2492  @Override
2493  public FileArchiveNotificationResponse reportFileArchival(RpcController controller,
2494    FileArchiveNotificationRequest request) throws ServiceException {
2495    try {
2496      server.checkInitialized();
2497      if (!QuotaUtil.isQuotaEnabled(server.getConfiguration())) {
2498        return FileArchiveNotificationResponse.newBuilder().build();
2499      }
2500      server.getMasterQuotaManager().processFileArchivals(request, server.getConnection(),
2501        server.getConfiguration(), server.getFileSystem());
2502      return FileArchiveNotificationResponse.newBuilder().build();
2503    } catch (Exception e) {
2504      throw new ServiceException(e);
2505    }
2506  }
2507
2508  // HBCK Services
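
  /*
   * Illustrative sketch only, not used by this class: the HBCK service endpoints below are meant
   * for operator tooling such as HBCK2, which reaches them through the Hbck client interface. The
   * Hbck method names here are the editor's recollection of org.apache.hadoop.hbase.client.Hbck
   * and should be verified against that interface before use. Assumes a Configuration named conf
   * is in scope.
   *
   *   try (Connection conn = ConnectionFactory.createConnection(conf);
   *       Hbck hbck = conn.getHbck()) {
   *     hbck.runHbckChore();                             // refresh the HbckChore report
   *     hbck.fixMeta();                                  // plug holes/overlaps found in meta
   *     hbck.assigns(Arrays.asList("1588230740"), true); // force-assign by encoded region name
   *     hbck.bypassProcedure(Arrays.asList(101L), 60000, false, false); // skip a stuck procedure
   *   }
   */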
2509
2510  @Override
2511  public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest req)
2512    throws ServiceException {
2513    rpcPreCheck("runHbckChore");
2514    LOG.info("{} request HBCK chore to run", server.getClientIdAuditPrefix());
2515    HbckChore hbckChore = server.getHbckChore();
2516    boolean ran = hbckChore.runChore();
2517    return RunHbckChoreResponse.newBuilder().setRan(ran).build();
2518  }
2519
  /**
   * Update the state of the table in meta only. This is required by hbck in some situations to
   * clean up stuck assign/unassign procedures for the table.
   * @return previous state of the table
   */
2525  @Override
2526  public GetTableStateResponse setTableStateInMeta(RpcController controller,
2527    SetTableStateInMetaRequest request) throws ServiceException {
2528    rpcPreCheck("setTableStateInMeta");
2529    TableName tn = ProtobufUtil.toTableName(request.getTableName());
2530    try {
2531      TableState prevState = this.server.getTableStateManager().getTableState(tn);
2532      TableState newState = TableState.convert(tn, request.getTableState());
2533      LOG.info("{} set table={} state from {} to {}", server.getClientIdAuditPrefix(), tn,
2534        prevState.getState(), newState.getState());
2535      this.server.getTableStateManager().setTableState(tn, newState.getState());
2536      return GetTableStateResponse.newBuilder().setTableState(prevState.convert()).build();
2537    } catch (Exception e) {
2538      throw new ServiceException(e);
2539    }
2540  }
2541
  /**
   * Update the state of the region in meta only. This is required by hbck in some situations to
   * clean up stuck assign/unassign procedures for the region's table.
   * @return previous states of the regions
   */
2547  @Override
2548  public SetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller,
2549    SetRegionStateInMetaRequest request) throws ServiceException {
2550    rpcPreCheck("setRegionStateInMeta");
2551    SetRegionStateInMetaResponse.Builder builder = SetRegionStateInMetaResponse.newBuilder();
2552    try {
2553      for (RegionSpecifierAndState s : request.getStatesList()) {
2554        RegionSpecifier spec = s.getRegionSpecifier();
2555        String encodedName;
2556        if (spec.getType() == RegionSpecifierType.ENCODED_REGION_NAME) {
2557          encodedName = spec.getValue().toStringUtf8();
2558        } else {
          // TODO: a full region name could avoid a costly meta scan; improve later.
2560          encodedName = RegionInfo.encodeRegionName(spec.getValue().toByteArray());
2561        }
2562        RegionInfo info = this.server.getAssignmentManager().loadRegionFromMeta(encodedName);
2563        LOG.trace("region info loaded from meta table: {}", info);
2564        RegionState prevState =
2565          this.server.getAssignmentManager().getRegionStates().getRegionState(info);
2566        RegionState.State newState = RegionState.State.convert(s.getState());
2567        LOG.info("{} set region={} state from {} to {}", server.getClientIdAuditPrefix(), info,
2568          prevState.getState(), newState);
2569        Put metaPut =
2570          MetaTableAccessor.makePutFromRegionInfo(info, EnvironmentEdgeManager.currentTime());
2571        metaPut.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
2572          Bytes.toBytes(newState.name()));
        MetaTableAccessor.putsToMetaTable(this.server.getConnection(),
          Collections.singletonList(metaPut));
2576        // Loads from meta again to refresh AM cache with the new region state
2577        this.server.getAssignmentManager().loadRegionFromMeta(encodedName);
2578        builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec)
2579          .setState(prevState.getState().convert()));
2580      }
2581    } catch (Exception e) {
2582      throw new ServiceException(e);
2583    }
2584    return builder.build();
2585  }
2586
2587  /**
2588   * Get RegionInfo from Master using content of RegionSpecifier as key.
2589   * @return RegionInfo found by decoding <code>rs</code> or null if none found
2590   */
2591  private RegionInfo getRegionInfo(HBaseProtos.RegionSpecifier rs) throws UnknownRegionException {
2592    RegionInfo ri = null;
2593    switch (rs.getType()) {
2594      case REGION_NAME:
2595        final byte[] regionName = rs.getValue().toByteArray();
2596        ri = this.server.getAssignmentManager().getRegionInfo(regionName);
2597        break;
2598      case ENCODED_REGION_NAME:
2599        String encodedRegionName = Bytes.toString(rs.getValue().toByteArray());
2600        RegionState regionState =
2601          this.server.getAssignmentManager().getRegionStates().getRegionState(encodedRegionName);
2602        ri = regionState == null
2603          ? this.server.getAssignmentManager().loadRegionFromMeta(encodedRegionName)
2604          : regionState.getRegion();
2605        break;
2606      default:
2607        break;
2608    }
2609    return ri;
2610  }
2611
  /**
   * @throws ServiceException if the Master's ProcedureExecutor is not yet initialized
   */
2615  private void checkMasterProcedureExecutor() throws ServiceException {
2616    if (this.server.getMasterProcedureExecutor() == null) {
2617      throw new ServiceException("Master's ProcedureExecutor not initialized; retry later");
2618    }
2619  }
2620
  /**
   * A 'raw' version of assign that operates in bulk and can skirt Master state checks if override
   * is set; i.e. assigns can be forced during Master startup or if RegionState is unclean. Used by
   * HBCK2.
   */
2625  @Override
2626  public MasterProtos.AssignsResponse assigns(RpcController controller,
2627    MasterProtos.AssignsRequest request) throws ServiceException {
2628    checkMasterProcedureExecutor();
2629    MasterProtos.AssignsResponse.Builder responseBuilder =
2630      MasterProtos.AssignsResponse.newBuilder();
2631    try {
2632      boolean override = request.getOverride();
2633      LOG.info("{} assigns, override={}", server.getClientIdAuditPrefix(), override);
2634      for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
2635        long pid = Procedure.NO_PROC_ID;
2636        RegionInfo ri = getRegionInfo(rs);
2637        if (ri == null) {
2638          LOG.info("Unknown={}", rs);
2639        } else {
2640          Procedure p = this.server.getAssignmentManager().createOneAssignProcedure(ri, override);
2641          if (p != null) {
2642            pid = this.server.getMasterProcedureExecutor().submitProcedure(p);
2643          }
2644        }
2645        responseBuilder.addPid(pid);
2646      }
2647      return responseBuilder.build();
2648    } catch (IOException ioe) {
2649      throw new ServiceException(ioe);
2650    }
2651  }
2652
  /**
   * A 'raw' version of unassign that operates in bulk and can skirt Master state checks if
   * override is set; i.e. unassigns can be forced during Master startup or if RegionState is
   * unclean. Used by HBCK2.
   */
2658  @Override
2659  public MasterProtos.UnassignsResponse unassigns(RpcController controller,
2660    MasterProtos.UnassignsRequest request) throws ServiceException {
2661    checkMasterProcedureExecutor();
2662    MasterProtos.UnassignsResponse.Builder responseBuilder =
2663      MasterProtos.UnassignsResponse.newBuilder();
2664    try {
2665      boolean override = request.getOverride();
2666      LOG.info("{} unassigns, override={}", server.getClientIdAuditPrefix(), override);
2667      for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
2668        long pid = Procedure.NO_PROC_ID;
2669        RegionInfo ri = getRegionInfo(rs);
2670        if (ri == null) {
2671          LOG.info("Unknown={}", rs);
2672        } else {
2673          Procedure p = this.server.getAssignmentManager().createOneUnassignProcedure(ri, override);
2674          if (p != null) {
2675            pid = this.server.getMasterProcedureExecutor().submitProcedure(p);
2676          }
2677        }
2678        responseBuilder.addPid(pid);
2679      }
2680      return responseBuilder.build();
2681    } catch (IOException ioe) {
2682      throw new ServiceException(ioe);
2683    }
2684  }
2685
  /**
   * Bypass the specified procedure to completion. The procedure is marked completed but no actual
   * work is done from the current state/step onwards. Parents of the procedure are also marked
   * for bypass. NOTE: this is a dangerous operation and should only be used to unstick buggy
   * procedures. It may leave the system in an incoherent state and may need to be followed by
   * cleanup steps/actions by the operator.
   * @return BypassProcedureResponse indicating, per procedure, whether it was bypassed
   */
2694  @Override
2695  public MasterProtos.BypassProcedureResponse bypassProcedure(RpcController controller,
2696    MasterProtos.BypassProcedureRequest request) throws ServiceException {
2697    try {
2698      LOG.info("{} bypass procedures={}, waitTime={}, override={}, recursive={}",
2699        server.getClientIdAuditPrefix(), request.getProcIdList(), request.getWaitTime(),
2700        request.getOverride(), request.getRecursive());
2701      List<Boolean> ret =
2702        server.getMasterProcedureExecutor().bypassProcedure(request.getProcIdList(),
2703          request.getWaitTime(), request.getOverride(), request.getRecursive());
2704      return MasterProtos.BypassProcedureResponse.newBuilder().addAllBypassed(ret).build();
2705    } catch (IOException e) {
2706      throw new ServiceException(e);
2707    }
2708  }
2709
2710  @Override
2711  public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProcedure(
2712    RpcController controller, MasterProtos.ScheduleServerCrashProcedureRequest request)
2713    throws ServiceException {
2714    List<Long> pids = new ArrayList<>();
2715    for (HBaseProtos.ServerName sn : request.getServerNameList()) {
2716      ServerName serverName = ProtobufUtil.toServerName(sn);
2717      LOG.info("{} schedule ServerCrashProcedure for {}", this.server.getClientIdAuditPrefix(),
2718        serverName);
2719      if (shouldSubmitSCP(serverName)) {
2720        pids.add(this.server.getServerManager().expireServer(serverName, true));
2721      } else {
2722        pids.add(Procedure.NO_PROC_ID);
2723      }
2724    }
2725    return MasterProtos.ScheduleServerCrashProcedureResponse.newBuilder().addAllPid(pids).build();
2726  }
2727
2728  @Override
2729  public MasterProtos.ScheduleSCPsForUnknownServersResponse scheduleSCPsForUnknownServers(
2730    RpcController controller, MasterProtos.ScheduleSCPsForUnknownServersRequest request)
2731    throws ServiceException {
2732    List<Long> pids = new ArrayList<>();
2733    final Set<ServerName> serverNames = server.getAssignmentManager().getRegionStates()
2734      .getRegionStates().stream().map(RegionState::getServerName).collect(Collectors.toSet());
2735
2736    final Set<ServerName> unknownServerNames = serverNames.stream()
2737      .filter(sn -> server.getServerManager().isServerUnknown(sn)).collect(Collectors.toSet());
2738
2739    for (ServerName sn : unknownServerNames) {
2740      LOG.info("{} schedule ServerCrashProcedure for unknown {}",
2741        this.server.getClientIdAuditPrefix(), sn);
2742      if (shouldSubmitSCP(sn)) {
2743        pids.add(this.server.getServerManager().expireServer(sn, true));
2744      } else {
2745        pids.add(Procedure.NO_PROC_ID);
2746      }
2747    }
2748    return MasterProtos.ScheduleSCPsForUnknownServersResponse.newBuilder().addAllPid(pids).build();
2749  }
2750
2751  @Override
2752  public FixMetaResponse fixMeta(RpcController controller, FixMetaRequest request)
2753    throws ServiceException {
2754    rpcPreCheck("fixMeta");
2755    try {
2756      MetaFixer mf = new MetaFixer(this.server);
2757      mf.fix();
2758      return FixMetaResponse.newBuilder().build();
2759    } catch (IOException ioe) {
2760      throw new ServiceException(ioe);
2761    }
2762  }
2763
2764  @Override
2765  public SwitchRpcThrottleResponse switchRpcThrottle(RpcController controller,
2766    SwitchRpcThrottleRequest request) throws ServiceException {
2767    try {
2768      server.checkInitialized();
2769      return server.getMasterQuotaManager().switchRpcThrottle(request);
2770    } catch (Exception e) {
2771      throw new ServiceException(e);
2772    }
2773  }
2774
2775  @Override
2776  public MasterProtos.IsRpcThrottleEnabledResponse isRpcThrottleEnabled(RpcController controller,
2777    MasterProtos.IsRpcThrottleEnabledRequest request) throws ServiceException {
2778    try {
2779      server.checkInitialized();
2780      return server.getMasterQuotaManager().isRpcThrottleEnabled(request);
2781    } catch (Exception e) {
2782      throw new ServiceException(e);
2783    }
2784  }
2785
2786  @Override
2787  public SwitchExceedThrottleQuotaResponse switchExceedThrottleQuota(RpcController controller,
2788    SwitchExceedThrottleQuotaRequest request) throws ServiceException {
2789    try {
2790      server.checkInitialized();
2791      return server.getMasterQuotaManager().switchExceedThrottleQuota(request);
2792    } catch (Exception e) {
2793      throw new ServiceException(e);
2794    }
2795  }
2796
2797  @Override
2798  public GrantResponse grant(RpcController controller, GrantRequest request)
2799    throws ServiceException {
2800    try {
2801      server.checkInitialized();
2802      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
2803        final UserPermission perm =
2804          ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
2805        boolean mergeExistingPermissions = request.getMergeExistingPermissions();
2806        server.cpHost.preGrant(perm, mergeExistingPermissions);
2807        try (Table table = server.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
2808          PermissionStorage.addUserPermission(getConfiguration(), perm, table,
2809            mergeExistingPermissions);
2810        }
2811        server.cpHost.postGrant(perm, mergeExistingPermissions);
2812        return GrantResponse.getDefaultInstance();
2813      } else {
2814        throw new DoNotRetryIOException(
2815          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2816      }
2817    } catch (IOException ioe) {
2818      throw new ServiceException(ioe);
2819    }
2820  }
2821
2822  @Override
2823  public RevokeResponse revoke(RpcController controller, RevokeRequest request)
2824    throws ServiceException {
2825    try {
2826      server.checkInitialized();
2827      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
2828        final UserPermission userPermission =
2829          ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
2830        server.cpHost.preRevoke(userPermission);
2831        try (Table table = server.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
2832          PermissionStorage.removeUserPermission(server.getConfiguration(), userPermission, table);
2833        }
2834        server.cpHost.postRevoke(userPermission);
2835        return RevokeResponse.getDefaultInstance();
2836      } else {
2837        throw new DoNotRetryIOException(
2838          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2839      }
2840    } catch (IOException ioe) {
2841      throw new ServiceException(ioe);
2842    }
2843  }
2844
2845  @Override
2846  public GetUserPermissionsResponse getUserPermissions(RpcController controller,
2847    GetUserPermissionsRequest request) throws ServiceException {
2848    try {
2849      server.checkInitialized();
2850      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
2851        final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null;
2852        String namespace =
2853          request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null;
2854        TableName table =
2855          request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
2856        byte[] cf = request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null;
2857        byte[] cq =
2858          request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null;
2859        Type permissionType = request.hasType() ? request.getType() : null;
2860        server.getMasterCoprocessorHost().preGetUserPermissions(userName, namespace, table, cf, cq);
2861
2862        List<UserPermission> perms = null;
        if (permissionType == Type.Table) {
          boolean filter = (cf != null || userName != null);
          perms = PermissionStorage.getUserTablePermissions(server.getConfiguration(), table, cf,
            cq, userName, filter);
        } else if (permissionType == Type.Namespace) {
          perms = PermissionStorage.getUserNamespacePermissions(server.getConfiguration(),
            namespace, userName, userName != null);
        } else {
          perms = PermissionStorage.getUserPermissions(server.getConfiguration(), null, null, null,
            userName, userName != null);
          // Skip superusers when a filter user is specified
          if (userName == null) {
            // Add superusers explicitly to the result set, as PermissionStorage does not store
            // them. Use the acl table as the table name to stay in line with the results for
            // global admins and to avoid leaking information about who the superusers are.
2878            for (String user : Superusers.getSuperUsers()) {
2879              perms.add(new UserPermission(user,
2880                Permission.newBuilder().withActions(Action.values()).build()));
2881            }
2882          }
2883        }
2884
2885        server.getMasterCoprocessorHost().postGetUserPermissions(userName, namespace, table, cf,
2886          cq);
2887        AccessControlProtos.GetUserPermissionsResponse response =
2888          ShadedAccessControlUtil.buildGetUserPermissionsResponse(perms);
2889        return response;
2890      } else {
2891        throw new DoNotRetryIOException(
2892          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2893      }
2894    } catch (IOException ioe) {
2895      throw new ServiceException(ioe);
2896    }
2897  }
2898
2899  @Override
2900  public HasUserPermissionsResponse hasUserPermissions(RpcController controller,
2901    HasUserPermissionsRequest request) throws ServiceException {
2902    try {
2903      server.checkInitialized();
2904      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
2905        User caller = RpcServer.getRequestUser().orElse(null);
2906        String userName =
2907          request.hasUserName() ? request.getUserName().toStringUtf8() : caller.getShortName();
2908        List<Permission> permissions = new ArrayList<>();
2909        for (int i = 0; i < request.getPermissionCount(); i++) {
2910          permissions.add(ShadedAccessControlUtil.toPermission(request.getPermission(i)));
2911        }
2912        server.getMasterCoprocessorHost().preHasUserPermissions(userName, permissions);
2913        if (!caller.getShortName().equals(userName)) {
2914          List<String> groups = AccessChecker.getUserGroups(userName);
2915          caller = new InputUser(userName, groups.toArray(new String[groups.size()]));
2916        }
2917        List<Boolean> hasUserPermissions = new ArrayList<>();
2918        if (getAccessChecker() != null) {
2919          for (Permission permission : permissions) {
2920            boolean hasUserPermission =
2921              getAccessChecker().hasUserPermission(caller, "hasUserPermissions", permission);
2922            hasUserPermissions.add(hasUserPermission);
2923          }
2924        } else {
2925          for (int i = 0; i < permissions.size(); i++) {
2926            hasUserPermissions.add(true);
2927          }
2928        }
2929        server.getMasterCoprocessorHost().postHasUserPermissions(userName, permissions);
2930        HasUserPermissionsResponse.Builder builder =
2931          HasUserPermissionsResponse.newBuilder().addAllHasUserPermission(hasUserPermissions);
2932        return builder.build();
2933      } else {
2934        throw new DoNotRetryIOException(
2935          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2936      }
2937    } catch (IOException ioe) {
2938      throw new ServiceException(ioe);
2939    }
2940  }
2941
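  /**
   * Returns true if no unfinished ServerCrashProcedure already exists for the given server, i.e.
   * it is safe to submit a new one; returns false (and logs the existing pid) otherwise.
   */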
2942  private boolean shouldSubmitSCP(ServerName serverName) {
2943    // check if there is already a SCP of this server running
2944    List<Procedure<MasterProcedureEnv>> procedures =
2945      server.getMasterProcedureExecutor().getProcedures();
2946    for (Procedure<MasterProcedureEnv> procedure : procedures) {
2947      if (procedure instanceof ServerCrashProcedure) {
2948        if (
2949          serverName.compareTo(((ServerCrashProcedure) procedure).getServerName()) == 0
2950            && !procedure.isFinished()
2951        ) {
          LOG.info("There is already a SCP running for server {}, pid {}", serverName,
            procedure.getProcId());
2954          return false;
2955        }
2956      }
2957    }
2958    return true;
2959  }
2960
2961  @Override
2962  public GetRSGroupInfoResponse getRSGroupInfo(RpcController controller,
2963    GetRSGroupInfoRequest request) throws ServiceException {
2964    String groupName = request.getRSGroupName();
2965    LOG.info(
2966      server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" + groupName);
2967    try {
2968      if (server.getMasterCoprocessorHost() != null) {
2969        server.getMasterCoprocessorHost().preGetRSGroupInfo(groupName);
2970      }
2971      RSGroupInfo rsGroupInfo = server.getRSGroupInfoManager().getRSGroup(groupName);
2972      GetRSGroupInfoResponse resp;
2973      if (rsGroupInfo != null) {
2974        resp = GetRSGroupInfoResponse.newBuilder()
2975          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
2976      } else {
2977        resp = GetRSGroupInfoResponse.getDefaultInstance();
2978      }
2979      if (server.getMasterCoprocessorHost() != null) {
2980        server.getMasterCoprocessorHost().postGetRSGroupInfo(groupName);
2981      }
2982      return resp;
2983    } catch (IOException e) {
2984      throw new ServiceException(e);
2985    }
2986  }
2987
2988  @Override
2989  public GetRSGroupInfoOfTableResponse getRSGroupInfoOfTable(RpcController controller,
2990    GetRSGroupInfoOfTableRequest request) throws ServiceException {
2991    TableName tableName = ProtobufUtil.toTableName(request.getTableName());
2992    LOG.info(
2993      server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName);
2994    try {
2995      if (server.getMasterCoprocessorHost() != null) {
2996        server.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName);
2997      }
2998      GetRSGroupInfoOfTableResponse resp;
2999      TableDescriptor td = server.getTableDescriptors().get(tableName);
3000      if (td == null) {
3001        resp = GetRSGroupInfoOfTableResponse.getDefaultInstance();
3002      } else {
3003        RSGroupInfo rsGroupInfo =
3004          RSGroupUtil.getRSGroupInfo(server, server.getRSGroupInfoManager(), tableName)
3005            .orElse(server.getRSGroupInfoManager().getRSGroup(RSGroupInfo.DEFAULT_GROUP));
3006        resp = GetRSGroupInfoOfTableResponse.newBuilder()
3007          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
3008      }
3009      if (server.getMasterCoprocessorHost() != null) {
3010        server.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName);
3011      }
3012      return resp;
3013    } catch (IOException e) {
3014      throw new ServiceException(e);
3015    }
3016  }
3017
3018  @Override
3019  public GetRSGroupInfoOfServerResponse getRSGroupInfoOfServer(RpcController controller,
3020    GetRSGroupInfoOfServerRequest request) throws ServiceException {
3021    Address hp =
3022      Address.fromParts(request.getServer().getHostName(), request.getServer().getPort());
3023    LOG.info(server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp);
3024    try {
3025      if (server.getMasterCoprocessorHost() != null) {
3026        server.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp);
3027      }
3028      RSGroupInfo rsGroupInfo = server.getRSGroupInfoManager().getRSGroupOfServer(hp);
3029      GetRSGroupInfoOfServerResponse resp;
3030      if (rsGroupInfo != null) {
3031        resp = GetRSGroupInfoOfServerResponse.newBuilder()
3032          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
3033      } else {
3034        resp = GetRSGroupInfoOfServerResponse.getDefaultInstance();
3035      }
3036      if (server.getMasterCoprocessorHost() != null) {
3037        server.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp);
3038      }
3039      return resp;
3040    } catch (IOException e) {
3041      throw new ServiceException(e);
3042    }
3043  }
3044
3045  @Override
3046  public MoveServersResponse moveServers(RpcController controller, MoveServersRequest request)
3047    throws ServiceException {
3048    Set<Address> hostPorts = Sets.newHashSet();
3049    MoveServersResponse.Builder builder = MoveServersResponse.newBuilder();
3050    for (HBaseProtos.ServerName el : request.getServersList()) {
3051      hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
3052    }
3053    LOG.info(server.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup "
3054      + request.getTargetGroup());
3055    try {
3056      if (server.getMasterCoprocessorHost() != null) {
3057        server.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup());
3058      }
3059      server.getRSGroupInfoManager().moveServers(hostPorts, request.getTargetGroup());
3060      if (server.getMasterCoprocessorHost() != null) {
3061        server.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup());
3062      }
3063    } catch (IOException e) {
3064      throw new ServiceException(e);
3065    }
3066    return builder.build();
3067  }
3068
3069  @Override
3070  public AddRSGroupResponse addRSGroup(RpcController controller, AddRSGroupRequest request)
3071    throws ServiceException {
3072    AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder();
3073    LOG.info(server.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName());
3074    try {
3075      if (server.getMasterCoprocessorHost() != null) {
3076        server.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName());
3077      }
3078      server.getRSGroupInfoManager().addRSGroup(new RSGroupInfo(request.getRSGroupName()));
3079      if (server.getMasterCoprocessorHost() != null) {
3080        server.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName());
3081      }
3082    } catch (IOException e) {
3083      throw new ServiceException(e);
3084    }
3085    return builder.build();
3086  }
3087
3088  @Override
3089  public RemoveRSGroupResponse removeRSGroup(RpcController controller, RemoveRSGroupRequest request)
3090    throws ServiceException {
3091    RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder();
3092    LOG.info(server.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName());
3093    try {
3094      if (server.getMasterCoprocessorHost() != null) {
3095        server.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName());
3096      }
3097      server.getRSGroupInfoManager().removeRSGroup(request.getRSGroupName());
3098      if (server.getMasterCoprocessorHost() != null) {
3099        server.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName());
3100      }
3101    } catch (IOException e) {
3102      throw new ServiceException(e);
3103    }
3104    return builder.build();
3105  }
3106
3107  @Override
3108  public BalanceRSGroupResponse balanceRSGroup(RpcController controller,
3109    BalanceRSGroupRequest request) throws ServiceException {
3110    BalanceRequest balanceRequest = ProtobufUtil.toBalanceRequest(request);
3111
3112    BalanceRSGroupResponse.Builder builder =
3113      BalanceRSGroupResponse.newBuilder().setBalanceRan(false);
3114
3115    LOG.info(
3116      server.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName());
3117    try {
3118      if (server.getMasterCoprocessorHost() != null) {
3119        server.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName(),
3120          balanceRequest);
3121      }
3122      BalanceResponse response =
3123        server.getRSGroupInfoManager().balanceRSGroup(request.getRSGroupName(), balanceRequest);
3124      ProtobufUtil.populateBalanceRSGroupResponse(builder, response);
3125      if (server.getMasterCoprocessorHost() != null) {
3126        server.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(),
3127          balanceRequest, response);
3128      }
3129    } catch (IOException e) {
3130      throw new ServiceException(e);
3131    }
3132    return builder.build();
3133  }
3134
3135  @Override
3136  public ListRSGroupInfosResponse listRSGroupInfos(RpcController controller,
3137    ListRSGroupInfosRequest request) throws ServiceException {
3138    ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder();
3139    LOG.info(server.getClientIdAuditPrefix() + " list rsgroup");
3140    try {
3141      if (server.getMasterCoprocessorHost() != null) {
3142        server.getMasterCoprocessorHost().preListRSGroups();
3143      }
3144      List<RSGroupInfo> rsGroupInfos = server.getRSGroupInfoManager().listRSGroups().stream()
3145        .map(RSGroupInfo::new).collect(Collectors.toList());
      Map<String, RSGroupInfo> name2Info = new HashMap<>();
      // Fetch all table descriptors once rather than re-reading them for every rsgroup.
      List<TableDescriptor> allTableDescriptors =
        new ArrayList<>(server.getTableDescriptors().getAll().values());
      List<TableDescriptor> needToFill = new ArrayList<>(allTableDescriptors);
      for (RSGroupInfo rsGroupInfo : rsGroupInfos) {
        name2Info.put(rsGroupInfo.getName(), rsGroupInfo);
        for (TableDescriptor td : allTableDescriptors) {
          if (rsGroupInfo.containsTable(td.getTableName())) {
            needToFill.remove(td);
          }
        }
      }
3157      for (TableDescriptor td : needToFill) {
3158        String groupName = td.getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP);
3159        RSGroupInfo rsGroupInfo = name2Info.get(groupName);
3160        if (rsGroupInfo != null) {
3161          rsGroupInfo.addTable(td.getTableName());
3162        }
3163      }
3164      for (RSGroupInfo rsGroupInfo : rsGroupInfos) {
3165        // TODO: this can be done at once outside this loop, do not need to scan all every time.
3166        builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo));
3167      }
3168      if (server.getMasterCoprocessorHost() != null) {
3169        server.getMasterCoprocessorHost().postListRSGroups();
3170      }
3171    } catch (IOException e) {
3172      throw new ServiceException(e);
3173    }
3174    return builder.build();
3175  }
3176
3177  @Override
3178  public RemoveServersResponse removeServers(RpcController controller, RemoveServersRequest request)
3179    throws ServiceException {
3180    RemoveServersResponse.Builder builder = RemoveServersResponse.newBuilder();
3181    Set<Address> servers = Sets.newHashSet();
3182    for (HBaseProtos.ServerName el : request.getServersList()) {
3183      servers.add(Address.fromParts(el.getHostName(), el.getPort()));
3184    }
3185    LOG.info(
3186      server.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " + servers);
3187    try {
3188      if (server.getMasterCoprocessorHost() != null) {
3189        server.getMasterCoprocessorHost().preRemoveServers(servers);
3190      }
3191      server.getRSGroupInfoManager().removeServers(servers);
3192      if (server.getMasterCoprocessorHost() != null) {
3193        server.getMasterCoprocessorHost().postRemoveServers(servers);
3194      }
3195    } catch (IOException e) {
3196      throw new ServiceException(e);
3197    }
3198    return builder.build();
3199  }
3200
3201  @Override
3202  public ListTablesInRSGroupResponse listTablesInRSGroup(RpcController controller,
3203    ListTablesInRSGroupRequest request) throws ServiceException {
3204    ListTablesInRSGroupResponse.Builder builder = ListTablesInRSGroupResponse.newBuilder();
3205    String groupName = request.getGroupName();
3206    LOG.info(server.getClientIdAuditPrefix() + " list tables in rsgroup " + groupName);
3207    try {
3208      if (server.getMasterCoprocessorHost() != null) {
3209        server.getMasterCoprocessorHost().preListTablesInRSGroup(groupName);
3210      }
3211      RSGroupUtil.listTablesInRSGroup(server, groupName).stream()
3212        .map(ProtobufUtil::toProtoTableName).forEach(builder::addTableName);
3213      if (server.getMasterCoprocessorHost() != null) {
3214        server.getMasterCoprocessorHost().postListTablesInRSGroup(groupName);
3215      }
3216    } catch (IOException e) {
3217      throw new ServiceException(e);
3218    }
3219    return builder.build();
3220  }
3221
3222  @Override
3223  public GetConfiguredNamespacesAndTablesInRSGroupResponse
3224    getConfiguredNamespacesAndTablesInRSGroup(RpcController controller,
3225      GetConfiguredNamespacesAndTablesInRSGroupRequest request) throws ServiceException {
3226    GetConfiguredNamespacesAndTablesInRSGroupResponse.Builder builder =
3227      GetConfiguredNamespacesAndTablesInRSGroupResponse.newBuilder();
3228    String groupName = request.getGroupName();
3229    LOG.info(server.getClientIdAuditPrefix() + " get configured namespaces and tables in rsgroup "
3230      + groupName);
3231    try {
3232      if (server.getMasterCoprocessorHost() != null) {
3233        server.getMasterCoprocessorHost().preGetConfiguredNamespacesAndTablesInRSGroup(groupName);
3234      }
3235      for (NamespaceDescriptor nd : server.getClusterSchema().getNamespaces()) {
3236        if (groupName.equals(nd.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP))) {
3237          builder.addNamespace(nd.getName());
3238        }
3239      }
3240      for (TableDescriptor td : server.getTableDescriptors().getAll().values()) {
3241        if (td.getRegionServerGroup().map(g -> g.equals(groupName)).orElse(false)) {
3242          builder.addTableName(ProtobufUtil.toProtoTableName(td.getTableName()));
3243        }
3244      }
3245      if (server.getMasterCoprocessorHost() != null) {
3246        server.getMasterCoprocessorHost().postGetConfiguredNamespacesAndTablesInRSGroup(groupName);
3247      }
3248    } catch (IOException e) {
3249      throw new ServiceException(e);
3250    }
3251    return builder.build();
3252  }
3253
3254  @Override
3255  public RenameRSGroupResponse renameRSGroup(RpcController controller, RenameRSGroupRequest request)
3256    throws ServiceException {
3257    RenameRSGroupResponse.Builder builder = RenameRSGroupResponse.newBuilder();
3258    String oldRSGroup = request.getOldRsgroupName();
3259    String newRSGroup = request.getNewRsgroupName();
3260    LOG.info("{} rename rsgroup from {} to {} ", server.getClientIdAuditPrefix(), oldRSGroup,
3261      newRSGroup);
3262    try {
3263      if (server.getMasterCoprocessorHost() != null) {
3264        server.getMasterCoprocessorHost().preRenameRSGroup(oldRSGroup, newRSGroup);
3265      }
3266      server.getRSGroupInfoManager().renameRSGroup(oldRSGroup, newRSGroup);
3267      if (server.getMasterCoprocessorHost() != null) {
3268        server.getMasterCoprocessorHost().postRenameRSGroup(oldRSGroup, newRSGroup);
3269      }
3270    } catch (IOException e) {
3271      throw new ServiceException(e);
3272    }
3273    return builder.build();
3274  }
3275
3276  @Override
3277  public UpdateRSGroupConfigResponse updateRSGroupConfig(RpcController controller,
3278    UpdateRSGroupConfigRequest request) throws ServiceException {
3279    UpdateRSGroupConfigResponse.Builder builder = UpdateRSGroupConfigResponse.newBuilder();
3280    String groupName = request.getGroupName();
3281    Map<String, String> configuration = new HashMap<>();
3282    request.getConfigurationList().forEach(p -> configuration.put(p.getName(), p.getValue()));
3283    LOG.info("{} update rsgroup {} configuration {}", server.getClientIdAuditPrefix(), groupName,
3284      configuration);
3285    try {
3286      if (server.getMasterCoprocessorHost() != null) {
3287        server.getMasterCoprocessorHost().preUpdateRSGroupConfig(groupName, configuration);
3288      }
3289      server.getRSGroupInfoManager().updateRSGroupConfig(groupName, configuration);
3290      if (server.getMasterCoprocessorHost() != null) {
3291        server.getMasterCoprocessorHost().postUpdateRSGroupConfig(groupName, configuration);
3292      }
3293    } catch (IOException e) {
3294      throw new ServiceException(e);
3295    }
3296    return builder.build();
3297  }
3298
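  /**
   * Generic entry point for reading the master's in-memory "recent logs" ring buffers. Currently
   * dispatches balancer decision and balancer rejection queries to the {@link NamedQueueRecorder};
   * any other log class name is rejected as invalid.
   */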
3299  @Override
3300  public HBaseProtos.LogEntry getLogEntries(RpcController controller,
3301    HBaseProtos.LogRequest request) throws ServiceException {
3302    try {
3303      final String logClassName = request.getLogClassName();
3304      Class<?> logClass = Class.forName(logClassName).asSubclass(Message.class);
3305      Method method = logClass.getMethod("parseFrom", ByteString.class);
3306      if (logClassName.contains("BalancerDecisionsRequest")) {
3307        MasterProtos.BalancerDecisionsRequest balancerDecisionsRequest =
3308          (MasterProtos.BalancerDecisionsRequest) method.invoke(null, request.getLogMessage());
3309        MasterProtos.BalancerDecisionsResponse balancerDecisionsResponse =
3310          getBalancerDecisions(balancerDecisionsRequest);
3311        return HBaseProtos.LogEntry.newBuilder()
3312          .setLogClassName(balancerDecisionsResponse.getClass().getName())
3313          .setLogMessage(balancerDecisionsResponse.toByteString()).build();
3314      } else if (logClassName.contains("BalancerRejectionsRequest")) {
3315        MasterProtos.BalancerRejectionsRequest balancerRejectionsRequest =
3316          (MasterProtos.BalancerRejectionsRequest) method.invoke(null, request.getLogMessage());
3317        MasterProtos.BalancerRejectionsResponse balancerRejectionsResponse =
3318          getBalancerRejections(balancerRejectionsRequest);
3319        return HBaseProtos.LogEntry.newBuilder()
3320          .setLogClassName(balancerRejectionsResponse.getClass().getName())
3321          .setLogMessage(balancerRejectionsResponse.toByteString()).build();
3322      }
3323    } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException
3324      | InvocationTargetException e) {
3325      LOG.error("Error while retrieving log entries.", e);
3326      throw new ServiceException(e);
3327    }
3328    throw new ServiceException("Invalid request params");
3329  }
3330
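  /**
   * Fetches recent balancer decisions from the {@link NamedQueueRecorder} ring buffer, returning
   * an empty response when the recorder is not available.
   */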
3331  private MasterProtos.BalancerDecisionsResponse
3332    getBalancerDecisions(MasterProtos.BalancerDecisionsRequest request) {
3333    final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder();
3334    if (namedQueueRecorder == null) {
3335      return MasterProtos.BalancerDecisionsResponse.newBuilder()
3336        .addAllBalancerDecision(Collections.emptyList()).build();
3337    }
3338    final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest();
3339    namedQueueGetRequest.setNamedQueueEvent(BalancerDecisionDetails.BALANCER_DECISION_EVENT);
3340    namedQueueGetRequest.setBalancerDecisionsRequest(request);
3341    NamedQueueGetResponse namedQueueGetResponse =
3342      namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest);
3343    List<RecentLogs.BalancerDecision> balancerDecisions = namedQueueGetResponse != null
3344      ? namedQueueGetResponse.getBalancerDecisions()
3345      : Collections.emptyList();
3346    return MasterProtos.BalancerDecisionsResponse.newBuilder()
3347      .addAllBalancerDecision(balancerDecisions).build();
3348  }
3349
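  /**
   * Fetches recent balancer rejections from the {@link NamedQueueRecorder} ring buffer, returning
   * an empty response when the recorder is not available.
   */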
  private MasterProtos.BalancerRejectionsResponse
    getBalancerRejections(MasterProtos.BalancerRejectionsRequest request) {
    final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder();
    if (namedQueueRecorder == null) {
      return MasterProtos.BalancerRejectionsResponse.newBuilder()
        .addAllBalancerRejection(Collections.emptyList()).build();
    }
    final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest();
    namedQueueGetRequest.setNamedQueueEvent(BalancerRejectionDetails.BALANCER_REJECTION_EVENT);
    namedQueueGetRequest.setBalancerRejectionsRequest(request);
    NamedQueueGetResponse namedQueueGetResponse =
      namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest);
    List<RecentLogs.BalancerRejection> balancerRejections = namedQueueGetResponse != null
      ? namedQueueGetResponse.getBalancerRejections()
      : Collections.emptyList();
    return MasterProtos.BalancerRejectionsResponse.newBuilder()
      .addAllBalancerRejection(balancerRejections).build();
  }

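  /**
   * Resolve a {@link RegionInfo} for the region named in the request. MOB region names are served
   * via a synthetic MOB region info; any other unknown region results in a
   * {@link ServiceException} wrapping an {@link UnknownRegionException}.
   */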
  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public GetRegionInfoResponse getRegionInfo(final RpcController controller,
    final GetRegionInfoRequest request) throws ServiceException {
    RegionInfo ri = null;
    try {
      ri = getRegionInfo(request.getRegion());
    } catch (UnknownRegionException ure) {
      throw new ServiceException(ure);
    }
    GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
    if (ri != null) {
      builder.setRegionInfo(ProtobufUtil.toRegionInfo(ri));
    } else {
      // Is it a MOB name? These work differently.
      byte[] regionName = request.getRegion().getValue().toByteArray();
      TableName tableName = RegionInfo.getTable(regionName);
      if (MobUtils.isMobRegionName(tableName, regionName)) {
        // Build a dummy MOB region info; it carries the compaction state when requested.
        RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName);
        builder.setRegionInfo(ProtobufUtil.toRegionInfo(mobRegionInfo));
        if (request.hasCompactionState() && request.getCompactionState()) {
          builder.setCompactionState(server.getMobCompactionState(tableName));
        }
      } else {
        // Not a known region and not a MOB region name either, so report it as unknown.
        throw new ServiceException(new UnknownRegionException(Bytes.toString(regionName)));
      }
    }
    return builder.build();
  }

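  // The stubs below, through executeProcedures, are region server admin RPCs that have no meaning
  // on the master; each fails fast with a DoNotRetryIOException so clients do not retry here.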
  @Override
  public GetStoreFileResponse getStoreFile(RpcController controller, GetStoreFileRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetOnlineRegionResponse getOnlineRegion(RpcController controller,
    GetOnlineRegionRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public OpenRegionResponse openRegion(RpcController controller, OpenRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public WarmupRegionResponse warmupRegion(RpcController controller, WarmupRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public CloseRegionResponse closeRegion(RpcController controller, CloseRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public FlushRegionResponse flushRegion(RpcController controller, FlushRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public CompactionSwitchResponse compactionSwitch(RpcController controller,
    CompactionSwitchRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public CompactRegionResponse compactRegion(RpcController controller, CompactRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ReplicateWALEntryResponse replicateWALEntry(RpcController controller,
    ReplicateWALEntryRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ReplicateWALEntryResponse replay(RpcController controller,
    ReplicateWALEntryRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public RollWALWriterResponse rollWALWriter(RpcController controller, RollWALWriterRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetServerInfoResponse getServerInfo(RpcController controller, GetServerInfoRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public StopServerResponse stopServer(RpcController controller, StopServerRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller,
    UpdateFavoredNodesRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetRegionLoadResponse getRegionLoad(RpcController controller, GetRegionLoadRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller,
    ClearCompactionQueuesRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller,
    ClearRegionBlockCacheRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(RpcController controller,
    GetSpaceQuotaSnapshotsRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ExecuteProceduresResponse executeProcedures(RpcController controller,
    ExecuteProceduresRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

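  /**
   * Return up to {@code request.getCount()} live region servers, chosen in random order, along
   * with the total number of live region servers known to the master.
   */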
  @Override
  public GetLiveRegionServersResponse getLiveRegionServers(RpcController controller,
    GetLiveRegionServersRequest request) throws ServiceException {
    List<ServerName> regionServers = new ArrayList<>(server.getLiveRegionServers());
    Collections.shuffle(regionServers, ThreadLocalRandom.current());
    GetLiveRegionServersResponse.Builder builder =
      GetLiveRegionServersResponse.newBuilder().setTotal(regionServers.size());
    regionServers.stream().limit(request.getCount()).map(ProtobufUtil::toServerName)
      .forEach(builder::addServer);
    return builder.build();
  }

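  // Region replica replication is likewise a region server operation; not supported on the master.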
  @Override
  public ReplicateWALEntryResponse replicateToReplica(RpcController controller,
    ReplicateWALEntryRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

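  /**
   * Flush the master's local store to disk. The call is validated by {@code rpcPreCheck} before
   * the flush is attempted.
   */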
  @Override
  public FlushMasterStoreResponse flushMasterStore(RpcController controller,
    FlushMasterStoreRequest request) throws ServiceException {
    rpcPreCheck("flushMasterStore");
    try {
      server.flushMasterStore();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return FlushMasterStoreResponse.newBuilder().build();
  }
}