/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseRpcServicesBase;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.client.VersionInfoUtil;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.PriorityFunction;
import org.apache.hadoop.hbase.ipc.QosPriority;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.hbck.HbckChore;
import org.apache.hadoop.hbase.master.janitor.MetaFixer;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.master.replication.AbstractPeerNoLockProcedure;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.namequeues.BalancerDecisionDetails;
import org.apache.hadoop.hbase.namequeues.BalancerRejectionDetails;
import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest;
import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
import org.apache.hadoop.hbase.procedure2.LockType;
import org.apache.hadoop.hbase.procedure2.LockedResource;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.AccessChecker.InputUser;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.security.access.PermissionStorage;
import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.security.visibility.VisibilityController;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.DNS;
import org.apache.hadoop.hbase.util.DNS.ServerType;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.ServiceDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
import org.apache.hbase.thirdparty.com.google.protobuf.Service;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.NamespaceQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.TableQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetConfiguredNamespacesAndTablesInRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetConfiguredNamespacesAndTablesInRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListTablesInRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListTablesInRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RenameRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RenameRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RecentLogs;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLiveRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLiveRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ClientMetaService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerModificationProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerModificationProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.IsReplicationPeerModificationEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.IsReplicationPeerModificationEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerModificationSwitchRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerModificationSwitchResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;

/**
 * Implements the master RPC services.
 */
@InterfaceAudience.Private
public class MasterRpcServices extends HBaseRpcServicesBase<HMaster>
  implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
  LockService.BlockingInterface, HbckService.BlockingInterface {

  private static final Logger LOG = LoggerFactory.getLogger(MasterRpcServices.class.getName());
  private static final Logger AUDITLOG =
    LoggerFactory.getLogger("SecurityLogger." + MasterRpcServices.class.getName());

  /** RPC scheduler to use for the master. */
  public static final String MASTER_RPC_SCHEDULER_FACTORY_CLASS =
    "hbase.master.rpc.scheduler.factory.class";

  /**
   * @return Subset of configuration to pass when initializing regionservers: e.g. the filesystem
   *         and root directory to use.
   */
  private RegionServerStartupResponse.Builder createConfigurationSubset() {
    RegionServerStartupResponse.Builder resp =
      addConfig(RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR);
    resp = addConfig(resp, "fs.defaultFS");
    return addConfig(resp, "hbase.master.info.port");
  }

  private RegionServerStartupResponse.Builder
    addConfig(final RegionServerStartupResponse.Builder resp, final String key) {
    NameStringPair.Builder entry =
      NameStringPair.newBuilder().setName(key).setValue(server.getConfiguration().get(key));
    resp.addMapEntries(entry.build());
    return resp;
  }
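
  // For illustration: with the defaults above, the startup response ends up carrying name/value
  // map entries for hbase.rootdir (HConstants.HBASE_DIR), fs.defaultFS and hbase.master.info.port,
  // each read from this master's Configuration by addConfig.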

  public MasterRpcServices(HMaster m) throws IOException {
    super(m, m.getProcessName());
  }

  @Override
  protected boolean defaultReservoirEnabled() {
    return false;
  }

  @Override
  protected ServerType getDNSServerType() {
    return DNS.ServerType.MASTER;
  }

  @Override
  protected String getHostname(Configuration conf, String defaultHostname) {
    return conf.get("hbase.master.ipc.address", defaultHostname);
  }

  @Override
  protected String getPortConfigName() {
    return HConstants.MASTER_PORT;
  }

  @Override
  protected int getDefaultPort() {
    return HConstants.DEFAULT_MASTER_PORT;
  }

  @Override
  protected Class<?> getRpcSchedulerFactoryClass(Configuration conf) {
    return conf.getClass(MASTER_RPC_SCHEDULER_FACTORY_CLASS, SimpleRpcSchedulerFactory.class);
  }

  @Override
  protected PriorityFunction createPriority() {
    return new MasterAnnotationReadingPriorityFunction(this);
  }

  /**
   * Performs the following pre-checks, in order:
   * <ol>
   * <li>Master is initialized</li>
   * <li>Rpc caller has admin permissions</li>
   * </ol>
   * @param requestName name of the rpc request. Used to provide context when reporting failures.
   * @throws ServiceException If any of the above pre-checks fails.
   */
  private void rpcPreCheck(String requestName) throws ServiceException {
    try {
      server.checkInitialized();
      requirePermission(requestName, Permission.Action.ADMIN);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }
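
  // Typical call pattern (see enableCatalogJanitor and setCleanerChoreRunning below):
  //   rpcPreCheck("enableCatalogJanitor");
  // which fails the RPC with a ServiceException when the master is not yet initialized or the
  // caller lacks ADMIN permission.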

  enum BalanceSwitchMode {
    SYNC,
    ASYNC
  }

  /**
   * Sets the balancer switch according to the given BalanceSwitchMode.
   * @param b    new balancer switch value
   * @param mode BalanceSwitchMode
   * @return old balancer switch value
   */
  boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException {
    boolean oldValue = server.loadBalancerStateStore.get();
    boolean newValue = b;
    try {
      if (server.cpHost != null) {
        server.cpHost.preBalanceSwitch(newValue);
      }
      if (mode == BalanceSwitchMode.SYNC) {
        synchronized (server.getLoadBalancer()) {
          server.loadBalancerStateStore.set(newValue);
        }
      } else {
        server.loadBalancerStateStore.set(newValue);
      }
      LOG.info(server.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue);
      if (server.cpHost != null) {
        server.cpHost.postBalanceSwitch(oldValue, newValue);
      }
      server.getLoadBalancer().updateBalancerStatus(newValue);
    } catch (IOException ioe) {
      LOG.warn("Error flipping balance switch", ioe);
    }
    return oldValue;
  }

  boolean synchronousBalanceSwitch(final boolean b) throws IOException {
    return switchBalancer(b, BalanceSwitchMode.SYNC);
  }
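
  // Usage sketch: synchronousBalanceSwitch(false) flips the balancer off through the
  // BalanceSwitchMode.SYNC path above (synchronizing on the load balancer) and returns the
  // previous switch state; the ASYNC mode updates the state store without taking that lock.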

  /** Returns list of blocking services and their security info classes that this server supports */
  @Override
  protected List<BlockingServiceAndInterface> getServices() {
    List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
    bssi.add(new BlockingServiceAndInterface(MasterService.newReflectiveBlockingService(this),
      MasterService.BlockingInterface.class));
    bssi.add(
      new BlockingServiceAndInterface(RegionServerStatusService.newReflectiveBlockingService(this),
        RegionServerStatusService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this),
      LockService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(HbckService.newReflectiveBlockingService(this),
      HbckService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this),
      ClientMetaService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(AdminService.newReflectiveBlockingService(this),
      AdminService.BlockingInterface.class));
    return bssi;
  }

  void start(ZKWatcher zkWatcher) {
    internalStart(zkWatcher);
  }

  void stop() {
    internalStop();
  }

  @Override
  // Priorities for all RegionServerStatusProtos RPCs are set to HIGH_QOS in
  // MasterAnnotationReadingPriorityFunction itself.
  public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller,
    GetLastFlushedSequenceIdRequest request) throws ServiceException {
    try {
      server.checkServiceStarted();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    byte[] encodedRegionName = request.getRegionName().toByteArray();
    RegionStoreSequenceIds ids =
      server.getServerManager().getLastFlushedSequenceId(encodedRegionName);
    return ResponseConverter.buildGetLastFlushedSequenceIdResponse(ids);
  }

  @Override
  public RegionServerReportResponse regionServerReport(RpcController controller,
    RegionServerReportRequest request) throws ServiceException {
    try {
      server.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      ClusterStatusProtos.ServerLoad sl = request.getLoad();
      ServerName serverName = ProtobufUtil.toServerName(request.getServer());
      ServerMetrics oldLoad = server.getServerManager().getLoad(serverName);
      ServerMetrics newLoad =
        ServerMetricsBuilder.toServerMetrics(serverName, versionNumber, version, sl);
      server.getServerManager().regionServerReport(serverName, newLoad);
      server.getAssignmentManager().reportOnlineRegions(serverName,
        newLoad.getRegionMetrics().keySet());
      if (sl != null && server.metricsMaster != null) {
        // Up our metrics.
        server.metricsMaster.incrementRequests(
          sl.getTotalNumberOfRequests() - (oldLoad != null ? oldLoad.getRequestCount() : 0));
        server.metricsMaster.incrementReadRequests(
          sl.getReadRequestsCount() - (oldLoad != null ? oldLoad.getReadRequestsCount() : 0));
        server.metricsMaster.incrementWriteRequests(
          sl.getWriteRequestsCount() - (oldLoad != null ? oldLoad.getWriteRequestsCount() : 0));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return RegionServerReportResponse.newBuilder().build();
  }

  @Override
  public RegionServerStartupResponse regionServerStartup(RpcController controller,
    RegionServerStartupRequest request) throws ServiceException {
    // Register with server manager
    try {
      server.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      InetAddress ia = server.getRemoteInetAddress(request.getPort(), request.getServerStartCode());
      // If the regionserver passed a hostname to use, use it instead of doing a reverse DNS
      // lookup.
      ServerName rs =
        server.getServerManager().regionServerStartup(request, versionNumber, version, ia);

      // Send back some config info
      RegionServerStartupResponse.Builder resp = createConfigurationSubset();
      NameStringPair.Builder entry = NameStringPair.newBuilder()
        .setName(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER).setValue(rs.getHostname());
      resp.addMapEntries(entry.build());

      return resp.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public ReportRSFatalErrorResponse reportRSFatalError(RpcController controller,
    ReportRSFatalErrorRequest request) throws ServiceException {
    String errorText = request.getErrorMessage();
    ServerName sn = ProtobufUtil.toServerName(request.getServer());
    String msg = sn + " reported a fatal error:\n" + errorText;
    LOG.warn(msg);
    server.rsFatals.add(msg);
    return ReportRSFatalErrorResponse.newBuilder().build();
  }

  @Override
  public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req)
    throws ServiceException {
    try {
      long procId = server.addColumn(ProtobufUtil.toTableName(req.getTableName()),
        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(),
        req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId
        return AddColumnResponse.newBuilder().build();
      } else {
        return AddColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public AssignRegionResponse assignRegion(RpcController controller, AssignRegionRequest req)
    throws ServiceException {
    try {
      server.checkInitialized();

      final RegionSpecifierType type = req.getRegion().getType();
      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("assignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
          + " actual: " + type);
      }

      final byte[] regionName = req.getRegion().getValue().toByteArray();
      final RegionInfo regionInfo = server.getAssignmentManager().getRegionInfo(regionName);
      if (regionInfo == null) {
        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
      }

      final AssignRegionResponse arr = AssignRegionResponse.newBuilder().build();
      if (server.cpHost != null) {
        server.cpHost.preAssign(regionInfo);
      }
      LOG.info(server.getClientIdAuditPrefix() + " assign " + regionInfo.getRegionNameAsString());
      server.getAssignmentManager().assign(regionInfo);
      if (server.cpHost != null) {
        server.cpHost.postAssign(regionInfo);
      }
      return arr;
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public MasterProtos.BalanceResponse balance(RpcController controller,
    MasterProtos.BalanceRequest request) throws ServiceException {
    try {
      return ProtobufUtil.toBalanceResponse(server.balance(ProtobufUtil.toBalanceRequest(request)));
    } catch (IOException ex) {
      throw new ServiceException(ex);
    }
  }

  @Override
  public CreateNamespaceResponse createNamespace(RpcController controller,
    CreateNamespaceRequest request) throws ServiceException {
    try {
      long procId =
        server.createNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
          request.getNonceGroup(), request.getNonce());
      return CreateNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
    throws ServiceException {
    TableDescriptor tableDescriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema());
    byte[][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
    try {
      long procId =
        server.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
      LOG.info(server.getClientIdAuditPrefix() + " procedure request for creating table: "
        + req.getTableSchema().getTableName() + " procId is: " + procId);
      return CreateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req)
    throws ServiceException {
    try {
      long procId = server.deleteColumn(ProtobufUtil.toTableName(req.getTableName()),
        req.getColumnName().toByteArray(), req.getNonceGroup(), req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId
        return DeleteColumnResponse.newBuilder().build();
      } else {
        return DeleteColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public DeleteNamespaceResponse deleteNamespace(RpcController controller,
    DeleteNamespaceRequest request) throws ServiceException {
    try {
      long procId = server.deleteNamespace(request.getNamespaceName(), request.getNonceGroup(),
        request.getNonce());
      return DeleteNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Execute the Delete Snapshot operation.
   * @return DeleteSnapshotResponse (a protobuf-wrapped void) if the snapshot existed and was
   *         deleted properly.
   * @throws ServiceException wrapping SnapshotDoesNotExistException if the specified snapshot did
   *                          not exist.
   */
829  @Override
830  public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
831    DeleteSnapshotRequest request) throws ServiceException {
832    try {
833      server.checkInitialized();
834      server.snapshotManager.checkSnapshotSupport();
835
836      LOG.info(server.getClientIdAuditPrefix() + " delete " + request.getSnapshot());
837      server.snapshotManager.deleteSnapshot(request.getSnapshot());
838      return DeleteSnapshotResponse.newBuilder().build();
839    } catch (IOException e) {
840      throw new ServiceException(e);
841    }
842  }
843
844  @Override
845  public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request)
846    throws ServiceException {
847    try {
848      long procId = server.deleteTable(ProtobufUtil.toTableName(request.getTableName()),
849        request.getNonceGroup(), request.getNonce());
850      return DeleteTableResponse.newBuilder().setProcId(procId).build();
851    } catch (IOException ioe) {
852      throw new ServiceException(ioe);
853    }
854  }
855
856  @Override
857  public TruncateTableResponse truncateTable(RpcController controller, TruncateTableRequest request)
858    throws ServiceException {
859    try {
860      long procId = server.truncateTable(ProtobufUtil.toTableName(request.getTableName()),
861        request.getPreserveSplits(), request.getNonceGroup(), request.getNonce());
862      return TruncateTableResponse.newBuilder().setProcId(procId).build();
863    } catch (IOException ioe) {
864      throw new ServiceException(ioe);
865    }
866  }
867
868  @Override
869  public DisableTableResponse disableTable(RpcController controller, DisableTableRequest request)
870    throws ServiceException {
871    try {
872      long procId = server.disableTable(ProtobufUtil.toTableName(request.getTableName()),
873        request.getNonceGroup(), request.getNonce());
874      return DisableTableResponse.newBuilder().setProcId(procId).build();
875    } catch (IOException ioe) {
876      throw new ServiceException(ioe);
877    }
878  }
879
880  @Override
881  public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
882    EnableCatalogJanitorRequest req) throws ServiceException {
883    rpcPreCheck("enableCatalogJanitor");
884    return EnableCatalogJanitorResponse.newBuilder()
885      .setPrevValue(server.catalogJanitorChore.setEnabled(req.getEnable())).build();
886  }
887
888  @Override
889  public SetCleanerChoreRunningResponse setCleanerChoreRunning(RpcController c,
890    SetCleanerChoreRunningRequest req) throws ServiceException {
891    rpcPreCheck("setCleanerChoreRunning");
892
893    boolean prevValue =
894      server.getLogCleaner().getEnabled() && server.getHFileCleaner().getEnabled();
895    server.getLogCleaner().setEnabled(req.getOn());
896    for (HFileCleaner hFileCleaner : server.getHFileCleaners()) {
897      hFileCleaner.setEnabled(req.getOn());
898    }
899    return SetCleanerChoreRunningResponse.newBuilder().setPrevValue(prevValue).build();
900  }
901
902  @Override
903  public EnableTableResponse enableTable(RpcController controller, EnableTableRequest request)
904    throws ServiceException {
905    try {
906      long procId = server.enableTable(ProtobufUtil.toTableName(request.getTableName()),
907        request.getNonceGroup(), request.getNonce());
908      return EnableTableResponse.newBuilder().setProcId(procId).build();
909    } catch (IOException ioe) {
910      throw new ServiceException(ioe);
911    }
912  }
913
914  @Override
915  public MergeTableRegionsResponse mergeTableRegions(RpcController c,
916    MergeTableRegionsRequest request) throws ServiceException {
917    try {
918      server.checkInitialized();
919    } catch (IOException ioe) {
920      throw new ServiceException(ioe);
921    }
922
923    RegionStates regionStates = server.getAssignmentManager().getRegionStates();
924
925    RegionInfo[] regionsToMerge = new RegionInfo[request.getRegionCount()];
926    for (int i = 0; i < request.getRegionCount(); i++) {
927      final byte[] encodedNameOfRegion = request.getRegion(i).getValue().toByteArray();
928      if (request.getRegion(i).getType() != RegionSpecifierType.ENCODED_REGION_NAME) {
929        LOG.warn("MergeRegions specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
930          + " actual: region " + i + " = " + request.getRegion(i).getType());
931      }
932      RegionState regionState = regionStates.getRegionState(Bytes.toString(encodedNameOfRegion));
933      if (regionState == null) {
934        throw new ServiceException(
935          new UnknownRegionException(Bytes.toStringBinary(encodedNameOfRegion)));
936      }
937      regionsToMerge[i] = regionState.getRegion();
938    }
939
940    try {
941      long procId = server.mergeRegions(regionsToMerge, request.getForcible(),
942        request.getNonceGroup(), request.getNonce());
943      return MergeTableRegionsResponse.newBuilder().setProcId(procId).build();
944    } catch (IOException ioe) {
945      throw new ServiceException(ioe);
946    }
947  }
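  /*
   * Illustrative client-side sketch (not part of this class): a merge is usually requested through
   * the Admin API, which supplies the encoded region names this handler expects. The encoded names
   * below are placeholders.
   *
   *   byte[][] regionsToMerge = new byte[][] {
   *     Bytes.toBytes("<encoded-name-of-first-region>"),
   *     Bytes.toBytes("<encoded-name-of-second-region>")
   *   };
   *   // Returns a Future tracking the merge procedure submitted to the master.
   *   admin.mergeRegionsAsync(regionsToMerge, false).get();
   */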
948
949  @Override
950  public SplitTableRegionResponse splitRegion(final RpcController controller,
951    final SplitTableRegionRequest request) throws ServiceException {
952    try {
953      long procId = server.splitRegion(ProtobufUtil.toRegionInfo(request.getRegionInfo()),
954        request.hasSplitRow() ? request.getSplitRow().toByteArray() : null, request.getNonceGroup(),
955        request.getNonce());
956      return SplitTableRegionResponse.newBuilder().setProcId(procId).build();
957    } catch (IOException ie) {
958      throw new ServiceException(ie);
959    }
960  }
961
962  @Override
963  public MasterProtos.TruncateRegionResponse truncateRegion(RpcController controller,
964    final MasterProtos.TruncateRegionRequest request) throws ServiceException {
965    try {
966      long procId = server.truncateRegion(ProtobufUtil.toRegionInfo(request.getRegionInfo()),
967        request.getNonceGroup(), request.getNonce());
968      return MasterProtos.TruncateRegionResponse.newBuilder().setProcId(procId).build();
969    } catch (IOException ie) {
970      throw new ServiceException(ie);
971    }
972  }
973
974  @Override
975  public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller,
976    final ClientProtos.CoprocessorServiceRequest request) throws ServiceException {
977    rpcPreCheck("execMasterService");
978    try {
979      ServerRpcController execController = new ServerRpcController();
980      ClientProtos.CoprocessorServiceCall call = request.getCall();
981      String serviceName = call.getServiceName();
982      String methodName = call.getMethodName();
983      if (!server.coprocessorServiceHandlers.containsKey(serviceName)) {
984        throw new UnknownProtocolException(null,
985          "No registered Master Coprocessor Endpoint found for " + serviceName
986            + ". Has it been enabled?");
987      }
988
989      Service service = server.coprocessorServiceHandlers.get(serviceName);
990      ServiceDescriptor serviceDesc = service.getDescriptorForType();
991      MethodDescriptor methodDesc =
992        CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc);
993
994      Message execRequest = CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest());
995      final Message.Builder responseBuilder =
996        service.getResponsePrototype(methodDesc).newBuilderForType();
997      service.callMethod(methodDesc, execController, execRequest, (message) -> {
998        if (message != null) {
999          responseBuilder.mergeFrom(message);
1000        }
1001      });
1002      Message execResult = responseBuilder.build();
1003      if (execController.getFailedOn() != null) {
1004        throw execController.getFailedOn();
1005      }
1006
1007      String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
1008      User caller = RpcServer.getRequestUser().orElse(null);
1009      AUDITLOG.info("User {} (remote address: {}) master service request for {}.{}", caller,
1010        remoteAddress, serviceName, methodName);
1011
1012      return CoprocessorRpcUtils.getResponse(execResult, HConstants.EMPTY_BYTE_ARRAY);
1013    } catch (IOException ie) {
1014      throw new ServiceException(ie);
1015    }
1016  }
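  /*
   * Illustrative client-side sketch (not part of this class): a master coprocessor endpoint is
   * typically invoked through a generated protobuf stub bound to the master's coprocessor RPC
   * channel. "MyMasterService" and its request/response types are hypothetical placeholders for a
   * service registered with the master via a MasterCoprocessor.
   *
   *   CoprocessorRpcChannel channel = admin.coprocessorService();
   *   MyMasterService.BlockingInterface stub = MyMasterService.newBlockingStub(channel);
   *   MyResponse resp = stub.myMethod(null, MyRequest.newBuilder().build());
   *
   * The service name carried in the request must match a key in coprocessorServiceHandlers above,
   * otherwise an UnknownProtocolException is returned to the caller.
   */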
1017
1018  /**
1019   * Triggers an asynchronous attempt to run a distributed procedure. {@inheritDoc}
1020   */
1021  @Override
1022  public ExecProcedureResponse execProcedure(RpcController controller, ExecProcedureRequest request)
1023    throws ServiceException {
1024    try {
1025      server.checkInitialized();
1026      ProcedureDescription desc = request.getProcedure();
1027      MasterProcedureManager mpm =
1028        server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
1029      if (mpm == null) {
1030        throw new ServiceException(
1031          new DoNotRetryIOException("The procedure is not registered: " + desc.getSignature()));
1032      }
1033      LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
1034      mpm.checkPermissions(desc, getAccessChecker(), RpcServer.getRequestUser().orElse(null));
1035      mpm.execProcedure(desc);
1036      // send back the max amount of time the client should wait for the procedure
1037      // to complete
1038      long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME;
1039      return ExecProcedureResponse.newBuilder().setExpectedTimeout(waitTime).build();
1040    } catch (ForeignException e) {
1041      throw new ServiceException(e.getCause());
1042    } catch (IOException e) {
1043      throw new ServiceException(e);
1044    }
1045  }
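  /*
   * Illustrative client-side sketch (not part of this class): a registered procedure can be
   * triggered and then polled through the Admin API. The signature and instance names below are
   * hypothetical.
   *
   *   admin.execProcedure("my-procedure-signature", "my-instance", new HashMap<String, String>());
   *   while (!admin.isProcedureFinished("my-procedure-signature", "my-instance",
   *       new HashMap<String, String>())) {
   *     Thread.sleep(1000); // poll until the procedure manager reports completion
   *   }
   */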
1046
1047  /**
1048   * Triggers a synchronous attempt to run a distributed procedure and sets return data in response.
1049   * {@inheritDoc}
1050   */
1051  @Override
1052  public ExecProcedureResponse execProcedureWithRet(RpcController controller,
1053    ExecProcedureRequest request) throws ServiceException {
1054    rpcPreCheck("execProcedureWithRet");
1055    try {
1056      ProcedureDescription desc = request.getProcedure();
1057      MasterProcedureManager mpm =
1058        server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
1059      if (mpm == null) {
1060        throw new ServiceException("The procedure is not registered: " + desc.getSignature());
1061      }
1062      LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
1063      byte[] data = mpm.execProcedureWithRet(desc);
1064      ExecProcedureResponse.Builder builder = ExecProcedureResponse.newBuilder();
1065      // set return data if available
1066      if (data != null) {
1067        builder.setReturnData(UnsafeByteOperations.unsafeWrap(data));
1068      }
1069      return builder.build();
1070    } catch (IOException e) {
1071      throw new ServiceException(e);
1072    }
1073  }
1074
1075  @Override
1076  public GetClusterStatusResponse getClusterStatus(RpcController controller,
1077    GetClusterStatusRequest req) throws ServiceException {
1078    GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
1079    try {
1080      // We used to check if Master was up at this point but let this call proceed even if
1081      // Master is initializing... else we shut out stuff like hbck2 tool from making progress
1082      // since it queries this method to figure cluster version. hbck2 wants to be able to work
1083      // against Master even if it is 'initializing' so it can do fixup.
1084      response.setClusterStatus(ClusterMetricsBuilder.toClusterStatus(
1085        server.getClusterMetrics(ClusterMetricsBuilder.toOptions(req.getOptionsList()))));
1086    } catch (IOException e) {
1087      throw new ServiceException(e);
1088    }
1089    return response.build();
1090  }
1091
1092  /**
1093   * List the currently available/stored snapshots. Any in-progress snapshots are ignored.
1094   */
1095  @Override
1096  public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller,
1097    GetCompletedSnapshotsRequest request) throws ServiceException {
1098    try {
1099      server.checkInitialized();
1100      GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder();
1101      List<SnapshotDescription> snapshots = server.snapshotManager.getCompletedSnapshots();
1102
1103      // convert to protobuf
1104      for (SnapshotDescription snapshot : snapshots) {
1105        builder.addSnapshots(snapshot);
1106      }
1107      return builder.build();
1108    } catch (IOException e) {
1109      throw new ServiceException(e);
1110    }
1111  }
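  /*
   * Illustrative client-side sketch (not part of this class): the completed-snapshot list surfaces
   * through the Admin API as client-side SnapshotDescription objects.
   *
   *   for (org.apache.hadoop.hbase.client.SnapshotDescription sd : admin.listSnapshots()) {
   *     System.out.println(sd.getName() + " of table " + sd.getTableName());
   *   }
   */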
1112
1113  @Override
1114  public ListNamespacesResponse listNamespaces(RpcController controller,
1115    ListNamespacesRequest request) throws ServiceException {
1116    try {
1117      return ListNamespacesResponse.newBuilder().addAllNamespaceName(server.listNamespaces())
1118        .build();
1119    } catch (IOException e) {
1120      throw new ServiceException(e);
1121    }
1122  }
1123
1124  @Override
1125  public GetNamespaceDescriptorResponse getNamespaceDescriptor(RpcController controller,
1126    GetNamespaceDescriptorRequest request) throws ServiceException {
1127    try {
1128      return GetNamespaceDescriptorResponse.newBuilder()
1129        .setNamespaceDescriptor(
1130          ProtobufUtil.toProtoNamespaceDescriptor(server.getNamespace(request.getNamespaceName())))
1131        .build();
1132    } catch (IOException e) {
1133      throw new ServiceException(e);
1134    }
1135  }
1136
1137  /**
1138   * Get the number of regions of the table that have been updated by the alter.
1139   * @return Pair indicating the progress of the alter: Pair.getFirst is the number of regions yet
1140   *         to be updated; Pair.getSecond is the total number of regions of the table.
1141   */
1142  @Override
1143  public GetSchemaAlterStatusResponse getSchemaAlterStatus(RpcController controller,
1144    GetSchemaAlterStatusRequest req) throws ServiceException {
1145    // TODO: currently, we query using the table name on the client side. this
1146    // may overlap with other table operations or the table operation may
1147    // have completed before querying this API. We need to refactor to a
1148    // transaction system in the future to avoid these ambiguities.
1149    TableName tableName = ProtobufUtil.toTableName(req.getTableName());
1150
1151    try {
1152      server.checkInitialized();
1153      Pair<Integer, Integer> pair = server.getAssignmentManager().getReopenStatus(tableName);
1154      GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder();
1155      ret.setYetToUpdateRegions(pair.getFirst());
1156      ret.setTotalRegions(pair.getSecond());
1157      return ret.build();
1158    } catch (IOException ioe) {
1159      throw new ServiceException(ioe);
1160    }
1161  }
1162
1163  /**
1164   * Get list of TableDescriptors for requested tables.
1165   * @param c   Unused (set to null).
1166   * @param req GetTableDescriptorsRequest that contains the requested table names; if the list is
1167   *            empty, all tables are requested.
1168   */
1169  @Override
1170  public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
1171    GetTableDescriptorsRequest req) throws ServiceException {
1172    try {
1173      server.checkInitialized();
1174
1175      final String regex = req.hasRegex() ? req.getRegex() : null;
1176      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
1177      List<TableName> tableNameList = null;
1178      if (req.getTableNamesCount() > 0) {
1179        tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
1180        for (HBaseProtos.TableName tableNamePB : req.getTableNamesList()) {
1181          tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
1182        }
1183      }
1184
1185      List<TableDescriptor> descriptors =
1186        server.listTableDescriptors(namespace, regex, tableNameList, req.getIncludeSysTables());
1187
1188      GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
1189      if (descriptors != null && descriptors.size() > 0) {
1190        // Add the table descriptors to the response
1191        for (TableDescriptor htd : descriptors) {
1192          builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
1193        }
1194      }
1195      return builder.build();
1196    } catch (IOException ioe) {
1197      throw new ServiceException(ioe);
1198    }
1199  }
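  /*
   * Illustrative client-side sketch (not part of this class): the same filtering (explicit names,
   * regex, namespace, include-system-tables) is exposed through the Admin API. The pattern below is
   * hypothetical.
   *
   *   // All user tables:
   *   List<TableDescriptor> all = admin.listTableDescriptors();
   *   // Tables whose names match a regex, optionally including system tables:
   *   List<TableDescriptor> filtered =
   *     admin.listTableDescriptors(Pattern.compile("my_table_.*"), false);
   */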
1200
1201  @Override
1202  public ListTableDescriptorsByStateResponse listTableDescriptorsByState(RpcController controller,
1203    ListTableDescriptorsByStateRequest request) throws ServiceException {
1204    try {
1205      server.checkInitialized();
1206      List<TableDescriptor> descriptors = server.listTableDescriptors(null, null, null, false);
1207
1208      ListTableDescriptorsByStateResponse.Builder builder =
1209        ListTableDescriptorsByStateResponse.newBuilder();
1210      if (descriptors != null && descriptors.size() > 0) {
1211        // Add the table descriptors matching the requested state to the response
1212        TableState.State state =
1213          request.getIsEnabled() ? TableState.State.ENABLED : TableState.State.DISABLED;
1214        for (TableDescriptor htd : descriptors) {
1215          if (server.getTableStateManager().isTableState(htd.getTableName(), state)) {
1216            builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
1217          }
1218        }
1219      }
1220      return builder.build();
1221    } catch (IOException ioe) {
1222      throw new ServiceException(ioe);
1223    }
1224  }
1225
1226  /**
1227   * Get the list of userspace table names.
1228   * @param controller Unused (set to null).
1229   * @param req        GetTableNamesRequest
1230   */
1231  @Override
1232  public GetTableNamesResponse getTableNames(RpcController controller, GetTableNamesRequest req)
1233    throws ServiceException {
1234    try {
1235      server.checkServiceStarted();
1236
1237      final String regex = req.hasRegex() ? req.getRegex() : null;
1238      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
1239      List<TableName> tableNames =
1240        server.listTableNames(namespace, regex, req.getIncludeSysTables());
1241
1242      GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
1243      if (tableNames != null && tableNames.size() > 0) {
1244        // Add the table names to the response
1245        for (TableName table : tableNames) {
1246          builder.addTableNames(ProtobufUtil.toProtoTableName(table));
1247        }
1248      }
1249      return builder.build();
1250    } catch (IOException e) {
1251      throw new ServiceException(e);
1252    }
1253  }
1254
1255  @Override
1256  public ListTableNamesByStateResponse listTableNamesByState(RpcController controller,
1257    ListTableNamesByStateRequest request) throws ServiceException {
1258    try {
1259      server.checkServiceStarted();
1260      List<TableName> tableNames = server.listTableNames(null, null, false);
1261      ListTableNamesByStateResponse.Builder builder = ListTableNamesByStateResponse.newBuilder();
1262      if (tableNames != null && tableNames.size() > 0) {
1263        // Add the table names matching the requested state to the response
1264        TableState.State state =
1265          request.getIsEnabled() ? TableState.State.ENABLED : TableState.State.DISABLED;
1266        for (TableName table : tableNames) {
1267          if (server.getTableStateManager().isTableState(table, state)) {
1268            builder.addTableNames(ProtobufUtil.toProtoTableName(table));
1269          }
1270        }
1271      }
1272      return builder.build();
1273    } catch (IOException e) {
1274      throw new ServiceException(e);
1275    }
1276  }
1277
1278  @Override
1279  public GetTableStateResponse getTableState(RpcController controller, GetTableStateRequest request)
1280    throws ServiceException {
1281    try {
1282      server.checkServiceStarted();
1283      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
1284      TableState ts = server.getTableStateManager().getTableState(tableName);
1285      GetTableStateResponse.Builder builder = GetTableStateResponse.newBuilder();
1286      builder.setTableState(ts.convert());
1287      return builder.build();
1288    } catch (IOException e) {
1289      throw new ServiceException(e);
1290    }
1291  }
1292
1293  @Override
1294  public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
1295    IsCatalogJanitorEnabledRequest req) throws ServiceException {
1296    return IsCatalogJanitorEnabledResponse.newBuilder().setValue(server.isCatalogJanitorEnabled())
1297      .build();
1298  }
1299
1300  @Override
1301  public IsCleanerChoreEnabledResponse isCleanerChoreEnabled(RpcController c,
1302    IsCleanerChoreEnabledRequest req) throws ServiceException {
1303    return IsCleanerChoreEnabledResponse.newBuilder().setValue(server.isCleanerChoreEnabled())
1304      .build();
1305  }
1306
1307  @Override
1308  public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req)
1309    throws ServiceException {
1310    try {
1311      server.checkServiceStarted();
1312      return IsMasterRunningResponse.newBuilder().setIsMasterRunning(!server.isStopped()).build();
1313    } catch (IOException e) {
1314      throw new ServiceException(e);
1315    }
1316  }
1317
1318  /**
1319   * Checks if the specified procedure is done.
1320   * @return true if the procedure is done, false if the procedure is in the process of completing
1321   * @throws ServiceException if the procedure is invalid or if it failed, with the failure reason.
1322   */
1323  @Override
1324  public IsProcedureDoneResponse isProcedureDone(RpcController controller,
1325    IsProcedureDoneRequest request) throws ServiceException {
1326    try {
1327      server.checkInitialized();
1328      ProcedureDescription desc = request.getProcedure();
1329      MasterProcedureManager mpm =
1330        server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
1331      if (mpm == null) {
1332        throw new ServiceException("The procedure is not registered: " + desc.getSignature());
1333      }
1334      LOG.debug("Checking to see if procedure from request: " + desc.getSignature() + " is done");
1335
1336      IsProcedureDoneResponse.Builder builder = IsProcedureDoneResponse.newBuilder();
1337      boolean done = mpm.isProcedureDone(desc);
1338      builder.setDone(done);
1339      return builder.build();
1340    } catch (ForeignException e) {
1341      throw new ServiceException(e.getCause());
1342    } catch (IOException e) {
1343      throw new ServiceException(e);
1344    }
1345  }
1346
1347  /**
1348   * Checks if the specified snapshot is done.
1349   * @return true if the snapshot is in the file system and ready to use, false if the snapshot is
1350   *         still in the process of completing
1351   * @throws ServiceException wrapping UnknownSnapshotException if invalid snapshot, or a wrapped
1352   *                          HBaseSnapshotException with progress failure reason.
1353   */
1354  @Override
1355  public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
1356    IsSnapshotDoneRequest request) throws ServiceException {
1357    LOG.debug("Checking to see if snapshot from request: "
1358      + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()) + " is done");
1359    try {
1360      server.checkInitialized();
1361      IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
1362      boolean done = server.snapshotManager.isSnapshotDone(request.getSnapshot());
1363      builder.setDone(done);
1364      return builder.build();
1365    } catch (ForeignException e) {
1366      throw new ServiceException(e.getCause());
1367    } catch (IOException e) {
1368      throw new ServiceException(e);
1369    }
1370  }
1371
1372  @Override
1373  public GetProcedureResultResponse getProcedureResult(RpcController controller,
1374    GetProcedureResultRequest request) throws ServiceException {
1375    LOG.debug("Checking to see if procedure is done pid=" + request.getProcId());
1376    try {
1377      server.checkInitialized();
1378      GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder();
1379      long procId = request.getProcId();
1380      ProcedureExecutor<?> executor = server.getMasterProcedureExecutor();
1381      Procedure<?> result = executor.getResultOrProcedure(procId);
1382      if (result != null) {
1383        builder.setSubmittedTime(result.getSubmittedTime());
1384        builder.setLastUpdate(result.getLastUpdate());
1385        if (executor.isFinished(procId)) {
1386          builder.setState(GetProcedureResultResponse.State.FINISHED);
1387          if (result.isFailed()) {
1388            IOException exception = MasterProcedureUtil.unwrapRemoteIOException(result);
1389            builder.setException(ForeignExceptionUtil.toProtoForeignException(exception));
1390          }
1391          byte[] resultData = result.getResult();
1392          if (resultData != null) {
1393            builder.setResult(UnsafeByteOperations.unsafeWrap(resultData));
1394          }
1395          server.getMasterProcedureExecutor().removeResult(request.getProcId());
1396        } else {
1397          builder.setState(GetProcedureResultResponse.State.RUNNING);
1398        }
1399      } else {
1400        builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
1401      }
1402      return builder.build();
1403    } catch (IOException e) {
1404      throw new ServiceException(e);
1405    }
1406  }
1407
1408  @Override
1409  public AbortProcedureResponse abortProcedure(RpcController rpcController,
1410    AbortProcedureRequest request) throws ServiceException {
1411    try {
1412      AbortProcedureResponse.Builder response = AbortProcedureResponse.newBuilder();
1413      boolean abortResult =
1414        server.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning());
1415      response.setIsProcedureAborted(abortResult);
1416      return response.build();
1417    } catch (IOException e) {
1418      throw new ServiceException(e);
1419    }
1420  }
1421
1422  @Override
1423  public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c,
1424    ListNamespaceDescriptorsRequest request) throws ServiceException {
1425    try {
1426      ListNamespaceDescriptorsResponse.Builder response =
1427        ListNamespaceDescriptorsResponse.newBuilder();
1428      for (NamespaceDescriptor ns : server.getNamespaces()) {
1429        response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns));
1430      }
1431      return response.build();
1432    } catch (IOException e) {
1433      throw new ServiceException(e);
1434    }
1435  }
1436
1437  @Override
1438  public GetProceduresResponse getProcedures(RpcController rpcController,
1439    GetProceduresRequest request) throws ServiceException {
1440    try {
1441      final GetProceduresResponse.Builder response = GetProceduresResponse.newBuilder();
1442      for (Procedure<?> p : server.getProcedures()) {
1443        response.addProcedure(ProcedureUtil.convertToProtoProcedure(p));
1444      }
1445      return response.build();
1446    } catch (IOException e) {
1447      throw new ServiceException(e);
1448    }
1449  }
1450
1451  @Override
1452  public GetLocksResponse getLocks(RpcController controller, GetLocksRequest request)
1453    throws ServiceException {
1454    try {
1455      final GetLocksResponse.Builder builder = GetLocksResponse.newBuilder();
1456
1457      for (LockedResource lockedResource : server.getLocks()) {
1458        builder.addLock(ProcedureUtil.convertToProtoLockedResource(lockedResource));
1459      }
1460
1461      return builder.build();
1462    } catch (IOException e) {
1463      throw new ServiceException(e);
1464    }
1465  }
1466
1467  @Override
1468  public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
1469    ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
1470    try {
1471      ListTableDescriptorsByNamespaceResponse.Builder b =
1472        ListTableDescriptorsByNamespaceResponse.newBuilder();
1473      for (TableDescriptor htd : server
1474        .listTableDescriptorsByNamespace(request.getNamespaceName())) {
1475        b.addTableSchema(ProtobufUtil.toTableSchema(htd));
1476      }
1477      return b.build();
1478    } catch (IOException e) {
1479      throw new ServiceException(e);
1480    }
1481  }
1482
1483  @Override
1484  public ListTableNamesByNamespaceResponse listTableNamesByNamespace(RpcController c,
1485    ListTableNamesByNamespaceRequest request) throws ServiceException {
1486    try {
1487      ListTableNamesByNamespaceResponse.Builder b = ListTableNamesByNamespaceResponse.newBuilder();
1488      for (TableName tableName : server.listTableNamesByNamespace(request.getNamespaceName())) {
1489        b.addTableName(ProtobufUtil.toProtoTableName(tableName));
1490      }
1491      return b.build();
1492    } catch (IOException e) {
1493      throw new ServiceException(e);
1494    }
1495  }
1496
1497  @Override
1498  public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req)
1499    throws ServiceException {
1500    try {
1501      long procId = server.modifyColumn(ProtobufUtil.toTableName(req.getTableName()),
1502        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(),
1503        req.getNonce());
1504      if (procId == -1) {
1505        // This means the operation was not performed on the server, so do not set any procId
1506        return ModifyColumnResponse.newBuilder().build();
1507      } else {
1508        return ModifyColumnResponse.newBuilder().setProcId(procId).build();
1509      }
1510    } catch (IOException ioe) {
1511      throw new ServiceException(ioe);
1512    }
1513  }
1514
1515  @Override
1516  public ModifyColumnStoreFileTrackerResponse modifyColumnStoreFileTracker(RpcController controller,
1517    ModifyColumnStoreFileTrackerRequest req) throws ServiceException {
1518    try {
1519      long procId =
1520        server.modifyColumnStoreFileTracker(ProtobufUtil.toTableName(req.getTableName()),
1521          req.getFamily().toByteArray(), req.getDstSft(), req.getNonceGroup(), req.getNonce());
1522      return ModifyColumnStoreFileTrackerResponse.newBuilder().setProcId(procId).build();
1523    } catch (IOException ioe) {
1524      throw new ServiceException(ioe);
1525    }
1526  }
1527
1528  @Override
1529  public ModifyNamespaceResponse modifyNamespace(RpcController controller,
1530    ModifyNamespaceRequest request) throws ServiceException {
1531    try {
1532      long procId =
1533        server.modifyNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
1534          request.getNonceGroup(), request.getNonce());
1535      return ModifyNamespaceResponse.newBuilder().setProcId(procId).build();
1536    } catch (IOException e) {
1537      throw new ServiceException(e);
1538    }
1539  }
1540
1541  @Override
1542  public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req)
1543    throws ServiceException {
1544    try {
1545      long procId = server.modifyTable(ProtobufUtil.toTableName(req.getTableName()),
1546        ProtobufUtil.toTableDescriptor(req.getTableSchema()), req.getNonceGroup(), req.getNonce(),
1547        req.getReopenRegions());
1548      return ModifyTableResponse.newBuilder().setProcId(procId).build();
1549    } catch (IOException ioe) {
1550      throw new ServiceException(ioe);
1551    }
1552  }
1553
1554  @Override
1555  public ModifyTableStoreFileTrackerResponse modifyTableStoreFileTracker(RpcController controller,
1556    ModifyTableStoreFileTrackerRequest req) throws ServiceException {
1557    try {
1558      long procId = server.modifyTableStoreFileTracker(ProtobufUtil.toTableName(req.getTableName()),
1559        req.getDstSft(), req.getNonceGroup(), req.getNonce());
1560      return ModifyTableStoreFileTrackerResponse.newBuilder().setProcId(procId).build();
1561    } catch (IOException ioe) {
1562      throw new ServiceException(ioe);
1563    }
1564  }
1565
1566  @Override
1567  public MoveRegionResponse moveRegion(RpcController controller, MoveRegionRequest req)
1568    throws ServiceException {
1569    final byte[] encodedRegionName = req.getRegion().getValue().toByteArray();
1570    RegionSpecifierType type = req.getRegion().getType();
1571    final byte[] destServerName = (req.hasDestServerName())
1572      ? Bytes.toBytes(ProtobufUtil.toServerName(req.getDestServerName()).getServerName())
1573      : null;
1574    MoveRegionResponse mrr = MoveRegionResponse.newBuilder().build();
1575
1576    if (type != RegionSpecifierType.ENCODED_REGION_NAME) {
1577      LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
1578        + " actual: " + type);
1579    }
1580
1581    try {
1582      server.checkInitialized();
1583      server.move(encodedRegionName, destServerName);
1584    } catch (IOException ioe) {
1585      throw new ServiceException(ioe);
1586    }
1587    return mrr;
1588  }
1589
1590  /**
1591   * Offline the specified region from the master's in-memory state. It will not attempt to reassign
1592   * the region as unassign does. This is a special method that should only be used by experts or hbck.
1593   */
1594  @Override
1595  public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request)
1596    throws ServiceException {
1597    try {
1598      server.checkInitialized();
1599
1600      final RegionSpecifierType type = request.getRegion().getType();
1601      if (type != RegionSpecifierType.REGION_NAME) {
1602        LOG.warn("offlineRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
1603          + " actual: " + type);
1604      }
1605
1606      final byte[] regionName = request.getRegion().getValue().toByteArray();
1607      final RegionInfo hri = server.getAssignmentManager().getRegionInfo(regionName);
1608      if (hri == null) {
1609        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
1610      }
1611
1612      if (server.cpHost != null) {
1613        server.cpHost.preRegionOffline(hri);
1614      }
1615      LOG.info(server.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString());
1616      server.getAssignmentManager().offlineRegion(hri);
1617      if (server.cpHost != null) {
1618        server.cpHost.postRegionOffline(hri);
1619      }
1620    } catch (IOException ioe) {
1621      throw new ServiceException(ioe);
1622    }
1623    return OfflineRegionResponse.newBuilder().build();
1624  }
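  /*
   * Illustrative client-side sketch (not part of this class): this expert/hbck-only operation is
   * reachable via Admin#offline, which takes the full region name (not the encoded name). The
   * region name below is a placeholder.
   *
   *   byte[] regionName = Bytes.toBytes("my_table,,1600000000000.<encoded-name>.");
   *   admin.offline(regionName); // removes the region from master in-memory state only
   */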
1625
1626  /**
1627   * Execute Restore/Clone snapshot operation.
1628   * <p>
1629   * If the specified table exists, a "Restore" is executed, replacing the table schema and directory
1630   * data with the content of the snapshot. The table must be disabled, or an
1631   * UnsupportedOperationException will be thrown.
1632   * <p>
1633   * If the table doesn't exist, a "Clone" is executed: a new table is created using the schema at
1634   * the time of the snapshot, along with the content of the snapshot.
1635   * <p>
1636   * The restore/clone operation does not require copying HFiles. Since HFiles are immutable, the
1637   * table can point to and use the same files as the original one.
1638   */
1639  @Override
1640  public RestoreSnapshotResponse restoreSnapshot(RpcController controller,
1641    RestoreSnapshotRequest request) throws ServiceException {
1642    try {
1643      long procId = server.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(),
1644        request.getNonce(), request.getRestoreACL(), request.getCustomSFT());
1645      return RestoreSnapshotResponse.newBuilder().setProcId(procId).build();
1646    } catch (ForeignException e) {
1647      throw new ServiceException(e.getCause());
1648    } catch (IOException e) {
1649      throw new ServiceException(e);
1650    }
1651  }
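  /*
   * Illustrative client-side sketch (not part of this class): whether this RPC performs a restore
   * or a clone depends only on whether the target table exists. Table and snapshot names below are
   * hypothetical.
   *
   *   // Restore an existing (disabled) table to the snapshot contents:
   *   admin.disableTable(TableName.valueOf("t1"));
   *   admin.restoreSnapshot("t1-snapshot");
   *
   *   // Clone the snapshot into a brand new table:
   *   admin.cloneSnapshot("t1-snapshot", TableName.valueOf("t1-clone"));
   */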
1652
1653  @Override
1654  public SetSnapshotCleanupResponse switchSnapshotCleanup(RpcController controller,
1655    SetSnapshotCleanupRequest request) throws ServiceException {
1656    try {
1657      server.checkInitialized();
1658      final boolean enabled = request.getEnabled();
1659      final boolean isSynchronous = request.hasSynchronous() && request.getSynchronous();
1660      final boolean prevSnapshotCleanupRunning = this.switchSnapshotCleanup(enabled, isSynchronous);
1661      return SetSnapshotCleanupResponse.newBuilder()
1662        .setPrevSnapshotCleanup(prevSnapshotCleanupRunning).build();
1663    } catch (IOException e) {
1664      throw new ServiceException(e);
1665    }
1666  }
1667
1668  @Override
1669  public IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(RpcController controller,
1670    IsSnapshotCleanupEnabledRequest request) throws ServiceException {
1671    try {
1672      server.checkInitialized();
1673      final boolean isSnapshotCleanupEnabled = server.snapshotCleanupStateStore.get();
1674      return IsSnapshotCleanupEnabledResponse.newBuilder().setEnabled(isSnapshotCleanupEnabled)
1675        .build();
1676    } catch (IOException e) {
1677      throw new ServiceException(e);
1678    }
1679  }
1680
1681  /**
1682   * Turn on/off snapshot auto-cleanup based on TTL.
1683   * @param enabledNewVal Set to <code>true</code> to enable, <code>false</code> to disable
1684   * @param synchronous   If <code>true</code>, wait until any outstanding snapshot cleanup
1685   *                      completes before returning
1686   * @return previous snapshot auto-cleanup mode
1687   */
1688  private synchronized boolean switchSnapshotCleanup(final boolean enabledNewVal,
1689    final boolean synchronous) throws IOException {
1690    final boolean oldValue = server.snapshotCleanupStateStore.get();
1691    server.switchSnapshotCleanup(enabledNewVal, synchronous);
1692    LOG.info("{} Successfully set snapshot cleanup to {}", server.getClientIdAuditPrefix(),
1693      enabledNewVal);
1694    return oldValue;
1695  }
1696
1697  @Override
1698  public RunCatalogScanResponse runCatalogScan(RpcController c, RunCatalogScanRequest req)
1699    throws ServiceException {
1700    rpcPreCheck("runCatalogScan");
1701    try {
1702      return ResponseConverter.buildRunCatalogScanResponse(this.server.catalogJanitorChore.scan());
1703    } catch (IOException ioe) {
1704      throw new ServiceException(ioe);
1705    }
1706  }
1707
1708  @Override
1709  public RunCleanerChoreResponse runCleanerChore(RpcController c, RunCleanerChoreRequest req)
1710    throws ServiceException {
1711    rpcPreCheck("runCleanerChore");
1712    try {
1713      CompletableFuture<Boolean> fileCleanerFuture = server.getHFileCleaner().triggerCleanerNow();
1714      CompletableFuture<Boolean> logCleanerFuture = server.getLogCleaner().triggerCleanerNow();
1715      boolean result = fileCleanerFuture.get() && logCleanerFuture.get();
1716      return ResponseConverter.buildRunCleanerChoreResponse(result);
1717    } catch (InterruptedException e) {
1718      throw new ServiceException(e);
1719    } catch (ExecutionException e) {
1720      throw new ServiceException(e.getCause());
1721    }
1722  }
1723
1724  @Override
1725  public SetBalancerRunningResponse setBalancerRunning(RpcController c,
1726    SetBalancerRunningRequest req) throws ServiceException {
1727    try {
1728      server.checkInitialized();
1729      boolean prevValue = (req.getSynchronous())
1730        ? synchronousBalanceSwitch(req.getOn())
1731        : server.balanceSwitch(req.getOn());
1732      return SetBalancerRunningResponse.newBuilder().setPrevBalanceValue(prevValue).build();
1733    } catch (IOException ioe) {
1734      throw new ServiceException(ioe);
1735    }
1736  }
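  /*
   * Illustrative client-side sketch (not part of this class): the balancer switch is exposed via
   * Admin#balancerSwitch; the synchronous flag maps to the synchronousBalanceSwitch path above.
   *
   *   boolean previouslyEnabled = admin.balancerSwitch(false, true); // disable and wait
   *   try {
   *     // ... perform maintenance while the balancer is off ...
   *   } finally {
   *     admin.balancerSwitch(previouslyEnabled, false);
   *   }
   */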
1737
1738  @Override
1739  public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request)
1740    throws ServiceException {
1741    LOG.info(server.getClientIdAuditPrefix() + " shutdown");
1742    try {
1743      server.shutdown();
1744    } catch (IOException e) {
1745      LOG.error("Exception occurred in HMaster.shutdown()", e);
1746      throw new ServiceException(e);
1747    }
1748    return ShutdownResponse.newBuilder().build();
1749  }
1750
1751  /**
1752   * Triggers an asynchronous attempt to take a snapshot. {@inheritDoc}
1753   */
1754  @Override
1755  public SnapshotResponse snapshot(RpcController controller, SnapshotRequest request)
1756    throws ServiceException {
1757    try {
1758      server.checkInitialized();
1759      server.snapshotManager.checkSnapshotSupport();
1760
1761      LOG.info(server.getClientIdAuditPrefix() + " snapshot request for: "
1762        + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
1763      // get the snapshot information
1764      SnapshotDescription snapshot =
1765        SnapshotDescriptionUtils.validate(request.getSnapshot(), server.getConfiguration());
1766      // send back the max amount of time the client should wait for the snapshot to complete
1767      long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(server.getConfiguration(),
1768        snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
1769
1770      SnapshotResponse.Builder builder = SnapshotResponse.newBuilder().setExpectedTimeout(waitTime);
1771
1772      // If the snapshot request carries a nonce group and a nonce, the client can handle the
1773      // snapshot procedure's procId. If the snapshot procedure is enabled, we do the snapshot
1774      // work with proc-v2; otherwise we fall back to the zk-based procedure.
1775      if (
1776        request.hasNonceGroup() && request.hasNonce()
1777          && server.snapshotManager.snapshotProcedureEnabled()
1778      ) {
1779        long nonceGroup = request.getNonceGroup();
1780        long nonce = request.getNonce();
1781        long procId = server.snapshotManager.takeSnapshot(snapshot, nonceGroup, nonce);
1782        return builder.setProcId(procId).build();
1783      } else {
1784        server.snapshotManager.takeSnapshot(snapshot);
1785        return builder.build();
1786      }
1787    } catch (ForeignException e) {
1788      throw new ServiceException(e.getCause());
1789    } catch (IOException e) {
1790      throw new ServiceException(e);
1791    }
1792  }
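  /*
   * Illustrative client-side sketch (not part of this class): taking a snapshot via the Admin API
   * and polling for completion, mirroring the expectedTimeout/isSnapshotDone contract above. Names
   * are hypothetical.
   *
   *   TableName table = TableName.valueOf("t1");
   *   admin.snapshot("t1-snapshot", table); // blocking convenience; async variants also exist
   *
   *   // Lower-level polling, e.g. when using an asynchronous snapshot call:
   *   org.apache.hadoop.hbase.client.SnapshotDescription sd =
   *     new org.apache.hadoop.hbase.client.SnapshotDescription("t1-snapshot", table);
   *   while (!admin.isSnapshotFinished(sd)) {
   *     Thread.sleep(1000);
   *   }
   */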
1793
1794  @Override
1795  public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request)
1796    throws ServiceException {
1797    LOG.info(server.getClientIdAuditPrefix() + " stop");
1798    try {
1799      server.stopMaster();
1800    } catch (IOException e) {
1801      LOG.error("Exception occurred while stopping master", e);
1802      throw new ServiceException(e);
1803    }
1804    return StopMasterResponse.newBuilder().build();
1805  }
1806
1807  @Override
1808  public IsInMaintenanceModeResponse isMasterInMaintenanceMode(final RpcController controller,
1809    final IsInMaintenanceModeRequest request) throws ServiceException {
1810    IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder();
1811    response.setInMaintenanceMode(server.isInMaintenanceMode());
1812    return response.build();
1813  }
1814
1815  @Override
1816  public UnassignRegionResponse unassignRegion(RpcController controller, UnassignRegionRequest req)
1817    throws ServiceException {
1818    try {
1819      final byte[] regionName = req.getRegion().getValue().toByteArray();
1820      RegionSpecifierType type = req.getRegion().getType();
1821      UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build();
1822
1823      server.checkInitialized();
1824      if (type != RegionSpecifierType.REGION_NAME) {
1825        LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
1826          + " actual: " + type);
1827      }
1828      RegionStateNode rsn =
1829        server.getAssignmentManager().getRegionStates().getRegionStateNodeFromName(regionName);
1830      if (rsn == null) {
1831        throw new UnknownRegionException(Bytes.toString(regionName));
1832      }
1833
1834      RegionInfo hri = rsn.getRegionInfo();
1835      if (server.cpHost != null) {
1836        server.cpHost.preUnassign(hri);
1837      }
1838      LOG.debug(server.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString()
1839        + " in current location if it is online");
1840      server.getAssignmentManager().unassign(hri);
1841      if (server.cpHost != null) {
1842        server.cpHost.postUnassign(hri);
1843      }
1844
1845      return urr;
1846    } catch (IOException ioe) {
1847      throw new ServiceException(ioe);
1848    }
1849  }
1850
1851  @Override
1852  public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c,
1853    ReportRegionStateTransitionRequest req) throws ServiceException {
1854    try {
1855      server.checkServiceStarted();
1856      for (RegionServerStatusProtos.RegionStateTransition transition : req.getTransitionList()) {
1857        long procId =
1858          transition.getProcIdCount() > 0 ? transition.getProcId(0) : Procedure.NO_PROC_ID;
1859        // -1 is less than any possible MasterActiveCode
1860        long initiatingMasterActiveTime = transition.hasInitiatingMasterActiveTime()
1861          ? transition.getInitiatingMasterActiveTime()
1862          : -1;
1863        throwOnOldMaster(procId, initiatingMasterActiveTime);
1864      }
1865      return server.getAssignmentManager().reportRegionStateTransition(req);
1866    } catch (IOException ioe) {
1867      throw new ServiceException(ioe);
1868    }
1869  }
1870
1871  @Override
1872  public SetQuotaResponse setQuota(RpcController c, SetQuotaRequest req) throws ServiceException {
1873    try {
1874      server.checkInitialized();
1875      SetQuotaResponse response = server.getMasterQuotaManager().setQuota(req);
1876      server.reloadRegionServerQuotas();
1877
1878      return response;
1879    } catch (Exception e) {
1880      throw new ServiceException(e);
1881    }
1882  }
1883
1884  @Override
1885  public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
1886    MajorCompactionTimestampRequest request) throws ServiceException {
1887    MajorCompactionTimestampResponse.Builder response =
1888      MajorCompactionTimestampResponse.newBuilder();
1889    try {
1890      server.checkInitialized();
1891      response.setCompactionTimestamp(
1892        server.getLastMajorCompactionTimestamp(ProtobufUtil.toTableName(request.getTableName())));
1893    } catch (IOException e) {
1894      throw new ServiceException(e);
1895    }
1896    return response.build();
1897  }
1898
1899  @Override
1900  public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
1901    RpcController controller, MajorCompactionTimestampForRegionRequest request)
1902    throws ServiceException {
1903    MajorCompactionTimestampResponse.Builder response =
1904      MajorCompactionTimestampResponse.newBuilder();
1905    try {
1906      server.checkInitialized();
1907      response.setCompactionTimestamp(server
1908        .getLastMajorCompactionTimestampForRegion(request.getRegion().getValue().toByteArray()));
1909    } catch (IOException e) {
1910      throw new ServiceException(e);
1911    }
1912    return response.build();
1913  }
1914
1915  @Override
1916  public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller,
1917    IsBalancerEnabledRequest request) throws ServiceException {
1918    IsBalancerEnabledResponse.Builder response = IsBalancerEnabledResponse.newBuilder();
1919    response.setEnabled(server.isBalancerOn());
1920    return response.build();
1921  }
1922
1923  @Override
1924  public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller,
1925    SetSplitOrMergeEnabledRequest request) throws ServiceException {
1926    SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder();
1927    try {
1928      server.checkInitialized();
1929      boolean newValue = request.getEnabled();
1930      for (MasterProtos.MasterSwitchType masterSwitchType : request.getSwitchTypesList()) {
1931        MasterSwitchType switchType = convert(masterSwitchType);
1932        boolean oldValue = server.isSplitOrMergeEnabled(switchType);
1933        response.addPrevValue(oldValue);
1934        if (server.cpHost != null) {
1935          server.cpHost.preSetSplitOrMergeEnabled(newValue, switchType);
1936        }
1937        server.getSplitOrMergeStateStore().setSplitOrMergeEnabled(newValue, switchType);
1938        if (server.cpHost != null) {
1939          server.cpHost.postSetSplitOrMergeEnabled(newValue, switchType);
1940        }
1941      }
1942    } catch (IOException e) {
1943      throw new ServiceException(e);
1944    }
1945    return response.build();
1946  }
1947
1948  @Override
1949  public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller,
1950    IsSplitOrMergeEnabledRequest request) throws ServiceException {
1951    IsSplitOrMergeEnabledResponse.Builder response = IsSplitOrMergeEnabledResponse.newBuilder();
1952    response.setEnabled(server.isSplitOrMergeEnabled(convert(request.getSwitchType())));
1953    return response.build();
1954  }
1955
1956  @Override
1957  public NormalizeResponse normalize(RpcController controller, NormalizeRequest request)
1958    throws ServiceException {
1959    rpcPreCheck("normalize");
1960    try {
1961      final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
1962        .tableNames(ProtobufUtil.toTableNameList(request.getTableNamesList()))
1963        .regex(request.hasRegex() ? request.getRegex() : null)
1964        .namespace(request.hasNamespace() ? request.getNamespace() : null).build();
1965      return NormalizeResponse.newBuilder()
1966        // all API requests are considered priority requests.
1967        .setNormalizerRan(server.normalizeRegions(ntfp, true)).build();
1968    } catch (IOException ex) {
1969      throw new ServiceException(ex);
1970    }
1971  }
1972
1973  @Override
1974  public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller,
1975    SetNormalizerRunningRequest request) throws ServiceException {
1976    rpcPreCheck("setNormalizerRunning");
1977
1978    // Sets normalizer on/off flag in ZK.
1979    // TODO: this method is totally broken in terms of atomicity of actions and values read.
1980    // 1. The contract has this RPC returning the previous value. There isn't a ZKUtil method
1981    // that lets us retrieve the previous value as part of setting a new value, so we simply
1982    // perform a read before issuing the update. Thus we have a data race opportunity, between
1983    // when the `prevValue` is read and whatever is actually overwritten.
1984    // 2. Down in `setNormalizerOn`, the call to `createAndWatch` inside of the catch clause can
1985    // itself fail in the event that the znode already exists. Thus, another data race, between
1986    // when the initial `setData` call is notified of the absence of the target znode and the
1987    // subsequent `createAndWatch`, with another client creating said node.
1988    // That said, there's supposed to be only one active master and thus there's supposed to be
1989    // only one process with the authority to modify the value.
1990    final boolean prevValue = server.getRegionNormalizerManager().isNormalizerOn();
1991    final boolean newValue = request.getOn();
1992    try {
1993      server.getRegionNormalizerManager().setNormalizerOn(newValue);
1994    } catch (IOException e) {
1995      throw new ServiceException(e);
1996    }
1997    LOG.info("{} set normalizerSwitch={}", server.getClientIdAuditPrefix(), newValue);
1998    return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build();
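  /*
   * Illustrative client-side sketch (not part of this class): flipping and inspecting the
   * normalizer switch via the Admin API. As noted above, the "previous value" is read-then-write
   * and therefore only best-effort.
   *
   *   boolean previous = admin.normalizerSwitch(false); // turn the normalizer off
   *   boolean enabled = admin.isNormalizerEnabled();    // should now report false
   *   admin.normalizerSwitch(previous);                 // restore the old setting
   */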
1999  }
2000
2001  @Override
2002  public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller,
2003    IsNormalizerEnabledRequest request) {
2004    IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder();
2005    response.setEnabled(server.isNormalizerOn());
2006    return response.build();
2007  }
2008
2009  /**
2010   * Returns the security capabilities in effect on the cluster.
2011   */
2012  @Override
2013  public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller,
2014    SecurityCapabilitiesRequest request) throws ServiceException {
2015    SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder();
2016    try {
2017      server.checkInitialized();
2018      Set<SecurityCapabilitiesResponse.Capability> capabilities = new HashSet<>();
2019      // Authentication
2020      if (User.isHBaseSecurityEnabled(server.getConfiguration())) {
2021        capabilities.add(SecurityCapabilitiesResponse.Capability.SECURE_AUTHENTICATION);
2022      } else {
2023        capabilities.add(SecurityCapabilitiesResponse.Capability.SIMPLE_AUTHENTICATION);
2024      }
2025      // A coprocessor that implements AccessControlService can provide AUTHORIZATION and
2026      // CELL_AUTHORIZATION
2027      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
2028        if (AccessChecker.isAuthorizationSupported(server.getConfiguration())) {
2029          capabilities.add(SecurityCapabilitiesResponse.Capability.AUTHORIZATION);
2030        }
2031        if (AccessController.isCellAuthorizationSupported(server.getConfiguration())) {
2032          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_AUTHORIZATION);
2033        }
2034      }
2035      // A coprocessor that implements VisibilityLabelsService can provide CELL_VISIBILITY.
2036      if (server.cpHost != null && hasVisibilityLabelsServiceCoprocessor(server.cpHost)) {
2037        if (VisibilityController.isCellAuthorizationSupported(server.getConfiguration())) {
2038          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_VISIBILITY);
2039        }
2040      }
2041      response.addAllCapabilities(capabilities);
2042    } catch (IOException e) {
2043      throw new ServiceException(e);
2044    }
2045    return response.build();
2046  }
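  /*
   * Illustrative client-side sketch (not part of this class): the computed capability set is
   * consumed through Admin#getSecurityCapabilities, e.g. to check whether cell-level visibility is
   * available before attempting to use it.
   *
   *   List<SecurityCapability> caps = admin.getSecurityCapabilities();
   *   if (caps.contains(SecurityCapability.CELL_VISIBILITY)) {
   *     // safe to set visibility expressions on cells
   *   }
   */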
2047
2048  /**
2049   * Determines if there is a MasterCoprocessor deployed which implements
2050   * {@link AccessControlService.Interface}.
2051   */
2052  boolean hasAccessControlServiceCoprocessor(MasterCoprocessorHost cpHost) {
2053    return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class),
2054      AccessControlService.Interface.class);
2055  }
2056
2057  /**
2058   * Determines if there is a MasterCoprocessor deployed which implements
2059   * {@link VisibilityLabelsService.Interface}.
2060   */
2061  boolean hasVisibilityLabelsServiceCoprocessor(MasterCoprocessorHost cpHost) {
2062    return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class),
2063      VisibilityLabelsService.Interface.class);
2064  }
2065
2066  /**
2067   * Determines if there is a coprocessor implementation in the provided argument which extends or
2068   * implements the provided {@code service}.
2069   */
2070  boolean checkCoprocessorWithService(List<MasterCoprocessor> coprocessorsToCheck,
2071    Class<?> service) {
2072    if (coprocessorsToCheck == null || coprocessorsToCheck.isEmpty()) {
2073      return false;
2074    }
2075    for (MasterCoprocessor cp : coprocessorsToCheck) {
2076      if (service.isAssignableFrom(cp.getClass())) {
2077        return true;
2078      }
2079    }
2080    return false;
2081  }
2082
2083  private MasterSwitchType convert(MasterProtos.MasterSwitchType switchType) {
2084    switch (switchType) {
2085      case SPLIT:
2086        return MasterSwitchType.SPLIT;
2087      case MERGE:
2088        return MasterSwitchType.MERGE;
2089      default:
2090        break;
2091    }
2092    return null;
2093  }
2094
2095  @Override
2096  public AddReplicationPeerResponse addReplicationPeer(RpcController controller,
2097    AddReplicationPeerRequest request) throws ServiceException {
2098    try {
2099      long procId = server.addReplicationPeer(request.getPeerId(),
2100        ReplicationPeerConfigUtil.convert(request.getPeerConfig()),
2101        request.getPeerState().getState().equals(ReplicationState.State.ENABLED));
2102      return AddReplicationPeerResponse.newBuilder().setProcId(procId).build();
2103    } catch (ReplicationException | IOException e) {
2104      throw new ServiceException(e);
2105    }
2106  }
2107
2108  @Override
2109  public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller,
2110    RemoveReplicationPeerRequest request) throws ServiceException {
2111    try {
2112      long procId = server.removeReplicationPeer(request.getPeerId());
2113      return RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build();
2114    } catch (ReplicationException | IOException e) {
2115      throw new ServiceException(e);
2116    }
2117  }
2118
2119  @Override
2120  public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller,
2121    EnableReplicationPeerRequest request) throws ServiceException {
2122    try {
2123      long procId = server.enableReplicationPeer(request.getPeerId());
2124      return EnableReplicationPeerResponse.newBuilder().setProcId(procId).build();
2125    } catch (ReplicationException | IOException e) {
2126      throw new ServiceException(e);
2127    }
2128  }
2129
2130  @Override
2131  public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller,
2132    DisableReplicationPeerRequest request) throws ServiceException {
2133    try {
2134      long procId = server.disableReplicationPeer(request.getPeerId());
2135      return DisableReplicationPeerResponse.newBuilder().setProcId(procId).build();
2136    } catch (ReplicationException | IOException e) {
2137      throw new ServiceException(e);
2138    }
2139  }
2140
2141  @Override
2142  public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller,
2143    GetReplicationPeerConfigRequest request) throws ServiceException {
2144    GetReplicationPeerConfigResponse.Builder response =
2145      GetReplicationPeerConfigResponse.newBuilder();
2146    try {
2147      String peerId = request.getPeerId();
2148      ReplicationPeerConfig peerConfig = server.getReplicationPeerConfig(peerId);
2149      response.setPeerId(peerId);
2150      response.setPeerConfig(ReplicationPeerConfigUtil.convert(peerConfig));
2151    } catch (ReplicationException | IOException e) {
2152      throw new ServiceException(e);
2153    }
2154    return response.build();
2155  }
2156
2157  @Override
2158  public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(RpcController controller,
2159    UpdateReplicationPeerConfigRequest request) throws ServiceException {
2160    try {
2161      long procId = server.updateReplicationPeerConfig(request.getPeerId(),
2162        ReplicationPeerConfigUtil.convert(request.getPeerConfig()));
2163      return UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build();
2164    } catch (ReplicationException | IOException e) {
2165      throw new ServiceException(e);
2166    }
2167  }
2168
2169  @Override
2170  public TransitReplicationPeerSyncReplicationStateResponse
2171    transitReplicationPeerSyncReplicationState(RpcController controller,
2172      TransitReplicationPeerSyncReplicationStateRequest request) throws ServiceException {
2173    try {
2174      long procId = server.transitReplicationPeerSyncReplicationState(request.getPeerId(),
2175        ReplicationPeerConfigUtil.toSyncReplicationState(request.getSyncReplicationState()));
2176      return TransitReplicationPeerSyncReplicationStateResponse.newBuilder().setProcId(procId)
2177        .build();
2178    } catch (ReplicationException | IOException e) {
2179      throw new ServiceException(e);
2180    }
2181  }
2182
2183  @Override
2184  public ListReplicationPeersResponse listReplicationPeers(RpcController controller,
2185    ListReplicationPeersRequest request) throws ServiceException {
2186    ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder();
2187    try {
2188      List<ReplicationPeerDescription> peers =
2189        server.listReplicationPeers(request.hasRegex() ? request.getRegex() : null);
2190      for (ReplicationPeerDescription peer : peers) {
2191        response.addPeerDesc(ReplicationPeerConfigUtil.toProtoReplicationPeerDescription(peer));
2192      }
2193    } catch (ReplicationException | IOException e) {
2194      throw new ServiceException(e);
2195    }
2196    return response.build();
2197  }
2198
2199  @Override
2200  public GetReplicationPeerStateResponse isReplicationPeerEnabled(RpcController controller,
2201    GetReplicationPeerStateRequest request) throws ServiceException {
2202    boolean isEnabled;
2203    try {
2204      isEnabled = server.getReplicationPeerManager().getPeerState(request.getPeerId());
2205    } catch (ReplicationException ioe) {
2206      throw new ServiceException(ioe);
2207    }
2208    return GetReplicationPeerStateResponse.newBuilder().setIsEnabled(isEnabled).build();
2209  }
2210
2211  @Override
2212  public ReplicationPeerModificationSwitchResponse replicationPeerModificationSwitch(
2213    RpcController controller, ReplicationPeerModificationSwitchRequest request)
2214    throws ServiceException {
2215    try {
2216      server.checkInitialized();
2217      boolean prevValue = server.replicationPeerModificationSwitch(request.getOn());
2218      return ReplicationPeerModificationSwitchResponse.newBuilder().setPreviousValue(prevValue)
2219        .build();
2220    } catch (IOException ioe) {
2221      throw new ServiceException(ioe);
2222    }
2223  }
2224
2225  @Override
2226  public GetReplicationPeerModificationProceduresResponse getReplicationPeerModificationProcedures(
2227    RpcController controller, GetReplicationPeerModificationProceduresRequest request)
2228    throws ServiceException {
2229    try {
2230      server.checkInitialized();
2231      GetReplicationPeerModificationProceduresResponse.Builder builder =
2232        GetReplicationPeerModificationProceduresResponse.newBuilder();
2233      for (Procedure<?> proc : server.getProcedures()) {
2234        if (proc.isFinished()) {
2235          continue;
2236        }
2237        if (!(proc instanceof AbstractPeerNoLockProcedure)) {
2238          continue;
2239        }
2240        builder.addProcedure(ProcedureUtil.convertToProtoProcedure(proc));
2241      }
2242      return builder.build();
2243    } catch (IOException ioe) {
2244      throw new ServiceException(ioe);
2245    }
2246  }
2247
2248  @Override
2249  public IsReplicationPeerModificationEnabledResponse isReplicationPeerModificationEnabled(
2250    RpcController controller, IsReplicationPeerModificationEnabledRequest request)
2251    throws ServiceException {
2252    try {
2253      server.checkInitialized();
2254      return IsReplicationPeerModificationEnabledResponse.newBuilder()
2255        .setEnabled(server.isReplicationPeerModificationEnabled()).build();
2256    } catch (IOException ioe) {
2257      throw new ServiceException(ioe);
2258    }
2259  }
2260
2261  @Override
2262  public ListDecommissionedRegionServersResponse listDecommissionedRegionServers(
2263    RpcController controller, ListDecommissionedRegionServersRequest request)
2264    throws ServiceException {
2265    ListDecommissionedRegionServersResponse.Builder response =
2266      ListDecommissionedRegionServersResponse.newBuilder();
2267    try {
2268      server.checkInitialized();
2269      if (server.cpHost != null) {
2270        server.cpHost.preListDecommissionedRegionServers();
2271      }
2272      List<ServerName> servers = server.listDecommissionedRegionServers();
      response.addAllServerName(
        servers.stream().map(ProtobufUtil::toServerName).collect(Collectors.toList()));
2275      if (server.cpHost != null) {
2276        server.cpHost.postListDecommissionedRegionServers();
2277      }
2278    } catch (IOException io) {
2279      throw new ServiceException(io);
2280    }
2281
2282    return response.build();
2283  }
2284
2285  @Override
2286  public DecommissionRegionServersResponse decommissionRegionServers(RpcController controller,
2287    DecommissionRegionServersRequest request) throws ServiceException {
2288    try {
2289      server.checkInitialized();
2290      List<ServerName> servers = request.getServerNameList().stream()
2291        .map(pbServer -> ProtobufUtil.toServerName(pbServer)).collect(Collectors.toList());
2292      boolean offload = request.getOffload();
2293      if (server.cpHost != null) {
2294        server.cpHost.preDecommissionRegionServers(servers, offload);
2295      }
2296      server.decommissionRegionServers(servers, offload);
2297      if (server.cpHost != null) {
2298        server.cpHost.postDecommissionRegionServers(servers, offload);
2299      }
2300    } catch (IOException io) {
2301      throw new ServiceException(io);
2302    }
2303
2304    return DecommissionRegionServersResponse.newBuilder().build();
2305  }
2306
2307  @Override
2308  public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller,
2309    RecommissionRegionServerRequest request) throws ServiceException {
2310    try {
2311      server.checkInitialized();
2312      ServerName sn = ProtobufUtil.toServerName(request.getServerName());
2313      List<byte[]> encodedRegionNames = request.getRegionList().stream()
2314        .map(regionSpecifier -> regionSpecifier.getValue().toByteArray())
2315        .collect(Collectors.toList());
2316      if (server.cpHost != null) {
2317        server.cpHost.preRecommissionRegionServer(sn, encodedRegionNames);
2318      }
2319      server.recommissionRegionServer(sn, encodedRegionNames);
2320      if (server.cpHost != null) {
2321        server.cpHost.postRecommissionRegionServer(sn, encodedRegionNames);
2322      }
2323    } catch (IOException io) {
2324      throw new ServiceException(io);
2325    }
2326
2327    return RecommissionRegionServerResponse.newBuilder().build();
2328  }
2329
2330  @Override
2331  public LockResponse requestLock(RpcController controller, final LockRequest request)
2332    throws ServiceException {
2333    try {
2334      if (request.getDescription().isEmpty()) {
2335        throw new IllegalArgumentException("Empty description");
2336      }
2337      NonceProcedureRunnable npr;
2338      LockType type = LockType.valueOf(request.getLockType().name());
2339      if (request.getRegionInfoCount() > 0) {
2340        final RegionInfo[] regionInfos = new RegionInfo[request.getRegionInfoCount()];
2341        for (int i = 0; i < request.getRegionInfoCount(); ++i) {
2342          regionInfos[i] = ProtobufUtil.toRegionInfo(request.getRegionInfo(i));
2343        }
2344        npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) {
2345          @Override
2346          protected void run() throws IOException {
2347            setProcId(server.getLockManager().remoteLocks().requestRegionsLock(regionInfos,
2348              request.getDescription(), getNonceKey()));
2349          }
2350
2351          @Override
2352          protected String getDescription() {
2353            return "RequestLock";
2354          }
2355        };
2356      } else if (request.hasTableName()) {
2357        final TableName tableName = ProtobufUtil.toTableName(request.getTableName());
2358        npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) {
2359          @Override
2360          protected void run() throws IOException {
2361            setProcId(server.getLockManager().remoteLocks().requestTableLock(tableName, type,
2362              request.getDescription(), getNonceKey()));
2363          }
2364
2365          @Override
2366          protected String getDescription() {
2367            return "RequestLock";
2368          }
2369        };
2370      } else if (request.hasNamespace()) {
2371        npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) {
2372          @Override
2373          protected void run() throws IOException {
2374            setProcId(server.getLockManager().remoteLocks().requestNamespaceLock(
2375              request.getNamespace(), type, request.getDescription(), getNonceKey()));
2376          }
2377
2378          @Override
2379          protected String getDescription() {
2380            return "RequestLock";
2381          }
2382        };
2383      } else {
2384        throw new IllegalArgumentException("one of table/namespace/region should be specified");
2385      }
2386      long procId = MasterProcedureUtil.submitProcedure(npr);
2387      return LockResponse.newBuilder().setProcId(procId).build();
2388    } catch (IllegalArgumentException e) {
2389      LOG.warn("Exception when queuing lock", e);
2390      throw new ServiceException(new DoNotRetryIOException(e));
2391    } catch (IOException e) {
2392      LOG.warn("Exception when queuing lock", e);
2393      throw new ServiceException(e);
2394    }
2395  }
2396
2397  /**
   * @return LOCKED if the procedure is found and it holds the lock; else UNLOCKED.
   * @throws ServiceException if the given proc id is found but does not refer to a LockProcedure.
2400   */
2401  @Override
2402  public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request)
2403    throws ServiceException {
2404    try {
2405      if (
2406        server.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(),
2407          request.getKeepAlive())
2408      ) {
2409        return LockHeartbeatResponse.newBuilder()
2410          .setTimeoutMs(server.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF,
2411            LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS))
2412          .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).build();
2413      } else {
2414        return LockHeartbeatResponse.newBuilder()
2415          .setLockStatus(LockHeartbeatResponse.LockStatus.UNLOCKED).build();
2416      }
2417    } catch (IOException e) {
2418      throw new ServiceException(e);
2419    }
2420  }
2421
2422  @Override
2423  public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller,
2424    RegionSpaceUseReportRequest request) throws ServiceException {
2425    try {
2426      server.checkInitialized();
2427      if (!QuotaUtil.isQuotaEnabled(server.getConfiguration())) {
2428        return RegionSpaceUseReportResponse.newBuilder().build();
2429      }
2430      MasterQuotaManager quotaManager = this.server.getMasterQuotaManager();
2431      if (quotaManager != null) {
2432        final long now = EnvironmentEdgeManager.currentTime();
2433        for (RegionSpaceUse report : request.getSpaceUseList()) {
2434          quotaManager.addRegionSize(ProtobufUtil.toRegionInfo(report.getRegionInfo()),
2435            report.getRegionSize(), now);
2436        }
2437      } else {
2438        LOG.debug("Received region space usage report but HMaster is not ready to process it, "
2439          + "skipping");
2440      }
2441      return RegionSpaceUseReportResponse.newBuilder().build();
2442    } catch (Exception e) {
2443      throw new ServiceException(e);
2444    }
2445  }
2446
2447  @Override
2448  public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(RpcController controller,
2449    GetSpaceQuotaRegionSizesRequest request) throws ServiceException {
2450    try {
2451      server.checkInitialized();
2452      MasterQuotaManager quotaManager = this.server.getMasterQuotaManager();
2453      GetSpaceQuotaRegionSizesResponse.Builder builder =
2454        GetSpaceQuotaRegionSizesResponse.newBuilder();
2455      if (quotaManager != null) {
2456        Map<RegionInfo, Long> regionSizes = quotaManager.snapshotRegionSizes();
2457        Map<TableName, Long> regionSizesByTable = new HashMap<>();
        // Aggregate per-region sizes into per-table sizes
        for (Entry<RegionInfo, Long> entry : regionSizes.entrySet()) {
          regionSizesByTable.merge(entry.getKey().getTable(), entry.getValue(), Long::sum);
        }
2467        // Serialize them into the protobuf
2468        for (Entry<TableName, Long> tableSize : regionSizesByTable.entrySet()) {
2469          builder.addSizes(
2470            RegionSizes.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableSize.getKey()))
2471              .setSize(tableSize.getValue()).build());
2472        }
2473        return builder.build();
2474      } else {
2475        LOG.debug("Received space quota region size report but HMaster is not ready to process it,"
2476          + "skipping");
2477      }
2478      return builder.build();
2479    } catch (Exception e) {
2480      throw new ServiceException(e);
2481    }
2482  }
2483
2484  @Override
2485  public GetQuotaStatesResponse getQuotaStates(RpcController controller,
2486    GetQuotaStatesRequest request) throws ServiceException {
2487    try {
2488      server.checkInitialized();
2489      QuotaObserverChore quotaChore = this.server.getQuotaObserverChore();
2490      GetQuotaStatesResponse.Builder builder = GetQuotaStatesResponse.newBuilder();
2491      if (quotaChore != null) {
2492        // The "current" view of all tables with quotas
2493        Map<TableName, SpaceQuotaSnapshot> tableSnapshots = quotaChore.getTableQuotaSnapshots();
2494        for (Entry<TableName, SpaceQuotaSnapshot> entry : tableSnapshots.entrySet()) {
2495          builder.addTableSnapshots(TableQuotaSnapshot.newBuilder()
2496            .setTableName(ProtobufUtil.toProtoTableName(entry.getKey()))
2497            .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
2498        }
2499        // The "current" view of all namespaces with quotas
2500        Map<String, SpaceQuotaSnapshot> nsSnapshots = quotaChore.getNamespaceQuotaSnapshots();
2501        for (Entry<String, SpaceQuotaSnapshot> entry : nsSnapshots.entrySet()) {
2502          builder.addNsSnapshots(NamespaceQuotaSnapshot.newBuilder().setNamespace(entry.getKey())
2503            .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
2504        }
2505        return builder.build();
2506      }
2507      return builder.build();
2508    } catch (Exception e) {
2509      throw new ServiceException(e);
2510    }
2511  }
2512
2513  @Override
2514  public ClearDeadServersResponse clearDeadServers(RpcController controller,
2515    ClearDeadServersRequest request) throws ServiceException {
2516    LOG.debug(server.getClientIdAuditPrefix() + " clear dead region servers.");
2517    ClearDeadServersResponse.Builder response = ClearDeadServersResponse.newBuilder();
2518    try {
2519      server.checkInitialized();
2520      if (server.cpHost != null) {
2521        server.cpHost.preClearDeadServers();
2522      }
2523
2524      if (server.getServerManager().areDeadServersInProgress()) {
2525        LOG.debug("Some dead server is still under processing, won't clear the dead server list");
2526        response.addAllServerName(request.getServerNameList());
2527      } else {
2528        DeadServer deadServer = server.getServerManager().getDeadServers();
2529        Set<Address> clearedServers = new HashSet<>();
2530        for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
2531          ServerName serverName = ProtobufUtil.toServerName(pbServer);
2532          final boolean deadInProcess =
2533            server.getProcedures().stream().anyMatch(p -> (p instanceof ServerCrashProcedure)
2534              && ((ServerCrashProcedure) p).getServerName().equals(serverName));
2535          if (deadInProcess) {
            throw new ServiceException(String.format(
              "Server '%s' is still being processed by a ServerCrashProcedure; cannot clear it",
              serverName));
2538          }
2539
2540          if (!deadServer.removeDeadServer(serverName)) {
2541            response.addServerName(pbServer);
2542          } else {
2543            clearedServers.add(serverName.getAddress());
2544          }
2545        }
2546        server.getRSGroupInfoManager().removeServers(clearedServers);
2547        LOG.info("Remove decommissioned servers {} from RSGroup done", clearedServers);
2548      }
2549
2550      if (server.cpHost != null) {
2551        server.cpHost.postClearDeadServers(
2552          ProtobufUtil.toServerNameList(request.getServerNameList()),
2553          ProtobufUtil.toServerNameList(response.getServerNameList()));
2554      }
2555    } catch (IOException io) {
2556      throw new ServiceException(io);
2557    }
2558    return response.build();
2559  }
2560
2561  @Override
2562  public ReportProcedureDoneResponse reportProcedureDone(RpcController controller,
2563    ReportProcedureDoneRequest request) throws ServiceException {
    // Check the Master is up and ready before proceeding. The remote side will keep retrying.
2565    try {
2566      this.server.checkServiceStarted();
2567      for (RemoteProcedureResult result : request.getResultList()) {
        // -1 is less than any possible master active time
2569        long initiatingMasterActiveTime =
2570          result.hasInitiatingMasterActiveTime() ? result.getInitiatingMasterActiveTime() : -1;
2571        throwOnOldMaster(result.getProcId(), initiatingMasterActiveTime);
2572      }
2573    } catch (IOException ioe) {
2574      throw new ServiceException(ioe);
2575    }
2576    request.getResultList().forEach(result -> {
2577      if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) {
2578        server.remoteProcedureCompleted(result.getProcId());
2579      } else {
2580        server.remoteProcedureFailed(result.getProcId(),
2581          RemoteProcedureException.fromProto(result.getError()));
2582      }
2583    });
2584    return ReportProcedureDoneResponse.getDefaultInstance();
2585  }
2586
2587  private void throwOnOldMaster(long procId, long initiatingMasterActiveTime)
2588    throws MasterNotRunningException {
2589    if (initiatingMasterActiveTime > server.getMasterActiveTime()) {
      // The procedure was initiated by a newer active master, but this report was received on a
      // master with an older active time.
2592      LOG.warn(
2593        "Report for procId: {} and initiatingMasterAT {} received on master with activeTime {}",
2594        procId, initiatingMasterActiveTime, server.getMasterActiveTime());
2595      throw new MasterNotRunningException("Another master is active");
2596    }
2597  }
2598
2599  @Override
2600  public FileArchiveNotificationResponse reportFileArchival(RpcController controller,
2601    FileArchiveNotificationRequest request) throws ServiceException {
2602    try {
2603      server.checkInitialized();
2604      if (!QuotaUtil.isQuotaEnabled(server.getConfiguration())) {
2605        return FileArchiveNotificationResponse.newBuilder().build();
2606      }
2607      server.getMasterQuotaManager().processFileArchivals(request, server.getConnection(),
2608        server.getConfiguration(), server.getFileSystem());
2609      return FileArchiveNotificationResponse.newBuilder().build();
2610    } catch (Exception e) {
2611      throw new ServiceException(e);
2612    }
2613  }
2614
2615  // HBCK Services
2616
2617  @Override
2618  public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest req)
2619    throws ServiceException {
2620    rpcPreCheck("runHbckChore");
2621    LOG.info("{} request HBCK chore to run", server.getClientIdAuditPrefix());
2622    HbckChore hbckChore = server.getHbckChore();
2623    boolean ran = hbckChore.runChore();
2624    return RunHbckChoreResponse.newBuilder().setRan(ran).build();
2625  }
2626
2627  /**
   * Update the state of the table in meta only. This is required by hbck in some situations to
   * clean up stuck assign/unassign region procedures for the table.
   * @return the previous state of the table
2631   */
2632  @Override
2633  public GetTableStateResponse setTableStateInMeta(RpcController controller,
2634    SetTableStateInMetaRequest request) throws ServiceException {
2635    rpcPreCheck("setTableStateInMeta");
2636    TableName tn = ProtobufUtil.toTableName(request.getTableName());
2637    try {
2638      TableState prevState = this.server.getTableStateManager().getTableState(tn);
2639      TableState newState = TableState.convert(tn, request.getTableState());
2640      LOG.info("{} set table={} state from {} to {}", server.getClientIdAuditPrefix(), tn,
2641        prevState.getState(), newState.getState());
2642      this.server.getTableStateManager().setTableState(tn, newState.getState());
2643      return GetTableStateResponse.newBuilder().setTableState(prevState.convert()).build();
2644    } catch (Exception e) {
2645      throw new ServiceException(e);
2646    }
2647  }
2648
2649  /**
   * Update the state of the region in meta only. This is required by hbck in some situations to
   * clean up stuck assign/unassign region procedures for the table.
   * @return the previous states of the regions
2653   */
2654  @Override
2655  public SetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller,
2656    SetRegionStateInMetaRequest request) throws ServiceException {
2657    rpcPreCheck("setRegionStateInMeta");
2658    SetRegionStateInMetaResponse.Builder builder = SetRegionStateInMetaResponse.newBuilder();
2659    final AssignmentManager am = server.getAssignmentManager();
2660    try {
2661      for (RegionSpecifierAndState s : request.getStatesList()) {
2662        final RegionSpecifier spec = s.getRegionSpecifier();
2663        final RegionInfo targetRegionInfo = getRegionInfo(spec);
2664        final RegionState.State targetState = RegionState.State.convert(s.getState());
2665        final RegionState.State currentState = Optional.ofNullable(targetRegionInfo)
2666          .map(info -> am.getRegionStates().getRegionState(info)).map(RegionState::getState)
2667          .orElseThrow(
2668            () -> new ServiceException("No existing state known for region '" + spec + "'."));
2669        LOG.info("{} set region={} state from {} to {}", server.getClientIdAuditPrefix(),
2670          targetRegionInfo, currentState, targetState);
2671        if (currentState == targetState) {
2672          LOG.debug("Proposed state matches current state. {}, {}", targetRegionInfo, currentState);
2673          continue;
2674        }
2675        MetaTableAccessor.updateRegionState(server.getConnection(), targetRegionInfo, targetState);
2676        // Loads from meta again to refresh AM cache with the new region state
2677        am.populateRegionStatesFromMeta(targetRegionInfo);
2678        builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec)
2679          .setState(currentState.convert()));
2680      }
2681    } catch (IOException e) {
2682      throw new ServiceException(e);
2683    }
2684    return builder.build();
2685  }
2686
2687  /**
2688   * Get {@link RegionInfo} from Master using content of {@link RegionSpecifier} as key.
2689   * @return {@link RegionInfo} found by decoding {@code rs} or {@code null} if {@code rs} is
2690   *         unknown to the master.
2691   * @throws ServiceException If some error occurs while querying META or parsing results.
2692   */
2693  private RegionInfo getRegionInfo(HBaseProtos.RegionSpecifier rs) throws ServiceException {
2694    // TODO: this doesn't handle MOB regions. Should it? See the public method #getRegionInfo
2695    final AssignmentManager am = server.getAssignmentManager();
2696    final String encodedRegionName;
2697    final RegionInfo info;
2698    // first try resolving from the AM's caches.
2699    switch (rs.getType()) {
2700      case REGION_NAME:
2701        final byte[] regionName = rs.getValue().toByteArray();
2702        encodedRegionName = RegionInfo.encodeRegionName(regionName);
2703        info = am.getRegionInfo(regionName);
2704        break;
2705      case ENCODED_REGION_NAME:
2706        encodedRegionName = rs.getValue().toStringUtf8();
2707        info = am.getRegionInfo(encodedRegionName);
2708        break;
2709      default:
2710        throw new IllegalArgumentException("Unrecognized RegionSpecifierType " + rs.getType());
2711    }
2712    if (info != null) {
2713      return info;
2714    }
2715    // fall back to a meta scan and check the cache again.
2716    try {
2717      am.populateRegionStatesFromMeta(encodedRegionName);
2718    } catch (IOException e) {
2719      throw new ServiceException(e);
2720    }
2721    return am.getRegionInfo(encodedRegionName);
2722  }
2723
2724  /**
2725   * @throws ServiceException If no MasterProcedureExecutor
2726   */
2727  private void checkMasterProcedureExecutor() throws ServiceException {
2728    if (this.server.getMasterProcedureExecutor() == null) {
2729      throw new ServiceException("Master's ProcedureExecutor not initialized; retry later");
2730    }
2731  }
2732
2733  /**
   * A 'raw' version of assign that operates in bulk and can skirt Master state checks if override
   * is set; i.e. assigns can be forced while the Master is starting up or when the RegionState is
   * unclean. Used by HBCK2.
2736   */
2737  @Override
2738  public MasterProtos.AssignsResponse assigns(RpcController controller,
2739    MasterProtos.AssignsRequest request) throws ServiceException {
2740    checkMasterProcedureExecutor();
2741    final ProcedureExecutor<MasterProcedureEnv> pe = server.getMasterProcedureExecutor();
2742    final AssignmentManager am = server.getAssignmentManager();
2743    MasterProtos.AssignsResponse.Builder responseBuilder =
2744      MasterProtos.AssignsResponse.newBuilder();
2745    final boolean override = request.getOverride();
2746    final boolean force = request.getForce();
2747    LOG.info("{} assigns, override={}", server.getClientIdAuditPrefix(), override);
2748    for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
2749      final RegionInfo info = getRegionInfo(rs);
2750      if (info == null) {
2751        LOG.info("Unknown region {}", rs);
2752        continue;
2753      }
2754      responseBuilder.addPid(Optional.ofNullable(am.createOneAssignProcedure(info, override, force))
2755        .map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID));
2756    }
2757    return responseBuilder.build();
2758  }
2759
2760  /**
   * A 'raw' version of unassign that operates in bulk and can skirt Master state checks if
   * override is set; i.e. unassigns can be forced while the Master is starting up or when the
   * RegionState is unclean. Used by HBCK2.
2764   */
2765  @Override
2766  public MasterProtos.UnassignsResponse unassigns(RpcController controller,
2767    MasterProtos.UnassignsRequest request) throws ServiceException {
2768    checkMasterProcedureExecutor();
2769    final ProcedureExecutor<MasterProcedureEnv> pe = server.getMasterProcedureExecutor();
2770    final AssignmentManager am = server.getAssignmentManager();
2771    MasterProtos.UnassignsResponse.Builder responseBuilder =
2772      MasterProtos.UnassignsResponse.newBuilder();
2773    final boolean override = request.getOverride();
2774    final boolean force = request.getForce();
2775    LOG.info("{} unassigns, override={}", server.getClientIdAuditPrefix(), override);
2776    for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
2777      final RegionInfo info = getRegionInfo(rs);
2778      if (info == null) {
2779        LOG.info("Unknown region {}", rs);
2780        continue;
2781      }
2782      responseBuilder
2783        .addPid(Optional.ofNullable(am.createOneUnassignProcedure(info, override, force))
2784          .map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID));
2785    }
2786    return responseBuilder.build();
2787  }
2788
2789  /**
   * Bypass the specified procedures to completion. Each procedure is marked completed, but no
   * actual work is done from its current state/step onwards. Parents of the procedure are also
   * marked for bypass. NOTE: this is a dangerous operation that may be used to unstick buggy
   * procedures; it can leave the system in an incoherent state and may need to be followed by
   * manual cleanup steps/actions by the operator.
   * @return BypassProcedureResponse indicating, per procedure, whether the bypass succeeded
2796   */
2797  @Override
2798  public MasterProtos.BypassProcedureResponse bypassProcedure(RpcController controller,
2799    MasterProtos.BypassProcedureRequest request) throws ServiceException {
2800    try {
2801      LOG.info("{} bypass procedures={}, waitTime={}, override={}, recursive={}",
2802        server.getClientIdAuditPrefix(), request.getProcIdList(), request.getWaitTime(),
2803        request.getOverride(), request.getRecursive());
2804      List<Boolean> ret =
2805        server.getMasterProcedureExecutor().bypassProcedure(request.getProcIdList(),
2806          request.getWaitTime(), request.getOverride(), request.getRecursive());
2807      return MasterProtos.BypassProcedureResponse.newBuilder().addAllBypassed(ret).build();
2808    } catch (IOException e) {
2809      throw new ServiceException(e);
2810    }
2811  }
2812
2813  @Override
2814  public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProcedure(
2815    RpcController controller, MasterProtos.ScheduleServerCrashProcedureRequest request)
2816    throws ServiceException {
2817    List<Long> pids = new ArrayList<>();
2818    for (HBaseProtos.ServerName sn : request.getServerNameList()) {
2819      ServerName serverName = ProtobufUtil.toServerName(sn);
2820      LOG.info("{} schedule ServerCrashProcedure for {}", this.server.getClientIdAuditPrefix(),
2821        serverName);
2822      if (shouldSubmitSCP(serverName)) {
2823        pids.add(this.server.getServerManager().expireServer(serverName, true));
2824      } else {
2825        pids.add(Procedure.NO_PROC_ID);
2826      }
2827    }
2828    return MasterProtos.ScheduleServerCrashProcedureResponse.newBuilder().addAllPid(pids).build();
2829  }
2830
2831  @Override
2832  public MasterProtos.ScheduleSCPsForUnknownServersResponse scheduleSCPsForUnknownServers(
2833    RpcController controller, MasterProtos.ScheduleSCPsForUnknownServersRequest request)
2834    throws ServiceException {
2835    List<Long> pids = new ArrayList<>();
2836    final Set<ServerName> serverNames = server.getAssignmentManager().getRegionStates()
2837      .getRegionStates().stream().map(RegionState::getServerName).collect(Collectors.toSet());
2838
2839    final Set<ServerName> unknownServerNames = serverNames.stream()
2840      .filter(sn -> server.getServerManager().isServerUnknown(sn)).collect(Collectors.toSet());
2841
2842    for (ServerName sn : unknownServerNames) {
2843      LOG.info("{} schedule ServerCrashProcedure for unknown {}",
2844        this.server.getClientIdAuditPrefix(), sn);
2845      if (shouldSubmitSCP(sn)) {
2846        pids.add(this.server.getServerManager().expireServer(sn, true));
2847      } else {
2848        pids.add(Procedure.NO_PROC_ID);
2849      }
2850    }
2851    return MasterProtos.ScheduleSCPsForUnknownServersResponse.newBuilder().addAllPid(pids).build();
2852  }
2853
2854  @Override
2855  public FixMetaResponse fixMeta(RpcController controller, FixMetaRequest request)
2856    throws ServiceException {
2857    rpcPreCheck("fixMeta");
2858    try {
2859      MetaFixer mf = new MetaFixer(this.server);
2860      mf.fix();
2861      return FixMetaResponse.newBuilder().build();
2862    } catch (IOException ioe) {
2863      throw new ServiceException(ioe);
2864    }
2865  }
2866
2867  @Override
2868  public SwitchRpcThrottleResponse switchRpcThrottle(RpcController controller,
2869    SwitchRpcThrottleRequest request) throws ServiceException {
2870    try {
2871      server.checkInitialized();
2872      return server.getMasterQuotaManager().switchRpcThrottle(request);
2873    } catch (Exception e) {
2874      throw new ServiceException(e);
2875    }
2876  }
2877
2878  @Override
2879  public MasterProtos.IsRpcThrottleEnabledResponse isRpcThrottleEnabled(RpcController controller,
2880    MasterProtos.IsRpcThrottleEnabledRequest request) throws ServiceException {
2881    try {
2882      server.checkInitialized();
2883      return server.getMasterQuotaManager().isRpcThrottleEnabled(request);
2884    } catch (Exception e) {
2885      throw new ServiceException(e);
2886    }
2887  }
2888
2889  @Override
2890  public SwitchExceedThrottleQuotaResponse switchExceedThrottleQuota(RpcController controller,
2891    SwitchExceedThrottleQuotaRequest request) throws ServiceException {
2892    try {
2893      server.checkInitialized();
2894      return server.getMasterQuotaManager().switchExceedThrottleQuota(request);
2895    } catch (Exception e) {
2896      throw new ServiceException(e);
2897    }
2898  }
2899
2900  @Override
2901  public GrantResponse grant(RpcController controller, GrantRequest request)
2902    throws ServiceException {
2903    try {
2904      server.checkInitialized();
2905      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
2906        final UserPermission perm =
2907          ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
2908        boolean mergeExistingPermissions = request.getMergeExistingPermissions();
2909        server.cpHost.preGrant(perm, mergeExistingPermissions);
2910        try (Table table = server.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
2911          PermissionStorage.addUserPermission(getConfiguration(), perm, table,
2912            mergeExistingPermissions);
2913        }
2914        server.cpHost.postGrant(perm, mergeExistingPermissions);
2915        return GrantResponse.getDefaultInstance();
2916      } else {
2917        throw new DoNotRetryIOException(
2918          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2919      }
2920    } catch (IOException ioe) {
2921      throw new ServiceException(ioe);
2922    }
2923  }
2924
2925  @Override
2926  public RevokeResponse revoke(RpcController controller, RevokeRequest request)
2927    throws ServiceException {
2928    try {
2929      server.checkInitialized();
2930      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
2931        final UserPermission userPermission =
2932          ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
2933        server.cpHost.preRevoke(userPermission);
2934        try (Table table = server.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
2935          PermissionStorage.removeUserPermission(server.getConfiguration(), userPermission, table);
2936        }
2937        server.cpHost.postRevoke(userPermission);
2938        return RevokeResponse.getDefaultInstance();
2939      } else {
2940        throw new DoNotRetryIOException(
2941          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2942      }
2943    } catch (IOException ioe) {
2944      throw new ServiceException(ioe);
2945    }
2946  }
2947
2948  @Override
2949  public GetUserPermissionsResponse getUserPermissions(RpcController controller,
2950    GetUserPermissionsRequest request) throws ServiceException {
2951    try {
2952      server.checkInitialized();
2953      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
2954        final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null;
2955        String namespace =
2956          request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null;
2957        TableName table =
2958          request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
2959        byte[] cf = request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null;
2960        byte[] cq =
2961          request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null;
2962        Type permissionType = request.hasType() ? request.getType() : null;
2963        server.getMasterCoprocessorHost().preGetUserPermissions(userName, namespace, table, cf, cq);
2964
2965        List<UserPermission> perms = null;
2966        if (permissionType == Type.Table) {
          boolean filter = (cf != null || userName != null);
2968          perms = PermissionStorage.getUserTablePermissions(server.getConfiguration(), table, cf,
2969            cq, userName, filter);
2970        } else if (permissionType == Type.Namespace) {
          perms = PermissionStorage.getUserNamespacePermissions(server.getConfiguration(),
            namespace, userName, userName != null);
2973        } else {
          perms = PermissionStorage.getUserPermissions(server.getConfiguration(), null, null, null,
            userName, userName != null);
          // Skip superusers when a filter user is specified
          if (userName == null) {
            // Add superusers explicitly to the result set, since PermissionStorage does not store
            // them. Use acl as the table name to stay in line with the results for global admins
            // and to avoid leaking any information about who the superusers are.
2981            for (String user : Superusers.getSuperUsers()) {
2982              perms.add(new UserPermission(user,
2983                Permission.newBuilder().withActions(Action.values()).build()));
2984            }
2985          }
2986        }
2987
2988        server.getMasterCoprocessorHost().postGetUserPermissions(userName, namespace, table, cf,
2989          cq);
2990        AccessControlProtos.GetUserPermissionsResponse response =
2991          ShadedAccessControlUtil.buildGetUserPermissionsResponse(perms);
2992        return response;
2993      } else {
2994        throw new DoNotRetryIOException(
2995          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2996      }
2997    } catch (IOException ioe) {
2998      throw new ServiceException(ioe);
2999    }
3000  }
3001
3002  @Override
3003  public HasUserPermissionsResponse hasUserPermissions(RpcController controller,
3004    HasUserPermissionsRequest request) throws ServiceException {
3005    try {
3006      server.checkInitialized();
3007      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
3008        User caller = RpcServer.getRequestUser().orElse(null);
3009        String userName =
3010          request.hasUserName() ? request.getUserName().toStringUtf8() : caller.getShortName();
3011        List<Permission> permissions = new ArrayList<>();
3012        for (int i = 0; i < request.getPermissionCount(); i++) {
3013          permissions.add(ShadedAccessControlUtil.toPermission(request.getPermission(i)));
3014        }
3015        server.getMasterCoprocessorHost().preHasUserPermissions(userName, permissions);
3016        if (!caller.getShortName().equals(userName)) {
3017          List<String> groups = AccessChecker.getUserGroups(userName);
3018          caller = new InputUser(userName, groups.toArray(new String[groups.size()]));
3019        }
3020        List<Boolean> hasUserPermissions = new ArrayList<>();
3021        if (getAccessChecker() != null) {
3022          for (Permission permission : permissions) {
3023            boolean hasUserPermission =
3024              getAccessChecker().hasUserPermission(caller, "hasUserPermissions", permission);
3025            hasUserPermissions.add(hasUserPermission);
3026          }
3027        } else {
3028          for (int i = 0; i < permissions.size(); i++) {
3029            hasUserPermissions.add(true);
3030          }
3031        }
3032        server.getMasterCoprocessorHost().postHasUserPermissions(userName, permissions);
3033        HasUserPermissionsResponse.Builder builder =
3034          HasUserPermissionsResponse.newBuilder().addAllHasUserPermission(hasUserPermissions);
3035        return builder.build();
3036      } else {
3037        throw new DoNotRetryIOException(
3038          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
3039      }
3040    } catch (IOException ioe) {
3041      throw new ServiceException(ioe);
3042    }
3043  }
3044
3045  private boolean shouldSubmitSCP(ServerName serverName) {
    // Check whether an SCP for this server is already running.
3047    List<Procedure<MasterProcedureEnv>> procedures =
3048      server.getMasterProcedureExecutor().getProcedures();
3049    for (Procedure<MasterProcedureEnv> procedure : procedures) {
3050      if (procedure instanceof ServerCrashProcedure) {
3051        if (
3052          serverName.compareTo(((ServerCrashProcedure) procedure).getServerName()) == 0
3053            && !procedure.isFinished()
3054        ) {
3055          LOG.info("there is already a SCP of this server {} running, pid {}", serverName,
3056            procedure.getProcId());
3057          return false;
3058        }
3059      }
3060    }
3061    return true;
3062  }
3063
3064  @Override
3065  public GetRSGroupInfoResponse getRSGroupInfo(RpcController controller,
3066    GetRSGroupInfoRequest request) throws ServiceException {
3067    String groupName = request.getRSGroupName();
3068    LOG.info(
3069      server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" + groupName);
3070    try {
3071      if (server.getMasterCoprocessorHost() != null) {
3072        server.getMasterCoprocessorHost().preGetRSGroupInfo(groupName);
3073      }
3074      RSGroupInfo rsGroupInfo = server.getRSGroupInfoManager().getRSGroup(groupName);
3075      GetRSGroupInfoResponse resp;
3076      if (rsGroupInfo != null) {
3077        resp = GetRSGroupInfoResponse.newBuilder()
3078          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
3079      } else {
3080        resp = GetRSGroupInfoResponse.getDefaultInstance();
3081      }
3082      if (server.getMasterCoprocessorHost() != null) {
3083        server.getMasterCoprocessorHost().postGetRSGroupInfo(groupName);
3084      }
3085      return resp;
3086    } catch (IOException e) {
3087      throw new ServiceException(e);
3088    }
3089  }
3090
3091  @Override
3092  public GetRSGroupInfoOfTableResponse getRSGroupInfoOfTable(RpcController controller,
3093    GetRSGroupInfoOfTableRequest request) throws ServiceException {
3094    TableName tableName = ProtobufUtil.toTableName(request.getTableName());
3095    LOG.info(
3096      server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName);
3097    try {
3098      if (server.getMasterCoprocessorHost() != null) {
3099        server.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName);
3100      }
3101      GetRSGroupInfoOfTableResponse resp;
3102      TableDescriptor td = server.getTableDescriptors().get(tableName);
3103      if (td == null) {
3104        resp = GetRSGroupInfoOfTableResponse.getDefaultInstance();
3105      } else {
3106        RSGroupInfo rsGroupInfo =
3107          RSGroupUtil.getRSGroupInfo(server, server.getRSGroupInfoManager(), tableName)
3108            .orElse(server.getRSGroupInfoManager().getRSGroup(RSGroupInfo.DEFAULT_GROUP));
3109        resp = GetRSGroupInfoOfTableResponse.newBuilder()
3110          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
3111      }
3112      if (server.getMasterCoprocessorHost() != null) {
3113        server.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName);
3114      }
3115      return resp;
3116    } catch (IOException e) {
3117      throw new ServiceException(e);
3118    }
3119  }
3120
3121  @Override
3122  public GetRSGroupInfoOfServerResponse getRSGroupInfoOfServer(RpcController controller,
3123    GetRSGroupInfoOfServerRequest request) throws ServiceException {
3124    Address hp =
3125      Address.fromParts(request.getServer().getHostName(), request.getServer().getPort());
3126    LOG.info(server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp);
3127    try {
3128      if (server.getMasterCoprocessorHost() != null) {
3129        server.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp);
3130      }
3131      RSGroupInfo rsGroupInfo = server.getRSGroupInfoManager().getRSGroupOfServer(hp);
3132      GetRSGroupInfoOfServerResponse resp;
3133      if (rsGroupInfo != null) {
3134        resp = GetRSGroupInfoOfServerResponse.newBuilder()
3135          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
3136      } else {
3137        resp = GetRSGroupInfoOfServerResponse.getDefaultInstance();
3138      }
3139      if (server.getMasterCoprocessorHost() != null) {
3140        server.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp);
3141      }
3142      return resp;
3143    } catch (IOException e) {
3144      throw new ServiceException(e);
3145    }
3146  }
3147
3148  @Override
3149  public MoveServersResponse moveServers(RpcController controller, MoveServersRequest request)
3150    throws ServiceException {
3151    Set<Address> hostPorts = Sets.newHashSet();
3152    MoveServersResponse.Builder builder = MoveServersResponse.newBuilder();
3153    for (HBaseProtos.ServerName el : request.getServersList()) {
3154      hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
3155    }
3156    LOG.info(server.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup "
3157      + request.getTargetGroup());
3158    try {
3159      if (server.getMasterCoprocessorHost() != null) {
3160        server.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup());
3161      }
3162      server.getRSGroupInfoManager().moveServers(hostPorts, request.getTargetGroup());
3163      if (server.getMasterCoprocessorHost() != null) {
3164        server.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup());
3165      }
3166    } catch (IOException e) {
3167      throw new ServiceException(e);
3168    }
3169    return builder.build();
3170  }
3171
3172  @Override
3173  public AddRSGroupResponse addRSGroup(RpcController controller, AddRSGroupRequest request)
3174    throws ServiceException {
3175    AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder();
3176    LOG.info(server.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName());
3177    try {
3178      if (server.getMasterCoprocessorHost() != null) {
3179        server.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName());
3180      }
3181      server.getRSGroupInfoManager().addRSGroup(new RSGroupInfo(request.getRSGroupName()));
3182      if (server.getMasterCoprocessorHost() != null) {
3183        server.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName());
3184      }
3185    } catch (IOException e) {
3186      throw new ServiceException(e);
3187    }
3188    return builder.build();
3189  }
3190
3191  @Override
3192  public RemoveRSGroupResponse removeRSGroup(RpcController controller, RemoveRSGroupRequest request)
3193    throws ServiceException {
3194    RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder();
3195    LOG.info(server.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName());
3196    try {
3197      if (server.getMasterCoprocessorHost() != null) {
3198        server.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName());
3199      }
3200      server.getRSGroupInfoManager().removeRSGroup(request.getRSGroupName());
3201      if (server.getMasterCoprocessorHost() != null) {
3202        server.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName());
3203      }
3204    } catch (IOException e) {
3205      throw new ServiceException(e);
3206    }
3207    return builder.build();
3208  }
3209
3210  @Override
3211  public BalanceRSGroupResponse balanceRSGroup(RpcController controller,
3212    BalanceRSGroupRequest request) throws ServiceException {
3213    BalanceRequest balanceRequest = ProtobufUtil.toBalanceRequest(request);
3214
3215    BalanceRSGroupResponse.Builder builder =
3216      BalanceRSGroupResponse.newBuilder().setBalanceRan(false);
3217
3218    LOG.info(
3219      server.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName());
3220    try {
3221      if (server.getMasterCoprocessorHost() != null) {
3222        server.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName(),
3223          balanceRequest);
3224      }
3225      BalanceResponse response =
3226        server.getRSGroupInfoManager().balanceRSGroup(request.getRSGroupName(), balanceRequest);
3227      ProtobufUtil.populateBalanceRSGroupResponse(builder, response);
3228      if (server.getMasterCoprocessorHost() != null) {
3229        server.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(),
3230          balanceRequest, response);
3231      }
3232    } catch (IOException e) {
3233      throw new ServiceException(e);
3234    }
3235    return builder.build();
3236  }
3237
3238  @Override
3239  public ListRSGroupInfosResponse listRSGroupInfos(RpcController controller,
3240    ListRSGroupInfosRequest request) throws ServiceException {
3241    ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder();
3242    LOG.info(server.getClientIdAuditPrefix() + " list rsgroup");
3243    try {
3244      if (server.getMasterCoprocessorHost() != null) {
3245        server.getMasterCoprocessorHost().preListRSGroups();
3246      }
3247      List<RSGroupInfo> rsGroupInfos = server.getRSGroupInfoManager().listRSGroups().stream()
3248        .map(RSGroupInfo::new).collect(Collectors.toList());
3249      Map<String, RSGroupInfo> name2Info = new HashMap<>();
3250      List<TableDescriptor> needToFill =
3251        new ArrayList<>(server.getTableDescriptors().getAll().values());
3252      for (RSGroupInfo rsGroupInfo : rsGroupInfos) {
3253        name2Info.put(rsGroupInfo.getName(), rsGroupInfo);
3254        for (TableDescriptor td : server.getTableDescriptors().getAll().values()) {
3255          if (rsGroupInfo.containsTable(td.getTableName())) {
3256            needToFill.remove(td);
3257          }
3258        }
3259      }
3260      for (TableDescriptor td : needToFill) {
3261        String groupName = td.getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP);
3262        RSGroupInfo rsGroupInfo = name2Info.get(groupName);
3263        if (rsGroupInfo != null) {
3264          rsGroupInfo.addTable(td.getTableName());
3265        }
3266      }
3267      for (RSGroupInfo rsGroupInfo : rsGroupInfos) {
        // TODO: this could be done once outside the loop instead of scanning every time.
3269        builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo));
3270      }
3271      if (server.getMasterCoprocessorHost() != null) {
3272        server.getMasterCoprocessorHost().postListRSGroups();
3273      }
3274    } catch (IOException e) {
3275      throw new ServiceException(e);
3276    }
3277    return builder.build();
3278  }
3279
3280  @Override
3281  public RemoveServersResponse removeServers(RpcController controller, RemoveServersRequest request)
3282    throws ServiceException {
3283    RemoveServersResponse.Builder builder = RemoveServersResponse.newBuilder();
3284    Set<Address> servers = Sets.newHashSet();
3285    for (HBaseProtos.ServerName el : request.getServersList()) {
3286      servers.add(Address.fromParts(el.getHostName(), el.getPort()));
3287    }
3288    LOG.info(
3289      server.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " + servers);
3290    try {
3291      if (server.getMasterCoprocessorHost() != null) {
3292        server.getMasterCoprocessorHost().preRemoveServers(servers);
3293      }
3294      server.getRSGroupInfoManager().removeServers(servers);
3295      if (server.getMasterCoprocessorHost() != null) {
3296        server.getMasterCoprocessorHost().postRemoveServers(servers);
3297      }
3298    } catch (IOException e) {
3299      throw new ServiceException(e);
3300    }
3301    return builder.build();
3302  }
3303
3304  @Override
3305  public ListTablesInRSGroupResponse listTablesInRSGroup(RpcController controller,
3306    ListTablesInRSGroupRequest request) throws ServiceException {
3307    ListTablesInRSGroupResponse.Builder builder = ListTablesInRSGroupResponse.newBuilder();
3308    String groupName = request.getGroupName();
3309    LOG.info(server.getClientIdAuditPrefix() + " list tables in rsgroup " + groupName);
3310    try {
3311      if (server.getMasterCoprocessorHost() != null) {
3312        server.getMasterCoprocessorHost().preListTablesInRSGroup(groupName);
3313      }
3314      RSGroupUtil.listTablesInRSGroup(server, groupName).stream()
3315        .map(ProtobufUtil::toProtoTableName).forEach(builder::addTableName);
3316      if (server.getMasterCoprocessorHost() != null) {
3317        server.getMasterCoprocessorHost().postListTablesInRSGroup(groupName);
3318      }
3319    } catch (IOException e) {
3320      throw new ServiceException(e);
3321    }
3322    return builder.build();
3323  }
3324
3325  @Override
3326  public GetConfiguredNamespacesAndTablesInRSGroupResponse
3327    getConfiguredNamespacesAndTablesInRSGroup(RpcController controller,
3328      GetConfiguredNamespacesAndTablesInRSGroupRequest request) throws ServiceException {
3329    GetConfiguredNamespacesAndTablesInRSGroupResponse.Builder builder =
3330      GetConfiguredNamespacesAndTablesInRSGroupResponse.newBuilder();
3331    String groupName = request.getGroupName();
3332    LOG.info(server.getClientIdAuditPrefix() + " get configured namespaces and tables in rsgroup "
3333      + groupName);
3334    try {
3335      if (server.getMasterCoprocessorHost() != null) {
3336        server.getMasterCoprocessorHost().preGetConfiguredNamespacesAndTablesInRSGroup(groupName);
3337      }
3338      for (NamespaceDescriptor nd : server.getClusterSchema().getNamespaces()) {
3339        if (groupName.equals(nd.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP))) {
3340          builder.addNamespace(nd.getName());
3341        }
3342      }
3343      for (TableDescriptor td : server.getTableDescriptors().getAll().values()) {
3344        if (td.getRegionServerGroup().map(g -> g.equals(groupName)).orElse(false)) {
3345          builder.addTableName(ProtobufUtil.toProtoTableName(td.getTableName()));
3346        }
3347      }
3348      if (server.getMasterCoprocessorHost() != null) {
3349        server.getMasterCoprocessorHost().postGetConfiguredNamespacesAndTablesInRSGroup(groupName);
3350      }
3351    } catch (IOException e) {
3352      throw new ServiceException(e);
3353    }
3354    return builder.build();
3355  }
3356
3357  @Override
3358  public RenameRSGroupResponse renameRSGroup(RpcController controller, RenameRSGroupRequest request)
3359    throws ServiceException {
3360    RenameRSGroupResponse.Builder builder = RenameRSGroupResponse.newBuilder();
3361    String oldRSGroup = request.getOldRsgroupName();
3362    String newRSGroup = request.getNewRsgroupName();
    LOG.info("{} rename rsgroup from {} to {}", server.getClientIdAuditPrefix(), oldRSGroup,
      newRSGroup);
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preRenameRSGroup(oldRSGroup, newRSGroup);
      }
      server.getRSGroupInfoManager().renameRSGroup(oldRSGroup, newRSGroup);
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postRenameRSGroup(oldRSGroup, newRSGroup);
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return builder.build();
  }

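  /**
   * Applies the configuration key/value pairs carried in the request to the named RSGroup via the
   * RSGroupInfoManager, wrapped in the coprocessor pre/post hooks.
   */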
  @Override
  public UpdateRSGroupConfigResponse updateRSGroupConfig(RpcController controller,
    UpdateRSGroupConfigRequest request) throws ServiceException {
    UpdateRSGroupConfigResponse.Builder builder = UpdateRSGroupConfigResponse.newBuilder();
    String groupName = request.getGroupName();
    Map<String, String> configuration = new HashMap<>();
    request.getConfigurationList().forEach(p -> configuration.put(p.getName(), p.getValue()));
    LOG.info("{} update rsgroup {} configuration {}", server.getClientIdAuditPrefix(), groupName,
      configuration);
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preUpdateRSGroupConfig(groupName, configuration);
      }
      server.getRSGroupInfoManager().updateRSGroupConfig(groupName, configuration);
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postUpdateRSGroupConfig(groupName, configuration);
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return builder.build();
  }

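  /**
   * Serves the generic log-entry RPC on the master. The request names a protobuf log request
   * class and carries its serialized payload; only balancer decision and balancer rejection
   * queries are recognized here, and any other payload is rejected as invalid.
   */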
  @Override
  public HBaseProtos.LogEntry getLogEntries(RpcController controller,
    HBaseProtos.LogRequest request) throws ServiceException {
    try {
      final String logClassName = request.getLogClassName();
      Class<?> logClass = Class.forName(logClassName).asSubclass(Message.class);
      Method method = logClass.getMethod("parseFrom", ByteString.class);
      if (logClassName.contains("BalancerDecisionsRequest")) {
        MasterProtos.BalancerDecisionsRequest balancerDecisionsRequest =
          (MasterProtos.BalancerDecisionsRequest) method.invoke(null, request.getLogMessage());
        MasterProtos.BalancerDecisionsResponse balancerDecisionsResponse =
          getBalancerDecisions(balancerDecisionsRequest);
        return HBaseProtos.LogEntry.newBuilder()
          .setLogClassName(balancerDecisionsResponse.getClass().getName())
          .setLogMessage(balancerDecisionsResponse.toByteString()).build();
      } else if (logClassName.contains("BalancerRejectionsRequest")) {
        MasterProtos.BalancerRejectionsRequest balancerRejectionsRequest =
          (MasterProtos.BalancerRejectionsRequest) method.invoke(null, request.getLogMessage());
        MasterProtos.BalancerRejectionsResponse balancerRejectionsResponse =
          getBalancerRejections(balancerRejectionsRequest);
        return HBaseProtos.LogEntry.newBuilder()
          .setLogClassName(balancerRejectionsResponse.getClass().getName())
          .setLogMessage(balancerRejectionsResponse.toByteString()).build();
      }
    } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException
      | InvocationTargetException e) {
      LOG.error("Error while retrieving log entries.", e);
      throw new ServiceException(e);
    }
    throw new ServiceException("Invalid request params");
  }

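  /**
   * Fetches recently recorded balancer decisions from the named queue recorder, or an empty
   * response if the recorder is not available.
   */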
  private MasterProtos.BalancerDecisionsResponse
    getBalancerDecisions(MasterProtos.BalancerDecisionsRequest request) {
    final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder();
    if (namedQueueRecorder == null) {
      return MasterProtos.BalancerDecisionsResponse.newBuilder()
        .addAllBalancerDecision(Collections.emptyList()).build();
    }
    final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest();
    namedQueueGetRequest.setNamedQueueEvent(BalancerDecisionDetails.BALANCER_DECISION_EVENT);
    namedQueueGetRequest.setBalancerDecisionsRequest(request);
    NamedQueueGetResponse namedQueueGetResponse =
      namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest);
    List<RecentLogs.BalancerDecision> balancerDecisions = namedQueueGetResponse != null
      ? namedQueueGetResponse.getBalancerDecisions()
      : Collections.emptyList();
    return MasterProtos.BalancerDecisionsResponse.newBuilder()
      .addAllBalancerDecision(balancerDecisions).build();
  }

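  /**
   * Fetches recently recorded balancer rejections from the named queue recorder, or an empty
   * response if the recorder is not available.
   */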
  private MasterProtos.BalancerRejectionsResponse
    getBalancerRejections(MasterProtos.BalancerRejectionsRequest request) {
    final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder();
    if (namedQueueRecorder == null) {
      return MasterProtos.BalancerRejectionsResponse.newBuilder()
        .addAllBalancerRejection(Collections.emptyList()).build();
    }
    final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest();
    namedQueueGetRequest.setNamedQueueEvent(BalancerRejectionDetails.BALANCER_REJECTION_EVENT);
    namedQueueGetRequest.setBalancerRejectionsRequest(request);
    NamedQueueGetResponse namedQueueGetResponse =
      namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest);
    List<RecentLogs.BalancerRejection> balancerRejections = namedQueueGetResponse != null
      ? namedQueueGetResponse.getBalancerRejections()
      : Collections.emptyList();
    return MasterProtos.BalancerRejectionsResponse.newBuilder()
      .addAllBalancerRejection(balancerRejections).build();
  }

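  /**
   * Returns the RegionInfo for the region named in the request. If the name does not resolve to a
   * known region it may still be a MOB region name, in which case a dummy MOB RegionInfo (and,
   * when requested, the MOB compaction state) is returned; otherwise the region is reported as
   * unknown.
   */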
  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public GetRegionInfoResponse getRegionInfo(final RpcController controller,
    final GetRegionInfoRequest request) throws ServiceException {
    final GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
    final RegionInfo info = getRegionInfo(request.getRegion());
    if (info != null) {
      builder.setRegionInfo(ProtobufUtil.toRegionInfo(info));
    } else {
      // Is it a MOB name? These work differently.
      byte[] regionName = request.getRegion().getValue().toByteArray();
      TableName tableName = RegionInfo.getTable(regionName);
      if (MobUtils.isMobRegionName(tableName, regionName)) {
        // Build a dummy MOB region info; it can also carry the MOB compaction state if requested.
        RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName);
        builder.setRegionInfo(ProtobufUtil.toRegionInfo(mobRegionInfo));
        if (request.hasCompactionState() && request.getCompactionState()) {
          builder.setCompactionState(server.getMobCompactionState(tableName));
        }
      } else {
        // Neither a known region nor a MOB region name; report it as unknown.
        throw new ServiceException(new UnknownRegionException(Bytes.toString(regionName)));
      }
    }
    return builder.build();
  }

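  // The region-level admin RPCs below are not applicable on the master; each is rejected with a
  // DoNotRetryIOException so that clients fail fast instead of retrying.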
  @Override
  public GetStoreFileResponse getStoreFile(RpcController controller, GetStoreFileRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetOnlineRegionResponse getOnlineRegion(RpcController controller,
    GetOnlineRegionRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public OpenRegionResponse openRegion(RpcController controller, OpenRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public WarmupRegionResponse warmupRegion(RpcController controller, WarmupRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public CloseRegionResponse closeRegion(RpcController controller, CloseRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public FlushRegionResponse flushRegion(RpcController controller, FlushRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public CompactionSwitchResponse compactionSwitch(RpcController controller,
    CompactionSwitchRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public CompactRegionResponse compactRegion(RpcController controller, CompactRegionRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ReplicateWALEntryResponse replicateWALEntry(RpcController controller,
    ReplicateWALEntryRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ReplicateWALEntryResponse replay(RpcController controller,
    ReplicateWALEntryRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public RollWALWriterResponse rollWALWriter(RpcController controller, RollWALWriterRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetServerInfoResponse getServerInfo(RpcController controller, GetServerInfoRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public StopServerResponse stopServer(RpcController controller, StopServerRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller,
    UpdateFavoredNodesRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetRegionLoadResponse getRegionLoad(RpcController controller, GetRegionLoadRequest request)
    throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller,
    ClearCompactionQueuesRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller,
    ClearRegionBlockCacheRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(RpcController controller,
    GetSpaceQuotaSnapshotsRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public ExecuteProceduresResponse executeProcedures(RpcController controller,
    ExecuteProceduresRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

  @Override
  public GetCachedFilesListResponse getCachedFilesList(RpcController controller,
    GetCachedFilesListRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

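  /**
   * Returns up to the requested number of live region servers, drawn from a shuffled copy of the
   * live server list, together with the total number of live servers.
   */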
  @Override
  public GetLiveRegionServersResponse getLiveRegionServers(RpcController controller,
    GetLiveRegionServersRequest request) throws ServiceException {
    List<ServerName> regionServers = new ArrayList<>(server.getLiveRegionServers());
    Collections.shuffle(regionServers, ThreadLocalRandom.current());
    GetLiveRegionServersResponse.Builder builder =
      GetLiveRegionServersResponse.newBuilder().setTotal(regionServers.size());
    regionServers.stream().limit(request.getCount()).map(ProtobufUtil::toServerName)
      .forEach(builder::addServer);
    return builder.build();
  }

  @Override
  public ReplicateWALEntryResponse replicateToReplica(RpcController controller,
    ReplicateWALEntryRequest request) throws ServiceException {
    throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
  }

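  /**
   * Flushes the master's local store after the standard RPC pre-check; failures surface as
   * ServiceException.
   */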
  @Override
  public FlushMasterStoreResponse flushMasterStore(RpcController controller,
    FlushMasterStoreRequest request) throws ServiceException {
    rpcPreCheck("flushMasterStore");
    try {
      server.flushMasterStore();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return FlushMasterStoreResponse.newBuilder().build();
  }

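  /**
   * Submits a flush of the given table, optionally restricted to the non-empty column families
   * named in the request, and returns the id of the resulting procedure.
   */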
  @Override
  public FlushTableResponse flushTable(RpcController controller, FlushTableRequest req)
    throws ServiceException {
    TableName tableName = ProtobufUtil.toTableName(req.getTableName());
    List<byte[]> columnFamilies = req.getColumnFamilyCount() > 0
      ? req.getColumnFamilyList().stream().filter(cf -> !cf.isEmpty()).map(ByteString::toByteArray)
        .collect(Collectors.toList())
      : null;
    try {
      long procId =
        server.flushTable(tableName, columnFamilies, req.getNonceGroup(), req.getNonce());
      return FlushTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }
}