/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import static org.apache.hadoop.hbase.master.MasterWalManager.META_FILTER;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.BindException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.client.VersionInfoUtil;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.PriorityFunction;
import org.apache.hadoop.hbase.ipc.QosPriority;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
import org.apache.hadoop.hbase.ipc.RpcServerFactory;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
import org.apache.hadoop.hbase.procedure2.LockType;
import org.apache.hadoop.hbase.procedure2.LockedResource;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.AccessChecker.InputUser;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.security.access.PermissionStorage;
import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.security.visibility.VisibilityController;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.ServiceDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
import org.apache.hbase.thirdparty.com.google.protobuf.Service;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetActiveMasterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetActiveMasterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterIdRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterIdResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetRegionStateInMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.NamespaceQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.TableQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetConfiguredNamespacesAndTablesInRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetConfiguredNamespacesAndTablesInRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListTablesInRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListTablesInRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;

/**
 * Implements the master RPC services.
 */
@InterfaceAudience.Private
@SuppressWarnings("deprecation")
public class MasterRpcServices extends RSRpcServices implements
    MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
    LockService.BlockingInterface, HbckService.BlockingInterface,
    ClientMetaService.BlockingInterface {

  private static final Logger LOG = LoggerFactory.getLogger(MasterRpcServices.class.getName());
  private static final Logger AUDITLOG =
      LoggerFactory.getLogger("SecurityLogger."+MasterRpcServices.class.getName());

  private final HMaster master;

  /**
   * @return Subset of configuration to pass to initializing regionservers, e.g.
   *     the filesystem and root directory to use.
   */
  private RegionServerStartupResponse.Builder createConfigurationSubset() {
    RegionServerStartupResponse.Builder resp = addConfig(
      RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR);
    resp = addConfig(resp, "fs.defaultFS");
    return addConfig(resp, "hbase.master.info.port");
  }

  private RegionServerStartupResponse.Builder addConfig(
      final RegionServerStartupResponse.Builder resp, final String key) {
    NameStringPair.Builder entry = NameStringPair.newBuilder()
      .setName(key)
      .setValue(master.getConfiguration().get(key));
    resp.addMapEntries(entry.build());
    return resp;
  }

  public MasterRpcServices(HMaster m) throws IOException {
    super(m);
    master = m;
  }

  @Override
  protected Class<?> getRpcSchedulerFactoryClass() {
    Configuration conf = getConfiguration();
    if (conf != null) {
      return conf.getClass(MASTER_RPC_SCHEDULER_FACTORY_CLASS, super.getRpcSchedulerFactoryClass());
    } else {
      return super.getRpcSchedulerFactoryClass();
    }
  }

  @Override
  protected RpcServerInterface createRpcServer(final Server server,
      final RpcSchedulerFactory rpcSchedulerFactory, final InetSocketAddress bindAddress,
      final String name) throws IOException {
    final Configuration conf = regionServer.getConfiguration();
    // The RpcServer at the HMaster enables the ByteBuffer pool by default only if the
    // HMaster can host user table regions.
    boolean reservoirEnabled = conf.getBoolean(ByteBuffAllocator.ALLOCATOR_POOL_ENABLED_KEY,
      LoadBalancer.isMasterCanHostUserRegions(conf));
    try {
      return RpcServerFactory.createRpcServer(server, name, getServices(),
          bindAddress, // use final bindAddress for this server.
          conf, rpcSchedulerFactory.create(conf, this, server), reservoirEnabled);
    } catch (BindException be) {
      throw new IOException(be.getMessage() + ". To switch ports use the '"
          + HConstants.MASTER_PORT + "' configuration property.",
          be.getCause() != null ? be.getCause() : be);
    }
  }

  @Override
  protected PriorityFunction createPriority() {
    return new MasterAnnotationReadingPriorityFunction(this);
  }

  /**
   * Performs the following pre-checks, in order:
   * <ol>
   *   <li>Master is initialized</li>
   *   <li>Rpc caller has admin permissions</li>
   * </ol>
   * @param requestName name of the rpc request; used to provide context when reporting failures.
   * @throws ServiceException if any of the above pre-checks fails.
   */
  private void rpcPreCheck(String requestName) throws ServiceException {
    try {
      master.checkInitialized();
      requirePermission(requestName, Permission.Action.ADMIN);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  enum BalanceSwitchMode {
    SYNC,
    ASYNC
  }

  /**
   * Sets the balancer switch on or off according to the given BalanceSwitchMode.
   * @param b new balancer switch value
   * @param mode whether to apply the switch synchronously or asynchronously
   * @return the previous balancer switch value
   */
  boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException {
    boolean oldValue = master.loadBalancerTracker.isBalancerOn();
    boolean newValue = b;
    try {
      if (master.cpHost != null) {
        master.cpHost.preBalanceSwitch(newValue);
      }
      try {
        if (mode == BalanceSwitchMode.SYNC) {
          synchronized (master.getLoadBalancer()) {
            master.loadBalancerTracker.setBalancerOn(newValue);
          }
        } else {
          master.loadBalancerTracker.setBalancerOn(newValue);
        }
      } catch (KeeperException ke) {
        throw new IOException(ke);
      }
      LOG.info(master.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue);
      if (master.cpHost != null) {
        master.cpHost.postBalanceSwitch(oldValue, newValue);
      }
      master.getLoadBalancer().updateBalancerStatus(newValue);
    } catch (IOException ioe) {
      LOG.warn("Error flipping balance switch", ioe);
    }
    return oldValue;
  }

  boolean synchronousBalanceSwitch(final boolean b) throws IOException {
    return switchBalancer(b, BalanceSwitchMode.SYNC);
  }

  /**
   * @return list of blocking services and their security info classes that this server supports
   */
  @Override
  protected List<BlockingServiceAndInterface> getServices() {
    List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
    bssi.add(new BlockingServiceAndInterface(
        MasterService.newReflectiveBlockingService(this),
        MasterService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(
        RegionServerStatusService.newReflectiveBlockingService(this),
        RegionServerStatusService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this),
        LockService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(HbckService.newReflectiveBlockingService(this),
        HbckService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this),
        ClientMetaService.BlockingInterface.class));
    bssi.addAll(super.getServices());
    return bssi;
  }

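  /**
   * Returns the last flushed sequence id (overall and per-store) recorded for the region named
   * in the request. The call is rejected if the master service has not started yet.
   */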
  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller,
      GetLastFlushedSequenceIdRequest request) throws ServiceException {
    try {
      master.checkServiceStarted();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    byte[] encodedRegionName = request.getRegionName().toByteArray();
    RegionStoreSequenceIds ids = master.getServerManager()
      .getLastFlushedSequenceId(encodedRegionName);
    return ResponseConverter.buildGetLastFlushedSequenceIdResponse(ids);
  }

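  /**
   * Handles the periodic load report from a regionserver: records the latest ServerMetrics with
   * the ServerManager, refreshes the set of online regions in the AssignmentManager, and bumps
   * the master request-count metric by the delta since the previous report.
   */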
  @Override
  public RegionServerReportResponse regionServerReport(RpcController controller,
      RegionServerReportRequest request) throws ServiceException {
    try {
      master.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      ClusterStatusProtos.ServerLoad sl = request.getLoad();
      ServerName serverName = ProtobufUtil.toServerName(request.getServer());
      ServerMetrics oldLoad = master.getServerManager().getLoad(serverName);
      ServerMetrics newLoad =
        ServerMetricsBuilder.toServerMetrics(serverName, versionNumber, version, sl);
      master.getServerManager().regionServerReport(serverName, newLoad);
      master.getAssignmentManager().reportOnlineRegions(serverName,
        newLoad.getRegionMetrics().keySet());
      if (sl != null && master.metricsMaster != null) {
        // Up our metrics.
        master.metricsMaster.incrementRequests(
          sl.getTotalNumberOfRequests() - (oldLoad != null ? oldLoad.getRequestCount() : 0));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return RegionServerReportResponse.newBuilder().build();
  }

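  /**
   * Handles regionserver registration at startup: registers the server with the ServerManager
   * and returns a subset of the master configuration (HConstants.HBASE_DIR, fs.defaultFS,
   * hbase.master.info.port) along with the hostname the master saw for the regionserver.
   */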
  @Override
  public RegionServerStartupResponse regionServerStartup(RpcController controller,
      RegionServerStartupRequest request) throws ServiceException {
    // Register with server manager
    try {
      master.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      InetAddress ia = master.getRemoteInetAddress(request.getPort(), request.getServerStartCode());
      // If the regionserver passed a hostname to use, use it instead of doing a
      // reverse DNS lookup.
      ServerName rs =
        master.getServerManager().regionServerStartup(request, versionNumber, version, ia);

      // Send back some config info
      RegionServerStartupResponse.Builder resp = createConfigurationSubset();
      NameStringPair.Builder entry = NameStringPair.newBuilder()
        .setName(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER).setValue(rs.getHostname());
      resp.addMapEntries(entry.build());

      return resp.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

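  /**
   * Records a fatal error reported by a regionserver in the master log and in the master's
   * in-memory list of regionserver fatal errors.
   */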
  @Override
  public ReportRSFatalErrorResponse reportRSFatalError(
      RpcController controller, ReportRSFatalErrorRequest request) throws ServiceException {
    String errorText = request.getErrorMessage();
    ServerName sn = ProtobufUtil.toServerName(request.getServer());
    String msg = sn + " reported a fatal error:\n" + errorText;
    LOG.warn(msg);
    master.rsFatals.add(msg);
    return ReportRSFatalErrorResponse.newBuilder().build();
  }

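  /**
   * Adds a column family to an existing table via a master procedure. Returns the procedure id,
   * or an empty response when the master performed no operation (procId == -1).
   */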
  @Override
  public AddColumnResponse addColumn(RpcController controller,
      AddColumnRequest req) throws ServiceException {
    try {
      long procId = master.addColumn(
          ProtobufUtil.toTableName(req.getTableName()),
          ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
          req.getNonceGroup(),
          req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId.
        return AddColumnResponse.newBuilder().build();
      } else {
        return AddColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

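  /**
   * Assigns the region named in the request. A REGION_NAME specifier is expected; the
   * preAssign/postAssign coprocessor hooks are invoked around the AssignmentManager call.
   */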
  @Override
  public AssignRegionResponse assignRegion(RpcController controller,
      AssignRegionRequest req) throws ServiceException {
    try {
      master.checkInitialized();

      final RegionSpecifierType type = req.getRegion().getType();
      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("assignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
          + " actual: " + type);
      }

      final byte[] regionName = req.getRegion().getValue().toByteArray();
      final RegionInfo regionInfo = master.getAssignmentManager().getRegionInfo(regionName);
      if (regionInfo == null) {
        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
      }

      final AssignRegionResponse arr = AssignRegionResponse.newBuilder().build();
      if (master.cpHost != null) {
        master.cpHost.preAssign(regionInfo);
      }
      LOG.info(master.getClientIdAuditPrefix() + " assign " + regionInfo.getRegionNameAsString());
      master.getAssignmentManager().assign(regionInfo);
      if (master.cpHost != null) {
        master.cpHost.postAssign(regionInfo);
      }
      return arr;
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

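  /**
   * Triggers a balancer run and reports whether the balancer ran, passing the request's optional
   * 'force' flag (default false) through to the master.
   */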
  @Override
  public BalanceResponse balance(RpcController controller,
      BalanceRequest request) throws ServiceException {
    try {
      return BalanceResponse.newBuilder().setBalancerRan(master.balance(
        request.hasForce()? request.getForce(): false)).build();
    } catch (IOException ex) {
      throw new ServiceException(ex);
    }
  }

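  /**
   * Creates a namespace via a master procedure and returns the procedure id.
   */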
  @Override
  public CreateNamespaceResponse createNamespace(RpcController controller,
     CreateNamespaceRequest request) throws ServiceException {
    try {
      long procId = master.createNamespace(
        ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
        request.getNonceGroup(),
        request.getNonce());
      return CreateNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

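  /**
   * Creates a table, with optional split keys, via a master procedure and returns the
   * procedure id.
   */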
  @Override
  public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
      throws ServiceException {
    TableDescriptor tableDescriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema());
    byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
    try {
      long procId =
          master.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
      LOG.info(master.getClientIdAuditPrefix() + " procedure request for creating table: " +
              req.getTableSchema().getTableName() + " procId is: " + procId);
      return CreateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public DeleteColumnResponse deleteColumn(RpcController controller,
      DeleteColumnRequest req) throws ServiceException {
    try {
      long procId = master.deleteColumn(
        ProtobufUtil.toTableName(req.getTableName()),
        req.getColumnName().toByteArray(),
        req.getNonceGroup(),
        req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId.
        return DeleteColumnResponse.newBuilder().build();
      } else {
        return DeleteColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public DeleteNamespaceResponse deleteNamespace(RpcController controller,
      DeleteNamespaceRequest request) throws ServiceException {
    try {
      long procId = master.deleteNamespace(
        request.getNamespaceName(),
        request.getNonceGroup(),
        request.getNonce());
      return DeleteNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Executes the Delete Snapshot operation.
   * @return DeleteSnapshotResponse (a protobuf-wrapped void) if the snapshot existed and was
   *    deleted properly.
   * @throws ServiceException wrapping SnapshotDoesNotExistException if the specified snapshot
   *    does not exist.
   */
  @Override
  public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
      DeleteSnapshotRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      master.snapshotManager.checkSnapshotSupport();

      LOG.info(master.getClientIdAuditPrefix() + " delete " + request.getSnapshot());
      master.snapshotManager.deleteSnapshot(request.getSnapshot());
      return DeleteSnapshotResponse.newBuilder().build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public DeleteTableResponse deleteTable(RpcController controller,
      DeleteTableRequest request) throws ServiceException {
    try {
      long procId = master.deleteTable(ProtobufUtil.toTableName(
          request.getTableName()), request.getNonceGroup(), request.getNonce());
      return DeleteTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public TruncateTableResponse truncateTable(RpcController controller, TruncateTableRequest request)
      throws ServiceException {
    try {
      long procId = master.truncateTable(
        ProtobufUtil.toTableName(request.getTableName()),
        request.getPreserveSplits(),
        request.getNonceGroup(),
        request.getNonce());
      return TruncateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public DisableTableResponse disableTable(RpcController controller,
      DisableTableRequest request) throws ServiceException {
    try {
      long procId = master.disableTable(
        ProtobufUtil.toTableName(request.getTableName()),
        request.getNonceGroup(),
        request.getNonce());
      return DisableTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
      EnableCatalogJanitorRequest req) throws ServiceException {
    rpcPreCheck("enableCatalogJanitor");
    return EnableCatalogJanitorResponse.newBuilder().setPrevValue(
      master.catalogJanitorChore.setEnabled(req.getEnable())).build();
  }

  @Override
  public SetCleanerChoreRunningResponse setCleanerChoreRunning(
    RpcController c, SetCleanerChoreRunningRequest req) throws ServiceException {
    rpcPreCheck("setCleanerChoreRunning");

    boolean prevValue =
      master.getLogCleaner().getEnabled() && master.getHFileCleaner().getEnabled();
    master.getLogCleaner().setEnabled(req.getOn());
    master.getHFileCleaner().setEnabled(req.getOn());
    return SetCleanerChoreRunningResponse.newBuilder().setPrevValue(prevValue).build();
  }

  @Override
  public EnableTableResponse enableTable(RpcController controller,
      EnableTableRequest request) throws ServiceException {
    try {
      long procId = master.enableTable(
        ProtobufUtil.toTableName(request.getTableName()),
        request.getNonceGroup(),
        request.getNonce());
      return EnableTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

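  /**
   * Merges the regions named in the request. ENCODED_REGION_NAME specifiers are expected; each
   * is resolved to a RegionInfo through the RegionStates before a merge procedure is submitted,
   * and the procedure id is returned.
   */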
  @Override
  public MergeTableRegionsResponse mergeTableRegions(
      RpcController c, MergeTableRegionsRequest request) throws ServiceException {
    try {
      master.checkInitialized();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }

    RegionStates regionStates = master.getAssignmentManager().getRegionStates();

    RegionInfo[] regionsToMerge = new RegionInfo[request.getRegionCount()];
    for (int i = 0; i < request.getRegionCount(); i++) {
      final byte[] encodedNameOfRegion = request.getRegion(i).getValue().toByteArray();
      if (request.getRegion(i).getType() != RegionSpecifierType.ENCODED_REGION_NAME) {
        LOG.warn("MergeRegions specifier type: expected: "
          + RegionSpecifierType.ENCODED_REGION_NAME + " actual: region " + i + " ="
          + request.getRegion(i).getType());
      }
      RegionState regionState = regionStates.getRegionState(Bytes.toString(encodedNameOfRegion));
      if (regionState == null) {
        throw new ServiceException(
          new UnknownRegionException(Bytes.toStringBinary(encodedNameOfRegion)));
      }
      regionsToMerge[i] = regionState.getRegion();
    }

    try {
      long procId = master.mergeRegions(
        regionsToMerge,
        request.getForcible(),
        request.getNonceGroup(),
        request.getNonce());
      return MergeTableRegionsResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

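  /**
   * Splits the given region, optionally at the supplied split row, via a master procedure and
   * returns the procedure id.
   */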
  @Override
  public SplitTableRegionResponse splitRegion(final RpcController controller,
      final SplitTableRegionRequest request) throws ServiceException {
    try {
      long procId = master.splitRegion(
        ProtobufUtil.toRegionInfo(request.getRegionInfo()),
        request.hasSplitRow() ? request.getSplitRow().toByteArray() : null,
        request.getNonceGroup(),
        request.getNonce());
      return SplitTableRegionResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }

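  /**
   * Dispatches a client coprocessor-endpoint call to the matching Service registered with the
   * master and returns the endpoint's response. Requires the caller to have ADMIN permission
   * (enforced by rpcPreCheck).
   */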
  @Override
  public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller,
      final ClientProtos.CoprocessorServiceRequest request) throws ServiceException {
    rpcPreCheck("execMasterService");
    try {
      ServerRpcController execController = new ServerRpcController();
      ClientProtos.CoprocessorServiceCall call = request.getCall();
      String serviceName = call.getServiceName();
      String methodName = call.getMethodName();
      if (!master.coprocessorServiceHandlers.containsKey(serviceName)) {
        throw new UnknownProtocolException(null,
          "No registered Master Coprocessor Endpoint found for " + serviceName +
          ". Has it been enabled?");
      }

      Service service = master.coprocessorServiceHandlers.get(serviceName);
      ServiceDescriptor serviceDesc = service.getDescriptorForType();
      MethodDescriptor methodDesc =
          CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc);

      Message execRequest =
          CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest());
      final Message.Builder responseBuilder =
          service.getResponsePrototype(methodDesc).newBuilderForType();
      service.callMethod(methodDesc, execController, execRequest,
        (message) -> {
          if (message != null) {
            responseBuilder.mergeFrom(message);
          }
        });
      Message execResult = responseBuilder.build();
      if (execController.getFailedOn() != null) {
        throw execController.getFailedOn();
      }
      return CoprocessorRpcUtils.getResponse(execResult, HConstants.EMPTY_BYTE_ARRAY);
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }

  /**
   * Triggers an asynchronous attempt to run a distributed procedure.
   * {@inheritDoc}
   */
  @Override
  public ExecProcedureResponse execProcedure(RpcController controller,
      ExecProcedureRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      ProcedureDescription desc = request.getProcedure();
      MasterProcedureManager mpm = master.getMasterProcedureManagerHost().getProcedureManager(
        desc.getSignature());
      if (mpm == null) {
        throw new ServiceException(new DoNotRetryIOException("The procedure is not registered: "
          + desc.getSignature()));
      }
      LOG.info(master.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
      mpm.checkPermissions(desc, getAccessChecker(), RpcServer.getRequestUser().orElse(null));
      mpm.execProcedure(desc);
      // send back the max amount of time the client should wait for the procedure
      // to complete
      long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME;
      return ExecProcedureResponse.newBuilder().setExpectedTimeout(
        waitTime).build();
    } catch (ForeignException e) {
      throw new ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Triggers a synchronous attempt to run a distributed procedure and sets
   * return data in response.
   * {@inheritDoc}
   */
  @Override
  public ExecProcedureResponse execProcedureWithRet(RpcController controller,
      ExecProcedureRequest request) throws ServiceException {
    rpcPreCheck("execProcedureWithRet");
    try {
      ProcedureDescription desc = request.getProcedure();
      MasterProcedureManager mpm =
        master.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
      if (mpm == null) {
        throw new ServiceException("The procedure is not registered: " + desc.getSignature());
      }
      LOG.info(master.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
      byte[] data = mpm.execProcedureWithRet(desc);
      ExecProcedureResponse.Builder builder = ExecProcedureResponse.newBuilder();
      // set return data if available
      if (data != null) {
        builder.setReturnData(UnsafeByteOperations.unsafeWrap(data));
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public GetClusterStatusResponse getClusterStatus(RpcController controller,
      GetClusterStatusRequest req) throws ServiceException {
    GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
    try {
1015      // We used to check if Master was up at this point but let this call proceed even if
1016      // Master is initializing... else we shut out stuff like hbck2 tool from making progress
1017      // since it queries this method to figure cluster version. hbck2 wants to be able to work
1018      // against Master even if it is 'initializing' so it can do fixup.
1019      response.setClusterStatus(ClusterMetricsBuilder.toClusterStatus(
1020        master.getClusterMetrics(ClusterMetricsBuilder.toOptions(req.getOptionsList()))));
1021    } catch (IOException e) {
1022      throw new ServiceException(e);
1023    }
1024    return response.build();
1025  }
1026
  /**
   * List the currently available/stored snapshots. Any in-progress snapshots are ignored.
   */
1030  @Override
1031  public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller,
1032      GetCompletedSnapshotsRequest request) throws ServiceException {
1033    try {
1034      master.checkInitialized();
1035      GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder();
1036      List<SnapshotDescription> snapshots = master.snapshotManager.getCompletedSnapshots();
1037
1038      // convert to protobuf
1039      for (SnapshotDescription snapshot : snapshots) {
1040        builder.addSnapshots(snapshot);
1041      }
1042      return builder.build();
1043    } catch (IOException e) {
1044      throw new ServiceException(e);
1045    }
1046  }
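  // Illustrative sketch: on the client side the same information is available through
  // Admin.listSnapshots() (client-side SnapshotDescription, not the protobuf used above); only
  // completed snapshots are returned, matching the behaviour of this RPC. 'conn' is a
  // hypothetical Connection.
  //
  //   try (Admin admin = conn.getAdmin()) {
  //     admin.listSnapshots().forEach(sd -> System.out.println("completed snapshot: " + sd.getName()));
  //   }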
1047
1048  @Override
1049  public ListNamespacesResponse listNamespaces(
1050      RpcController controller, ListNamespacesRequest request)
1051      throws ServiceException {
1052    try {
1053      return ListNamespacesResponse.newBuilder()
1054        .addAllNamespaceName(master.listNamespaces())
1055        .build();
1056    } catch (IOException e) {
1057      throw new ServiceException(e);
1058    }
1059  }
1060
1061  @Override
1062  public GetNamespaceDescriptorResponse getNamespaceDescriptor(
1063      RpcController controller, GetNamespaceDescriptorRequest request)
1064      throws ServiceException {
1065    try {
1066      return GetNamespaceDescriptorResponse.newBuilder()
1067        .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(
1068            master.getNamespace(request.getNamespaceName())))
1069        .build();
1070    } catch (IOException e) {
1071      throw new ServiceException(e);
1072    }
1073  }
1074
  /**
   * Get the number of regions of the table that have been updated by the alter.
   *
   * @return Pair indicating the progress of the alter: Pair.getFirst() is the number of regions
   *         that are yet to be updated, Pair.getSecond() is the total number of regions of the
   *         table.
   * @throws ServiceException if the master is not initialized or the status cannot be retrieved
   */
1083  @Override
1084  public GetSchemaAlterStatusResponse getSchemaAlterStatus(
1085      RpcController controller, GetSchemaAlterStatusRequest req) throws ServiceException {
1086    // TODO: currently, we query using the table name on the client side. this
1087    // may overlap with other table operations or the table operation may
1088    // have completed before querying this API. We need to refactor to a
1089    // transaction system in the future to avoid these ambiguities.
1090    TableName tableName = ProtobufUtil.toTableName(req.getTableName());
1091
1092    try {
1093      master.checkInitialized();
1094      Pair<Integer,Integer> pair = master.getAssignmentManager().getReopenStatus(tableName);
1095      GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder();
1096      ret.setYetToUpdateRegions(pair.getFirst());
1097      ret.setTotalRegions(pair.getSecond());
1098      return ret.build();
1099    } catch (IOException ioe) {
1100      throw new ServiceException(ioe);
1101    }
1102  }
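  // Illustrative sketch: a caller can derive alter progress from the two counters set above.
  // The accessors mirror the proto fields populated in this method; 'status' is assumed to be a
  // response obtained from the master stub.
  //
  //   GetSchemaAlterStatusResponse status = ...;
  //   int pending = status.getYetToUpdateRegions();
  //   int total = status.getTotalRegions();
  //   double fractionDone = total == 0 ? 1.0 : (total - pending) / (double) total;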
1103
1104  /**
1105   * Get list of TableDescriptors for requested tables.
1106   * @param c Unused (set to null).
1107   * @param req GetTableDescriptorsRequest that contains:
1108   *     - tableNames: requested tables, or if empty, all are requested.
1109   * @return GetTableDescriptorsResponse
1110   * @throws ServiceException
1111   */
1112  @Override
1113  public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
1114      GetTableDescriptorsRequest req) throws ServiceException {
1115    try {
1116      master.checkInitialized();
1117
1118      final String regex = req.hasRegex() ? req.getRegex() : null;
1119      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
1120      List<TableName> tableNameList = null;
1121      if (req.getTableNamesCount() > 0) {
1122        tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
1123        for (HBaseProtos.TableName tableNamePB: req.getTableNamesList()) {
1124          tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
1125        }
1126      }
1127
1128      List<TableDescriptor> descriptors = master.listTableDescriptors(namespace, regex,
1129          tableNameList, req.getIncludeSysTables());
1130
1131      GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
1132      if (descriptors != null && descriptors.size() > 0) {
1133        // Add the table descriptors to the response
1134        for (TableDescriptor htd: descriptors) {
1135          builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
1136        }
1137      }
1138      return builder.build();
1139    } catch (IOException ioe) {
1140      throw new ServiceException(ioe);
1141    }
1142  }
1143
1144  /**
1145   * Get list of userspace table names
1146   * @param controller Unused (set to null).
1147   * @param req GetTableNamesRequest
1148   * @return GetTableNamesResponse
1149   * @throws ServiceException
1150   */
1151  @Override
1152  public GetTableNamesResponse getTableNames(RpcController controller,
1153      GetTableNamesRequest req) throws ServiceException {
1154    try {
1155      master.checkServiceStarted();
1156
1157      final String regex = req.hasRegex() ? req.getRegex() : null;
1158      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
1159      List<TableName> tableNames = master.listTableNames(namespace, regex,
1160          req.getIncludeSysTables());
1161
1162      GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
1163      if (tableNames != null && tableNames.size() > 0) {
1164        // Add the table names to the response
1165        for (TableName table: tableNames) {
1166          builder.addTableNames(ProtobufUtil.toProtoTableName(table));
1167        }
1168      }
1169      return builder.build();
1170    } catch (IOException e) {
1171      throw new ServiceException(e);
1172    }
1173  }
1174
1175  @Override
1176  public GetTableStateResponse getTableState(RpcController controller,
1177      GetTableStateRequest request) throws ServiceException {
1178    try {
1179      master.checkServiceStarted();
1180      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
1181      TableState ts = master.getTableStateManager().getTableState(tableName);
1182      GetTableStateResponse.Builder builder = GetTableStateResponse.newBuilder();
1183      builder.setTableState(ts.convert());
1184      return builder.build();
1185    } catch (IOException e) {
1186      throw new ServiceException(e);
1187    }
1188  }
1189
1190  @Override
1191  public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
1192      IsCatalogJanitorEnabledRequest req) throws ServiceException {
1193    return IsCatalogJanitorEnabledResponse.newBuilder().setValue(
1194      master.isCatalogJanitorEnabled()).build();
1195  }
1196
1197  @Override
1198  public IsCleanerChoreEnabledResponse isCleanerChoreEnabled(RpcController c,
1199                                                             IsCleanerChoreEnabledRequest req)
1200    throws ServiceException {
1201    return IsCleanerChoreEnabledResponse.newBuilder().setValue(master.isCleanerChoreEnabled())
1202                                        .build();
1203  }
1204
1205  @Override
1206  public IsMasterRunningResponse isMasterRunning(RpcController c,
1207      IsMasterRunningRequest req) throws ServiceException {
1208    try {
1209      master.checkServiceStarted();
1210      return IsMasterRunningResponse.newBuilder().setIsMasterRunning(
1211        !master.isStopped()).build();
1212    } catch (IOException e) {
1213      throw new ServiceException(e);
1214    }
1215  }
1216
  /**
   * Checks if the specified procedure is done.
   * @return true if the procedure is done, false if the procedure is still in progress
   * @throws ServiceException if the procedure is unknown, or if it failed, wrapping the failure
   *     reason
   */
1222  @Override
1223  public IsProcedureDoneResponse isProcedureDone(RpcController controller,
1224      IsProcedureDoneRequest request) throws ServiceException {
1225    try {
1226      master.checkInitialized();
1227      ProcedureDescription desc = request.getProcedure();
1228      MasterProcedureManager mpm = master.getMasterProcedureManagerHost().getProcedureManager(
1229        desc.getSignature());
1230      if (mpm == null) {
1231        throw new ServiceException("The procedure is not registered: "
1232          + desc.getSignature());
1233      }
      LOG.debug("Checking to see if procedure from request: "
1235        + desc.getSignature() + " is done");
1236
1237      IsProcedureDoneResponse.Builder builder =
1238        IsProcedureDoneResponse.newBuilder();
1239      boolean done = mpm.isProcedureDone(desc);
1240      builder.setDone(done);
1241      return builder.build();
1242    } catch (ForeignException e) {
1243      throw new ServiceException(e.getCause());
1244    } catch (IOException e) {
1245      throw new ServiceException(e);
1246    }
1247  }
1248
1249  /**
1250   * Checks if the specified snapshot is done.
1251   * @return true if the snapshot is in file system ready to use,
1252   *     false if the snapshot is in the process of completing
1253   * @throws ServiceException wrapping UnknownSnapshotException if invalid snapshot, or
1254   *     a wrapped HBaseSnapshotException with progress failure reason.
1255   */
1256  @Override
1257  public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
1258      IsSnapshotDoneRequest request) throws ServiceException {
    LOG.debug("Checking to see if snapshot from request: " +
1260      ClientSnapshotDescriptionUtils.toString(request.getSnapshot()) + " is done");
1261    try {
1262      master.checkInitialized();
1263      IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
1264      boolean done = master.snapshotManager.isSnapshotDone(request.getSnapshot());
1265      builder.setDone(done);
1266      return builder.build();
1267    } catch (ForeignException e) {
1268      throw new ServiceException(e.getCause());
1269    } catch (IOException e) {
1270      throw new ServiceException(e);
1271    }
1272  }
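  // Illustrative sketch: a client that triggered an asynchronous snapshot usually polls this RPC
  // until it reports done, bounded by the expectedTimeout returned by the snapshot() RPC defined
  // later in this class. 'stub', 'snapshotDescription' and 'expectedTimeoutMs' are hypothetical
  // variables for illustration.
  //
  //   IsSnapshotDoneRequest check =
  //       IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshotDescription).build();
  //   long deadline = System.currentTimeMillis() + expectedTimeoutMs;
  //   while (!stub.isSnapshotDone(null, check).getDone()) {
  //     if (System.currentTimeMillis() > deadline) {
  //       throw new IOException("Timed out waiting for snapshot to complete");
  //     }
  //     Thread.sleep(500);
  //   }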
1273
1274  @Override
1275  public GetProcedureResultResponse getProcedureResult(RpcController controller,
1276      GetProcedureResultRequest request) throws ServiceException {
1277    LOG.debug("Checking to see if procedure is done pid=" + request.getProcId());
1278    try {
1279      master.checkInitialized();
1280      GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder();
1281      long procId = request.getProcId();
1282      ProcedureExecutor<?> executor = master.getMasterProcedureExecutor();
1283      Procedure<?> result = executor.getResultOrProcedure(procId);
1284      if (result != null) {
1285        builder.setSubmittedTime(result.getSubmittedTime());
1286        builder.setLastUpdate(result.getLastUpdate());
1287        if (executor.isFinished(procId)) {
1288          builder.setState(GetProcedureResultResponse.State.FINISHED);
1289          if (result.isFailed()) {
1290            IOException exception =
1291                MasterProcedureUtil.unwrapRemoteIOException(result);
1292            builder.setException(ForeignExceptionUtil.toProtoForeignException(exception));
1293          }
1294          byte[] resultData = result.getResult();
1295          if (resultData != null) {
1296            builder.setResult(UnsafeByteOperations.unsafeWrap(resultData));
1297          }
1298          master.getMasterProcedureExecutor().removeResult(request.getProcId());
1299        } else {
1300          builder.setState(GetProcedureResultResponse.State.RUNNING);
1301        }
1302      } else {
1303        builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
1304      }
1305      return builder.build();
1306    } catch (IOException e) {
1307      throw new ServiceException(e);
1308    }
1309  }
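  // Illustrative sketch: a client that submitted a procedure (for example via modifyTable, which
  // returns a procId) can poll this RPC until the state flips from RUNNING to FINISHED. 'stub'
  // and 'procId' are hypothetical variables for illustration.
  //
  //   GetProcedureResultRequest poll =
  //       GetProcedureResultRequest.newBuilder().setProcId(procId).build();
  //   GetProcedureResultResponse res;
  //   do {
  //     Thread.sleep(1000);
  //     res = stub.getProcedureResult(null, poll);
  //   } while (res.getState() == GetProcedureResultResponse.State.RUNNING);
  //   if (res.hasException()) {
  //     // surface the ForeignException carried in the response to the caller
  //   }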
1310
1311  @Override
1312  public AbortProcedureResponse abortProcedure(
1313      RpcController rpcController, AbortProcedureRequest request) throws ServiceException {
1314    try {
1315      AbortProcedureResponse.Builder response = AbortProcedureResponse.newBuilder();
1316      boolean abortResult =
1317          master.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning());
1318      response.setIsProcedureAborted(abortResult);
1319      return response.build();
1320    } catch (IOException e) {
1321      throw new ServiceException(e);
1322    }
1323  }
1324
1325  @Override
1326  public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c,
1327      ListNamespaceDescriptorsRequest request) throws ServiceException {
1328    try {
1329      ListNamespaceDescriptorsResponse.Builder response =
1330        ListNamespaceDescriptorsResponse.newBuilder();
1331      for(NamespaceDescriptor ns: master.getNamespaces()) {
1332        response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns));
1333      }
1334      return response.build();
1335    } catch (IOException e) {
1336      throw new ServiceException(e);
1337    }
1338  }
1339
1340  @Override
1341  public GetProceduresResponse getProcedures(
1342      RpcController rpcController,
1343      GetProceduresRequest request) throws ServiceException {
1344    try {
1345      final GetProceduresResponse.Builder response = GetProceduresResponse.newBuilder();
1346      for (Procedure<?> p: master.getProcedures()) {
1347        response.addProcedure(ProcedureUtil.convertToProtoProcedure(p));
1348      }
1349      return response.build();
1350    } catch (IOException e) {
1351      throw new ServiceException(e);
1352    }
1353  }
1354
1355  @Override
1356  public GetLocksResponse getLocks(
1357      RpcController controller,
1358      GetLocksRequest request) throws ServiceException {
1359    try {
1360      final GetLocksResponse.Builder builder = GetLocksResponse.newBuilder();
1361
1362      for (LockedResource lockedResource: master.getLocks()) {
1363        builder.addLock(ProcedureUtil.convertToProtoLockedResource(lockedResource));
1364      }
1365
1366      return builder.build();
1367    } catch (IOException e) {
1368      throw new ServiceException(e);
1369    }
1370  }
1371
1372  @Override
1373  public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
1374      ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
1375    try {
1376      ListTableDescriptorsByNamespaceResponse.Builder b =
1377          ListTableDescriptorsByNamespaceResponse.newBuilder();
1378      for (TableDescriptor htd : master
1379          .listTableDescriptorsByNamespace(request.getNamespaceName())) {
1380        b.addTableSchema(ProtobufUtil.toTableSchema(htd));
1381      }
1382      return b.build();
1383    } catch (IOException e) {
1384      throw new ServiceException(e);
1385    }
1386  }
1387
1388  @Override
1389  public ListTableNamesByNamespaceResponse listTableNamesByNamespace(RpcController c,
1390      ListTableNamesByNamespaceRequest request) throws ServiceException {
1391    try {
1392      ListTableNamesByNamespaceResponse.Builder b =
1393        ListTableNamesByNamespaceResponse.newBuilder();
1394      for (TableName tableName: master.listTableNamesByNamespace(request.getNamespaceName())) {
1395        b.addTableName(ProtobufUtil.toProtoTableName(tableName));
1396      }
1397      return b.build();
1398    } catch (IOException e) {
1399      throw new ServiceException(e);
1400    }
1401  }
1402
1403  @Override
1404  public ModifyColumnResponse modifyColumn(RpcController controller,
1405      ModifyColumnRequest req) throws ServiceException {
1406    try {
1407      long procId = master.modifyColumn(
1408        ProtobufUtil.toTableName(req.getTableName()),
1409        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
1410        req.getNonceGroup(),
1411        req.getNonce());
1412      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId
1414        return ModifyColumnResponse.newBuilder().build();
1415      } else {
1416        return ModifyColumnResponse.newBuilder().setProcId(procId).build();
1417      }
1418    } catch (IOException ioe) {
1419      throw new ServiceException(ioe);
1420    }
1421  }
1422
1423  @Override
1424  public ModifyNamespaceResponse modifyNamespace(RpcController controller,
1425      ModifyNamespaceRequest request) throws ServiceException {
1426    try {
1427      long procId = master.modifyNamespace(
1428        ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
1429        request.getNonceGroup(),
1430        request.getNonce());
1431      return ModifyNamespaceResponse.newBuilder().setProcId(procId).build();
1432    } catch (IOException e) {
1433      throw new ServiceException(e);
1434    }
1435  }
1436
1437  @Override
1438  public ModifyTableResponse modifyTable(RpcController controller,
1439      ModifyTableRequest req) throws ServiceException {
1440    try {
1441      long procId = master.modifyTable(
1442        ProtobufUtil.toTableName(req.getTableName()),
1443        ProtobufUtil.toTableDescriptor(req.getTableSchema()),
1444        req.getNonceGroup(),
1445        req.getNonce());
1446      return ModifyTableResponse.newBuilder().setProcId(procId).build();
1447    } catch (IOException ioe) {
1448      throw new ServiceException(ioe);
1449    }
1450  }
1451
1452  @Override
1453  public MoveRegionResponse moveRegion(RpcController controller,
1454      MoveRegionRequest req) throws ServiceException {
1455    final byte [] encodedRegionName = req.getRegion().getValue().toByteArray();
1456    RegionSpecifierType type = req.getRegion().getType();
1457    final byte [] destServerName = (req.hasDestServerName())?
1458      Bytes.toBytes(ProtobufUtil.toServerName(req.getDestServerName()).getServerName()):null;
1459    MoveRegionResponse mrr = MoveRegionResponse.newBuilder().build();
1460
1461    if (type != RegionSpecifierType.ENCODED_REGION_NAME) {
1462      LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
1463        + " actual: " + type);
1464    }
1465
1466    try {
1467      master.checkInitialized();
1468      master.move(encodedRegionName, destServerName);
1469    } catch (IOException ioe) {
1470      throw new ServiceException(ioe);
1471    }
1472    return mrr;
1473  }
1474
  /**
   * Offline the specified region from the master's in-memory state. Unlike unassign, it will not
   * attempt to reassign the region.
   *
   * This is a special method that should only be used by experts or hbck.
   */
1482  @Override
1483  public OfflineRegionResponse offlineRegion(RpcController controller,
1484      OfflineRegionRequest request) throws ServiceException {
1485    try {
1486      master.checkInitialized();
1487
1488      final RegionSpecifierType type = request.getRegion().getType();
1489      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("offlineRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
1491          + " actual: " + type);
1492      }
1493
1494      final byte[] regionName = request.getRegion().getValue().toByteArray();
1495      final RegionInfo hri = master.getAssignmentManager().getRegionInfo(regionName);
1496      if (hri == null) {
1497        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
1498      }
1499
1500      if (master.cpHost != null) {
1501        master.cpHost.preRegionOffline(hri);
1502      }
1503      LOG.info(master.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString());
1504      master.getAssignmentManager().offlineRegion(hri);
1505      if (master.cpHost != null) {
1506        master.cpHost.postRegionOffline(hri);
1507      }
1508    } catch (IOException ioe) {
1509      throw new ServiceException(ioe);
1510    }
1511    return OfflineRegionResponse.newBuilder().build();
1512  }
1513
  /**
   * Execute Restore/Clone snapshot operation.
   *
   * <p>If the specified table exists, a "Restore" is executed, replacing the table
   * schema and directory data with the content of the snapshot.
   * The table must be disabled, or an UnsupportedOperationException will be thrown.
   *
   * <p>If the table doesn't exist, a "Clone" is executed: a new table is created
   * using the schema at the time of the snapshot, and the content of the snapshot.
   *
   * <p>The restore/clone operation does not require copying HFiles. Since HFiles
   * are immutable, the table can point to and use the same files as the original one.
   */
1527  @Override
1528  public RestoreSnapshotResponse restoreSnapshot(RpcController controller,
1529      RestoreSnapshotRequest request) throws ServiceException {
1530    try {
1531      long procId = master.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(),
1532        request.getNonce(), request.getRestoreACL());
1533      return RestoreSnapshotResponse.newBuilder().setProcId(procId).build();
1534    } catch (ForeignException e) {
1535      throw new ServiceException(e.getCause());
1536    } catch (IOException e) {
1537      throw new ServiceException(e);
1538    }
1539  }
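  // Illustrative sketch of the two paths described in the javadoc above, using the public Admin
  // API (shown for illustration only; 'conn', 'tableName' and the snapshot name are assumptions):
  //
  //   try (Admin admin = conn.getAdmin()) {
  //     if (admin.tableExists(tableName)) {
  //       admin.disableTable(tableName);                  // restore requires a disabled table
  //       admin.restoreSnapshot("mySnapshot");
  //       admin.enableTable(tableName);
  //     } else {
  //       admin.cloneSnapshot("mySnapshot", tableName);   // clone creates a brand new table
  //     }
  //   }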
1540
1541  @Override
1542  public SetSnapshotCleanupResponse switchSnapshotCleanup(
1543      RpcController controller, SetSnapshotCleanupRequest request)
1544      throws ServiceException {
1545    try {
1546      master.checkInitialized();
1547      final boolean enabled = request.getEnabled();
1548      final boolean isSynchronous = request.hasSynchronous() && request.getSynchronous();
1549      final boolean prevSnapshotCleanupRunning = this.switchSnapshotCleanup(enabled, isSynchronous);
1550      return SetSnapshotCleanupResponse.newBuilder()
1551          .setPrevSnapshotCleanup(prevSnapshotCleanupRunning).build();
1552    } catch (IOException e) {
1553      throw new ServiceException(e);
1554    }
1555  }
1556
1557  @Override
1558  public IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(
1559      RpcController controller, IsSnapshotCleanupEnabledRequest request)
1560      throws ServiceException {
1561    try {
1562      master.checkInitialized();
1563      final boolean isSnapshotCleanupEnabled = master.snapshotCleanupTracker
1564          .isSnapshotCleanupEnabled();
1565      return IsSnapshotCleanupEnabledResponse.newBuilder()
1566          .setEnabled(isSnapshotCleanupEnabled).build();
1567    } catch (IOException e) {
1568      throw new ServiceException(e);
1569    }
1570  }
1571
1572  /**
1573   * Turn on/off snapshot auto-cleanup based on TTL
1574   *
1575   * @param enabledNewVal Set to <code>true</code> to enable, <code>false</code> to disable
1576   * @param synchronous If <code>true</code>, it waits until current snapshot cleanup is completed,
1577   *   if outstanding
1578   * @return previous snapshot auto-cleanup mode
1579   */
1580  private synchronized boolean switchSnapshotCleanup(final boolean enabledNewVal,
1581    final boolean synchronous) {
1582    final boolean oldValue = master.snapshotCleanupTracker.isSnapshotCleanupEnabled();
1583    master.switchSnapshotCleanup(enabledNewVal, synchronous);
1584    LOG.info("{} Successfully set snapshot cleanup to {}", master.getClientIdAuditPrefix(),
1585      enabledNewVal);
1586    return oldValue;
1587  }
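  // Illustrative sketch: the switch above is driven by the switchSnapshotCleanup RPC. A caller
  // holding the master stub (a hypothetical 'stub' variable) could disable TTL-based snapshot
  // cleanup synchronously like this:
  //
  //   SetSnapshotCleanupRequest off = SetSnapshotCleanupRequest.newBuilder()
  //       .setEnabled(false)
  //       .setSynchronous(true)   // wait for any in-flight cleanup chore to finish
  //       .build();
  //   boolean wasEnabled = stub.switchSnapshotCleanup(null, off).getPrevSnapshotCleanup();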
1588
1589
1590  @Override
1591  public RunCatalogScanResponse runCatalogScan(RpcController c,
1592      RunCatalogScanRequest req) throws ServiceException {
1593    rpcPreCheck("runCatalogScan");
1594    try {
1595      return ResponseConverter.buildRunCatalogScanResponse(
1596          this.master.catalogJanitorChore.scan());
1597    } catch (IOException ioe) {
1598      throw new ServiceException(ioe);
1599    }
1600  }
1601
1602  @Override
1603  public RunCleanerChoreResponse runCleanerChore(RpcController c, RunCleanerChoreRequest req)
1604    throws ServiceException {
1605    rpcPreCheck("runCleanerChore");
1606    boolean result = master.getHFileCleaner().runCleaner() && master.getLogCleaner().runCleaner();
1607    return ResponseConverter.buildRunCleanerChoreResponse(result);
1608  }
1609
1610  @Override
1611  public SetBalancerRunningResponse setBalancerRunning(RpcController c,
1612      SetBalancerRunningRequest req) throws ServiceException {
1613    try {
1614      master.checkInitialized();
1615      boolean prevValue = (req.getSynchronous())?
1616        synchronousBalanceSwitch(req.getOn()) : master.balanceSwitch(req.getOn());
1617      return SetBalancerRunningResponse.newBuilder().setPrevBalanceValue(prevValue).build();
1618    } catch (IOException ioe) {
1619      throw new ServiceException(ioe);
1620    }
1621  }
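  // Illustrative sketch: on the client side this switch is usually flipped through
  // Admin#balancerSwitch(on, synchronous) in the 2.x API (method name from the public client
  // API, shown here for illustration; 'conn' is a hypothetical Connection):
  //
  //   try (Admin admin = conn.getAdmin()) {
  //     boolean previous = admin.balancerSwitch(false, true);  // turn the balancer off and wait
  //   }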
1622
1623  @Override
1624  public ShutdownResponse shutdown(RpcController controller,
1625      ShutdownRequest request) throws ServiceException {
1626    LOG.info(master.getClientIdAuditPrefix() + " shutdown");
1627    try {
1628      master.shutdown();
1629    } catch (IOException e) {
1630      LOG.error("Exception occurred in HMaster.shutdown()", e);
1631      throw new ServiceException(e);
1632    }
1633    return ShutdownResponse.newBuilder().build();
1634  }
1635
1636  /**
1637   * Triggers an asynchronous attempt to take a snapshot.
1638   * {@inheritDoc}
1639   */
1640  @Override
1641  public SnapshotResponse snapshot(RpcController controller,
1642      SnapshotRequest request) throws ServiceException {
1643    try {
1644      master.checkInitialized();
1645      master.snapshotManager.checkSnapshotSupport();
1646
      LOG.info(master.getClientIdAuditPrefix() + " snapshot request for: " +
1648        ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
1649      // get the snapshot information
1650      SnapshotDescription snapshot = SnapshotDescriptionUtils.validate(
1651        request.getSnapshot(), master.getConfiguration());
1652      master.snapshotManager.takeSnapshot(snapshot);
1653
1654      // send back the max amount of time the client should wait for the snapshot to complete
1655      long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(master.getConfiguration(),
1656        snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
1657      return SnapshotResponse.newBuilder().setExpectedTimeout(waitTime).build();
1658    } catch (ForeignException e) {
1659      throw new ServiceException(e.getCause());
1660    } catch (IOException e) {
1661      throw new ServiceException(e);
1662    }
1663  }
1664
1665  @Override
1666  public StopMasterResponse stopMaster(RpcController controller,
1667      StopMasterRequest request) throws ServiceException {
1668    LOG.info(master.getClientIdAuditPrefix() + " stop");
1669    try {
1670      master.stopMaster();
1671    } catch (IOException e) {
1672      LOG.error("Exception occurred while stopping master", e);
1673      throw new ServiceException(e);
1674    }
1675    return StopMasterResponse.newBuilder().build();
1676  }
1677
1678  @Override
1679  public IsInMaintenanceModeResponse isMasterInMaintenanceMode(
1680      final RpcController controller,
1681      final IsInMaintenanceModeRequest request) throws ServiceException {
1682    IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder();
1683    response.setInMaintenanceMode(master.isInMaintenanceMode());
1684    return response.build();
1685  }
1686
1687  @Override
1688  public UnassignRegionResponse unassignRegion(RpcController controller,
1689      UnassignRegionRequest req) throws ServiceException {
1690    try {
1691      final byte [] regionName = req.getRegion().getValue().toByteArray();
1692      RegionSpecifierType type = req.getRegion().getType();
1693      final boolean force = req.getForce();
1694      UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build();
1695
1696      master.checkInitialized();
1697      if (type != RegionSpecifierType.REGION_NAME) {
1698        LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
1699          + " actual: " + type);
1700      }
1701      Pair<RegionInfo, ServerName> pair =
1702        MetaTableAccessor.getRegion(master.getConnection(), regionName);
1703      if (Bytes.equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName(), regionName)) {
1704        pair = new Pair<>(RegionInfoBuilder.FIRST_META_REGIONINFO,
1705          MetaTableLocator.getMetaRegionLocation(master.getZooKeeper()));
1706      }
1707      if (pair == null) {
1708        throw new UnknownRegionException(Bytes.toString(regionName));
1709      }
1710
1711      RegionInfo hri = pair.getFirst();
1712      if (master.cpHost != null) {
1713        master.cpHost.preUnassign(hri, force);
1714      }
1715      LOG.debug(master.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString()
1716          + " in current location if it is online and reassign.force=" + force);
1717      master.getAssignmentManager().unassign(hri);
1718      if (master.cpHost != null) {
1719        master.cpHost.postUnassign(hri, force);
1720      }
1721
1722      return urr;
1723    } catch (IOException ioe) {
1724      throw new ServiceException(ioe);
1725    }
1726  }
1727
1728  @Override
1729  public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c,
1730      ReportRegionStateTransitionRequest req) throws ServiceException {
1731    try {
1732      master.checkServiceStarted();
1733      return master.getAssignmentManager().reportRegionStateTransition(req);
1734    } catch (IOException ioe) {
1735      throw new ServiceException(ioe);
1736    }
1737  }
1738
1739  @Override
1740  public SetQuotaResponse setQuota(RpcController c, SetQuotaRequest req)
1741      throws ServiceException {
1742    try {
1743      master.checkInitialized();
1744      return master.getMasterQuotaManager().setQuota(req);
1745    } catch (Exception e) {
1746      throw new ServiceException(e);
1747    }
1748  }
1749
1750  @Override
1751  public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
1752      MajorCompactionTimestampRequest request) throws ServiceException {
1753    MajorCompactionTimestampResponse.Builder response =
1754        MajorCompactionTimestampResponse.newBuilder();
1755    try {
1756      master.checkInitialized();
1757      response.setCompactionTimestamp(master.getLastMajorCompactionTimestamp(ProtobufUtil
1758          .toTableName(request.getTableName())));
1759    } catch (IOException e) {
1760      throw new ServiceException(e);
1761    }
1762    return response.build();
1763  }
1764
1765  @Override
1766  public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
1767      RpcController controller, MajorCompactionTimestampForRegionRequest request)
1768      throws ServiceException {
1769    MajorCompactionTimestampResponse.Builder response =
1770        MajorCompactionTimestampResponse.newBuilder();
1771    try {
1772      master.checkInitialized();
1773      response.setCompactionTimestamp(master.getLastMajorCompactionTimestampForRegion(request
1774          .getRegion().getValue().toByteArray()));
1775    } catch (IOException e) {
1776      throw new ServiceException(e);
1777    }
1778    return response.build();
1779  }
1780
1781  /**
1782   * Compact a region on the master.
1783   *
1784   * @param controller the RPC controller
1785   * @param request the request
1786   * @throws ServiceException
1787   */
1788  @Override
1789  @QosPriority(priority=HConstants.ADMIN_QOS)
1790  public CompactRegionResponse compactRegion(final RpcController controller,
1791    final CompactRegionRequest request) throws ServiceException {
1792    try {
1793      master.checkInitialized();
1794      byte[] regionName = request.getRegion().getValue().toByteArray();
1795      TableName tableName = RegionInfo.getTable(regionName);
      // TODO: support CompactType.MOB (HBASE-23571). If the region is a mob region we should run
      // a mob file compaction here; until that is implemented we fall back to a regular compaction.
      if (MobUtils.isMobRegionName(tableName, regionName)) {
        checkHFileFormatVersionForMob();
        LOG.warn("CompactType.MOB is not supported yet, will run regular compaction." +
            " Refer to HBASE-23571.");
1804        return super.compactRegion(controller, request);
1805      } else {
1806        return super.compactRegion(controller, request);
1807      }
1808    } catch (IOException ie) {
1809      throw new ServiceException(ie);
1810    }
1811  }
1812
  /**
   * Check the configured HFile format version before running a MOB compaction.
   * @throws IOException if the configured HFile version does not support the MOB feature
   */
1817  private void checkHFileFormatVersionForMob() throws IOException {
1818    if (HFile.getFormatVersion(master.getConfiguration()) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
1819      LOG.error("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
1820          + " is required for MOB compaction. Compaction will not run.");
1821      throw new IOException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
1822          + " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY
1823          + " accordingly.");
1824    }
1825  }
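  // Illustrative sketch: the check above only passes when hfile.format.version
  // (HFile.FORMAT_VERSION_KEY) is at least HFile.MIN_FORMAT_VERSION_WITH_TAGS, e.g.:
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);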
1826
  /**
   * This method implements Admin getRegionInfo. On a RegionServer it returns the RegionInfo plus
   * detail; on the Master it returns just the RegionInfo, except for MOB regions, where it has
   * been hijacked to return MOB detail instead. The Master implementation is useful for looking
   * up the full region name when you only have the encoded name (for example for region replicas,
   * which do not have a row in hbase:meta).
   */
1835  @Override
1836  @QosPriority(priority=HConstants.ADMIN_QOS)
1837  public GetRegionInfoResponse getRegionInfo(final RpcController controller,
1838    final GetRegionInfoRequest request) throws ServiceException {
1839    RegionInfo ri = null;
1840    try {
1841      ri = getRegionInfo(request.getRegion());
1842    } catch(UnknownRegionException ure) {
1843      throw new ServiceException(ure);
1844    }
1845    GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
1846    if (ri != null) {
1847      builder.setRegionInfo(ProtobufUtil.toRegionInfo(ri));
1848    } else {
1849      // Is it a MOB name? These work differently.
1850      byte [] regionName = request.getRegion().getValue().toByteArray();
1851      TableName tableName = RegionInfo.getTable(regionName);
1852      if (MobUtils.isMobRegionName(tableName, regionName)) {
        // Return a dummy region info that carries the MOB compaction state.
1854        RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName);
1855        builder.setRegionInfo(ProtobufUtil.toRegionInfo(mobRegionInfo));
1856        if (request.hasCompactionState() && request.getCompactionState()) {
1857          builder.setCompactionState(master.getMobCompactionState(tableName));
1858        }
1859      } else {
1860        // If unknown RegionInfo and not a MOB region, it is unknown.
1861        throw new ServiceException(new UnknownRegionException(Bytes.toString(regionName)));
1862      }
1863    }
1864    return builder.build();
1865  }
1866
1867
1868  @Override
1869  public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller,
1870      IsBalancerEnabledRequest request) throws ServiceException {
1871    IsBalancerEnabledResponse.Builder response = IsBalancerEnabledResponse.newBuilder();
1872    response.setEnabled(master.isBalancerOn());
1873    return response.build();
1874  }
1875
1876  @Override
1877  public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller,
1878    SetSplitOrMergeEnabledRequest request) throws ServiceException {
1879    SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder();
1880    try {
1881      master.checkInitialized();
1882      boolean newValue = request.getEnabled();
1883      for (MasterProtos.MasterSwitchType masterSwitchType: request.getSwitchTypesList()) {
1884        MasterSwitchType switchType = convert(masterSwitchType);
1885        boolean oldValue = master.isSplitOrMergeEnabled(switchType);
1886        response.addPrevValue(oldValue);
1887        if (master.cpHost != null) {
1888          master.cpHost.preSetSplitOrMergeEnabled(newValue, switchType);
1889        }
1890        master.getSplitOrMergeTracker().setSplitOrMergeEnabled(newValue, switchType);
1891        if (master.cpHost != null) {
1892          master.cpHost.postSetSplitOrMergeEnabled(newValue, switchType);
1893        }
1894      }
1895    } catch (IOException e) {
1896      throw new ServiceException(e);
1897    } catch (KeeperException e) {
1898      throw new ServiceException(e);
1899    }
1900    return response.build();
1901  }
1902
1903  @Override
1904  public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller,
1905    IsSplitOrMergeEnabledRequest request) throws ServiceException {
1906    IsSplitOrMergeEnabledResponse.Builder response = IsSplitOrMergeEnabledResponse.newBuilder();
1907    response.setEnabled(master.isSplitOrMergeEnabled(convert(request.getSwitchType())));
1908    return response.build();
1909  }
1910
1911  @Override
1912  public NormalizeResponse normalize(RpcController controller,
1913      NormalizeRequest request) throws ServiceException {
1914    rpcPreCheck("normalize");
1915    try {
1916      return NormalizeResponse.newBuilder().setNormalizerRan(master.normalizeRegions()).build();
1917    } catch (IOException ex) {
1918      throw new ServiceException(ex);
1919    }
1920  }
1921
1922  @Override
1923  public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller,
1924      SetNormalizerRunningRequest request) throws ServiceException {
1925    rpcPreCheck("setNormalizerRunning");
1926
1927    // Sets normalizer on/off flag in ZK.
1928    boolean prevValue = master.getRegionNormalizerTracker().isNormalizerOn();
1929    boolean newValue = request.getOn();
1930    try {
1931      master.getRegionNormalizerTracker().setNormalizerOn(newValue);
1932    } catch (KeeperException ke) {
1933      LOG.warn("Error flipping normalizer switch", ke);
1934    }
1935    LOG.info("{} set normalizerSwitch={}", master.getClientIdAuditPrefix(), newValue);
1936    return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build();
1937  }
1938
1939  @Override
1940  public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller,
1941      IsNormalizerEnabledRequest request) throws ServiceException {
1942    IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder();
1943    response.setEnabled(master.isNormalizerOn());
1944    return response.build();
1945  }
1946
1947  /**
1948   * Returns the security capabilities in effect on the cluster
1949   */
1950  @Override
1951  public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller,
1952      SecurityCapabilitiesRequest request) throws ServiceException {
1953    SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder();
1954    try {
1955      master.checkInitialized();
1956      Set<SecurityCapabilitiesResponse.Capability> capabilities = new HashSet<>();
1957      // Authentication
1958      if (User.isHBaseSecurityEnabled(master.getConfiguration())) {
1959        capabilities.add(SecurityCapabilitiesResponse.Capability.SECURE_AUTHENTICATION);
1960      } else {
1961        capabilities.add(SecurityCapabilitiesResponse.Capability.SIMPLE_AUTHENTICATION);
1962      }
1963      // A coprocessor that implements AccessControlService can provide AUTHORIZATION and
1964      // CELL_AUTHORIZATION
1965      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
1966        if (AccessChecker.isAuthorizationSupported(master.getConfiguration())) {
1967          capabilities.add(SecurityCapabilitiesResponse.Capability.AUTHORIZATION);
1968        }
1969        if (AccessController.isCellAuthorizationSupported(master.getConfiguration())) {
1970          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_AUTHORIZATION);
1971        }
1972      }
1973      // A coprocessor that implements VisibilityLabelsService can provide CELL_VISIBILITY.
1974      if (master.cpHost != null && hasVisibilityLabelsServiceCoprocessor(master.cpHost)) {
1975        if (VisibilityController.isCellAuthorizationSupported(master.getConfiguration())) {
1976          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_VISIBILITY);
1977        }
1978      }
1979      response.addAllCapabilities(capabilities);
1980    } catch (IOException e) {
1981      throw new ServiceException(e);
1982    }
1983    return response.build();
1984  }
1985
1986  /**
1987   * Determines if there is a MasterCoprocessor deployed which implements
1988   * {@link AccessControlService.Interface}.
1989   */
1990  boolean hasAccessControlServiceCoprocessor(MasterCoprocessorHost cpHost) {
1991    return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class),
1992      AccessControlService.Interface.class);
1993  }
1994
1995  /**
1996   * Determines if there is a MasterCoprocessor deployed which implements
1997   * {@link VisibilityLabelsService.Interface}.
1998   */
1999  boolean hasVisibilityLabelsServiceCoprocessor(MasterCoprocessorHost cpHost) {
2000    return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class),
2001      VisibilityLabelsService.Interface.class);
2002  }
2003
2004  /**
2005   * Determines if there is a coprocessor implementation in the provided argument which extends
2006   * or implements the provided {@code service}.
2007   */
2008  boolean checkCoprocessorWithService(
2009      List<MasterCoprocessor> coprocessorsToCheck, Class<?> service) {
2010    if (coprocessorsToCheck == null || coprocessorsToCheck.isEmpty()) {
2011      return false;
2012    }
2013    for (MasterCoprocessor cp : coprocessorsToCheck) {
2014      if (service.isAssignableFrom(cp.getClass())) {
2015        return true;
2016      }
2017    }
2018    return false;
2019  }
2020
2021  private MasterSwitchType convert(MasterProtos.MasterSwitchType switchType) {
2022    switch (switchType) {
2023      case SPLIT:
2024        return MasterSwitchType.SPLIT;
2025      case MERGE:
2026        return MasterSwitchType.MERGE;
2027      default:
2028        break;
2029    }
2030    return null;
2031  }
2032
2033  @Override
2034  public AddReplicationPeerResponse addReplicationPeer(RpcController controller,
2035      AddReplicationPeerRequest request) throws ServiceException {
2036    try {
2037      long procId = master.addReplicationPeer(request.getPeerId(),
2038        ReplicationPeerConfigUtil.convert(request.getPeerConfig()),
2039        request.getPeerState().getState().equals(ReplicationState.State.ENABLED));
2040      return AddReplicationPeerResponse.newBuilder().setProcId(procId).build();
2041    } catch (ReplicationException | IOException e) {
2042      throw new ServiceException(e);
2043    }
2044  }
2045
2046  @Override
2047  public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller,
2048      RemoveReplicationPeerRequest request) throws ServiceException {
2049    try {
2050      long procId = master.removeReplicationPeer(request.getPeerId());
2051      return RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build();
2052    } catch (ReplicationException | IOException e) {
2053      throw new ServiceException(e);
2054    }
2055  }
2056
2057  @Override
2058  public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller,
2059      EnableReplicationPeerRequest request) throws ServiceException {
2060    try {
2061      long procId = master.enableReplicationPeer(request.getPeerId());
2062      return EnableReplicationPeerResponse.newBuilder().setProcId(procId).build();
2063    } catch (ReplicationException | IOException e) {
2064      throw new ServiceException(e);
2065    }
2066  }
2067
2068  @Override
2069  public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller,
2070      DisableReplicationPeerRequest request) throws ServiceException {
2071    try {
2072      long procId = master.disableReplicationPeer(request.getPeerId());
2073      return DisableReplicationPeerResponse.newBuilder().setProcId(procId).build();
2074    } catch (ReplicationException | IOException e) {
2075      throw new ServiceException(e);
2076    }
2077  }
2078
2079  @Override
2080  public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller,
2081      GetReplicationPeerConfigRequest request) throws ServiceException {
2082    GetReplicationPeerConfigResponse.Builder response = GetReplicationPeerConfigResponse
2083        .newBuilder();
2084    try {
2085      String peerId = request.getPeerId();
2086      ReplicationPeerConfig peerConfig = master.getReplicationPeerConfig(peerId);
2087      response.setPeerId(peerId);
2088      response.setPeerConfig(ReplicationPeerConfigUtil.convert(peerConfig));
2089    } catch (ReplicationException | IOException e) {
2090      throw new ServiceException(e);
2091    }
2092    return response.build();
2093  }
2094
2095  @Override
2096  public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(RpcController controller,
2097      UpdateReplicationPeerConfigRequest request) throws ServiceException {
2098    try {
2099      long procId = master.updateReplicationPeerConfig(request.getPeerId(),
2100        ReplicationPeerConfigUtil.convert(request.getPeerConfig()));
2101      return UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build();
2102    } catch (ReplicationException | IOException e) {
2103      throw new ServiceException(e);
2104    }
2105  }
2106
2107  @Override
2108  public TransitReplicationPeerSyncReplicationStateResponse
2109      transitReplicationPeerSyncReplicationState(RpcController controller,
2110          TransitReplicationPeerSyncReplicationStateRequest request) throws ServiceException {
2111    try {
2112      long procId = master.transitReplicationPeerSyncReplicationState(request.getPeerId(),
2113        ReplicationPeerConfigUtil.toSyncReplicationState(request.getSyncReplicationState()));
2114      return TransitReplicationPeerSyncReplicationStateResponse.newBuilder().setProcId(procId)
2115          .build();
2116    } catch (ReplicationException | IOException e) {
2117      throw new ServiceException(e);
2118    }
2119  }
2120
2121  @Override
2122  public ListReplicationPeersResponse listReplicationPeers(RpcController controller,
2123      ListReplicationPeersRequest request) throws ServiceException {
2124    ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder();
2125    try {
2126      List<ReplicationPeerDescription> peers = master
2127          .listReplicationPeers(request.hasRegex() ? request.getRegex() : null);
2128      for (ReplicationPeerDescription peer : peers) {
2129        response.addPeerDesc(ReplicationPeerConfigUtil.toProtoReplicationPeerDescription(peer));
2130      }
2131    } catch (ReplicationException | IOException e) {
2132      throw new ServiceException(e);
2133    }
2134    return response.build();
2135  }
2136
2137  @Override
2138  public ListDecommissionedRegionServersResponse listDecommissionedRegionServers(
2139      RpcController controller, ListDecommissionedRegionServersRequest request)
2140      throws ServiceException {
2141    ListDecommissionedRegionServersResponse.Builder response =
2142        ListDecommissionedRegionServersResponse.newBuilder();
2143    try {
2144      master.checkInitialized();
2145      if (master.cpHost != null) {
2146        master.cpHost.preListDecommissionedRegionServers();
2147      }
2148      List<ServerName> servers = master.listDecommissionedRegionServers();
2149      response.addAllServerName((servers.stream().map(server -> ProtobufUtil.toServerName(server)))
2150          .collect(Collectors.toList()));
2151      if (master.cpHost != null) {
2152        master.cpHost.postListDecommissionedRegionServers();
2153      }
2154    } catch (IOException io) {
2155      throw new ServiceException(io);
2156    }
2157
2158    return response.build();
2159  }
2160
2161  @Override
2162  public DecommissionRegionServersResponse decommissionRegionServers(RpcController controller,
2163      DecommissionRegionServersRequest request) throws ServiceException {
2164    try {
2165      master.checkInitialized();
2166      List<ServerName> servers = request.getServerNameList().stream()
2167          .map(pbServer -> ProtobufUtil.toServerName(pbServer)).collect(Collectors.toList());
2168      boolean offload = request.getOffload();
2169      if (master.cpHost != null) {
2170        master.cpHost.preDecommissionRegionServers(servers, offload);
2171      }
2172      master.decommissionRegionServers(servers, offload);
2173      if (master.cpHost != null) {
2174        master.cpHost.postDecommissionRegionServers(servers, offload);
2175      }
2176    } catch (IOException io) {
2177      throw new ServiceException(io);
2178    }
2179
2180    return DecommissionRegionServersResponse.newBuilder().build();
2181  }
2182
2183  @Override
2184  public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller,
2185      RecommissionRegionServerRequest request) throws ServiceException {
2186    try {
2187      master.checkInitialized();
2188      ServerName server = ProtobufUtil.toServerName(request.getServerName());
2189      List<byte[]> encodedRegionNames = request.getRegionList().stream()
2190          .map(regionSpecifier -> regionSpecifier.getValue().toByteArray())
2191          .collect(Collectors.toList());
2192      if (master.cpHost != null) {
2193        master.cpHost.preRecommissionRegionServer(server, encodedRegionNames);
2194      }
2195      master.recommissionRegionServer(server, encodedRegionNames);
2196      if (master.cpHost != null) {
2197        master.cpHost.postRecommissionRegionServer(server, encodedRegionNames);
2198      }
2199    } catch (IOException io) {
2200      throw new ServiceException(io);
2201    }
2202
2203    return RecommissionRegionServerResponse.newBuilder().build();
2204  }
2205
2206  @Override
2207  public LockResponse requestLock(RpcController controller, final LockRequest request)
2208      throws ServiceException {
2209    try {
2210      if (request.getDescription().isEmpty()) {
2211        throw new IllegalArgumentException("Empty description");
2212      }
2213      NonceProcedureRunnable npr;
2214      LockType type = LockType.valueOf(request.getLockType().name());
2215      if (request.getRegionInfoCount() > 0) {
2216        final RegionInfo[] regionInfos = new RegionInfo[request.getRegionInfoCount()];
2217        for (int i = 0; i < request.getRegionInfoCount(); ++i) {
2218          regionInfos[i] = ProtobufUtil.toRegionInfo(request.getRegionInfo(i));
2219        }
2220        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
2221          @Override
2222          protected void run() throws IOException {
2223            setProcId(master.getLockManager().remoteLocks().requestRegionsLock(regionInfos,
2224                request.getDescription(), getNonceKey()));
2225          }
2226
2227          @Override
2228          protected String getDescription() {
2229            return "RequestLock";
2230          }
2231        };
2232      } else if (request.hasTableName()) {
2233        final TableName tableName = ProtobufUtil.toTableName(request.getTableName());
2234        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
2235          @Override
2236          protected void run() throws IOException {
2237            setProcId(master.getLockManager().remoteLocks().requestTableLock(tableName, type,
2238                request.getDescription(), getNonceKey()));
2239          }
2240
2241          @Override
2242          protected String getDescription() {
2243            return "RequestLock";
2244          }
2245        };
2246      } else if (request.hasNamespace()) {
2247        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
2248          @Override
2249          protected void run() throws IOException {
2250            setProcId(master.getLockManager().remoteLocks().requestNamespaceLock(
2251                request.getNamespace(), type, request.getDescription(), getNonceKey()));
2252          }
2253
2254          @Override
2255          protected String getDescription() {
2256            return "RequestLock";
2257          }
2258        };
2259      } else {
2260        throw new IllegalArgumentException("one of table/namespace/region should be specified");
2261      }
2262      long procId = MasterProcedureUtil.submitProcedure(npr);
2263      return LockResponse.newBuilder().setProcId(procId).build();
2264    } catch (IllegalArgumentException e) {
2265      LOG.warn("Exception when queuing lock", e);
2266      throw new ServiceException(new DoNotRetryIOException(e));
2267    } catch (IOException e) {
2268      LOG.warn("Exception when queuing lock", e);
2269      throw new ServiceException(e);
2270    }
2271  }
2272
2273  /**
2274   * @return LOCKED, if procedure is found and it has the lock; else UNLOCKED.
2275   * @throws ServiceException if given proc id is found but it is not a LockProcedure.
2276   */
2277  @Override
2278  public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request)
2279      throws ServiceException {
2280    try {
2281      if (master.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(),
2282          request.getKeepAlive())) {
2283        return LockHeartbeatResponse.newBuilder().setTimeoutMs(
2284            master.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF,
2285                LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS))
2286            .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).build();
2287      } else {
2288        return LockHeartbeatResponse.newBuilder()
2289            .setLockStatus(LockHeartbeatResponse.LockStatus.UNLOCKED).build();
2290      }
2291    } catch (IOException e) {
2292      throw new ServiceException(e);
2293    }
2294  }
2295
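  /**
   * Handles region space usage reports from RegionServers. Reported sizes are recorded in the
   * {@link MasterQuotaManager} when space quotas are enabled; otherwise the report is ignored.
   */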
2296  @Override
2297  public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller,
2298      RegionSpaceUseReportRequest request) throws ServiceException {
2299    try {
2300      master.checkInitialized();
2301      if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) {
2302        return RegionSpaceUseReportResponse.newBuilder().build();
2303      }
2304      MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
2305      if (quotaManager != null) {
2306        final long now = EnvironmentEdgeManager.currentTime();
2307        for (RegionSpaceUse report : request.getSpaceUseList()) {
2308          quotaManager.addRegionSize(ProtobufUtil.toRegionInfo(report.getRegionInfo()),
2309            report.getRegionSize(), now);
2310        }
2311      } else {
2312        LOG.debug("Received region space usage report but HMaster is not ready to process it, "
2313            + "skipping");
2314      }
2315      return RegionSpaceUseReportResponse.newBuilder().build();
2316    } catch (Exception e) {
2317      throw new ServiceException(e);
2318    }
2319  }
2320
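  /**
   * Returns region sizes, aggregated per table, from the {@link MasterQuotaManager}'s snapshot of
   * space quota region sizes.
   */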
2321  @Override
2322  public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(
2323      RpcController controller, GetSpaceQuotaRegionSizesRequest request) throws ServiceException {
2324    try {
2325      master.checkInitialized();
2326      MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
2327      GetSpaceQuotaRegionSizesResponse.Builder builder =
2328          GetSpaceQuotaRegionSizesResponse.newBuilder();
2329      if (quotaManager != null) {
2330        Map<RegionInfo,Long> regionSizes = quotaManager.snapshotRegionSizes();
2331        Map<TableName,Long> regionSizesByTable = new HashMap<>();
2332        // Translate hregioninfo+long -> tablename+long
2333        for (Entry<RegionInfo,Long> entry : regionSizes.entrySet()) {
2334          final TableName tableName = entry.getKey().getTable();
2335          Long prevSize = regionSizesByTable.get(tableName);
2336          if (prevSize == null) {
2337            prevSize = 0L;
2338          }
2339          regionSizesByTable.put(tableName, prevSize + entry.getValue());
2340        }
2341        // Serialize them into the protobuf
2342        for (Entry<TableName,Long> tableSize : regionSizesByTable.entrySet()) {
2343          builder.addSizes(RegionSizes.newBuilder()
2344              .setTableName(ProtobufUtil.toProtoTableName(tableSize.getKey()))
2345              .setSize(tableSize.getValue()).build());
2346        }
2347        return builder.build();
2348      } else {
2349        LOG.debug("Received space quota region size report but HMaster is not ready to process "
2350            + "it, skipping");
2351      }
2352      return builder.build();
2353    } catch (Exception e) {
2354      throw new ServiceException(e);
2355    }
2356  }
2357
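  /**
   * Returns the current space quota snapshots for all tables and namespaces with quotas, as seen
   * by the {@link QuotaObserverChore}.
   */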
2358  @Override
2359  public GetQuotaStatesResponse getQuotaStates(
2360      RpcController controller, GetQuotaStatesRequest request) throws ServiceException {
2361    try {
2362      master.checkInitialized();
2363      QuotaObserverChore quotaChore = this.master.getQuotaObserverChore();
2364      GetQuotaStatesResponse.Builder builder = GetQuotaStatesResponse.newBuilder();
2365      if (quotaChore != null) {
2366        // The "current" view of all tables with quotas
2367        Map<TableName, SpaceQuotaSnapshot> tableSnapshots = quotaChore.getTableQuotaSnapshots();
2368        for (Entry<TableName, SpaceQuotaSnapshot> entry : tableSnapshots.entrySet()) {
2369          builder.addTableSnapshots(
2370              TableQuotaSnapshot.newBuilder()
2371                  .setTableName(ProtobufUtil.toProtoTableName(entry.getKey()))
2372                  .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
2373        }
2374        // The "current" view of all namespaces with quotas
2375        Map<String, SpaceQuotaSnapshot> nsSnapshots = quotaChore.getNamespaceQuotaSnapshots();
2376        for (Entry<String, SpaceQuotaSnapshot> entry : nsSnapshots.entrySet()) {
2377          builder.addNsSnapshots(
2378              NamespaceQuotaSnapshot.newBuilder()
2379                  .setNamespace(entry.getKey())
2380                  .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
2381        }
2382        return builder.build();
2383      }
2384      return builder.build();
2385    } catch (Exception e) {
2386      throw new ServiceException(e);
2387    }
2388  }
2389
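  /**
   * Clears the given servers from the dead server list and from their RSGroup. Servers that could
   * not be cleared (or all of them, while dead servers are still being processed) are returned in
   * the response.
   */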
2390  @Override
2391  public ClearDeadServersResponse clearDeadServers(RpcController controller,
2392      ClearDeadServersRequest request) throws ServiceException {
2393    LOG.debug(master.getClientIdAuditPrefix() + " clear dead region servers.");
2394    ClearDeadServersResponse.Builder response = ClearDeadServersResponse.newBuilder();
2395    try {
2396      master.checkInitialized();
2397      if (master.cpHost != null) {
2398        master.cpHost.preClearDeadServers();
2399      }
2400
2401      if (master.getServerManager().areDeadServersInProgress()) {
2402        LOG.debug("Some dead servers are still being processed, won't clear the dead server list");
2403        response.addAllServerName(request.getServerNameList());
2404      } else {
2405        DeadServer deadServer = master.getServerManager().getDeadServers();
2406        Set<Address> clearedServers = new HashSet<>();
2407        for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
2408          ServerName server = ProtobufUtil.toServerName(pbServer);
2409          if (!deadServer.removeDeadServer(server)) {
2410            response.addServerName(pbServer);
2411          } else {
2412            clearedServers.add(server.getAddress());
2413          }
2414        }
2415        master.getRSGroupInfoManager().removeServers(clearedServers);
2416        LOG.info("Removed decommissioned servers {} from RSGroup", clearedServers);
2417      }
2418
2419      if (master.cpHost != null) {
2420        master.cpHost.postClearDeadServers(
2421            ProtobufUtil.toServerNameList(request.getServerNameList()),
2422            ProtobufUtil.toServerNameList(response.getServerNameList()));
2423      }
2424    } catch (IOException io) {
2425      throw new ServiceException(io);
2426    }
2427    return response.build();
2428  }
2429
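  /**
   * Handles remote procedure completion reports, marking each reported procedure as completed or
   * failed on the master.
   */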
2430  @Override
2431  public ReportProcedureDoneResponse reportProcedureDone(RpcController controller,
2432      ReportProcedureDoneRequest request) throws ServiceException {
2433    request.getResultList().forEach(result -> {
2434      if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) {
2435        master.remoteProcedureCompleted(result.getProcId());
2436      } else {
2437        master.remoteProcedureFailed(result.getProcId(),
2438          RemoteProcedureException.fromProto(result.getError()));
2439      }
2440    });
2441    return ReportProcedureDoneResponse.getDefaultInstance();
2442  }
2443
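  /**
   * Passes file archival notifications from RegionServers on to the {@link MasterQuotaManager};
   * a no-op when space quotas are disabled.
   */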
2444  @Override
2445  public FileArchiveNotificationResponse reportFileArchival(RpcController controller,
2446      FileArchiveNotificationRequest request) throws ServiceException {
2447    try {
2448      master.checkInitialized();
2449      if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) {
2450        return FileArchiveNotificationResponse.newBuilder().build();
2451      }
2452      master.getMasterQuotaManager().processFileArchivals(request, master.getConnection(),
2453          master.getConfiguration(), master.getFileSystem());
2454      return FileArchiveNotificationResponse.newBuilder().build();
2455    } catch (Exception e) {
2456      throw new ServiceException(e);
2457    }
2458  }
2459
2460  // HBCK Services
2461
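  /**
   * Runs the {@link HbckChore} on demand; the response indicates whether the chore actually ran.
   * For use by Hbck2.
   */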
2462  @Override
2463  public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest req)
2464      throws ServiceException {
2465    rpcPreCheck("runHbckChore");
2466    LOG.info("{} request HBCK chore to run", master.getClientIdAuditPrefix());
2467    HbckChore hbckChore = master.getHbckChore();
2468    boolean ran = hbckChore.runChore();
2469    return RunHbckChoreResponse.newBuilder().setRan(ran).build();
2470  }
2471
2472  /**
2473   * Update state of the table in meta only. This is required by hbck in some situations to clean
2474   * up stuck assign/unassign region procedures for the table.
2475   *
2476   * @return previous state of the table
2477   */
2478  @Override
2479  public GetTableStateResponse setTableStateInMeta(RpcController controller,
2480      SetTableStateInMetaRequest request) throws ServiceException {
2481    TableName tn = ProtobufUtil.toTableName(request.getTableName());
2482    try {
2483      TableState prevState = this.master.getTableStateManager().getTableState(tn);
2484      TableState newState = TableState.convert(tn, request.getTableState());
2485      LOG.info("{} set table={} state from {} to {}", master.getClientIdAuditPrefix(),
2486          tn, prevState.getState(), newState.getState());
2487      this.master.getTableStateManager().setTableState(tn, newState.getState());
2488      return GetTableStateResponse.newBuilder().setTableState(prevState.convert()).build();
2489    } catch (Exception e) {
2490      throw new ServiceException(e);
2491    }
2492  }
2493
2494  /**
2495   * Update state of the region in meta only. This is required by hbck in some situations to clean
2496   * up stuck assign/unassign region procedures.
2497   *
2498   * @return previous states of the regions
2499   */
2500  @Override
2501  public GetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller,
2502    SetRegionStateInMetaRequest request) throws ServiceException {
2503    final GetRegionStateInMetaResponse.Builder builder = GetRegionStateInMetaResponse.newBuilder();
2504    for (ClusterStatusProtos.RegionState s : request.getStatesList()) {
2505      try {
2506        RegionInfo info = this.master.getAssignmentManager().
2507          loadRegionFromMeta(s.getRegionInfo().getRegionEncodedName());
2508        LOG.trace("region info loaded from meta table: {}", info);
2509        RegionState prevState = this.master.getAssignmentManager().getRegionStates().
2510          getRegionState(info);
2511        RegionState newState = RegionState.convert(s);
2512        LOG.info("{} set region={} state from {} to {}", master.getClientIdAuditPrefix(), info,
2513          prevState.getState(), newState.getState());
2514        Put metaPut = MetaTableAccessor.makePutFromRegionInfo(info, System.currentTimeMillis());
2515        metaPut.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
2516          Bytes.toBytes(newState.getState().name()));
2517        List<Put> putList = new ArrayList<>();
2518        putList.add(metaPut);
2519        MetaTableAccessor.putsToMetaTable(this.master.getConnection(), putList);
2520        // Load from meta again to refresh the AM cache with the new region state
2521        this.master.getAssignmentManager().loadRegionFromMeta(info.getEncodedName());
2522        builder.addStates(prevState.convert());
2523      } catch (Exception e) {
2524        throw new ServiceException(e);
2525      }
2526    }
2527    return builder.build();
2528  }
2529
2530  /**
2531   * Get RegionInfo from Master using content of RegionSpecifier as key.
2532   * @return RegionInfo found by decoding <code>rs</code> or null if none found
2533   */
2534  private RegionInfo getRegionInfo(HBaseProtos.RegionSpecifier rs) throws UnknownRegionException {
2535    RegionInfo ri = null;
2536    switch (rs.getType()) {
2537      case REGION_NAME:
2538        final byte[] regionName = rs.getValue().toByteArray();
2539        ri = this.master.getAssignmentManager().getRegionInfo(regionName);
2540        break;
2541      case ENCODED_REGION_NAME:
2542        String encodedRegionName = Bytes.toString(rs.getValue().toByteArray());
2543        RegionState regionState = this.master.getAssignmentManager().getRegionStates().
2544            getRegionState(encodedRegionName);
2545        ri = regionState == null ?
2546          this.master.getAssignmentManager().loadRegionFromMeta(encodedRegionName) :
2547            regionState.getRegion();
2548        break;
2549      default:
2550        break;
2551    }
2552    return ri;
2553  }
2554
2555  /**
2556   * A 'raw' version of assign that does bulk assigns and skips Master state checks (assigns can
2557   * be made during Master startup). For use by Hbck2.
2558   */
2559  @Override
2560  public MasterProtos.AssignsResponse assigns(RpcController controller,
2561      MasterProtos.AssignsRequest request)
2562    throws ServiceException {
2563    if (this.master.getMasterProcedureExecutor() == null) {
2564      throw new ServiceException("Master's ProcedureExecutor not initialized; retry later");
2565    }
2566    MasterProtos.AssignsResponse.Builder responseBuilder =
2567        MasterProtos.AssignsResponse.newBuilder();
2568    try {
2569      boolean override = request.getOverride();
2570      LOG.info("{} assigns, override={}", master.getClientIdAuditPrefix(), override);
2571      for (HBaseProtos.RegionSpecifier rs: request.getRegionList()) {
2572        RegionInfo ri = getRegionInfo(rs);
2573        if (ri == null) {
2574          LOG.info("Unknown={}", rs);
2575          responseBuilder.addPid(Procedure.NO_PROC_ID);
2576          continue;
2577        }
2578        responseBuilder.addPid(this.master.getMasterProcedureExecutor().submitProcedure(this.master
2579            .getAssignmentManager().createOneAssignProcedure(ri, override)));
2580      }
2581      return responseBuilder.build();
2582    } catch (IOException ioe) {
2583      throw new ServiceException(ioe);
2584    }
2585  }
2586
2587  /**
2588   * A 'raw' version of unassign that does bulk unassigns and skips Master state checks
2589   * (unassigns can be made during Master startup). For use by Hbck2.
2590   */
2591  @Override
2592  public MasterProtos.UnassignsResponse unassigns(RpcController controller,
2593      MasterProtos.UnassignsRequest request)
2594      throws ServiceException {
2595    if (this.master.getMasterProcedureExecutor() == null) {
2596      throw new ServiceException("Master's ProcedureExecutor not initialized; retry later");
2597    }
2598    MasterProtos.UnassignsResponse.Builder responseBuilder =
2599        MasterProtos.UnassignsResponse.newBuilder();
2600    try {
2601      boolean override = request.getOverride();
2602      LOG.info("{} unassigns, override={}", master.getClientIdAuditPrefix(), override);
2603      for (HBaseProtos.RegionSpecifier rs: request.getRegionList()) {
2604        RegionInfo ri = getRegionInfo(rs);
2605        if (ri == null) {
2606          LOG.info("Unknown={}", rs);
2607          responseBuilder.addPid(Procedure.NO_PROC_ID);
2608          continue;
2609        }
2610        responseBuilder.addPid(this.master.getMasterProcedureExecutor().submitProcedure(this.master
2611            .getAssignmentManager().createOneUnassignProcedure(ri, override)));
2612      }
2613      return responseBuilder.build();
2614    } catch (IOException ioe) {
2615      throw new ServiceException(ioe);
2616    }
2617  }
2618
2619  /**
2620   * Bypass specified procedure to completion. The procedure is marked completed but no actual
2621   * work is done from the current state/step onwards. Parents of the procedure are also marked
2622   * for bypass.
2623   *
2624   * NOTE: this is a dangerous operation and should only be used to unstick buggy procedures. It
2625   * may leave the system in an incoherent state and may need to be followed by cleanup steps or
2626   * actions by the operator.
2627   *
2628   * @return BypassProcedureResponse indicating, per procedure, whether it was bypassed
2629   */
2630  @Override
2631  public MasterProtos.BypassProcedureResponse bypassProcedure(RpcController controller,
2632      MasterProtos.BypassProcedureRequest request) throws ServiceException {
2633    try {
2634      LOG.info("{} bypass procedures={}, waitTime={}, override={}, recursive={}",
2635          master.getClientIdAuditPrefix(), request.getProcIdList(), request.getWaitTime(),
2636          request.getOverride(), request.getRecursive());
2637      List<Boolean> ret =
2638          master.getMasterProcedureExecutor().bypassProcedure(request.getProcIdList(),
2639          request.getWaitTime(), request.getOverride(), request.getRecursive());
2640      return MasterProtos.BypassProcedureResponse.newBuilder().addAllBypassed(ret).build();
2641    } catch (IOException e) {
2642      throw new ServiceException(e);
2643    }
2644  }
2645
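  /**
   * Schedules a ServerCrashProcedure for each of the given servers, returning
   * {@link Procedure#NO_PROC_ID} for servers that already have an unfinished SCP. For use by
   * Hbck2.
   */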
2646  @Override
2647  public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProcedure(
2648      RpcController controller, MasterProtos.ScheduleServerCrashProcedureRequest request)
2649      throws ServiceException {
2650    List<Long> pids = new ArrayList<>();
2651    for (HBaseProtos.ServerName sn: request.getServerNameList()) {
2652      ServerName serverName = ProtobufUtil.toServerName(sn);
2653      LOG.info("{} schedule ServerCrashProcedure for {}",
2654          this.master.getClientIdAuditPrefix(), serverName);
2655      if (shouldSubmitSCP(serverName)) {
2656        pids.add(this.master.getServerManager().expireServer(serverName, true));
2657      } else {
2658        pids.add(Procedure.NO_PROC_ID);
2659      }
2660    }
2661    return MasterProtos.ScheduleServerCrashProcedureResponse.newBuilder().addAllPid(pids).build();
2662  }
2663
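  /**
   * Runs the {@link MetaFixer} against hbase:meta. For use by Hbck2.
   */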
2664  @Override
2665  public FixMetaResponse fixMeta(RpcController controller, FixMetaRequest request)
2666      throws ServiceException {
2667    try {
2668      MetaFixer mf = new MetaFixer(this.master);
2669      mf.fix();
2670      return FixMetaResponse.newBuilder().build();
2671    } catch (IOException ioe) {
2672      throw new ServiceException(ioe);
2673    }
2674  }
2675
2676  @Override
2677  public SwitchRpcThrottleResponse switchRpcThrottle(RpcController controller,
2678      SwitchRpcThrottleRequest request) throws ServiceException {
2679    try {
2680      master.checkInitialized();
2681      return master.getMasterQuotaManager().switchRpcThrottle(request);
2682    } catch (Exception e) {
2683      throw new ServiceException(e);
2684    }
2685  }
2686
2687  @Override
2688  public MasterProtos.IsRpcThrottleEnabledResponse isRpcThrottleEnabled(RpcController controller,
2689      MasterProtos.IsRpcThrottleEnabledRequest request) throws ServiceException {
2690    try {
2691      master.checkInitialized();
2692      return master.getMasterQuotaManager().isRpcThrottleEnabled(request);
2693    } catch (Exception e) {
2694      throw new ServiceException(e);
2695    }
2696  }
2697
2698  @Override
2699  public SwitchExceedThrottleQuotaResponse switchExceedThrottleQuota(RpcController controller,
2700      SwitchExceedThrottleQuotaRequest request) throws ServiceException {
2701    try {
2702      master.checkInitialized();
2703      return master.getMasterQuotaManager().switchExceedThrottleQuota(request);
2704    } catch (Exception e) {
2705      throw new ServiceException(e);
2706    }
2707  }
2708
2709  @Override
2710  public GrantResponse grant(RpcController controller, GrantRequest request)
2711      throws ServiceException {
2712    try {
2713      master.checkInitialized();
2714      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
2715        final UserPermission perm =
2716            ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
2717        boolean mergeExistingPermissions = request.getMergeExistingPermissions();
2718        master.cpHost.preGrant(perm, mergeExistingPermissions);
2719        try (Table table = master.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
2720          PermissionStorage.addUserPermission(getConfiguration(), perm, table,
2721            mergeExistingPermissions);
2722        }
2723        master.cpHost.postGrant(perm, mergeExistingPermissions);
2724        User caller = RpcServer.getRequestUser().orElse(null);
2725        if (AUDITLOG.isTraceEnabled()) {
2726          // audit log should store permission changes in addition to auth results
2727          String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
2728          AUDITLOG.trace("User {} (remote address: {}) granted permission {}", caller,
2729            remoteAddress, perm);
2730        }
2731        return GrantResponse.getDefaultInstance();
2732      } else {
2733        throw new DoNotRetryIOException(
2734            new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2735      }
2736    } catch (IOException ioe) {
2737      throw new ServiceException(ioe);
2738    }
2739  }
2740
2741  @Override
2742  public RevokeResponse revoke(RpcController controller, RevokeRequest request)
2743      throws ServiceException {
2744    try {
2745      master.checkInitialized();
2746      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
2747        final UserPermission userPermission =
2748            ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
2749        master.cpHost.preRevoke(userPermission);
2750        try (Table table = master.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
2751          PermissionStorage.removeUserPermission(master.getConfiguration(), userPermission, table);
2752        }
2753        master.cpHost.postRevoke(userPermission);
2754        User caller = RpcServer.getRequestUser().orElse(null);
2755        if (AUDITLOG.isTraceEnabled()) {
2756          // audit log should record all permission changes
2757          String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
2758          AUDITLOG.trace("User {} (remote address: {}) revoked permission {}", caller,
2759            remoteAddress, userPermission);
2760        }
2761        return RevokeResponse.getDefaultInstance();
2762      } else {
2763        throw new DoNotRetryIOException(
2764            new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2765      }
2766    } catch (IOException ioe) {
2767      throw new ServiceException(ioe);
2768    }
2769  }
2770
2771  @Override
2772  public GetUserPermissionsResponse getUserPermissions(RpcController controller,
2773      GetUserPermissionsRequest request) throws ServiceException {
2774    try {
2775      master.checkInitialized();
2776      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
2777        final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null;
2778        String namespace =
2779            request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null;
2780        TableName table =
2781            request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
2782        byte[] cf = request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null;
2783        byte[] cq =
2784            request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null;
2785        Type permissionType = request.hasType() ? request.getType() : null;
2786        master.getMasterCoprocessorHost().preGetUserPermissions(userName, namespace, table, cf, cq);
2787
2788        List<UserPermission> perms = null;
2789        if (permissionType == Type.Table) {
2790          boolean filter = (cf != null || userName != null);
2791          perms = PermissionStorage.getUserTablePermissions(master.getConfiguration(), table, cf,
2792            cq, userName, filter);
2793        } else if (permissionType == Type.Namespace) {
2794          perms = PermissionStorage.getUserNamespacePermissions(master.getConfiguration(),
2795            namespace, userName, userName != null);
2796        } else {
2797          perms = PermissionStorage.getUserPermissions(master.getConfiguration(), null, null, null,
2798            userName, userName != null);
2799          // Skip superusers when a filter user is specified
2800          if (userName == null) {
2801            // Add superusers explicitly to the result set, as PermissionStorage does not store
2802            // them. Also use acl as the table name to stay in line with the results of global
2803            // admin and to avoid leaking information about who the superusers are.
2804            for (String user : Superusers.getSuperUsers()) {
2805              perms.add(new UserPermission(user,
2806                  Permission.newBuilder().withActions(Action.values()).build()));
2807            }
2808          }
2809        }
2810
2811        master.getMasterCoprocessorHost().postGetUserPermissions(userName, namespace, table, cf,
2812          cq);
2813        AccessControlProtos.GetUserPermissionsResponse response =
2814            ShadedAccessControlUtil.buildGetUserPermissionsResponse(perms);
2815        return response;
2816      } else {
2817        throw new DoNotRetryIOException(
2818            new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2819      }
2820    } catch (IOException ioe) {
2821      throw new ServiceException(ioe);
2822    }
2823  }
2824
2825  @Override
2826  public HasUserPermissionsResponse hasUserPermissions(RpcController controller,
2827      HasUserPermissionsRequest request) throws ServiceException {
2828    try {
2829      master.checkInitialized();
2830      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
2831        User caller = RpcServer.getRequestUser().orElse(null);
2832        String userName =
2833            request.hasUserName() ? request.getUserName().toStringUtf8() : caller.getShortName();
2834        List<Permission> permissions = new ArrayList<>();
2835        for (int i = 0; i < request.getPermissionCount(); i++) {
2836          permissions.add(ShadedAccessControlUtil.toPermission(request.getPermission(i)));
2837        }
2838        master.getMasterCoprocessorHost().preHasUserPermissions(userName, permissions);
2839        if (!caller.getShortName().equals(userName)) {
2840          List<String> groups = AccessChecker.getUserGroups(userName);
2841          caller = new InputUser(userName, groups.toArray(new String[groups.size()]));
2842        }
2843        List<Boolean> hasUserPermissions = new ArrayList<>();
2844        if (getAccessChecker() != null) {
2845          for (Permission permission : permissions) {
2846            boolean hasUserPermission =
2847                getAccessChecker().hasUserPermission(caller, "hasUserPermissions", permission);
2848            hasUserPermissions.add(hasUserPermission);
2849          }
2850        } else {
2851          for (int i = 0; i < permissions.size(); i++) {
2852            hasUserPermissions.add(true);
2853          }
2854        }
2855        master.getMasterCoprocessorHost().postHasUserPermissions(userName, permissions);
2856        HasUserPermissionsResponse.Builder builder =
2857            HasUserPermissionsResponse.newBuilder().addAllHasUserPermission(hasUserPermissions);
2858        return builder.build();
2859      } else {
2860        throw new DoNotRetryIOException(
2861            new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2862      }
2863    } catch (IOException ioe) {
2864      throw new ServiceException(ioe);
2865    }
2866  }
2867
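  /**
   * Returns true if the WAL directory (or its splitting directory) of the given server contains
   * meta WAL files.
   */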
2868  private boolean containMetaWals(ServerName serverName) throws IOException {
2869    Path logDir = new Path(master.getWALRootDir(),
2870        AbstractFSWALProvider.getWALDirectoryName(serverName.toString()));
2871    Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
2872    Path checkDir = master.getFileSystem().exists(splitDir) ? splitDir : logDir;
2873    try {
2874      return master.getFileSystem().listStatus(checkDir, META_FILTER).length > 0;
2875    } catch (FileNotFoundException fnfe) {
2876      // If there is no such directory, then we don't contain meta WALs; scheduling an SCP used to
2877      // fail here because this threw FNFE when there were no server dirs ('Unknown Server').
2878      LOG.warn("No WAL dir for {}; continuing", serverName);
2879      return false;
2880    }
2881  }
2882
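  /**
   * Returns true if no unfinished ServerCrashProcedure exists for the given server yet, i.e. a new
   * SCP may be submitted.
   */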
2883  private boolean shouldSubmitSCP(ServerName serverName) {
2884    // check if there is already a SCP of this server running
2885    List<Procedure<MasterProcedureEnv>> procedures =
2886        master.getMasterProcedureExecutor().getProcedures();
2887    for (Procedure<MasterProcedureEnv> procedure : procedures) {
2888      if (procedure instanceof ServerCrashProcedure) {
2889        if (serverName.compareTo(((ServerCrashProcedure) procedure).getServerName()) == 0
2890            && !procedure.isFinished()) {
2891          LOG.info("There is already a SCP of server {} running, pid {}", serverName,
2892            procedure.getProcId());
2893          return false;
2894        }
2895      }
2896    }
2897    return true;
2898  }
2899
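  /**
   * Returns the cluster id if it is available; otherwise the field is left unset in the response.
   */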
2900  @Override
2901  public GetClusterIdResponse getClusterId(RpcController rpcController, GetClusterIdRequest request)
2902      throws ServiceException {
2903    GetClusterIdResponse.Builder resp = GetClusterIdResponse.newBuilder();
2904    String clusterId = master.getClusterId();
2905    if (clusterId != null) {
2906      resp.setClusterId(clusterId);
2907    }
2908    return resp.build();
2909  }
2910
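  /**
   * Returns the server name of the currently active master, if one is known.
   */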
2911  @Override
2912  public GetActiveMasterResponse getActiveMaster(RpcController rpcController,
2913      GetActiveMasterRequest request) throws ServiceException {
2914    GetActiveMasterResponse.Builder resp = GetActiveMasterResponse.newBuilder();
2915    Optional<ServerName> serverName = master.getActiveMaster();
2916    serverName.ifPresent(name -> resp.setServerName(ProtobufUtil.toServerName(name)));
2917    return resp.build();
2918  }
2919
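  /**
   * Returns the cached locations of the hbase:meta regions, if any are known.
   */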
2920  @Override
2921  public GetMetaRegionLocationsResponse getMetaRegionLocations(RpcController rpcController,
2922      GetMetaRegionLocationsRequest request) throws ServiceException {
2923    GetMetaRegionLocationsResponse.Builder response = GetMetaRegionLocationsResponse.newBuilder();
2924    Optional<List<HRegionLocation>> metaLocations =
2925        master.getMetaRegionLocationCache().getMetaRegionLocations();
2926    metaLocations.ifPresent(hRegionLocations -> hRegionLocations.forEach(
2927      location -> response.addMetaLocations(ProtobufUtil.toRegionLocation(location))));
2928    return response.build();
2929  }
2930
2931  @Override
2932  public GetRSGroupInfoResponse getRSGroupInfo(RpcController controller,
2933    GetRSGroupInfoRequest request) throws ServiceException {
2934    String groupName = request.getRSGroupName();
2935    LOG.info(
2936      master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" + groupName);
2937    try {
2938      if (master.getMasterCoprocessorHost() != null) {
2939        master.getMasterCoprocessorHost().preGetRSGroupInfo(groupName);
2940      }
2941      RSGroupInfo rsGroupInfo = master.getRSGroupInfoManager().getRSGroup(groupName);
2942      GetRSGroupInfoResponse resp;
2943      if (rsGroupInfo != null) {
2944        resp = GetRSGroupInfoResponse.newBuilder()
2945          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
2946      } else {
2947        resp = GetRSGroupInfoResponse.getDefaultInstance();
2948      }
2949      if (master.getMasterCoprocessorHost() != null) {
2950        master.getMasterCoprocessorHost().postGetRSGroupInfo(groupName);
2951      }
2952      return resp;
2953    } catch (IOException e) {
2954      throw new ServiceException(e);
2955    }
2956  }
2957
2958  @Override
2959  public GetRSGroupInfoOfTableResponse getRSGroupInfoOfTable(RpcController controller,
2960    GetRSGroupInfoOfTableRequest request) throws ServiceException {
2961    TableName tableName = ProtobufUtil.toTableName(request.getTableName());
2962    LOG.info(
2963      master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName);
2964    try {
2965      if (master.getMasterCoprocessorHost() != null) {
2966        master.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName);
2967      }
2968      GetRSGroupInfoOfTableResponse resp;
2969      TableDescriptor td = master.getTableDescriptors().get(tableName);
2970      if (td == null) {
2971        resp = GetRSGroupInfoOfTableResponse.getDefaultInstance();
2972      } else {
2973        RSGroupInfo rsGroupInfo = null;
2974        if (td.getRegionServerGroup().isPresent()) {
2975          rsGroupInfo = master.getRSGroupInfoManager().getRSGroup(td.getRegionServerGroup().get());
2976        }
2977        if (rsGroupInfo == null) {
2978          rsGroupInfo = master.getRSGroupInfoManager().getRSGroup(RSGroupInfo.DEFAULT_GROUP);
2979        }
2980        resp = GetRSGroupInfoOfTableResponse.newBuilder()
2981          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
2982      }
2983      if (master.getMasterCoprocessorHost() != null) {
2984        master.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName);
2985      }
2986      return resp;
2987    } catch (IOException e) {
2988      throw new ServiceException(e);
2989    }
2990  }
2991
2992  @Override
2993  public GetRSGroupInfoOfServerResponse getRSGroupInfoOfServer(RpcController controller,
2994    GetRSGroupInfoOfServerRequest request) throws ServiceException {
2995    Address hp =
2996      Address.fromParts(request.getServer().getHostName(), request.getServer().getPort());
2997    LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp);
2998    try {
2999      if (master.getMasterCoprocessorHost() != null) {
3000        master.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp);
3001      }
3002      RSGroupInfo rsGroupInfo = master.getRSGroupInfoManager().getRSGroupOfServer(hp);
3003      GetRSGroupInfoOfServerResponse resp;
3004      if (rsGroupInfo != null) {
3005        resp = GetRSGroupInfoOfServerResponse.newBuilder()
3006          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
3007      } else {
3008        resp = GetRSGroupInfoOfServerResponse.getDefaultInstance();
3009      }
3010      if (master.getMasterCoprocessorHost() != null) {
3011        master.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp);
3012      }
3013      return resp;
3014    } catch (IOException e) {
3015      throw new ServiceException(e);
3016    }
3017  }
3018
3019  @Override
3020  public MoveServersResponse moveServers(RpcController controller, MoveServersRequest request)
3021      throws ServiceException {
3022    Set<Address> hostPorts = Sets.newHashSet();
3023    MoveServersResponse.Builder builder = MoveServersResponse.newBuilder();
3024    for (HBaseProtos.ServerName el : request.getServersList()) {
3025      hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
3026    }
3027    LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup " +
3028        request.getTargetGroup());
3029    try {
3030      if (master.getMasterCoprocessorHost() != null) {
3031        master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup());
3032      }
3033      master.getRSGroupInfoManager().moveServers(hostPorts, request.getTargetGroup());
3034      if (master.getMasterCoprocessorHost() != null) {
3035        master.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup());
3036      }
3037    } catch (IOException e) {
3038      throw new ServiceException(e);
3039    }
3040    return builder.build();
3041  }
3042
3043  @Override
3044  public AddRSGroupResponse addRSGroup(RpcController controller, AddRSGroupRequest request)
3045      throws ServiceException {
3046    AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder();
3047    LOG.info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName());
3048    try {
3049      if (master.getMasterCoprocessorHost() != null) {
3050        master.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName());
3051      }
3052      master.getRSGroupInfoManager().addRSGroup(new RSGroupInfo(request.getRSGroupName()));
3053      if (master.getMasterCoprocessorHost() != null) {
3054        master.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName());
3055      }
3056    } catch (IOException e) {
3057      throw new ServiceException(e);
3058    }
3059    return builder.build();
3060  }
3061
3062  @Override
3063  public RemoveRSGroupResponse removeRSGroup(RpcController controller, RemoveRSGroupRequest request)
3064      throws ServiceException {
3065    RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder();
3066    LOG.info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName());
3067    try {
3068      if (master.getMasterCoprocessorHost() != null) {
3069        master.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName());
3070      }
3071      master.getRSGroupInfoManager().removeRSGroup(request.getRSGroupName());
3072      if (master.getMasterCoprocessorHost() != null) {
3073        master.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName());
3074      }
3075    } catch (IOException e) {
3076      throw new ServiceException(e);
3077    }
3078    return builder.build();
3079  }
3080
3081  @Override
3082  public BalanceRSGroupResponse balanceRSGroup(RpcController controller,
3083      BalanceRSGroupRequest request) throws ServiceException {
3084    BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder();
3085    LOG.info(
3086        master.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName());
3087    try {
3088      if (master.getMasterCoprocessorHost() != null) {
3089        master.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName());
3090      }
3091      boolean balancerRan =
3092          master.getRSGroupInfoManager().balanceRSGroup(request.getRSGroupName());
3093      builder.setBalanceRan(balancerRan);
3094      if (master.getMasterCoprocessorHost() != null) {
3095        master.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(), balancerRan);
3096      }
3097    } catch (IOException e) {
3098      throw new ServiceException(e);
3099    }
3100    return builder.build();
3101  }
3102
3103  @Override
3104  public ListRSGroupInfosResponse listRSGroupInfos(RpcController controller,
3105      ListRSGroupInfosRequest request) throws ServiceException {
3106    ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder();
3107    LOG.info(master.getClientIdAuditPrefix() + " list rsgroup");
3108    try {
3109      if (master.getMasterCoprocessorHost() != null) {
3110        master.getMasterCoprocessorHost().preListRSGroups();
3111      }
3112      List<RSGroupInfo> rsGroupInfos = master.getRSGroupInfoManager().listRSGroups().stream()
3113          .map(RSGroupInfo::new).collect(Collectors.toList());
3114      Map<String, RSGroupInfo> name2Info = new HashMap<>();
3115      List<TableDescriptor> needToFill =
3116          new ArrayList<>(master.getTableDescriptors().getAll().values());
3117      for (RSGroupInfo rsGroupInfo : rsGroupInfos) {
3118        name2Info.put(rsGroupInfo.getName(), rsGroupInfo);
3119        for (TableDescriptor td : master.getTableDescriptors().getAll().values()) {
3120          if (rsGroupInfo.containsTable(td.getTableName())) {
3121            needToFill.remove(td);
3122          }
3123        }
3124      }
3125      for (TableDescriptor td : needToFill) {
3126        String groupName = td.getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP);
3127        RSGroupInfo rsGroupInfo = name2Info.get(groupName);
3128        if (rsGroupInfo != null) {
3129          rsGroupInfo.addTable(td.getTableName());
3130        }
3131      }
3132      for (RSGroupInfo rsGroupInfo : rsGroupInfos) {
3133        // TODO: this can be done at once outside this loop, do not need to scan all every time.
3134        builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo));
3135      }
3136      if (master.getMasterCoprocessorHost() != null) {
3137        master.getMasterCoprocessorHost().postListRSGroups();
3138      }
3139    } catch (IOException e) {
3140      throw new ServiceException(e);
3141    }
3142    return builder.build();
3143  }
3144
3145  @Override
3146  public RemoveServersResponse removeServers(RpcController controller,
3147      RemoveServersRequest request) throws ServiceException {
3148    RemoveServersResponse.Builder builder = RemoveServersResponse.newBuilder();
3149    Set<Address> servers = Sets.newHashSet();
3150    for (HBaseProtos.ServerName el : request.getServersList()) {
3151      servers.add(Address.fromParts(el.getHostName(), el.getPort()));
3152    }
3153    LOG.info(master.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " +
3154        servers);
3155    try {
3156      if (master.getMasterCoprocessorHost() != null) {
3157        master.getMasterCoprocessorHost().preRemoveServers(servers);
3158      }
3159      master.getRSGroupInfoManager().removeServers(servers);
3160      if (master.getMasterCoprocessorHost() != null) {
3161        master.getMasterCoprocessorHost().postRemoveServers(servers);
3162      }
3163    } catch (IOException e) {
3164      throw new ServiceException(e);
3165    }
3166    return builder.build();
3167  }
3168
3169  @Override
3170  public ListTablesInRSGroupResponse listTablesInRSGroup(RpcController controller,
3171    ListTablesInRSGroupRequest request) throws ServiceException {
3172    ListTablesInRSGroupResponse.Builder builder = ListTablesInRSGroupResponse.newBuilder();
3173    String groupName = request.getGroupName();
3174    LOG.info(master.getClientIdAuditPrefix() + " list tables in rsgroup " + groupName);
3175    try {
3176      if (master.getMasterCoprocessorHost() != null) {
3177        master.getMasterCoprocessorHost().preListTablesInRSGroup(groupName);
3178      }
3179      RSGroupUtil.listTablesInRSGroup(master, groupName).stream()
3180        .map(ProtobufUtil::toProtoTableName).forEach(builder::addTableName);
3181      if (master.getMasterCoprocessorHost() != null) {
3182        master.getMasterCoprocessorHost().postListTablesInRSGroup(groupName);
3183      }
3184    } catch (IOException e) {
3185      throw new ServiceException(e);
3186    }
3187    return builder.build();
3188  }
3189
3190  @Override
3191  public GetConfiguredNamespacesAndTablesInRSGroupResponse
3192    getConfiguredNamespacesAndTablesInRSGroup(RpcController controller,
3193      GetConfiguredNamespacesAndTablesInRSGroupRequest request) throws ServiceException {
3194    GetConfiguredNamespacesAndTablesInRSGroupResponse.Builder builder =
3195      GetConfiguredNamespacesAndTablesInRSGroupResponse.newBuilder();
3196    String groupName = request.getGroupName();
3197    LOG.info(master.getClientIdAuditPrefix() + " get configured namespaces and tables in rsgroup " +
3198      groupName);
3199    try {
3200      if (master.getMasterCoprocessorHost() != null) {
3201        master.getMasterCoprocessorHost().preGetConfiguredNamespacesAndTablesInRSGroup(groupName);
3202      }
3203      for (NamespaceDescriptor nd : master.getClusterSchema().getNamespaces()) {
3204        if (groupName.equals(nd.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP))) {
3205          builder.addNamespace(nd.getName());
3206        }
3207      }
3208      for (TableDescriptor td : master.getTableDescriptors().getAll().values()) {
3209        if (td.getRegionServerGroup().map(g -> g.equals(groupName)).orElse(false)) {
3210          builder.addTableName(ProtobufUtil.toProtoTableName(td.getTableName()));
3211        }
3212      }
3213      if (master.getMasterCoprocessorHost() != null) {
3214        master.getMasterCoprocessorHost().postGetConfiguredNamespacesAndTablesInRSGroup(groupName);
3215      }
3216    } catch (IOException e) {
3217      throw new ServiceException(e);
3218    }
3219    return builder.build();
3220  }
3221}