/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import static org.apache.hadoop.hbase.master.MasterWalManager.META_FILTER;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.BindException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.client.VersionInfoUtil;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.PriorityFunction;
import org.apache.hadoop.hbase.ipc.QosPriority;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
import org.apache.hadoop.hbase.ipc.RpcServerFactory;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.hbck.HbckChore;
import org.apache.hadoop.hbase.master.janitor.MetaFixer;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.namequeues.BalancerDecisionDetails;
import org.apache.hadoop.hbase.namequeues.BalancerRejectionDetails;
import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest;
import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
import org.apache.hadoop.hbase.procedure2.LockType;
import org.apache.hadoop.hbase.procedure2.LockedResource;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.AccessChecker.InputUser;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.security.access.PermissionStorage;
import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.security.visibility.VisibilityController;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.NamespaceQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.TableQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RecentLogs;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ClientMetaService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;

/**
 * Implements the master RPC services.
 */
@InterfaceAudience.Private
@SuppressWarnings("deprecation")
public class MasterRpcServices extends RSRpcServices
  implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
  LockService.BlockingInterface, HbckService.BlockingInterface {

  private static final Logger LOG = LoggerFactory.getLogger(MasterRpcServices.class.getName());
  private static final Logger AUDITLOG =
    LoggerFactory.getLogger("SecurityLogger." + MasterRpcServices.class.getName());

  private final HMaster master;

  /**
   * @return Subset of configuration to pass to initializing regionservers: e.g. the filesystem to
   *         use and root directory to use.
   */
  private RegionServerStartupResponse.Builder createConfigurationSubset() {
    RegionServerStartupResponse.Builder resp =
      addConfig(RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR);
    resp = addConfig(resp, "fs.defaultFS");
    return addConfig(resp, "hbase.master.info.port");
  }

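  /**
   * Copies the value of the given configuration key from the master's configuration into the
   * startup response map.
   */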
  private RegionServerStartupResponse.Builder
    addConfig(final RegionServerStartupResponse.Builder resp, final String key) {
    NameStringPair.Builder entry =
      NameStringPair.newBuilder().setName(key).setValue(master.getConfiguration().get(key));
    resp.addMapEntries(entry.build());
    return resp;
  }

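  /**
   * Creates the RPC services fronting the given master; the master reference is retained for
   * servicing master-specific RPCs.
   */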
  public MasterRpcServices(HMaster m) throws IOException {
    super(m);
    master = m;
  }

  @Override
  protected Class<?> getRpcSchedulerFactoryClass() {
    Configuration conf = getConfiguration();
    if (conf != null) {
      return conf.getClass(MASTER_RPC_SCHEDULER_FACTORY_CLASS, super.getRpcSchedulerFactoryClass());
    } else {
      return super.getRpcSchedulerFactoryClass();
    }
  }

  @Override
  protected RpcServerInterface createRpcServer(final Server server,
    final RpcSchedulerFactory rpcSchedulerFactory, final InetSocketAddress bindAddress,
    final String name) throws IOException {
    final Configuration conf = regionServer.getConfiguration();
    // By default, the RpcServer at the HMaster enables the ByteBufferPool only if the HMaster can
    // host user table regions.
    boolean reservoirEnabled = conf.getBoolean(ByteBuffAllocator.ALLOCATOR_POOL_ENABLED_KEY,
      LoadBalancer.isMasterCanHostUserRegions(conf));
    try {
      // Use the final bindAddress for this server.
      return RpcServerFactory.createRpcServer(server, name, getServices(), bindAddress, conf,
        rpcSchedulerFactory.create(conf, this, server), reservoirEnabled);
    } catch (BindException be) {
      throw new IOException(be.getMessage() + ". To switch ports use the '" + HConstants.MASTER_PORT
        + "' configuration property.", be.getCause() != null ? be.getCause() : be);
    }
  }

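  /**
   * Uses a priority function that understands the master's QosPriority annotations (e.g. the
   * ADMIN_QOS annotation on getLastFlushedSequenceId below).
   */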
  @Override
  protected PriorityFunction createPriority() {
    return new MasterAnnotationReadingPriorityFunction(this);
  }

  /**
   * Performs the following pre-checks, in order:
   * <ol>
   * <li>Master is initialized</li>
   * <li>Rpc caller has admin permissions</li>
   * </ol>
   * @param requestName name of the rpc request. Used in reporting failures to provide context.
   * @throws ServiceException If any of the above pre-checks fails.
   */
  private void rpcPreCheck(String requestName) throws ServiceException {
    try {
      master.checkInitialized();
      requirePermission(requestName, Permission.Action.ADMIN);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  enum BalanceSwitchMode {
    SYNC,
    ASYNC
  }

  /**
   * Turns the balancer on or off according to the given BalanceSwitchMode.
   * @param b    new balancer switch value
   * @param mode BalanceSwitchMode
   * @return old balancer switch value
   */
  boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException {
    boolean oldValue = master.loadBalancerTracker.isBalancerOn();
    boolean newValue = b;
    try {
      if (master.cpHost != null) {
        master.cpHost.preBalanceSwitch(newValue);
      }
      try {
        if (mode == BalanceSwitchMode.SYNC) {
          synchronized (master.getLoadBalancer()) {
            master.loadBalancerTracker.setBalancerOn(newValue);
          }
        } else {
          master.loadBalancerTracker.setBalancerOn(newValue);
        }
      } catch (KeeperException ke) {
        throw new IOException(ke);
      }
      LOG.info(master.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue);
      if (master.cpHost != null) {
        master.cpHost.postBalanceSwitch(oldValue, newValue);
      }
      master.getLoadBalancer().updateBalancerStatus(newValue);
    } catch (IOException ioe) {
      LOG.warn("Error flipping balance switch", ioe);
    }
    return oldValue;
  }

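  /**
   * Convenience wrapper around {@link #switchBalancer(boolean, BalanceSwitchMode)} that updates
   * the balancer switch synchronously and returns the previous value.
   */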
  boolean synchronousBalanceSwitch(final boolean b) throws IOException {
    return switchBalancer(b, BalanceSwitchMode.SYNC);
  }

  /** Returns list of blocking services and their security info classes that this server supports */
  @Override
  protected List<BlockingServiceAndInterface> getServices() {
    List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
    bssi.add(new BlockingServiceAndInterface(MasterService.newReflectiveBlockingService(this),
      MasterService.BlockingInterface.class));
    bssi.add(
      new BlockingServiceAndInterface(RegionServerStatusService.newReflectiveBlockingService(this),
        RegionServerStatusService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this),
      LockService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(HbckService.newReflectiveBlockingService(this),
      HbckService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this),
      ClientMetaService.BlockingInterface.class));
    bssi.addAll(super.getServices());
    return bssi;
  }

  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller,
    GetLastFlushedSequenceIdRequest request) throws ServiceException {
    try {
      master.checkServiceStarted();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    byte[] encodedRegionName = request.getRegionName().toByteArray();
    RegionStoreSequenceIds ids =
      master.getServerManager().getLastFlushedSequenceId(encodedRegionName);
    return ResponseConverter.buildGetLastFlushedSequenceIdResponse(ids);
  }

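  /**
   * Handles the periodic load report from a region server: records the latest ServerMetrics with
   * the ServerManager, refreshes the set of online regions with the AssignmentManager, and bumps
   * the master's request metrics.
   */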
  @Override
  public RegionServerReportResponse regionServerReport(RpcController controller,
    RegionServerReportRequest request) throws ServiceException {
    try {
      master.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      ClusterStatusProtos.ServerLoad sl = request.getLoad();
      ServerName serverName = ProtobufUtil.toServerName(request.getServer());
      ServerMetrics oldLoad = master.getServerManager().getLoad(serverName);
      ServerMetrics newLoad =
        ServerMetricsBuilder.toServerMetrics(serverName, versionNumber, version, sl);
      master.getServerManager().regionServerReport(serverName, newLoad);
      master.getAssignmentManager().reportOnlineRegions(serverName,
        newLoad.getRegionMetrics().keySet());
      if (sl != null && master.metricsMaster != null) {
        // Up our metrics.
        master.metricsMaster.incrementRequests(
          sl.getTotalNumberOfRequests() - (oldLoad != null ? oldLoad.getRequestCount() : 0));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return RegionServerReportResponse.newBuilder().build();
  }

  @Override
  public RegionServerStartupResponse regionServerStartup(RpcController controller,
    RegionServerStartupRequest request) throws ServiceException {
    // Register with server manager
    try {
      master.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      InetAddress ia = master.getRemoteInetAddress(request.getPort(), request.getServerStartCode());
      // if regionserver passed hostname to use,
      // then use it instead of doing a reverse DNS lookup
      ServerName rs =
        master.getServerManager().regionServerStartup(request, versionNumber, version, ia);

      // Send back some config info
      RegionServerStartupResponse.Builder resp = createConfigurationSubset();
      NameStringPair.Builder entry = NameStringPair.newBuilder()
        .setName(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER).setValue(rs.getHostname());
      resp.addMapEntries(entry.build());

      return resp.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public ReportRSFatalErrorResponse reportRSFatalError(RpcController controller,
    ReportRSFatalErrorRequest request) throws ServiceException {
    String errorText = request.getErrorMessage();
    ServerName sn = ProtobufUtil.toServerName(request.getServer());
    String msg = sn + " reported a fatal error:\n" + errorText;
    LOG.warn(msg);
    master.rsFatals.add(msg);
    return ReportRSFatalErrorResponse.newBuilder().build();
  }

  @Override
  public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req)
    throws ServiceException {
    try {
      long procId = master.addColumn(ProtobufUtil.toTableName(req.getTableName()),
        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(),
        req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId
        return AddColumnResponse.newBuilder().build();
      } else {
        return AddColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public AssignRegionResponse assignRegion(RpcController controller, AssignRegionRequest req)
    throws ServiceException {
    try {
      master.checkInitialized();

      final RegionSpecifierType type = req.getRegion().getType();
      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("assignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
          + " actual: " + type);
      }

      final byte[] regionName = req.getRegion().getValue().toByteArray();
      final RegionInfo regionInfo = master.getAssignmentManager().getRegionInfo(regionName);
      if (regionInfo == null) {
        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
      }

      final AssignRegionResponse arr = AssignRegionResponse.newBuilder().build();
      if (master.cpHost != null) {
        master.cpHost.preAssign(regionInfo);
      }
      LOG.info(master.getClientIdAuditPrefix() + " assign " + regionInfo.getRegionNameAsString());
      master.getAssignmentManager().assign(regionInfo);
      if (master.cpHost != null) {
        master.cpHost.postAssign(regionInfo);
      }
      return arr;
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public BalanceResponse balance(RpcController controller, BalanceRequest request)
    throws ServiceException {
    try {
      return ProtobufUtil.toBalanceResponse(master.balance(ProtobufUtil.toBalanceRequest(request)));
    } catch (IOException ex) {
      throw new ServiceException(ex);
    }
  }

  @Override
  public CreateNamespaceResponse createNamespace(RpcController controller,
    CreateNamespaceRequest request) throws ServiceException {
    try {
      long procId =
        master.createNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
          request.getNonceGroup(), request.getNonce());
      return CreateNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
    throws ServiceException {
    TableDescriptor tableDescriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema());
    byte[][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
    try {
      long procId =
        master.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
      LOG.info(master.getClientIdAuditPrefix() + " procedure request for creating table: "
        + req.getTableSchema().getTableName() + " procId is: " + procId);
      return CreateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req)
    throws ServiceException {
    try {
      long procId = master.deleteColumn(ProtobufUtil.toTableName(req.getTableName()),
        req.getColumnName().toByteArray(), req.getNonceGroup(), req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId
        return DeleteColumnResponse.newBuilder().build();
      } else {
        return DeleteColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public DeleteNamespaceResponse deleteNamespace(RpcController controller,
    DeleteNamespaceRequest request) throws ServiceException {
    try {
      long procId = master.deleteNamespace(request.getNamespaceName(), request.getNonceGroup(),
        request.getNonce());
      return DeleteNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Execute the Delete Snapshot operation.
   * @return DeleteSnapshotResponse (a protobuf wrapped void) if the snapshot existed and was
   *         deleted properly.
   * @throws ServiceException wrapping SnapshotDoesNotExistException if the specified snapshot did
   *                          not exist.
   */
  @Override
  public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
    DeleteSnapshotRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      master.snapshotManager.checkSnapshotSupport();

      LOG.info(master.getClientIdAuditPrefix() + " delete " + request.getSnapshot());
      master.snapshotManager.deleteSnapshot(request.getSnapshot());
      return DeleteSnapshotResponse.newBuilder().build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request)
    throws ServiceException {
    try {
      long procId = master.deleteTable(ProtobufUtil.toTableName(request.getTableName()),
        request.getNonceGroup(), request.getNonce());
      return DeleteTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public TruncateTableResponse truncateTable(RpcController controller, TruncateTableRequest request)
    throws ServiceException {
    try {
      long procId = master.truncateTable(ProtobufUtil.toTableName(request.getTableName()),
        request.getPreserveSplits(), request.getNonceGroup(), request.getNonce());
      return TruncateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public DisableTableResponse disableTable(RpcController controller, DisableTableRequest request)
    throws ServiceException {
    try {
      long procId = master.disableTable(ProtobufUtil.toTableName(request.getTableName()),
        request.getNonceGroup(), request.getNonce());
      return DisableTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
    EnableCatalogJanitorRequest req) throws ServiceException {
    rpcPreCheck("enableCatalogJanitor");
    return EnableCatalogJanitorResponse.newBuilder()
      .setPrevValue(master.catalogJanitorChore.setEnabled(req.getEnable())).build();
  }

  @Override
  public SetCleanerChoreRunningResponse setCleanerChoreRunning(RpcController c,
    SetCleanerChoreRunningRequest req) throws ServiceException {
    rpcPreCheck("setCleanerChoreRunning");

    boolean prevValue =
      master.getLogCleaner().getEnabled() && master.getHFileCleaner().getEnabled();
    master.getLogCleaner().setEnabled(req.getOn());
    for (HFileCleaner hFileCleaner : master.getHFileCleaners()) {
      hFileCleaner.setEnabled(req.getOn());
    }
    return SetCleanerChoreRunningResponse.newBuilder().setPrevValue(prevValue).build();
  }

  @Override
  public EnableTableResponse enableTable(RpcController controller, EnableTableRequest request)
    throws ServiceException {
    try {
      long procId = master.enableTable(ProtobufUtil.toTableName(request.getTableName()),
        request.getNonceGroup(), request.getNonce());
      return EnableTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public MergeTableRegionsResponse mergeTableRegions(RpcController c,
    MergeTableRegionsRequest request) throws ServiceException {
    try {
      master.checkInitialized();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }

    RegionStates regionStates = master.getAssignmentManager().getRegionStates();

    RegionInfo[] regionsToMerge = new RegionInfo[request.getRegionCount()];
    for (int i = 0; i < request.getRegionCount(); i++) {
      final byte[] encodedNameOfRegion = request.getRegion(i).getValue().toByteArray();
      if (request.getRegion(i).getType() != RegionSpecifierType.ENCODED_REGION_NAME) {
        LOG.warn("MergeRegions specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
          + " actual: region " + i + " =" + request.getRegion(i).getType());
      }
      RegionState regionState = regionStates.getRegionState(Bytes.toString(encodedNameOfRegion));
      if (regionState == null) {
        throw new ServiceException(
          new UnknownRegionException(Bytes.toStringBinary(encodedNameOfRegion)));
      }
      regionsToMerge[i] = regionState.getRegion();
    }

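    // Submit the merge as a procedure; the returned procedure id lets the client track
    // completion.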
    try {
      long procId = master.mergeRegions(regionsToMerge, request.getForcible(),
        request.getNonceGroup(), request.getNonce());
      return MergeTableRegionsResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public SplitTableRegionResponse splitRegion(final RpcController controller,
    final SplitTableRegionRequest request) throws ServiceException {
    try {
      long procId = master.splitRegion(ProtobufUtil.toRegionInfo(request.getRegionInfo()),
        request.hasSplitRow() ? request.getSplitRow().toByteArray() : null, request.getNonceGroup(),
        request.getNonce());
      return SplitTableRegionResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }

  @Override
  public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller,
    final ClientProtos.CoprocessorServiceRequest request) throws ServiceException {
    rpcPreCheck("execMasterService");
    try {
      ServerRpcController execController = new ServerRpcController();
      ClientProtos.CoprocessorServiceCall call = request.getCall();
      String serviceName = call.getServiceName();
      String methodName = call.getMethodName();
      if (!master.coprocessorServiceHandlers.containsKey(serviceName)) {
        throw new UnknownProtocolException(null,
          "No registered Master Coprocessor Endpoint found for " + serviceName
            + ". Has it been enabled?");
      }

      com.google.protobuf.Service service = master.coprocessorServiceHandlers.get(serviceName);
      com.google.protobuf.Descriptors.ServiceDescriptor serviceDesc =
        service.getDescriptorForType();
      com.google.protobuf.Descriptors.MethodDescriptor methodDesc =
        CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc);

      com.google.protobuf.Message execRequest =
        CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest());
      final com.google.protobuf.Message.Builder responseBuilder =
        service.getResponsePrototype(methodDesc).newBuilderForType();
      service.callMethod(methodDesc, execController, execRequest, (message) -> {
        if (message != null) {
          responseBuilder.mergeFrom(message);
        }
      });
      com.google.protobuf.Message execResult = responseBuilder.build();
      if (execController.getFailedOn() != null) {
        throw execController.getFailedOn();
      }

      String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
      User caller = RpcServer.getRequestUser().orElse(null);
      AUDITLOG.info("User {} (remote address: {}) master service request for {}.{}", caller,
        remoteAddress, serviceName, methodName);

      return CoprocessorRpcUtils.getResponse(execResult, HConstants.EMPTY_BYTE_ARRAY);
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }

  /**
   * Triggers an asynchronous attempt to run a distributed procedure. {@inheritDoc}
   */
  @Override
  public ExecProcedureResponse execProcedure(RpcController controller, ExecProcedureRequest request)
    throws ServiceException {
    try {
      master.checkInitialized();
      ProcedureDescription desc = request.getProcedure();
      MasterProcedureManager mpm =
        master.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
      if (mpm == null) {
        throw new ServiceException(
          new DoNotRetryIOException("The procedure is not registered: " + desc.getSignature()));
      }
      LOG.info(master.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
      mpm.checkPermissions(desc, getAccessChecker(), RpcServer.getRequestUser().orElse(null));
      mpm.execProcedure(desc);
      // send back the max amount of time the client should wait for the procedure
      // to complete
      long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME;
      return ExecProcedureResponse.newBuilder().setExpectedTimeout(waitTime).build();
    } catch (ForeignException e) {
      throw new ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Triggers a synchronous attempt to run a distributed procedure and sets return data in response.
   * {@inheritDoc}
   */
  @Override
  public ExecProcedureResponse execProcedureWithRet(RpcController controller,
    ExecProcedureRequest request) throws ServiceException {
    rpcPreCheck("execProcedureWithRet");
    try {
      ProcedureDescription desc = request.getProcedure();
      MasterProcedureManager mpm =
        master.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
      if (mpm == null) {
        throw new ServiceException("The procedure is not registered: " + desc.getSignature());
      }
      LOG.info(master.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
      byte[] data = mpm.execProcedureWithRet(desc);
      ExecProcedureResponse.Builder builder = ExecProcedureResponse.newBuilder();
      // set return data if available
      if (data != null) {
        builder.setReturnData(UnsafeByteOperations.unsafeWrap(data));
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public GetClusterStatusResponse getClusterStatus(RpcController controller,
    GetClusterStatusRequest req) throws ServiceException {
    GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
    try {
      // We used to check if Master was up at this point but let this call proceed even if
      // Master is initializing... else we shut out stuff like hbck2 tool from making progress
      // since it queries this method to figure cluster version. hbck2 wants to be able to work
      // against Master even if it is 'initializing' so it can do fixup.
      response.setClusterStatus(ClusterMetricsBuilder.toClusterStatus(
        master.getClusterMetrics(ClusterMetricsBuilder.toOptions(req.getOptionsList()))));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

988  /**
   * List the currently available/stored snapshots. Any in-progress snapshots are ignored.
990   */
991  @Override
992  public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller,
993    GetCompletedSnapshotsRequest request) throws ServiceException {
994    try {
995      master.checkInitialized();
996      GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder();
997      List<SnapshotDescription> snapshots = master.snapshotManager.getCompletedSnapshots();
998
999      // convert to protobuf
1000      for (SnapshotDescription snapshot : snapshots) {
1001        builder.addSnapshots(snapshot);
1002      }
1003      return builder.build();
1004    } catch (IOException e) {
1005      throw new ServiceException(e);
1006    }
1007  }
1008
1009  @Override
1010  public ListNamespacesResponse listNamespaces(RpcController controller,
1011    ListNamespacesRequest request) throws ServiceException {
1012    try {
1013      return ListNamespacesResponse.newBuilder().addAllNamespaceName(master.listNamespaces())
1014        .build();
1015    } catch (IOException e) {
1016      throw new ServiceException(e);
1017    }
1018  }
1019
1020  @Override
1021  public GetNamespaceDescriptorResponse getNamespaceDescriptor(RpcController controller,
1022    GetNamespaceDescriptorRequest request) throws ServiceException {
1023    try {
1024      return GetNamespaceDescriptorResponse.newBuilder()
1025        .setNamespaceDescriptor(
1026          ProtobufUtil.toProtoNamespaceDescriptor(master.getNamespace(request.getNamespaceName())))
1027        .build();
1028    } catch (IOException e) {
1029      throw new ServiceException(e);
1030    }
1031  }
1032
1033  /**
1034   * Get the number of regions of the table that have been updated by the alter.
   * @return Pair indicating the number of regions updated: Pair.getFirst is the number of regions
   *         that are yet to be updated, Pair.getSecond is the total number of regions of the
   *         table.
1037   */
1038  @Override
1039  public GetSchemaAlterStatusResponse getSchemaAlterStatus(RpcController controller,
1040    GetSchemaAlterStatusRequest req) throws ServiceException {
1041    // TODO: currently, we query using the table name on the client side. this
1042    // may overlap with other table operations or the table operation may
1043    // have completed before querying this API. We need to refactor to a
1044    // transaction system in the future to avoid these ambiguities.
1045    TableName tableName = ProtobufUtil.toTableName(req.getTableName());
1046
1047    try {
1048      master.checkInitialized();
1049      Pair<Integer, Integer> pair = master.getAssignmentManager().getReopenStatus(tableName);
1050      GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder();
1051      ret.setYetToUpdateRegions(pair.getFirst());
1052      ret.setTotalRegions(pair.getSecond());
1053      return ret.build();
1054    } catch (IOException ioe) {
1055      throw new ServiceException(ioe);
1056    }
1057  }
1058
1059  /**
1060   * Get list of TableDescriptors for requested tables.
1061   * @param c   Unused (set to null).
   * @param req GetTableDescriptorsRequest that contains: - tableNames: requested tables, or if
   *            empty, all are requested.
1064   */
1065  @Override
1066  public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
1067    GetTableDescriptorsRequest req) throws ServiceException {
1068    try {
1069      master.checkInitialized();
1070
1071      final String regex = req.hasRegex() ? req.getRegex() : null;
1072      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
1073      List<TableName> tableNameList = null;
1074      if (req.getTableNamesCount() > 0) {
1075        tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
1076        for (HBaseProtos.TableName tableNamePB : req.getTableNamesList()) {
1077          tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
1078        }
1079      }
1080
1081      List<TableDescriptor> descriptors =
1082        master.listTableDescriptors(namespace, regex, tableNameList, req.getIncludeSysTables());
1083
1084      GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
1085      if (descriptors != null && descriptors.size() > 0) {
1086        // Add the table descriptors to the response
1087        for (TableDescriptor htd : descriptors) {
1088          builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
1089        }
1090      }
1091      return builder.build();
1092    } catch (IOException ioe) {
1093      throw new ServiceException(ioe);
1094    }
1095  }
1096
1097  /**
   * Get list of userspace table names.
   * @param controller Unused (set to null).
   * @param req        GetTableNamesRequest
1101   */
1102  @Override
1103  public GetTableNamesResponse getTableNames(RpcController controller, GetTableNamesRequest req)
1104    throws ServiceException {
1105    try {
1106      master.checkServiceStarted();
1107
1108      final String regex = req.hasRegex() ? req.getRegex() : null;
1109      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
1110      List<TableName> tableNames =
1111        master.listTableNames(namespace, regex, req.getIncludeSysTables());
1112
1113      GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
1114      if (tableNames != null && tableNames.size() > 0) {
1115        // Add the table names to the response
1116        for (TableName table : tableNames) {
1117          builder.addTableNames(ProtobufUtil.toProtoTableName(table));
1118        }
1119      }
1120      return builder.build();
1121    } catch (IOException e) {
1122      throw new ServiceException(e);
1123    }
1124  }
1125
1126  @Override
1127  public GetTableStateResponse getTableState(RpcController controller, GetTableStateRequest request)
1128    throws ServiceException {
1129    try {
1130      master.checkServiceStarted();
1131      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
1132      TableState ts = master.getTableStateManager().getTableState(tableName);
1133      GetTableStateResponse.Builder builder = GetTableStateResponse.newBuilder();
1134      builder.setTableState(ts.convert());
1135      return builder.build();
1136    } catch (IOException e) {
1137      throw new ServiceException(e);
1138    }
1139  }
1140
1141  @Override
1142  public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
1143    IsCatalogJanitorEnabledRequest req) throws ServiceException {
1144    return IsCatalogJanitorEnabledResponse.newBuilder().setValue(master.isCatalogJanitorEnabled())
1145      .build();
1146  }
1147
1148  @Override
1149  public IsCleanerChoreEnabledResponse isCleanerChoreEnabled(RpcController c,
1150    IsCleanerChoreEnabledRequest req) throws ServiceException {
1151    return IsCleanerChoreEnabledResponse.newBuilder().setValue(master.isCleanerChoreEnabled())
1152      .build();
1153  }
1154
1155  @Override
1156  public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req)
1157    throws ServiceException {
1158    try {
1159      master.checkServiceStarted();
1160      return IsMasterRunningResponse.newBuilder().setIsMasterRunning(!master.isStopped()).build();
1161    } catch (IOException e) {
1162      throw new ServiceException(e);
1163    }
1164  }
1165
1166  /**
1167   * Checks if the specified procedure is done.
1168   * @return true if the procedure is done, false if the procedure is in the process of completing
1169   * @throws ServiceException if invalid procedure or failed procedure with progress failure reason.
1170   */
1171  @Override
1172  public IsProcedureDoneResponse isProcedureDone(RpcController controller,
1173    IsProcedureDoneRequest request) throws ServiceException {
1174    try {
1175      master.checkInitialized();
1176      ProcedureDescription desc = request.getProcedure();
1177      MasterProcedureManager mpm =
1178        master.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
1179      if (mpm == null) {
1180        throw new ServiceException("The procedure is not registered: " + desc.getSignature());
1181      }
1182      LOG.debug("Checking to see if procedure from request:" + desc.getSignature() + " is done");
1183
1184      IsProcedureDoneResponse.Builder builder = IsProcedureDoneResponse.newBuilder();
1185      boolean done = mpm.isProcedureDone(desc);
1186      builder.setDone(done);
1187      return builder.build();
1188    } catch (ForeignException e) {
1189      throw new ServiceException(e.getCause());
1190    } catch (IOException e) {
1191      throw new ServiceException(e);
1192    }
1193  }
1194
1195  /**
1196   * Checks if the specified snapshot is done.
1197   * @return true if the snapshot is in file system ready to use, false if the snapshot is in the
1198   *         process of completing
1199   * @throws ServiceException wrapping UnknownSnapshotException if invalid snapshot, or a wrapped
1200   *                          HBaseSnapshotException with progress failure reason.
1201   */
1202  @Override
1203  public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
1204    IsSnapshotDoneRequest request) throws ServiceException {
1205    LOG.debug("Checking to see if snapshot from request:"
1206      + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()) + " is done");
1207    try {
1208      master.checkInitialized();
1209      IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
1210      boolean done = master.snapshotManager.isSnapshotDone(request.getSnapshot());
1211      builder.setDone(done);
1212      return builder.build();
1213    } catch (ForeignException e) {
1214      throw new ServiceException(e.getCause());
1215    } catch (IOException e) {
1216      throw new ServiceException(e);
1217    }
1218  }
1219
1220  @Override
1221  public GetProcedureResultResponse getProcedureResult(RpcController controller,
1222    GetProcedureResultRequest request) throws ServiceException {
1223    LOG.debug("Checking to see if procedure is done pid=" + request.getProcId());
1224    try {
1225      master.checkInitialized();
1226      GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder();
1227      long procId = request.getProcId();
1228      ProcedureExecutor<?> executor = master.getMasterProcedureExecutor();
1229      Procedure<?> result = executor.getResultOrProcedure(procId);
1230      if (result != null) {
1231        builder.setSubmittedTime(result.getSubmittedTime());
1232        builder.setLastUpdate(result.getLastUpdate());
1233        if (executor.isFinished(procId)) {
1234          builder.setState(GetProcedureResultResponse.State.FINISHED);
1235          if (result.isFailed()) {
1236            IOException exception = MasterProcedureUtil.unwrapRemoteIOException(result);
1237            builder.setException(ForeignExceptionUtil.toProtoForeignException(exception));
1238          }
1239          byte[] resultData = result.getResult();
1240          if (resultData != null) {
1241            builder.setResult(UnsafeByteOperations.unsafeWrap(resultData));
1242          }
1243          master.getMasterProcedureExecutor().removeResult(request.getProcId());
1244        } else {
1245          builder.setState(GetProcedureResultResponse.State.RUNNING);
1246        }
1247      } else {
1248        builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
1249      }
1250      return builder.build();
1251    } catch (IOException e) {
1252      throw new ServiceException(e);
1253    }
1254  }
1255
1256  @Override
1257  public AbortProcedureResponse abortProcedure(RpcController rpcController,
1258    AbortProcedureRequest request) throws ServiceException {
1259    try {
1260      AbortProcedureResponse.Builder response = AbortProcedureResponse.newBuilder();
1261      boolean abortResult =
1262        master.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning());
1263      response.setIsProcedureAborted(abortResult);
1264      return response.build();
1265    } catch (IOException e) {
1266      throw new ServiceException(e);
1267    }
1268  }
1269
1270  @Override
1271  public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c,
1272    ListNamespaceDescriptorsRequest request) throws ServiceException {
1273    try {
1274      ListNamespaceDescriptorsResponse.Builder response =
1275        ListNamespaceDescriptorsResponse.newBuilder();
1276      for (NamespaceDescriptor ns : master.getNamespaces()) {
1277        response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns));
1278      }
1279      return response.build();
1280    } catch (IOException e) {
1281      throw new ServiceException(e);
1282    }
1283  }
1284
1285  @Override
1286  public GetProceduresResponse getProcedures(RpcController rpcController,
1287    GetProceduresRequest request) throws ServiceException {
1288    try {
1289      final GetProceduresResponse.Builder response = GetProceduresResponse.newBuilder();
1290      for (Procedure<?> p : master.getProcedures()) {
1291        response.addProcedure(ProcedureUtil.convertToProtoProcedure(p));
1292      }
1293      return response.build();
1294    } catch (IOException e) {
1295      throw new ServiceException(e);
1296    }
1297  }
1298
1299  @Override
1300  public GetLocksResponse getLocks(RpcController controller, GetLocksRequest request)
1301    throws ServiceException {
1302    try {
1303      final GetLocksResponse.Builder builder = GetLocksResponse.newBuilder();
1304
1305      for (LockedResource lockedResource : master.getLocks()) {
1306        builder.addLock(ProcedureUtil.convertToProtoLockedResource(lockedResource));
1307      }
1308
1309      return builder.build();
1310    } catch (IOException e) {
1311      throw new ServiceException(e);
1312    }
1313  }
1314
1315  @Override
1316  public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
1317    ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
1318    try {
1319      ListTableDescriptorsByNamespaceResponse.Builder b =
1320        ListTableDescriptorsByNamespaceResponse.newBuilder();
1321      for (TableDescriptor htd : master
1322        .listTableDescriptorsByNamespace(request.getNamespaceName())) {
1323        b.addTableSchema(ProtobufUtil.toTableSchema(htd));
1324      }
1325      return b.build();
1326    } catch (IOException e) {
1327      throw new ServiceException(e);
1328    }
1329  }
1330
1331  @Override
1332  public ListTableNamesByNamespaceResponse listTableNamesByNamespace(RpcController c,
1333    ListTableNamesByNamespaceRequest request) throws ServiceException {
1334    try {
1335      ListTableNamesByNamespaceResponse.Builder b = ListTableNamesByNamespaceResponse.newBuilder();
1336      for (TableName tableName : master.listTableNamesByNamespace(request.getNamespaceName())) {
1337        b.addTableName(ProtobufUtil.toProtoTableName(tableName));
1338      }
1339      return b.build();
1340    } catch (IOException e) {
1341      throw new ServiceException(e);
1342    }
1343  }
1344
1345  @Override
1346  public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req)
1347    throws ServiceException {
1348    try {
1349      long procId = master.modifyColumn(ProtobufUtil.toTableName(req.getTableName()),
1350        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(),
1351        req.getNonce());
1352      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId
1354        return ModifyColumnResponse.newBuilder().build();
1355      } else {
1356        return ModifyColumnResponse.newBuilder().setProcId(procId).build();
1357      }
1358    } catch (IOException ioe) {
1359      throw new ServiceException(ioe);
1360    }
1361  }
1362
1363  @Override
1364  public ModifyColumnStoreFileTrackerResponse modifyColumnStoreFileTracker(RpcController controller,
1365    ModifyColumnStoreFileTrackerRequest req) throws ServiceException {
1366    try {
1367      long procId =
1368        master.modifyColumnStoreFileTracker(ProtobufUtil.toTableName(req.getTableName()),
1369          req.getFamily().toByteArray(), req.getDstSft(), req.getNonceGroup(), req.getNonce());
1370      return ModifyColumnStoreFileTrackerResponse.newBuilder().setProcId(procId).build();
1371    } catch (IOException ioe) {
1372      throw new ServiceException(ioe);
1373    }
1374  }
1375
1376  @Override
1377  public FlushMasterStoreResponse flushMasterStore(RpcController controller,
1378    FlushMasterStoreRequest request) throws ServiceException {
1379    rpcPreCheck("flushMasterStore");
1380    try {
1381      master.flushMasterStore();
1382    } catch (IOException ioe) {
1383      throw new ServiceException(ioe);
1384    }
1385    return FlushMasterStoreResponse.newBuilder().build();
1386  }
1387
1388  @Override
1389  public ModifyNamespaceResponse modifyNamespace(RpcController controller,
1390    ModifyNamespaceRequest request) throws ServiceException {
1391    try {
1392      long procId =
1393        master.modifyNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
1394          request.getNonceGroup(), request.getNonce());
1395      return ModifyNamespaceResponse.newBuilder().setProcId(procId).build();
1396    } catch (IOException e) {
1397      throw new ServiceException(e);
1398    }
1399  }
1400
1401  @Override
1402  public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req)
1403    throws ServiceException {
1404    try {
1405      long procId = master.modifyTable(ProtobufUtil.toTableName(req.getTableName()),
1406        ProtobufUtil.toTableDescriptor(req.getTableSchema()), req.getNonceGroup(), req.getNonce());
1407      return ModifyTableResponse.newBuilder().setProcId(procId).build();
1408    } catch (IOException ioe) {
1409      throw new ServiceException(ioe);
1410    }
1411  }
1412
1413  @Override
1414  public ModifyTableStoreFileTrackerResponse modifyTableStoreFileTracker(RpcController controller,
1415    ModifyTableStoreFileTrackerRequest req) throws ServiceException {
1416    try {
1417      long procId = master.modifyTableStoreFileTracker(ProtobufUtil.toTableName(req.getTableName()),
1418        req.getDstSft(), req.getNonceGroup(), req.getNonce());
1419      return ModifyTableStoreFileTrackerResponse.newBuilder().setProcId(procId).build();
1420    } catch (IOException ioe) {
1421      throw new ServiceException(ioe);
1422    }
1423  }
1424
1425  @Override
1426  public MoveRegionResponse moveRegion(RpcController controller, MoveRegionRequest req)
1427    throws ServiceException {
1428    final byte[] encodedRegionName = req.getRegion().getValue().toByteArray();
1429    RegionSpecifierType type = req.getRegion().getType();
1430    final byte[] destServerName = (req.hasDestServerName())
1431      ? Bytes.toBytes(ProtobufUtil.toServerName(req.getDestServerName()).getServerName())
1432      : null;
1433    MoveRegionResponse mrr = MoveRegionResponse.newBuilder().build();
1434
1435    if (type != RegionSpecifierType.ENCODED_REGION_NAME) {
1436      LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
1437        + " actual: " + type);
1438    }
1439
1440    try {
1441      master.checkInitialized();
1442      master.move(encodedRegionName, destServerName);
1443    } catch (IOException ioe) {
1444      throw new ServiceException(ioe);
1445    }
1446    return mrr;
1447  }
1448
1449  /**
1450   * Offline specified region from master's in-memory state. It will not attempt to reassign the
1451   * region as in unassign. This is a special method that should be used by experts or hbck.
1452   */
1453  @Override
1454  public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request)
1455    throws ServiceException {
1456    try {
1457      master.checkInitialized();
1458
1459      final RegionSpecifierType type = request.getRegion().getType();
1460      if (type != RegionSpecifierType.REGION_NAME) {
1461        LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
1462          + " actual: " + type);
1463      }
1464
1465      final byte[] regionName = request.getRegion().getValue().toByteArray();
1466      final RegionInfo hri = master.getAssignmentManager().getRegionInfo(regionName);
1467      if (hri == null) {
1468        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
1469      }
1470
1471      if (master.cpHost != null) {
1472        master.cpHost.preRegionOffline(hri);
1473      }
1474      LOG.info(master.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString());
1475      master.getAssignmentManager().offlineRegion(hri);
1476      if (master.cpHost != null) {
1477        master.cpHost.postRegionOffline(hri);
1478      }
1479    } catch (IOException ioe) {
1480      throw new ServiceException(ioe);
1481    }
1482    return OfflineRegionResponse.newBuilder().build();
1483  }
1484
1485  /**
1486   * Execute Restore/Clone snapshot operation.
1487   * <p>
   * If the specified table exists, a "Restore" is executed, replacing the table schema and
   * directory data with the content of the snapshot. The table must be disabled, or an
   * UnsupportedOperationException will be thrown.
   * <p>
   * If the table doesn't exist, a "Clone" is executed: a new table is created using the schema at
   * the time of the snapshot, and the content of the snapshot.
   * <p>
   * The restore/clone operation does not require copying HFiles. Since HFiles are immutable, the
   * table can point to and use the same files as the original one.
1497   */
1498  @Override
1499  public RestoreSnapshotResponse restoreSnapshot(RpcController controller,
1500    RestoreSnapshotRequest request) throws ServiceException {
1501    try {
1502      long procId = master.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(),
1503        request.getNonce(), request.getRestoreACL(), request.getCustomSFT());
1504      return RestoreSnapshotResponse.newBuilder().setProcId(procId).build();
1505    } catch (ForeignException e) {
1506      throw new ServiceException(e.getCause());
1507    } catch (IOException e) {
1508      throw new ServiceException(e);
1509    }
1510  }
1511
1512  @Override
1513  public SetSnapshotCleanupResponse switchSnapshotCleanup(RpcController controller,
1514    SetSnapshotCleanupRequest request) throws ServiceException {
1515    try {
1516      master.checkInitialized();
1517      final boolean enabled = request.getEnabled();
1518      final boolean isSynchronous = request.hasSynchronous() && request.getSynchronous();
1519      final boolean prevSnapshotCleanupRunning = this.switchSnapshotCleanup(enabled, isSynchronous);
1520      return SetSnapshotCleanupResponse.newBuilder()
1521        .setPrevSnapshotCleanup(prevSnapshotCleanupRunning).build();
1522    } catch (IOException e) {
1523      throw new ServiceException(e);
1524    }
1525  }
1526
1527  @Override
1528  public IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(RpcController controller,
1529    IsSnapshotCleanupEnabledRequest request) throws ServiceException {
1530    try {
1531      master.checkInitialized();
1532      final boolean isSnapshotCleanupEnabled =
1533        master.snapshotCleanupTracker.isSnapshotCleanupEnabled();
1534      return IsSnapshotCleanupEnabledResponse.newBuilder().setEnabled(isSnapshotCleanupEnabled)
1535        .build();
1536    } catch (IOException e) {
1537      throw new ServiceException(e);
1538    }
1539  }
1540
1541  /**
1542   * Turn on/off snapshot auto-cleanup based on TTL
1543   * @param enabledNewVal Set to <code>true</code> to enable, <code>false</code> to disable
1544   * @param synchronous   If <code>true</code>, it waits until current snapshot cleanup is
1545   *                      completed, if outstanding
1546   * @return previous snapshot auto-cleanup mode
1547   */
1548  private synchronized boolean switchSnapshotCleanup(final boolean enabledNewVal,
1549    final boolean synchronous) {
1550    final boolean oldValue = master.snapshotCleanupTracker.isSnapshotCleanupEnabled();
1551    master.switchSnapshotCleanup(enabledNewVal, synchronous);
1552    LOG.info("{} Successfully set snapshot cleanup to {}", master.getClientIdAuditPrefix(),
1553      enabledNewVal);
1554    return oldValue;
1555  }
1556
1557  @Override
1558  public RunCatalogScanResponse runCatalogScan(RpcController c, RunCatalogScanRequest req)
1559    throws ServiceException {
1560    rpcPreCheck("runCatalogScan");
1561    try {
1562      return ResponseConverter.buildRunCatalogScanResponse(this.master.catalogJanitorChore.scan());
1563    } catch (IOException ioe) {
1564      throw new ServiceException(ioe);
1565    }
1566  }
1567
1568  @Override
1569  public RunCleanerChoreResponse runCleanerChore(RpcController c, RunCleanerChoreRequest req)
1570    throws ServiceException {
1571    rpcPreCheck("runCleanerChore");
1572    boolean result = master.getHFileCleaner().runCleaner() && master.getLogCleaner().runCleaner();
1573    return ResponseConverter.buildRunCleanerChoreResponse(result);
1574  }
1575
1576  @Override
1577  public SetBalancerRunningResponse setBalancerRunning(RpcController c,
1578    SetBalancerRunningRequest req) throws ServiceException {
1579    try {
1580      master.checkInitialized();
1581      boolean prevValue = (req.getSynchronous())
1582        ? synchronousBalanceSwitch(req.getOn())
1583        : master.balanceSwitch(req.getOn());
1584      return SetBalancerRunningResponse.newBuilder().setPrevBalanceValue(prevValue).build();
1585    } catch (IOException ioe) {
1586      throw new ServiceException(ioe);
1587    }
1588  }
1589
1590  @Override
1591  public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request)
1592    throws ServiceException {
1593    LOG.info(master.getClientIdAuditPrefix() + " shutdown");
1594    try {
1595      master.shutdown();
1596    } catch (IOException e) {
1597      LOG.error("Exception occurred in HMaster.shutdown()", e);
1598      throw new ServiceException(e);
1599    }
1600    return ShutdownResponse.newBuilder().build();
1601  }
1602
1603  /**
1604   * Triggers an asynchronous attempt to take a snapshot. {@inheritDoc}
1605   */
1606  @Override
1607  public SnapshotResponse snapshot(RpcController controller, SnapshotRequest request)
1608    throws ServiceException {
1609    try {
1610      master.checkInitialized();
1611      master.snapshotManager.checkSnapshotSupport();
1612
1613      LOG.info(master.getClientIdAuditPrefix() + " snapshot request for:"
1614        + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
1615      // get the snapshot information
1616      SnapshotDescription snapshot =
1617        SnapshotDescriptionUtils.validate(request.getSnapshot(), master.getConfiguration());
1618      master.snapshotManager.takeSnapshot(snapshot);
1619
1620      // send back the max amount of time the client should wait for the snapshot to complete
1621      long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(master.getConfiguration(),
1622        snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
1623      return SnapshotResponse.newBuilder().setExpectedTimeout(waitTime).build();
1624    } catch (ForeignException e) {
1625      throw new ServiceException(e.getCause());
1626    } catch (IOException e) {
1627      throw new ServiceException(e);
1628    }
1629  }
1630
1631  @Override
1632  public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request)
1633    throws ServiceException {
1634    LOG.info(master.getClientIdAuditPrefix() + " stop");
1635    try {
1636      master.stopMaster();
1637    } catch (IOException e) {
1638      LOG.error("Exception occurred while stopping master", e);
1639      throw new ServiceException(e);
1640    }
1641    return StopMasterResponse.newBuilder().build();
1642  }
1643
1644  @Override
1645  public IsInMaintenanceModeResponse isMasterInMaintenanceMode(final RpcController controller,
1646    final IsInMaintenanceModeRequest request) throws ServiceException {
1647    IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder();
1648    response.setInMaintenanceMode(master.isInMaintenanceMode());
1649    return response.build();
1650  }
1651
1652  @Override
1653  public UnassignRegionResponse unassignRegion(RpcController controller, UnassignRegionRequest req)
1654    throws ServiceException {
1655    try {
1656      final byte[] regionName = req.getRegion().getValue().toByteArray();
1657      RegionSpecifierType type = req.getRegion().getType();
1658      UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build();
1659
1660      master.checkInitialized();
1661      if (type != RegionSpecifierType.REGION_NAME) {
1662        LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
1663          + " actual: " + type);
1664      }
1665      RegionStateNode rsn =
1666        master.getAssignmentManager().getRegionStates().getRegionStateNodeFromName(regionName);
1667      if (rsn == null) {
1668        throw new UnknownRegionException(Bytes.toString(regionName));
1669      }
1670
1671      RegionInfo hri = rsn.getRegionInfo();
1672      if (master.cpHost != null) {
1673        master.cpHost.preUnassign(hri);
1674      }
1675      LOG.debug(master.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString()
1676        + " in current location if it is online");
1677      master.getAssignmentManager().unassign(hri);
1678      if (master.cpHost != null) {
1679        master.cpHost.postUnassign(hri);
1680      }
1681
1682      return urr;
1683    } catch (IOException ioe) {
1684      throw new ServiceException(ioe);
1685    }
1686  }
1687
1688  @Override
1689  public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c,
1690    ReportRegionStateTransitionRequest req) throws ServiceException {
1691    try {
1692      master.checkServiceStarted();
1693      return master.getAssignmentManager().reportRegionStateTransition(req);
1694    } catch (IOException ioe) {
1695      throw new ServiceException(ioe);
1696    }
1697  }
1698
1699  @Override
1700  public SetQuotaResponse setQuota(RpcController c, SetQuotaRequest req) throws ServiceException {
1701    try {
1702      master.checkInitialized();
1703      return master.getMasterQuotaManager().setQuota(req);
1704    } catch (Exception e) {
1705      throw new ServiceException(e);
1706    }
1707  }
1708
1709  @Override
1710  public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
1711    MajorCompactionTimestampRequest request) throws ServiceException {
1712    MajorCompactionTimestampResponse.Builder response =
1713      MajorCompactionTimestampResponse.newBuilder();
1714    try {
1715      master.checkInitialized();
1716      response.setCompactionTimestamp(
1717        master.getLastMajorCompactionTimestamp(ProtobufUtil.toTableName(request.getTableName())));
1718    } catch (IOException e) {
1719      throw new ServiceException(e);
1720    }
1721    return response.build();
1722  }
1723
1724  @Override
1725  public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
1726    RpcController controller, MajorCompactionTimestampForRegionRequest request)
1727    throws ServiceException {
1728    MajorCompactionTimestampResponse.Builder response =
1729      MajorCompactionTimestampResponse.newBuilder();
1730    try {
1731      master.checkInitialized();
1732      response.setCompactionTimestamp(master
1733        .getLastMajorCompactionTimestampForRegion(request.getRegion().getValue().toByteArray()));
1734    } catch (IOException e) {
1735      throw new ServiceException(e);
1736    }
1737    return response.build();
1738  }
1739
1740  /**
1741   * Compact a region on the master.
1742   * @param controller the RPC controller
   * @param request    the request
1744   */
1745  @Override
1746  @QosPriority(priority = HConstants.ADMIN_QOS)
1747  public CompactRegionResponse compactRegion(final RpcController controller,
1748    final CompactRegionRequest request) throws ServiceException {
1749    try {
1750      master.checkInitialized();
1751      byte[] regionName = request.getRegion().getValue().toByteArray();
1752      TableName tableName = RegionInfo.getTable(regionName);
1753      // TODO: support CompactType.MOB
1754      // if the region is a mob region, do the mob file compaction.
1755      if (MobUtils.isMobRegionName(tableName, regionName)) {
1756        checkHFileFormatVersionForMob();
1757        // TODO: support CompactType.MOB
1758        // HBASE-23571
1759        LOG.warn("CompactType.MOB is not supported yet, will run regular compaction."
1760          + " Refer to HBASE-23571.");
1761        return super.compactRegion(controller, request);
1762      } else {
1763        return super.compactRegion(controller, request);
1764      }
1765    } catch (IOException ie) {
1766      throw new ServiceException(ie);
1767    }
1768  }
1769
1770  /**
   * Check the configured HFile format version before running a MOB compaction.
   * @throws IOException if the configured HFile format version does not support MOB
1773   */
1774  private void checkHFileFormatVersionForMob() throws IOException {
1775    if (HFile.getFormatVersion(master.getConfiguration()) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
1776      LOG.error("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
1777        + " is required for MOB compaction. Compaction will not run.");
1778      throw new IOException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
1779        + " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY
1780        + " accordingly.");
1781    }
1782  }
1783
1784  /**
   * This method implements Admin getRegionInfo. On a RegionServer it can return RegionInfo and
   * detail; on the Master it returns just RegionInfo, except that for MOB regions it has been
   * hijacked to also return MOB compaction detail. The Master implementation is useful for looking
   * up the full region name when you only have the encoded name (handy around region replicas,
   * which do not have a row in hbase:meta).
1789   */
1790  @Override
1791  @QosPriority(priority = HConstants.ADMIN_QOS)
1792  public GetRegionInfoResponse getRegionInfo(final RpcController controller,
1793    final GetRegionInfoRequest request) throws ServiceException {
1794    RegionInfo ri = null;
1795    try {
1796      ri = getRegionInfo(request.getRegion());
1797    } catch (UnknownRegionException ure) {
1798      throw new ServiceException(ure);
1799    }
1800    GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
1801    if (ri != null) {
1802      builder.setRegionInfo(ProtobufUtil.toRegionInfo(ri));
1803    } else {
1804      // Is it a MOB name? These work differently.
1805      byte[] regionName = request.getRegion().getValue().toByteArray();
1806      TableName tableName = RegionInfo.getTable(regionName);
1807      if (MobUtils.isMobRegionName(tableName, regionName)) {
1808        // a dummy region info contains the compaction state.
1809        RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName);
1810        builder.setRegionInfo(ProtobufUtil.toRegionInfo(mobRegionInfo));
1811        if (request.hasCompactionState() && request.getCompactionState()) {
1812          builder.setCompactionState(master.getMobCompactionState(tableName));
1813        }
1814      } else {
1815        // If unknown RegionInfo and not a MOB region, it is unknown.
1816        throw new ServiceException(new UnknownRegionException(Bytes.toString(regionName)));
1817      }
1818    }
1819    return builder.build();
1820  }
1821
1822  @Override
1823  public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller,
1824    IsBalancerEnabledRequest request) throws ServiceException {
1825    IsBalancerEnabledResponse.Builder response = IsBalancerEnabledResponse.newBuilder();
1826    response.setEnabled(master.isBalancerOn());
1827    return response.build();
1828  }
1829
1830  @Override
1831  public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller,
1832    SetSplitOrMergeEnabledRequest request) throws ServiceException {
1833    SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder();
1834    try {
1835      master.checkInitialized();
1836      boolean newValue = request.getEnabled();
1837      for (MasterProtos.MasterSwitchType masterSwitchType : request.getSwitchTypesList()) {
1838        MasterSwitchType switchType = convert(masterSwitchType);
1839        boolean oldValue = master.isSplitOrMergeEnabled(switchType);
1840        response.addPrevValue(oldValue);
1841        if (master.cpHost != null) {
1842          master.cpHost.preSetSplitOrMergeEnabled(newValue, switchType);
1843        }
1844        master.getSplitOrMergeTracker().setSplitOrMergeEnabled(newValue, switchType);
1845        if (master.cpHost != null) {
1846          master.cpHost.postSetSplitOrMergeEnabled(newValue, switchType);
1847        }
1848      }
1849    } catch (IOException | KeeperException e) {
1850      throw new ServiceException(e);
1851    }
1852    return response.build();
1853  }
1854
1855  @Override
1856  public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller,
1857    IsSplitOrMergeEnabledRequest request) throws ServiceException {
1858    IsSplitOrMergeEnabledResponse.Builder response = IsSplitOrMergeEnabledResponse.newBuilder();
1859    response.setEnabled(master.isSplitOrMergeEnabled(convert(request.getSwitchType())));
1860    return response.build();
1861  }
1862
1863  @Override
1864  public NormalizeResponse normalize(RpcController controller, NormalizeRequest request)
1865    throws ServiceException {
1866    rpcPreCheck("normalize");
1867    try {
1868      final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
1869        .tableNames(ProtobufUtil.toTableNameList(request.getTableNamesList()))
1870        .regex(request.hasRegex() ? request.getRegex() : null)
1871        .namespace(request.hasNamespace() ? request.getNamespace() : null).build();
1872      return NormalizeResponse.newBuilder()
1873        // all API requests are considered priority requests.
1874        .setNormalizerRan(master.normalizeRegions(ntfp, true)).build();
1875    } catch (IOException ex) {
1876      throw new ServiceException(ex);
1877    }
1878  }
1879
1880  @Override
1881  public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller,
1882    SetNormalizerRunningRequest request) throws ServiceException {
1883    rpcPreCheck("setNormalizerRunning");
1884
1885    // Sets normalizer on/off flag in ZK.
1886    // TODO: this method is totally broken in terms of atomicity of actions and values read.
1887    // 1. The contract has this RPC returning the previous value. There isn't a ZKUtil method
1888    // that lets us retrieve the previous value as part of setting a new value, so we simply
1889    // perform a read before issuing the update. Thus we have a data race opportunity, between
1890    // when the `prevValue` is read and whatever is actually overwritten.
1891    // 2. Down in `setNormalizerOn`, the call to `createAndWatch` inside of the catch clause can
1892    // itself fail in the event that the znode already exists. Thus, another data race, between
1893    // when the initial `setData` call is notified of the absence of the target znode and the
1894    // subsequent `createAndWatch`, with another client creating said node.
1895    // That said, there's supposed to be only one active master and thus there's supposed to be
1896    // only one process with the authority to modify the value.
1897    final boolean prevValue = master.getRegionNormalizerManager().isNormalizerOn();
1898    final boolean newValue = request.getOn();
1899    master.getRegionNormalizerManager().setNormalizerOn(newValue);
1900    LOG.info("{} set normalizerSwitch={}", master.getClientIdAuditPrefix(), newValue);
1901    return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build();
1902  }
1903
1904  @Override
1905  public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller,
1906    IsNormalizerEnabledRequest request) {
1907    IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder();
1908    response.setEnabled(master.isNormalizerOn());
1909    return response.build();
1910  }
1911
1912  /**
1913   * Returns the security capabilities in effect on the cluster
1914   */
1915  @Override
1916  public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller,
1917    SecurityCapabilitiesRequest request) throws ServiceException {
1918    SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder();
1919    try {
1920      master.checkInitialized();
1921      Set<SecurityCapabilitiesResponse.Capability> capabilities = new HashSet<>();
1922      // Authentication
1923      if (User.isHBaseSecurityEnabled(master.getConfiguration())) {
1924        capabilities.add(SecurityCapabilitiesResponse.Capability.SECURE_AUTHENTICATION);
1925      } else {
1926        capabilities.add(SecurityCapabilitiesResponse.Capability.SIMPLE_AUTHENTICATION);
1927      }
1928      // A coprocessor that implements AccessControlService can provide AUTHORIZATION and
1929      // CELL_AUTHORIZATION
1930      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
1931        if (AccessChecker.isAuthorizationSupported(master.getConfiguration())) {
1932          capabilities.add(SecurityCapabilitiesResponse.Capability.AUTHORIZATION);
1933        }
1934        if (AccessController.isCellAuthorizationSupported(master.getConfiguration())) {
1935          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_AUTHORIZATION);
1936        }
1937      }
1938      // A coprocessor that implements VisibilityLabelsService can provide CELL_VISIBILITY.
1939      if (master.cpHost != null && hasVisibilityLabelsServiceCoprocessor(master.cpHost)) {
1940        if (VisibilityController.isCellAuthorizationSupported(master.getConfiguration())) {
1941          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_VISIBILITY);
1942        }
1943      }
1944      response.addAllCapabilities(capabilities);
1945    } catch (IOException e) {
1946      throw new ServiceException(e);
1947    }
1948    return response.build();
1949  }
1950
1951  /**
1952   * Determines if there is a MasterCoprocessor deployed which implements
1953   * {@link org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService.Interface}.
1954   */
1955  boolean hasAccessControlServiceCoprocessor(MasterCoprocessorHost cpHost) {
1956    return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class),
1957      AccessControlService.Interface.class);
1958  }
1959
1960  /**
1961   * Determines if there is a MasterCoprocessor deployed which implements
1962   * {@link org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService.Interface}.
1963   */
1964  boolean hasVisibilityLabelsServiceCoprocessor(MasterCoprocessorHost cpHost) {
1965    return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class),
1966      VisibilityLabelsService.Interface.class);
1967  }
1968
1969  /**
1970   * Determines if there is a coprocessor implementation in the provided argument which extends or
1971   * implements the provided {@code service}.
1972   */
1973  boolean checkCoprocessorWithService(List<MasterCoprocessor> coprocessorsToCheck,
1974    Class<?> service) {
1975    if (coprocessorsToCheck == null || coprocessorsToCheck.isEmpty()) {
1976      return false;
1977    }
1978    for (MasterCoprocessor cp : coprocessorsToCheck) {
1979      if (service.isAssignableFrom(cp.getClass())) {
1980        return true;
1981      }
1982    }
1983    return false;
1984  }
1985
1986  private MasterSwitchType convert(MasterProtos.MasterSwitchType switchType) {
1987    switch (switchType) {
1988      case SPLIT:
1989        return MasterSwitchType.SPLIT;
1990      case MERGE:
1991        return MasterSwitchType.MERGE;
1992      default:
1993        break;
1994    }
1995    return null;
1996  }
1997
1998  @Override
1999  public AddReplicationPeerResponse addReplicationPeer(RpcController controller,
2000    AddReplicationPeerRequest request) throws ServiceException {
2001    try {
2002      long procId = master.addReplicationPeer(request.getPeerId(),
2003        ReplicationPeerConfigUtil.convert(request.getPeerConfig()),
2004        request.getPeerState().getState().equals(ReplicationState.State.ENABLED));
2005      return AddReplicationPeerResponse.newBuilder().setProcId(procId).build();
2006    } catch (ReplicationException | IOException e) {
2007      throw new ServiceException(e);
2008    }
2009  }
2010
2011  @Override
2012  public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller,
2013    RemoveReplicationPeerRequest request) throws ServiceException {
2014    try {
2015      long procId = master.removeReplicationPeer(request.getPeerId());
2016      return RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build();
2017    } catch (ReplicationException | IOException e) {
2018      throw new ServiceException(e);
2019    }
2020  }
2021
2022  @Override
2023  public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller,
2024    EnableReplicationPeerRequest request) throws ServiceException {
2025    try {
2026      long procId = master.enableReplicationPeer(request.getPeerId());
2027      return EnableReplicationPeerResponse.newBuilder().setProcId(procId).build();
2028    } catch (ReplicationException | IOException e) {
2029      throw new ServiceException(e);
2030    }
2031  }
2032
2033  @Override
2034  public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller,
2035    DisableReplicationPeerRequest request) throws ServiceException {
2036    try {
2037      long procId = master.disableReplicationPeer(request.getPeerId());
2038      return DisableReplicationPeerResponse.newBuilder().setProcId(procId).build();
2039    } catch (ReplicationException | IOException e) {
2040      throw new ServiceException(e);
2041    }
2042  }
2043
2044  @Override
2045  public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller,
2046    GetReplicationPeerConfigRequest request) throws ServiceException {
2047    GetReplicationPeerConfigResponse.Builder response =
2048      GetReplicationPeerConfigResponse.newBuilder();
2049    try {
2050      String peerId = request.getPeerId();
2051      ReplicationPeerConfig peerConfig = master.getReplicationPeerConfig(peerId);
2052      response.setPeerId(peerId);
2053      response.setPeerConfig(ReplicationPeerConfigUtil.convert(peerConfig));
2054    } catch (ReplicationException | IOException e) {
2055      throw new ServiceException(e);
2056    }
2057    return response.build();
2058  }
2059
2060  @Override
2061  public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(RpcController controller,
2062    UpdateReplicationPeerConfigRequest request) throws ServiceException {
2063    try {
2064      long procId = master.updateReplicationPeerConfig(request.getPeerId(),
2065        ReplicationPeerConfigUtil.convert(request.getPeerConfig()));
2066      return UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build();
2067    } catch (ReplicationException | IOException e) {
2068      throw new ServiceException(e);
2069    }
2070  }
2071
2072  @Override
2073  public ListReplicationPeersResponse listReplicationPeers(RpcController controller,
2074    ListReplicationPeersRequest request) throws ServiceException {
2075    ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder();
2076    try {
2077      List<ReplicationPeerDescription> peers =
2078        master.listReplicationPeers(request.hasRegex() ? request.getRegex() : null);
2079      for (ReplicationPeerDescription peer : peers) {
2080        response.addPeerDesc(ReplicationPeerConfigUtil.toProtoReplicationPeerDescription(peer));
2081      }
2082    } catch (ReplicationException | IOException e) {
2083      throw new ServiceException(e);
2084    }
2085    return response.build();
2086  }
2087
2088  @Override
2089  public ListDecommissionedRegionServersResponse listDecommissionedRegionServers(
2090    RpcController controller, ListDecommissionedRegionServersRequest request)
2091    throws ServiceException {
2092    ListDecommissionedRegionServersResponse.Builder response =
2093      ListDecommissionedRegionServersResponse.newBuilder();
2094    try {
2095      master.checkInitialized();
2096      if (master.cpHost != null) {
2097        master.cpHost.preListDecommissionedRegionServers();
2098      }
2099      List<ServerName> servers = master.listDecommissionedRegionServers();
2100      response.addAllServerName((servers.stream().map(server -> ProtobufUtil.toServerName(server)))
2101        .collect(Collectors.toList()));
2102      if (master.cpHost != null) {
2103        master.cpHost.postListDecommissionedRegionServers();
2104      }
2105    } catch (IOException io) {
2106      throw new ServiceException(io);
2107    }
2108
2109    return response.build();
2110  }
2111
2112  @Override
2113  public DecommissionRegionServersResponse decommissionRegionServers(RpcController controller,
2114    DecommissionRegionServersRequest request) throws ServiceException {
2115    try {
2116      master.checkInitialized();
2117      List<ServerName> servers = request.getServerNameList().stream()
2118        .map(pbServer -> ProtobufUtil.toServerName(pbServer)).collect(Collectors.toList());
2119      boolean offload = request.getOffload();
2120      if (master.cpHost != null) {
2121        master.cpHost.preDecommissionRegionServers(servers, offload);
2122      }
2123      master.decommissionRegionServers(servers, offload);
2124      if (master.cpHost != null) {
2125        master.cpHost.postDecommissionRegionServers(servers, offload);
2126      }
2127    } catch (IOException io) {
2128      throw new ServiceException(io);
2129    }
2130
2131    return DecommissionRegionServersResponse.newBuilder().build();
2132  }
2133
2134  @Override
2135  public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller,
2136    RecommissionRegionServerRequest request) throws ServiceException {
2137    try {
2138      master.checkInitialized();
2139      ServerName server = ProtobufUtil.toServerName(request.getServerName());
2140      List<byte[]> encodedRegionNames = request.getRegionList().stream()
2141        .map(regionSpecifier -> regionSpecifier.getValue().toByteArray())
2142        .collect(Collectors.toList());
2143      if (master.cpHost != null) {
2144        master.cpHost.preRecommissionRegionServer(server, encodedRegionNames);
2145      }
2146      master.recommissionRegionServer(server, encodedRegionNames);
2147      if (master.cpHost != null) {
2148        master.cpHost.postRecommissionRegionServer(server, encodedRegionNames);
2149      }
2150    } catch (IOException io) {
2151      throw new ServiceException(io);
2152    }
2153
2154    return RecommissionRegionServerResponse.newBuilder().build();
2155  }
2156
2157  @Override
2158  public LockResponse requestLock(RpcController controller, final LockRequest request)
2159    throws ServiceException {
2160    try {
2161      if (request.getDescription().isEmpty()) {
2162        throw new IllegalArgumentException("Empty description");
2163      }
2164      NonceProcedureRunnable npr;
2165      LockType type = LockType.valueOf(request.getLockType().name());
2166      if (request.getRegionInfoCount() > 0) {
2167        final RegionInfo[] regionInfos = new RegionInfo[request.getRegionInfoCount()];
2168        for (int i = 0; i < request.getRegionInfoCount(); ++i) {
2169          regionInfos[i] = ProtobufUtil.toRegionInfo(request.getRegionInfo(i));
2170        }
2171        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
2172          @Override
2173          protected void run() throws IOException {
2174            setProcId(master.getLockManager().remoteLocks().requestRegionsLock(regionInfos,
2175              request.getDescription(), getNonceKey()));
2176          }
2177
2178          @Override
2179          protected String getDescription() {
2180            return "RequestLock";
2181          }
2182        };
2183      } else if (request.hasTableName()) {
2184        final TableName tableName = ProtobufUtil.toTableName(request.getTableName());
2185        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
2186          @Override
2187          protected void run() throws IOException {
2188            setProcId(master.getLockManager().remoteLocks().requestTableLock(tableName, type,
2189              request.getDescription(), getNonceKey()));
2190          }
2191
2192          @Override
2193          protected String getDescription() {
2194            return "RequestLock";
2195          }
2196        };
2197      } else if (request.hasNamespace()) {
2198        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
2199          @Override
2200          protected void run() throws IOException {
2201            setProcId(master.getLockManager().remoteLocks().requestNamespaceLock(
2202              request.getNamespace(), type, request.getDescription(), getNonceKey()));
2203          }
2204
2205          @Override
2206          protected String getDescription() {
2207            return "RequestLock";
2208          }
2209        };
2210      } else {
2211        throw new IllegalArgumentException("one of table/namespace/region should be specified");
2212      }
2213      long procId = MasterProcedureUtil.submitProcedure(npr);
2214      return LockResponse.newBuilder().setProcId(procId).build();
2215    } catch (IllegalArgumentException e) {
2216      LOG.warn("Exception when queuing lock", e);
2217      throw new ServiceException(new DoNotRetryIOException(e));
2218    } catch (IOException e) {
2219      LOG.warn("Exception when queuing lock", e);
2220      throw new ServiceException(e);
2221    }
2222  }
2223
2224  /**
2225   * @return LOCKED, if procedure is found and it has the lock; else UNLOCKED.
2226   * @throws ServiceException if given proc id is found but it is not a LockProcedure.
2227   */
2228  @Override
2229  public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request)
2230    throws ServiceException {
2231    try {
2232      if (
2233        master.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(),
2234          request.getKeepAlive())
2235      ) {
2236        return LockHeartbeatResponse.newBuilder()
2237          .setTimeoutMs(master.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF,
2238            LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS))
2239          .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).build();
2240      } else {
2241        return LockHeartbeatResponse.newBuilder()
2242          .setLockStatus(LockHeartbeatResponse.LockStatus.UNLOCKED).build();
2243      }
2244    } catch (IOException e) {
2245      throw new ServiceException(e);
2246    }
2247  }
2248
2249  @Override
2250  public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller,
2251    RegionSpaceUseReportRequest request) throws ServiceException {
2252    try {
2253      master.checkInitialized();
2254      if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) {
2255        return RegionSpaceUseReportResponse.newBuilder().build();
2256      }
2257      MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
2258      if (quotaManager != null) {
2259        final long now = EnvironmentEdgeManager.currentTime();
2260        for (RegionSpaceUse report : request.getSpaceUseList()) {
2261          quotaManager.addRegionSize(ProtobufUtil.toRegionInfo(report.getRegionInfo()),
2262            report.getRegionSize(), now);
2263        }
2264      } else {
2265        LOG.debug("Received region space usage report but HMaster is not ready to process it, "
2266          + "skipping");
2267      }
2268      return RegionSpaceUseReportResponse.newBuilder().build();
2269    } catch (Exception e) {
2270      throw new ServiceException(e);
2271    }
2272  }
2273
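  /**
   * Returns the region sizes currently known to the Master, aggregated per table. The response is
   * empty when the {@link MasterQuotaManager} is not yet available.
   */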
2274  @Override
2275  public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(RpcController controller,
2276    GetSpaceQuotaRegionSizesRequest request) throws ServiceException {
2277    try {
2278      master.checkInitialized();
2279      MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
2280      GetSpaceQuotaRegionSizesResponse.Builder builder =
2281        GetSpaceQuotaRegionSizesResponse.newBuilder();
2282      if (quotaManager != null) {
2283        Map<RegionInfo, Long> regionSizes = quotaManager.snapshotRegionSizes();
2284        Map<TableName, Long> regionSizesByTable = new HashMap<>();
        // Aggregate per-region sizes into per-table sizes: RegionInfo+size -> TableName+size
        for (Entry<RegionInfo, Long> entry : regionSizes.entrySet()) {
          final TableName tableName = entry.getKey().getTable();
          final long prevSize = regionSizesByTable.getOrDefault(tableName, 0L);
          regionSizesByTable.put(tableName, prevSize + entry.getValue());
        }
2294        // Serialize them into the protobuf
2295        for (Entry<TableName, Long> tableSize : regionSizesByTable.entrySet()) {
2296          builder.addSizes(
2297            RegionSizes.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableSize.getKey()))
2298              .setSize(tableSize.getValue()).build());
2299        }
2300        return builder.build();
2301      } else {
        LOG.debug("Received space quota region size report but HMaster is not ready to process "
          + "it, skipping");
2304      }
2305      return builder.build();
2306    } catch (Exception e) {
2307      throw new ServiceException(e);
2308    }
2309  }
2310
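  /**
   * Returns the Master's current view of space quota states: one snapshot per table and one per
   * namespace with a space quota, as maintained by the {@link QuotaObserverChore}.
   */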
2311  @Override
2312  public GetQuotaStatesResponse getQuotaStates(RpcController controller,
2313    GetQuotaStatesRequest request) throws ServiceException {
2314    try {
2315      master.checkInitialized();
2316      QuotaObserverChore quotaChore = this.master.getQuotaObserverChore();
2317      GetQuotaStatesResponse.Builder builder = GetQuotaStatesResponse.newBuilder();
2318      if (quotaChore != null) {
2319        // The "current" view of all tables with quotas
2320        Map<TableName, SpaceQuotaSnapshot> tableSnapshots = quotaChore.getTableQuotaSnapshots();
2321        for (Entry<TableName, SpaceQuotaSnapshot> entry : tableSnapshots.entrySet()) {
2322          builder.addTableSnapshots(TableQuotaSnapshot.newBuilder()
2323            .setTableName(ProtobufUtil.toProtoTableName(entry.getKey()))
2324            .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
2325        }
2326        // The "current" view of all namespaces with quotas
2327        Map<String, SpaceQuotaSnapshot> nsSnapshots = quotaChore.getNamespaceQuotaSnapshots();
2328        for (Entry<String, SpaceQuotaSnapshot> entry : nsSnapshots.entrySet()) {
2329          builder.addNsSnapshots(NamespaceQuotaSnapshot.newBuilder().setNamespace(entry.getKey())
2330            .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
2331        }
2332        return builder.build();
2333      }
2334      return builder.build();
2335    } catch (Exception e) {
2336      throw new ServiceException(e);
2337    }
2338  }
2339
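  /**
   * Removes the given servers from the Master's list of dead servers. Servers that could not be
   * removed (or all of them, when dead servers are still being processed) are returned in the
   * response.
   * <p>
   * Client-side this is typically reached through the Admin API; a rough sketch (illustrative
   * only, assuming an open {@code Connection conn} and a {@code List<ServerName> servers}):
   *
   * <pre>
   * try (Admin admin = conn.getAdmin()) {
   *   List&lt;ServerName&gt; notCleared = admin.clearDeadServers(servers);
   * }
   * </pre>
   */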
2340  @Override
2341  public ClearDeadServersResponse clearDeadServers(RpcController controller,
2342    ClearDeadServersRequest request) throws ServiceException {
2343    LOG.debug(master.getClientIdAuditPrefix() + " clear dead region servers.");
2344    ClearDeadServersResponse.Builder response = ClearDeadServersResponse.newBuilder();
2345    try {
2346      master.checkInitialized();
2347      if (master.cpHost != null) {
2348        master.cpHost.preClearDeadServers();
2349      }
2350
2351      if (master.getServerManager().areDeadServersInProgress()) {
        LOG.debug("Dead servers are still being processed, won't clear the dead server list");
2353        response.addAllServerName(request.getServerNameList());
2354      } else {
2355        for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
2356          if (
2357            !master.getServerManager().getDeadServers()
2358              .removeDeadServer(ProtobufUtil.toServerName(pbServer))
2359          ) {
2360            response.addServerName(pbServer);
2361          }
2362        }
2363      }
2364
2365      if (master.cpHost != null) {
2366        master.cpHost.postClearDeadServers(
2367          ProtobufUtil.toServerNameList(request.getServerNameList()),
2368          ProtobufUtil.toServerNameList(response.getServerNameList()));
2369      }
2370    } catch (IOException io) {
2371      throw new ServiceException(io);
2372    }
2373    return response.build();
2374  }
2375
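  /**
   * Called by RegionServers to report the outcome of remote procedures that were dispatched to
   * them. Each result marks the corresponding procedure on the Master as completed or failed.
   */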
2376  @Override
2377  public ReportProcedureDoneResponse reportProcedureDone(RpcController controller,
2378    ReportProcedureDoneRequest request) throws ServiceException {
    // Check the Master is up and ready for duty before progressing. Remote side will keep trying.
2380    try {
2381      this.master.checkServiceStarted();
2382    } catch (ServerNotRunningYetException snrye) {
2383      throw new ServiceException(snrye);
2384    }
2385    request.getResultList().forEach(result -> {
2386      if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) {
2387        master.remoteProcedureCompleted(result.getProcId());
2388      } else {
2389        master.remoteProcedureFailed(result.getProcId(),
2390          RemoteProcedureException.fromProto(result.getError()));
2391      }
2392    });
2393    return ReportProcedureDoneResponse.getDefaultInstance();
2394  }
2395
2396  // HBCK Services
2397
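  /**
   * Triggers an on-demand run of the {@link HbckChore}. The response reports whether the chore
   * actually ran.
   */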
2398  @Override
2399  public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest req)
2400    throws ServiceException {
2401    rpcPreCheck("runHbckChore");
2402    LOG.info("{} request HBCK chore to run", master.getClientIdAuditPrefix());
2403    HbckChore hbckChore = master.getHbckChore();
2404    boolean ran = hbckChore.runChore();
2405    return RunHbckChoreResponse.newBuilder().setRan(ran).build();
2406  }
2407
2408  /**
   * Update the state of the table in meta only. This is required by hbck in some situations to
   * clean up stuck assign/unassign region procedures for the table.
2411   * @return previous state of the table
2412   */
2413  @Override
2414  public GetTableStateResponse setTableStateInMeta(RpcController controller,
2415    SetTableStateInMetaRequest request) throws ServiceException {
2416    rpcPreCheck("setTableStateInMeta");
2417    TableName tn = ProtobufUtil.toTableName(request.getTableName());
2418    try {
2419      TableState prevState = this.master.getTableStateManager().getTableState(tn);
2420      TableState newState = TableState.convert(tn, request.getTableState());
2421      LOG.info("{} set table={} state from {} to {}", master.getClientIdAuditPrefix(), tn,
2422        prevState.getState(), newState.getState());
2423      this.master.getTableStateManager().setTableState(tn, newState.getState());
2424      return GetTableStateResponse.newBuilder().setTableState(prevState.convert()).build();
2425    } catch (Exception e) {
2426      throw new ServiceException(e);
2427    }
2428  }
2429
2430  /**
   * Update the state of the region in meta only. This is required by hbck in some situations to
   * clean up stuck assign/unassign region procedures for the region.
2433   * @return previous states of the regions
2434   */
2435  @Override
2436  public SetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller,
2437    SetRegionStateInMetaRequest request) throws ServiceException {
2438    rpcPreCheck("setRegionStateInMeta");
2439    SetRegionStateInMetaResponse.Builder builder = SetRegionStateInMetaResponse.newBuilder();
2440    try {
2441      for (RegionSpecifierAndState s : request.getStatesList()) {
2442        RegionSpecifier spec = s.getRegionSpecifier();
2443        String encodedName;
2444        if (spec.getType() == RegionSpecifierType.ENCODED_REGION_NAME) {
2445          encodedName = spec.getValue().toStringUtf8();
2446        } else {
          // TODO: a full region name would avoid a costly meta scan; improve later.
2448          encodedName = RegionInfo.encodeRegionName(spec.getValue().toByteArray());
2449        }
2450        RegionInfo info = this.master.getAssignmentManager().loadRegionFromMeta(encodedName);
2451        LOG.trace("region info loaded from meta table: {}", info);
2452        RegionState prevState =
2453          this.master.getAssignmentManager().getRegionStates().getRegionState(info);
2454        RegionState.State newState = RegionState.State.convert(s.getState());
2455        LOG.info("{} set region={} state from {} to {}", master.getClientIdAuditPrefix(), info,
2456          prevState.getState(), newState);
2457        Put metaPut =
2458          MetaTableAccessor.makePutFromRegionInfo(info, EnvironmentEdgeManager.currentTime());
2459        metaPut.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
2460          Bytes.toBytes(newState.name()));
        MetaTableAccessor.putsToMetaTable(this.master.getConnection(),
          Collections.singletonList(metaPut));
2464        // Loads from meta again to refresh AM cache with the new region state
2465        this.master.getAssignmentManager().loadRegionFromMeta(encodedName);
2466        builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec)
2467          .setState(prevState.getState().convert()));
2468      }
2469    } catch (Exception e) {
2470      throw new ServiceException(e);
2471    }
2472    return builder.build();
2473  }
2474
2475  /**
2476   * Get RegionInfo from Master using content of RegionSpecifier as key.
2477   * @return RegionInfo found by decoding <code>rs</code> or null if none found
2478   */
2479  private RegionInfo getRegionInfo(HBaseProtos.RegionSpecifier rs) throws UnknownRegionException {
2480    RegionInfo ri = null;
2481    switch (rs.getType()) {
2482      case REGION_NAME:
2483        final byte[] regionName = rs.getValue().toByteArray();
2484        ri = this.master.getAssignmentManager().getRegionInfo(regionName);
2485        break;
2486      case ENCODED_REGION_NAME:
2487        String encodedRegionName = Bytes.toString(rs.getValue().toByteArray());
2488        RegionState regionState =
2489          this.master.getAssignmentManager().getRegionStates().getRegionState(encodedRegionName);
2490        ri = regionState == null
2491          ? this.master.getAssignmentManager().loadRegionFromMeta(encodedRegionName)
2492          : regionState.getRegion();
2493        break;
2494      default:
2495        break;
2496    }
2497    return ri;
2498  }
2499
2500  /**
   * @throws ServiceException if the Master's ProcedureExecutor is not yet initialized
2502   */
2503  private void checkMasterProcedureExecutor() throws ServiceException {
2504    if (this.master.getMasterProcedureExecutor() == null) {
2505      throw new ServiceException("Master's ProcedureExecutor not initialized; retry later");
2506    }
2507  }
2508
2509  /**
   * A 'raw' version of assign that operates in bulk and can skirt Master state checks if
   * override is set; i.e. assigns can be forced during Master startup or if the RegionState is
   * unclean. Used by HBCK2.
2512   */
2513  @Override
2514  public MasterProtos.AssignsResponse assigns(RpcController controller,
2515    MasterProtos.AssignsRequest request) throws ServiceException {
2516    checkMasterProcedureExecutor();
2517    MasterProtos.AssignsResponse.Builder responseBuilder =
2518      MasterProtos.AssignsResponse.newBuilder();
2519    try {
2520      boolean override = request.getOverride();
2521      LOG.info("{} assigns, override={}", master.getClientIdAuditPrefix(), override);
2522      for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
2523        long pid = Procedure.NO_PROC_ID;
2524        RegionInfo ri = getRegionInfo(rs);
2525        if (ri == null) {
2526          LOG.info("Unknown={}", rs);
2527        } else {
2528          Procedure p = this.master.getAssignmentManager().createOneAssignProcedure(ri, override);
2529          if (p != null) {
2530            pid = this.master.getMasterProcedureExecutor().submitProcedure(p);
2531          }
2532        }
2533        responseBuilder.addPid(pid);
2534      }
2535      return responseBuilder.build();
2536    } catch (IOException ioe) {
2537      throw new ServiceException(ioe);
2538    }
2539  }
2540
2541  /**
   * A 'raw' version of unassign that operates in bulk and can skirt Master state checks if
   * override is set; i.e. unassigns can be forced during Master startup or if the RegionState is
   * unclean. Used by HBCK2.
2545   */
2546  @Override
2547  public MasterProtos.UnassignsResponse unassigns(RpcController controller,
2548    MasterProtos.UnassignsRequest request) throws ServiceException {
2549    checkMasterProcedureExecutor();
2550    MasterProtos.UnassignsResponse.Builder responseBuilder =
2551      MasterProtos.UnassignsResponse.newBuilder();
2552    try {
2553      boolean override = request.getOverride();
2554      LOG.info("{} unassigns, override={}", master.getClientIdAuditPrefix(), override);
2555      for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
2556        long pid = Procedure.NO_PROC_ID;
2557        RegionInfo ri = getRegionInfo(rs);
2558        if (ri == null) {
2559          LOG.info("Unknown={}", rs);
2560        } else {
2561          Procedure p = this.master.getAssignmentManager().createOneUnassignProcedure(ri, override);
2562          if (p != null) {
2563            pid = this.master.getMasterProcedureExecutor().submitProcedure(p);
2564          }
2565        }
2566        responseBuilder.addPid(pid);
2567      }
2568      return responseBuilder.build();
2569    } catch (IOException ioe) {
2570      throw new ServiceException(ioe);
2571    }
2572  }
2573
2574  /**
   * Bypass the specified procedures to completion. A bypassed procedure is marked completed but
   * no actual work is done from the current state/step onwards. Parents of the procedure are
   * also marked for bypass. NOTE: this is a dangerous operation that may be used to unstick
   * buggy procedures. It may leave the system in an incoherent state and may need to be followed
   * by cleanup steps/actions by the operator.
   * @return BypassProcedureResponse indicating, for each procedure, whether the bypass succeeded
2581   */
2582  @Override
2583  public MasterProtos.BypassProcedureResponse bypassProcedure(RpcController controller,
2584    MasterProtos.BypassProcedureRequest request) throws ServiceException {
2585    try {
2586      LOG.info("{} bypass procedures={}, waitTime={}, override={}, recursive={}",
2587        master.getClientIdAuditPrefix(), request.getProcIdList(), request.getWaitTime(),
2588        request.getOverride(), request.getRecursive());
2589      List<Boolean> ret =
2590        master.getMasterProcedureExecutor().bypassProcedure(request.getProcIdList(),
2591          request.getWaitTime(), request.getOverride(), request.getRecursive());
2592      return MasterProtos.BypassProcedureResponse.newBuilder().addAllBypassed(ret).build();
2593    } catch (IOException e) {
2594      throw new ServiceException(e);
2595    }
2596  }
2597
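  /**
   * Schedules a ServerCrashProcedure for each of the given servers. If an SCP is already running
   * for a server, {@code Procedure.NO_PROC_ID} is returned for it instead. Used by HBCK2.
   */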
2598  @Override
2599  public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProcedure(
2600    RpcController controller, MasterProtos.ScheduleServerCrashProcedureRequest request)
2601    throws ServiceException {
2602    List<Long> pids = new ArrayList<>();
2603    for (HBaseProtos.ServerName sn : request.getServerNameList()) {
2604      ServerName serverName = ProtobufUtil.toServerName(sn);
2605      LOG.info("{} schedule ServerCrashProcedure for {}", this.master.getClientIdAuditPrefix(),
2606        serverName);
2607      if (shouldSubmitSCP(serverName)) {
2608        pids.add(this.master.getServerManager().expireServer(serverName, true));
2609      } else {
2610        pids.add(Procedure.NO_PROC_ID);
2611      }
2612    }
2613    return MasterProtos.ScheduleServerCrashProcedureResponse.newBuilder().addAllPid(pids).build();
2614  }
2615
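  /**
   * Schedules ServerCrashProcedures for all servers that are still referenced by region states
   * but are unknown to the ServerManager. Returns the pid of each submitted procedure, or
   * {@code Procedure.NO_PROC_ID} where an SCP is already running for that server.
   */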
2616  @Override
2617  public MasterProtos.ScheduleSCPsForUnknownServersResponse scheduleSCPsForUnknownServers(
2618    RpcController controller, MasterProtos.ScheduleSCPsForUnknownServersRequest request)
2619    throws ServiceException {
2621    List<Long> pids = new ArrayList<>();
2622    final Set<ServerName> serverNames = master.getAssignmentManager().getRegionStates()
2623      .getRegionStates().stream().map(RegionState::getServerName).collect(Collectors.toSet());
2624
2625    final Set<ServerName> unknownServerNames = serverNames.stream()
2626      .filter(sn -> master.getServerManager().isServerUnknown(sn)).collect(Collectors.toSet());
2627
2628    for (ServerName sn : unknownServerNames) {
2629      LOG.info("{} schedule ServerCrashProcedure for unknown {}",
2630        this.master.getClientIdAuditPrefix(), sn);
2631      if (shouldSubmitSCP(sn)) {
2632        pids.add(this.master.getServerManager().expireServer(sn, true));
2633      } else {
2634        pids.add(Procedure.NO_PROC_ID);
2635      }
2636    }
2637    return MasterProtos.ScheduleSCPsForUnknownServersResponse.newBuilder().addAllPid(pids).build();
2638  }
2639
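  /**
   * Runs the {@link MetaFixer} to repair inconsistencies (such as holes and overlaps) in
   * hbase:meta. Used by HBCK2.
   */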
2640  @Override
2641  public FixMetaResponse fixMeta(RpcController controller, FixMetaRequest request)
2642    throws ServiceException {
2643    rpcPreCheck("fixMeta");
2644    try {
2645      MetaFixer mf = new MetaFixer(this.master);
2646      mf.fix();
2647      return FixMetaResponse.newBuilder().build();
2648    } catch (IOException ioe) {
2649      throw new ServiceException(ioe);
2650    }
2651  }
2652
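  /**
   * Enables or disables RPC throttling for the cluster by delegating to the
   * {@link MasterQuotaManager}.
   */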
2653  @Override
2654  public SwitchRpcThrottleResponse switchRpcThrottle(RpcController controller,
2655    SwitchRpcThrottleRequest request) throws ServiceException {
2656    try {
2657      master.checkInitialized();
2658      return master.getMasterQuotaManager().switchRpcThrottle(request);
2659    } catch (Exception e) {
2660      throw new ServiceException(e);
2661    }
2662  }
2663
2664  @Override
2665  public MasterProtos.IsRpcThrottleEnabledResponse isRpcThrottleEnabled(RpcController controller,
2666    MasterProtos.IsRpcThrottleEnabledRequest request) throws ServiceException {
2667    try {
2668      master.checkInitialized();
2669      return master.getMasterQuotaManager().isRpcThrottleEnabled(request);
2670    } catch (Exception e) {
2671      throw new ServiceException(e);
2672    }
2673  }
2674
2675  @Override
2676  public SwitchExceedThrottleQuotaResponse switchExceedThrottleQuota(RpcController controller,
2677    SwitchExceedThrottleQuotaRequest request) throws ServiceException {
2678    try {
2679      master.checkInitialized();
2680      return master.getMasterQuotaManager().switchExceedThrottleQuota(request);
2681    } catch (Exception e) {
2682      throw new ServiceException(e);
2683    }
2684  }
2685
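  /**
   * Called by RegionServers to report store files that have been moved to the archive, so that
   * space quota accounting can continue to track their size. No-op when space quotas are
   * disabled.
   */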
2686  @Override
2687  public FileArchiveNotificationResponse reportFileArchival(RpcController controller,
2688    FileArchiveNotificationRequest request) throws ServiceException {
2689    try {
2690      master.checkInitialized();
2691      if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) {
2692        return FileArchiveNotificationResponse.newBuilder().build();
2693      }
2694      master.getMasterQuotaManager().processFileArchivals(request, master.getConnection(),
2695        master.getConfiguration(), master.getFileSystem());
2696      return FileArchiveNotificationResponse.newBuilder().build();
2697    } catch (Exception e) {
2698      throw new ServiceException(e);
2699    }
2700  }
2701
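  /**
   * Grants the requested permission by writing it to the ACL table, wrapped by the pre/postGrant
   * coprocessor hooks. Requires the AccessController coprocessor to be loaded.
   * <p>
   * A rough client-side sketch (illustrative only, assuming an open {@code Connection conn} and a
   * prepared {@code UserPermission perm}):
   *
   * <pre>
   * try (Admin admin = conn.getAdmin()) {
   *   admin.grant(perm, false); // false: do not merge with existing permissions
   * }
   * </pre>
   */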
2702  @Override
2703  public GrantResponse grant(RpcController controller, GrantRequest request)
2704    throws ServiceException {
2705    try {
2706      master.checkInitialized();
2707      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
2708        final UserPermission perm =
2709          ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
2710        boolean mergeExistingPermissions = request.getMergeExistingPermissions();
2711        master.cpHost.preGrant(perm, mergeExistingPermissions);
2712        try (Table table = master.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
2713          PermissionStorage.addUserPermission(getConfiguration(), perm, table,
2714            mergeExistingPermissions);
2715        }
2716        master.cpHost.postGrant(perm, mergeExistingPermissions);
2717        User caller = RpcServer.getRequestUser().orElse(null);
2718        if (AUDITLOG.isTraceEnabled()) {
2719          // audit log should store permission changes in addition to auth results
2720          String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
2721          AUDITLOG.trace("User {} (remote address: {}) granted permission {}", caller,
2722            remoteAddress, perm);
2723        }
2724        return GrantResponse.getDefaultInstance();
2725      } else {
2726        throw new DoNotRetryIOException(
2727          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2728      }
2729    } catch (IOException ioe) {
2730      throw new ServiceException(ioe);
2731    }
2732  }
2733
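  /**
   * Revokes the given user permission by removing it from the ACL table, wrapped by the
   * pre/postRevoke coprocessor hooks. Requires the AccessController coprocessor to be loaded.
   */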
2734  @Override
2735  public RevokeResponse revoke(RpcController controller, RevokeRequest request)
2736    throws ServiceException {
2737    try {
2738      master.checkInitialized();
2739      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
2740        final UserPermission userPermission =
2741          ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
2742        master.cpHost.preRevoke(userPermission);
2743        try (Table table = master.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
2744          PermissionStorage.removeUserPermission(master.getConfiguration(), userPermission, table);
2745        }
2746        master.cpHost.postRevoke(userPermission);
2747        User caller = RpcServer.getRequestUser().orElse(null);
2748        if (AUDITLOG.isTraceEnabled()) {
2749          // audit log should record all permission changes
2750          String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
2751          AUDITLOG.trace("User {} (remote address: {}) revoked permission {}", caller,
2752            remoteAddress, userPermission);
2753        }
2754        return RevokeResponse.getDefaultInstance();
2755      } else {
2756        throw new DoNotRetryIOException(
2757          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2758      }
2759    } catch (IOException ioe) {
2760      throw new ServiceException(ioe);
2761    }
2762  }
2763
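  /**
   * Fetches user permissions at global, namespace or table scope (depending on the requested
   * permission type) from the ACL table. When no filter user is given for a global query,
   * superusers are appended to the result. Requires the AccessController coprocessor to be
   * loaded.
   */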
2764  @Override
2765  public GetUserPermissionsResponse getUserPermissions(RpcController controller,
2766    GetUserPermissionsRequest request) throws ServiceException {
2767    try {
2768      master.checkInitialized();
2769      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
2770        final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null;
2771        String namespace =
2772          request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null;
2773        TableName table =
2774          request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
2775        byte[] cf = request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null;
2776        byte[] cq =
2777          request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null;
2778        Type permissionType = request.hasType() ? request.getType() : null;
2779        master.getMasterCoprocessorHost().preGetUserPermissions(userName, namespace, table, cf, cq);
2780
2781        List<UserPermission> perms = null;
2782        if (permissionType == Type.Table) {
          boolean filter = (cf != null || userName != null);
2784          perms = PermissionStorage.getUserTablePermissions(master.getConfiguration(), table, cf,
2785            cq, userName, filter);
2786        } else if (permissionType == Type.Namespace) {
2787          perms = PermissionStorage.getUserNamespacePermissions(master.getConfiguration(),
            namespace, userName, userName != null);
2789        } else {
          perms = PermissionStorage.getUserPermissions(master.getConfiguration(), null, null, null,
            userName, userName != null);
          // Superusers are only added when no filter user is specified
          if (userName == null) {
            // Add superusers explicitly to the result set as PermissionStorage does not store
            // them. Using acl as the table name keeps the output in line with the results for
            // global admins and avoids leaking information about who the superusers are.
2797            for (String user : Superusers.getSuperUsers()) {
2798              perms.add(new UserPermission(user,
2799                Permission.newBuilder().withActions(Action.values()).build()));
2800            }
2801          }
2802        }
2803
2804        master.getMasterCoprocessorHost().postGetUserPermissions(userName, namespace, table, cf,
2805          cq);
2806        AccessControlProtos.GetUserPermissionsResponse response =
2807          ShadedAccessControlUtil.buildGetUserPermissionsResponse(perms);
2808        return response;
2809      } else {
2810        throw new DoNotRetryIOException(
2811          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2812      }
2813    } catch (IOException ioe) {
2814      throw new ServiceException(ioe);
2815    }
2816  }
2817
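  /**
   * Checks whether the named user (or the caller, if no user is given) holds each of the
   * requested permissions. Requires the AccessController coprocessor to be loaded.
   */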
2818  @Override
2819  public HasUserPermissionsResponse hasUserPermissions(RpcController controller,
2820    HasUserPermissionsRequest request) throws ServiceException {
2821    try {
2822      master.checkInitialized();
2823      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
2824        User caller = RpcServer.getRequestUser().orElse(null);
2825        String userName =
2826          request.hasUserName() ? request.getUserName().toStringUtf8() : caller.getShortName();
2827        List<Permission> permissions = new ArrayList<>();
2828        for (int i = 0; i < request.getPermissionCount(); i++) {
2829          permissions.add(ShadedAccessControlUtil.toPermission(request.getPermission(i)));
2830        }
2831        master.getMasterCoprocessorHost().preHasUserPermissions(userName, permissions);
2832        if (!caller.getShortName().equals(userName)) {
2833          List<String> groups = AccessChecker.getUserGroups(userName);
2834          caller = new InputUser(userName, groups.toArray(new String[groups.size()]));
2835        }
2836        List<Boolean> hasUserPermissions = new ArrayList<>();
2837        if (getAccessChecker() != null) {
2838          for (Permission permission : permissions) {
2839            boolean hasUserPermission =
2840              getAccessChecker().hasUserPermission(caller, "hasUserPermissions", permission);
2841            hasUserPermissions.add(hasUserPermission);
2842          }
2843        } else {
2844          for (int i = 0; i < permissions.size(); i++) {
2845            hasUserPermissions.add(true);
2846          }
2847        }
2848        master.getMasterCoprocessorHost().postHasUserPermissions(userName, permissions);
2849        HasUserPermissionsResponse.Builder builder =
2850          HasUserPermissionsResponse.newBuilder().addAllHasUserPermission(hasUserPermissions);
2851        return builder.build();
2852      } else {
2853        throw new DoNotRetryIOException(
2854          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2855      }
2856    } catch (IOException ioe) {
2857      throw new ServiceException(ioe);
2858    }
2859  }
2860
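  /**
   * Returns true if the given server's WAL directory (or its '-splitting' directory, if present)
   * contains hbase:meta WAL files.
   */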
2861  private boolean containMetaWals(ServerName serverName) throws IOException {
2862    Path logDir = new Path(master.getWALRootDir(),
2863      AbstractFSWALProvider.getWALDirectoryName(serverName.toString()));
2864    Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
2865    Path checkDir = master.getFileSystem().exists(splitDir) ? splitDir : logDir;
2866    try {
2867      return master.getFileSystem().listStatus(checkDir, META_FILTER).length > 0;
2868    } catch (FileNotFoundException fnfe) {
      // If there are no files, then this server does not hold any meta WALs. Scheduling an SCP
      // used to fail here because listStatus threw FNFE when the server had no WAL dirs at all
      // ('Unknown Server').
      LOG.warn("No dir for WALs for {}; continuing", serverName);
2872      return false;
2873    }
2874  }
2875
2876  private boolean shouldSubmitSCP(ServerName serverName) {
    // Check if there is already a SCP running for this server
2878    List<Procedure<MasterProcedureEnv>> procedures =
2879      master.getMasterProcedureExecutor().getProcedures();
2880    for (Procedure<MasterProcedureEnv> procedure : procedures) {
2881      if (procedure instanceof ServerCrashProcedure) {
2882        if (
2883          serverName.compareTo(((ServerCrashProcedure) procedure).getServerName()) == 0
2884            && !procedure.isFinished()
2885        ) {
          LOG.info("There is already a SCP running for server {}, pid {}", serverName,
            procedure.getProcId());
2888          return false;
2889        }
2890      }
2891    }
2892    return true;
2893  }
2894
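  /**
   * Serves named-queue log records to clients; currently balancer decisions and balancer
   * rejections are supported. The request carries the protobuf class name and the serialized
   * payload of the concrete log request.
   */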
2895  @Override
2896  public HBaseProtos.LogEntry getLogEntries(RpcController controller,
2897    HBaseProtos.LogRequest request) throws ServiceException {
2898    try {
2899      final String logClassName = request.getLogClassName();
2900      Class<?> logClass = Class.forName(logClassName).asSubclass(Message.class);
2901      Method method = logClass.getMethod("parseFrom", ByteString.class);
2902      if (logClassName.contains("BalancerDecisionsRequest")) {
2903        MasterProtos.BalancerDecisionsRequest balancerDecisionsRequest =
2904          (MasterProtos.BalancerDecisionsRequest) method.invoke(null, request.getLogMessage());
2905        MasterProtos.BalancerDecisionsResponse balancerDecisionsResponse =
2906          getBalancerDecisions(balancerDecisionsRequest);
2907        return HBaseProtos.LogEntry.newBuilder()
2908          .setLogClassName(balancerDecisionsResponse.getClass().getName())
2909          .setLogMessage(balancerDecisionsResponse.toByteString()).build();
2910      } else if (logClassName.contains("BalancerRejectionsRequest")) {
2911        MasterProtos.BalancerRejectionsRequest balancerRejectionsRequest =
2912          (MasterProtos.BalancerRejectionsRequest) method.invoke(null, request.getLogMessage());
2913        MasterProtos.BalancerRejectionsResponse balancerRejectionsResponse =
2914          getBalancerRejections(balancerRejectionsRequest);
2915        return HBaseProtos.LogEntry.newBuilder()
2916          .setLogClassName(balancerRejectionsResponse.getClass().getName())
2917          .setLogMessage(balancerRejectionsResponse.toByteString()).build();
2918      }
2919    } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException
2920      | InvocationTargetException e) {
2921      LOG.error("Error while retrieving log entries.", e);
2922      throw new ServiceException(e);
2923    }
2924    throw new ServiceException("Invalid request params");
2925  }
2926
2927  private MasterProtos.BalancerDecisionsResponse
2928    getBalancerDecisions(MasterProtos.BalancerDecisionsRequest request) {
2929    final NamedQueueRecorder namedQueueRecorder = this.regionServer.getNamedQueueRecorder();
2930    if (namedQueueRecorder == null) {
2931      return MasterProtos.BalancerDecisionsResponse.newBuilder()
2932        .addAllBalancerDecision(Collections.emptyList()).build();
2933    }
2934    final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest();
2935    namedQueueGetRequest.setNamedQueueEvent(BalancerDecisionDetails.BALANCER_DECISION_EVENT);
2936    namedQueueGetRequest.setBalancerDecisionsRequest(request);
2937    NamedQueueGetResponse namedQueueGetResponse =
2938      namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest);
2939    List<RecentLogs.BalancerDecision> balancerDecisions = namedQueueGetResponse != null
2940      ? namedQueueGetResponse.getBalancerDecisions()
2941      : Collections.emptyList();
2942    return MasterProtos.BalancerDecisionsResponse.newBuilder()
2943      .addAllBalancerDecision(balancerDecisions).build();
2944  }
2945
2946  private MasterProtos.BalancerRejectionsResponse
2947    getBalancerRejections(MasterProtos.BalancerRejectionsRequest request) {
2948    final NamedQueueRecorder namedQueueRecorder = this.regionServer.getNamedQueueRecorder();
2949    if (namedQueueRecorder == null) {
2950      return MasterProtos.BalancerRejectionsResponse.newBuilder()
2951        .addAllBalancerRejection(Collections.emptyList()).build();
2952    }
2953    final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest();
2954    namedQueueGetRequest.setNamedQueueEvent(BalancerRejectionDetails.BALANCER_REJECTION_EVENT);
2955    namedQueueGetRequest.setBalancerRejectionsRequest(request);
2956    NamedQueueGetResponse namedQueueGetResponse =
2957      namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest);
2958    List<RecentLogs.BalancerRejection> balancerRejections = namedQueueGetResponse != null
2959      ? namedQueueGetResponse.getBalancerRejections()
2960      : Collections.emptyList();
2961    return MasterProtos.BalancerRejectionsResponse.newBuilder()
2962      .addAllBalancerRejection(balancerRejections).build();
2963  }
2964
2965}