001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.master;
019
020import static org.apache.hadoop.hbase.master.MasterWalManager.META_FILTER;
021
022import java.io.FileNotFoundException;
023import java.io.IOException;
024import java.net.BindException;
025import java.net.InetAddress;
026import java.net.InetSocketAddress;
027import java.util.ArrayList;
028import java.util.HashMap;
029import java.util.HashSet;
030import java.util.List;
031import java.util.Map;
032import java.util.Map.Entry;
033import java.util.Optional;
034import java.util.Set;
035import java.util.stream.Collectors;
036import org.apache.hadoop.conf.Configuration;
037import org.apache.hadoop.fs.Path;
038import org.apache.hadoop.hbase.ClusterMetricsBuilder;
039import org.apache.hadoop.hbase.DoNotRetryIOException;
040import org.apache.hadoop.hbase.HConstants;
041import org.apache.hadoop.hbase.HRegionLocation;
042import org.apache.hadoop.hbase.MetaTableAccessor;
043import org.apache.hadoop.hbase.NamespaceDescriptor;
044import org.apache.hadoop.hbase.Server;
045import org.apache.hadoop.hbase.ServerMetrics;
046import org.apache.hadoop.hbase.ServerMetricsBuilder;
047import org.apache.hadoop.hbase.ServerName;
048import org.apache.hadoop.hbase.TableName;
049import org.apache.hadoop.hbase.UnknownRegionException;
050import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
051import org.apache.hadoop.hbase.client.MasterSwitchType;
052import org.apache.hadoop.hbase.client.Put;
053import org.apache.hadoop.hbase.client.RegionInfo;
054import org.apache.hadoop.hbase.client.RegionInfoBuilder;
055import org.apache.hadoop.hbase.client.Table;
056import org.apache.hadoop.hbase.client.TableDescriptor;
057import org.apache.hadoop.hbase.client.TableState;
058import org.apache.hadoop.hbase.client.VersionInfoUtil;
059import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
060import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
061import org.apache.hadoop.hbase.errorhandling.ForeignException;
062import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
063import org.apache.hadoop.hbase.io.ByteBuffAllocator;
064import org.apache.hadoop.hbase.io.hfile.HFile;
065import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
066import org.apache.hadoop.hbase.ipc.PriorityFunction;
067import org.apache.hadoop.hbase.ipc.QosPriority;
068import org.apache.hadoop.hbase.ipc.RpcServer;
069import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
070import org.apache.hadoop.hbase.ipc.RpcServerFactory;
071import org.apache.hadoop.hbase.ipc.RpcServerInterface;
072import org.apache.hadoop.hbase.ipc.ServerRpcController;
073import org.apache.hadoop.hbase.master.assignment.RegionStates;
074import org.apache.hadoop.hbase.master.locking.LockProcedure;
075import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
076import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
077import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
078import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
079import org.apache.hadoop.hbase.mob.MobUtils;
080import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
081import org.apache.hadoop.hbase.procedure2.LockType;
082import org.apache.hadoop.hbase.procedure2.LockedResource;
083import org.apache.hadoop.hbase.procedure2.Procedure;
084import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
085import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
086import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
087import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
088import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;
089import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
090import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
091import org.apache.hadoop.hbase.quotas.QuotaUtil;
092import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
093import org.apache.hadoop.hbase.regionserver.RSRpcServices;
094import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
095import org.apache.hadoop.hbase.replication.ReplicationException;
096import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
097import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
098import org.apache.hadoop.hbase.security.Superusers;
099import org.apache.hadoop.hbase.security.User;
100import org.apache.hadoop.hbase.security.access.AccessChecker;
101import org.apache.hadoop.hbase.security.access.AccessChecker.InputUser;
102import org.apache.hadoop.hbase.security.access.AccessController;
103import org.apache.hadoop.hbase.security.access.Permission;
104import org.apache.hadoop.hbase.security.access.Permission.Action;
105import org.apache.hadoop.hbase.security.access.PermissionStorage;
106import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
107import org.apache.hadoop.hbase.security.access.UserPermission;
108import org.apache.hadoop.hbase.security.visibility.VisibilityController;
109import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
110import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
111import org.apache.hadoop.hbase.util.Bytes;
112import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
113import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
114import org.apache.hadoop.hbase.util.Pair;
115import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
116import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
117import org.apache.yetus.audience.InterfaceAudience;
118import org.apache.zookeeper.KeeperException;
119import org.slf4j.Logger;
120import org.slf4j.LoggerFactory;
121
122import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
123import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
124import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
125
126import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
127import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
128import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;
129import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsRequest;
130import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse;
131import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest;
132import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantResponse;
133import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest;
134import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsResponse;
135import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type;
136import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest;
137import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeResponse;
138import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
139import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
140import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
141import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
142import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
143import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
144import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
145import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
146import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
147import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
148import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
149import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
150import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
151import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
152import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
153import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
154import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
155import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
156import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
157import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
158import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
159import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
160import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
161import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
162import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
163import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
164import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
165import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
166import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
167import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService;
168import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
169import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
170import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
171import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
172import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
173import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
174import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
175import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
176import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
177import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
178import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
179import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
180import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
183import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
184import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
185import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
186import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
187import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
188import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
189import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
190import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaRequest;
191import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaResponse;
192import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetActiveMasterRequest;
193import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetActiveMasterResponse;
194import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterIdRequest;
195import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterIdResponse;
196import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
197import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
198import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
199import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
200import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
201import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
202import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;
203import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;
204import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
205import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
206import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
207import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
208import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
209import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
210import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
211import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
212import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
213import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
214import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
215import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
216import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
217import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
218import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService;
219import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
220import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
221import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
222import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
223import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
224import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
225import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
226import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
227import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest;
228import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
229import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
230import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
231import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
232import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
233import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
234    .IsSnapshotCleanupEnabledRequest;
235import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
236    .IsSnapshotCleanupEnabledResponse;
237import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
238import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
239import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
240import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
241import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
242import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
243import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
244import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
245import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesRequest;
246import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesResponse;
247import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
248import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
249import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
250import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
251import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
252import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
253import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
254import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
255import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
256import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
257import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
258import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
259import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
260import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
261import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
262import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
263import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
264import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse;
265import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
266import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
267import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
268import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
269import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
270import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
271import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState;
272import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
273import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
274import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
275import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
276import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
277import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
278import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreRequest;
279import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreResponse;
280import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
281import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
282import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
283import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
284import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
285import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse;
286import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
287import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
288import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
289import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
290import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest;
291import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaResponse;
292import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
293import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse;
294import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
295import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
296import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest;
297import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
298import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
299import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
300import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
301import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
302import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
303import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
304import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
305import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaRequest;
306import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaResponse;
307import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleRequest;
308import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleResponse;
309import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
310import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
311import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
312import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
313import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
314import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
315import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.NamespaceQuotaSnapshot;
316import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.TableQuotaSnapshot;
317import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
318import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
319import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
320import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest;
321import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse;
322import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
323import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
324import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
325import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse;
326import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
327import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
328import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
329import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
330import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
331import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
332import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult;
333import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
334import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse;
335import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
336import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
337import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
338import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
339import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
340import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
341import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
342import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
343import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
344import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
345import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
346import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
347import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
348import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
349import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
350import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
351import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState;
352import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
353import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
354import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
355
356/**
357 * Implements the master RPC services.
358 */
359@InterfaceAudience.Private
360@SuppressWarnings("deprecation")
361public class MasterRpcServices extends RSRpcServices implements
362    MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
363    LockService.BlockingInterface, HbckService.BlockingInterface,
364    ClientMetaService.BlockingInterface {
  private static final Logger LOG = LoggerFactory.getLogger(MasterRpcServices.class.getName());
  // Separate logger for security-relevant events; routed under the "SecurityLogger."
  // prefix so audit output can be directed independently of the main master log.
  private static final Logger AUDITLOG =
      LoggerFactory.getLogger("SecurityLogger."+MasterRpcServices.class.getName());

  // The master this RPC service fronts; assigned once in the constructor.
  private final HMaster master;
370
371  /**
372   * @return Subset of configuration to pass initializing regionservers: e.g.
373   *     the filesystem to use and root directory to use.
374   */
375  private RegionServerStartupResponse.Builder createConfigurationSubset() {
376    RegionServerStartupResponse.Builder resp = addConfig(
377      RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR);
378    resp = addConfig(resp, "fs.defaultFS");
379    return addConfig(resp, "hbase.master.info.port");
380  }
381
382  private RegionServerStartupResponse.Builder addConfig(
383      final RegionServerStartupResponse.Builder resp, final String key) {
384    NameStringPair.Builder entry = NameStringPair.newBuilder()
385      .setName(key)
386      .setValue(master.getConfiguration().get(key));
387    resp.addMapEntries(entry.build());
388    return resp;
389  }
390
391  public MasterRpcServices(HMaster m) throws IOException {
392    super(m);
393    master = m;
394  }
395
396  @Override
397  protected Class<?> getRpcSchedulerFactoryClass() {
398    Configuration conf = getConfiguration();
399    if (conf != null) {
400      return conf.getClass(MASTER_RPC_SCHEDULER_FACTORY_CLASS, super.getRpcSchedulerFactoryClass());
401    } else {
402      return super.getRpcSchedulerFactoryClass();
403    }
404  }
405
  /**
   * Creates the master's RPC server bound at {@code bindAddress}.
   * @param server the server instance the RPC server serves
   * @param rpcSchedulerFactory factory used to build the request scheduler
   * @param bindAddress address and port to bind; a clash surfaces as a BindException
   * @param name display name for the RPC server
   * @throws IOException if the port is already in use (rethrown with a hint on how to
   *     change it) or the server otherwise fails to start
   */
  @Override
  protected RpcServerInterface createRpcServer(final Server server,
      final RpcSchedulerFactory rpcSchedulerFactory, final InetSocketAddress bindAddress,
      final String name) throws IOException {
    final Configuration conf = regionServer.getConfiguration();
    // RpcServer at HM by default enable ByteBufferPool iff HM having user table region in it
    boolean reservoirEnabled = conf.getBoolean(ByteBuffAllocator.ALLOCATOR_POOL_ENABLED_KEY,
      LoadBalancer.isMasterCanHostUserRegions(conf));
    try {
      return RpcServerFactory.createRpcServer(server, name, getServices(),
          bindAddress, // use final bindAddress for this server.
          conf, rpcSchedulerFactory.create(conf, this, server), reservoirEnabled);
    } catch (BindException be) {
      // Rewrap with actionable advice; preserve the original cause when one exists.
      throw new IOException(be.getMessage() + ". To switch ports use the '"
          + HConstants.MASTER_PORT + "' configuration property.",
          be.getCause() != null ? be.getCause() : be);
    }
  }
424
  /**
   * Builds the priority function used to rank incoming master RPCs, reading the
   * QoS annotations on this class's handler methods.
   */
  @Override
  protected PriorityFunction createPriority() {
    return new MasterAnnotationReadingPriorityFunction(this);
  }
429
430  /**
431   * Checks for the following pre-checks in order:
432   * <ol>
433   *   <li>Master is initialized</li>
434   *   <li>Rpc caller has admin permissions</li>
435   * </ol>
436   * @param requestName name of rpc request. Used in reporting failures to provide context.
437   * @throws ServiceException If any of the above listed pre-check fails.
438   */
439  private void rpcPreCheck(String requestName) throws ServiceException {
440    try {
441      master.checkInitialized();
442      requirePermission(requestName, Permission.Action.ADMIN);
443    } catch (IOException ioe) {
444      throw new ServiceException(ioe);
445    }
446  }
447
  /**
   * How a balancer-switch flip is applied: SYNC takes the load balancer lock before
   * writing the flag (see {@code switchBalancer}); ASYNC writes it directly.
   */
  enum BalanceSwitchMode {
    SYNC,
    ASYNC
  }
452
453  /**
454   * Assigns balancer switch according to BalanceSwitchMode
455   * @param b new balancer switch
456   * @param mode BalanceSwitchMode
457   * @return old balancer switch
458   */
459  boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException {
460    boolean oldValue = master.loadBalancerTracker.isBalancerOn();
461    boolean newValue = b;
462    try {
463      if (master.cpHost != null) {
464        master.cpHost.preBalanceSwitch(newValue);
465      }
466      try {
467        if (mode == BalanceSwitchMode.SYNC) {
468          synchronized (master.getLoadBalancer()) {
469            master.loadBalancerTracker.setBalancerOn(newValue);
470          }
471        } else {
472          master.loadBalancerTracker.setBalancerOn(newValue);
473        }
474      } catch (KeeperException ke) {
475        throw new IOException(ke);
476      }
477      LOG.info(master.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue);
478      if (master.cpHost != null) {
479        master.cpHost.postBalanceSwitch(oldValue, newValue);
480      }
481      master.getLoadBalancer().updateBalancerStatus(newValue);
482    } catch (IOException ioe) {
483      LOG.warn("Error flipping balance switch", ioe);
484    }
485    return oldValue;
486  }
487
  /**
   * Flips the balancer switch while holding the load balancer lock (SYNC mode), so the
   * change does not race an in-progress balance run.
   * @param b new balancer switch value
   * @return the previous balancer switch value
   */
  boolean synchronousBalanceSwitch(final boolean b) throws IOException {
    return switchBalancer(b, BalanceSwitchMode.SYNC);
  }
491
492  /**
493   * @return list of blocking services and their security info classes that this server supports
494   */
495  @Override
496  protected List<BlockingServiceAndInterface> getServices() {
497    List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
498    bssi.add(new BlockingServiceAndInterface(
499        MasterService.newReflectiveBlockingService(this),
500        MasterService.BlockingInterface.class));
501    bssi.add(new BlockingServiceAndInterface(
502        RegionServerStatusService.newReflectiveBlockingService(this),
503        RegionServerStatusService.BlockingInterface.class));
504    bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this),
505        LockService.BlockingInterface.class));
506    bssi.add(new BlockingServiceAndInterface(HbckService.newReflectiveBlockingService(this),
507        HbckService.BlockingInterface.class));
508    bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this),
509        ClientMetaService.BlockingInterface.class));
510    bssi.addAll(super.getServices());
511    return bssi;
512  }
513
514  @Override
515  @QosPriority(priority = HConstants.ADMIN_QOS)
516  public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller,
517      GetLastFlushedSequenceIdRequest request) throws ServiceException {
518    try {
519      master.checkServiceStarted();
520    } catch (IOException ioe) {
521      throw new ServiceException(ioe);
522    }
523    byte[] encodedRegionName = request.getRegionName().toByteArray();
524    RegionStoreSequenceIds ids = master.getServerManager()
525      .getLastFlushedSequenceId(encodedRegionName);
526    return ResponseConverter.buildGetLastFlushedSequenceIdResponse(ids);
527  }
528
  /**
   * Handles a periodic load report from a regionserver: records the new server metrics,
   * updates the assignment manager's view of online regions, and bumps master-level
   * request metrics by the delta since the previous report.
   */
  @Override
  public RegionServerReportResponse regionServerReport(RpcController controller,
      RegionServerReportRequest request) throws ServiceException {
    try {
      master.checkServiceStarted();
      // Default version info covers old clients that do not send it.
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      ClusterStatusProtos.ServerLoad sl = request.getLoad();
      ServerName serverName = ProtobufUtil.toServerName(request.getServer());
      // Read the old load BEFORE recording the new one, so the request-count delta below
      // is computed against the previous report.
      ServerMetrics oldLoad = master.getServerManager().getLoad(serverName);
      ServerMetrics newLoad =
        ServerMetricsBuilder.toServerMetrics(serverName, versionNumber, version, sl);
      master.getServerManager().regionServerReport(serverName, newLoad);
      master.getAssignmentManager().reportOnlineRegions(serverName,
        newLoad.getRegionMetrics().keySet());
      if (sl != null && master.metricsMaster != null) {
        // Up our metrics.
        master.metricsMaster.incrementRequests(
          sl.getTotalNumberOfRequests() - (oldLoad != null ? oldLoad.getRequestCount() : 0));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return RegionServerReportResponse.newBuilder().build();
  }
559
  /**
   * Handles a regionserver's startup handshake: registers it with the server manager and
   * replies with the subset of master configuration the new server must adopt, plus the
   * hostname the master resolved for it.
   */
  @Override
  public RegionServerStartupResponse regionServerStartup(RpcController controller,
      RegionServerStartupRequest request) throws ServiceException {
    // Register with server manager
    try {
      master.checkServiceStarted();
      // Default version info covers old clients that do not send it.
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      InetAddress ia = master.getRemoteInetAddress(request.getPort(), request.getServerStartCode());
      // if regionserver passed hostname to use,
      // then use it instead of doing a reverse DNS lookup
      ServerName rs =
        master.getServerManager().regionServerStartup(request, versionNumber, version, ia);

      // Send back some config info
      RegionServerStartupResponse.Builder resp = createConfigurationSubset();
      // Tell the regionserver what hostname the master sees it as, so both sides agree.
      NameStringPair.Builder entry = NameStringPair.newBuilder()
        .setName(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER).setValue(rs.getHostname());
      resp.addMapEntries(entry.build());

      return resp.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }
590
591  @Override
592  public ReportRSFatalErrorResponse reportRSFatalError(
593      RpcController controller, ReportRSFatalErrorRequest request) throws ServiceException {
594    String errorText = request.getErrorMessage();
595    ServerName sn = ProtobufUtil.toServerName(request.getServer());
596    String msg = sn + " reported a fatal error:\n" + errorText;
597    LOG.warn(msg);
598    master.rsFatals.add(msg);
599    return ReportRSFatalErrorResponse.newBuilder().build();
600  }
601
602  @Override
603  public AddColumnResponse addColumn(RpcController controller,
604      AddColumnRequest req) throws ServiceException {
605    try {
606      long procId = master.addColumn(
607          ProtobufUtil.toTableName(req.getTableName()),
608          ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
609          req.getNonceGroup(),
610          req.getNonce());
611      if (procId == -1) {
612        // This mean operation was not performed in server, so do not set any procId
613        return AddColumnResponse.newBuilder().build();
614      } else {
615        return AddColumnResponse.newBuilder().setProcId(procId).build();
616      }
617    } catch (IOException ioe) {
618      throw new ServiceException(ioe);
619    }
620  }
621
  /**
   * Assigns the region named in the request, invoking the pre/post assign coprocessor
   * hooks around the assignment.
   * @throws ServiceException wrapping UnknownRegionException if the region name does not
   *     resolve, or any IOException from the hooks or the assignment itself
   */
  @Override
  public AssignRegionResponse assignRegion(RpcController controller,
      AssignRegionRequest req) throws ServiceException {
    try {
      master.checkInitialized();

      // Only REGION_NAME specifiers are expected; anything else is logged but still
      // attempted below (the bytes are treated as a region name regardless).
      final RegionSpecifierType type = req.getRegion().getType();
      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("assignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
          + " actual: " + type);
      }

      final byte[] regionName = req.getRegion().getValue().toByteArray();
      final RegionInfo regionInfo = master.getAssignmentManager().getRegionInfo(regionName);
      if (regionInfo == null) {
        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
      }

      final AssignRegionResponse arr = AssignRegionResponse.newBuilder().build();
      if (master.cpHost != null) {
        master.cpHost.preAssign(regionInfo);
      }
      LOG.info(master.getClientIdAuditPrefix() + " assign " + regionInfo.getRegionNameAsString());
      master.getAssignmentManager().assign(regionInfo);
      if (master.cpHost != null) {
        master.cpHost.postAssign(regionInfo);
      }
      return arr;
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }
654
655
656  @Override
657  public BalanceResponse balance(RpcController controller,
658      BalanceRequest request) throws ServiceException {
659    try {
660      return BalanceResponse.newBuilder().setBalancerRan(master.balance(
661        request.hasForce()? request.getForce(): false)).build();
662    } catch (IOException ex) {
663      throw new ServiceException(ex);
664    }
665  }
666
667  @Override
668  public CreateNamespaceResponse createNamespace(RpcController controller,
669     CreateNamespaceRequest request) throws ServiceException {
670    try {
671      long procId = master.createNamespace(
672        ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
673        request.getNonceGroup(),
674        request.getNonce());
675      return CreateNamespaceResponse.newBuilder().setProcId(procId).build();
676    } catch (IOException e) {
677      throw new ServiceException(e);
678    }
679  }
680
681  @Override
682  public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
683      throws ServiceException {
684    TableDescriptor tableDescriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema());
685    byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
686    try {
687      long procId =
688          master.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
689      LOG.info(master.getClientIdAuditPrefix() + " procedure request for creating table: " +
690              req.getTableSchema().getTableName() + " procId is: " + procId);
691      return CreateTableResponse.newBuilder().setProcId(procId).build();
692    } catch (IOException ioe) {
693      throw new ServiceException(ioe);
694    }
695  }
696
697  @Override
698  public DeleteColumnResponse deleteColumn(RpcController controller,
699      DeleteColumnRequest req) throws ServiceException {
700    try {
701      long procId = master.deleteColumn(
702        ProtobufUtil.toTableName(req.getTableName()),
703        req.getColumnName().toByteArray(),
704        req.getNonceGroup(),
705        req.getNonce());
706      if (procId == -1) {
707        // This mean operation was not performed in server, so do not set any procId
708        return DeleteColumnResponse.newBuilder().build();
709      } else {
710        return DeleteColumnResponse.newBuilder().setProcId(procId).build();
711      }
712    } catch (IOException ioe) {
713      throw new ServiceException(ioe);
714    }
715  }
716
717  @Override
718  public DeleteNamespaceResponse deleteNamespace(RpcController controller,
719      DeleteNamespaceRequest request) throws ServiceException {
720    try {
721      long procId = master.deleteNamespace(
722        request.getNamespaceName(),
723        request.getNonceGroup(),
724        request.getNonce());
725      return DeleteNamespaceResponse.newBuilder().setProcId(procId).build();
726    } catch (IOException e) {
727      throw new ServiceException(e);
728    }
729  }
730
731  /**
732   * Execute Delete Snapshot operation.
733   * @return DeleteSnapshotResponse (a protobuf wrapped void) if the snapshot existed and was
734   *    deleted properly.
735   * @throws ServiceException wrapping SnapshotDoesNotExistException if specified snapshot did not
736   *    exist.
737   */
738  @Override
739  public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
740      DeleteSnapshotRequest request) throws ServiceException {
741    try {
742      master.checkInitialized();
743      master.snapshotManager.checkSnapshotSupport();
744
745      LOG.info(master.getClientIdAuditPrefix() + " delete " + request.getSnapshot());
746      master.snapshotManager.deleteSnapshot(request.getSnapshot());
747      return DeleteSnapshotResponse.newBuilder().build();
748    } catch (IOException e) {
749      throw new ServiceException(e);
750    }
751  }
752
753  @Override
754  public DeleteTableResponse deleteTable(RpcController controller,
755      DeleteTableRequest request) throws ServiceException {
756    try {
757      long procId = master.deleteTable(ProtobufUtil.toTableName(
758          request.getTableName()), request.getNonceGroup(), request.getNonce());
759      return DeleteTableResponse.newBuilder().setProcId(procId).build();
760    } catch (IOException ioe) {
761      throw new ServiceException(ioe);
762    }
763  }
764
765  @Override
766  public TruncateTableResponse truncateTable(RpcController controller, TruncateTableRequest request)
767      throws ServiceException {
768    try {
769      long procId = master.truncateTable(
770        ProtobufUtil.toTableName(request.getTableName()),
771        request.getPreserveSplits(),
772        request.getNonceGroup(),
773        request.getNonce());
774      return TruncateTableResponse.newBuilder().setProcId(procId).build();
775    } catch (IOException ioe) {
776      throw new ServiceException(ioe);
777    }
778  }
779
780  @Override
781  public DisableTableResponse disableTable(RpcController controller,
782      DisableTableRequest request) throws ServiceException {
783    try {
784      long procId = master.disableTable(
785        ProtobufUtil.toTableName(request.getTableName()),
786        request.getNonceGroup(),
787        request.getNonce());
788      return DisableTableResponse.newBuilder().setProcId(procId).build();
789    } catch (IOException ioe) {
790      throw new ServiceException(ioe);
791    }
792  }
793
794  @Override
795  public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
796      EnableCatalogJanitorRequest req) throws ServiceException {
797    rpcPreCheck("enableCatalogJanitor");
798    return EnableCatalogJanitorResponse.newBuilder().setPrevValue(
799      master.catalogJanitorChore.setEnabled(req.getEnable())).build();
800  }
801
802  @Override
803  public SetCleanerChoreRunningResponse setCleanerChoreRunning(
804    RpcController c, SetCleanerChoreRunningRequest req) throws ServiceException {
805    rpcPreCheck("setCleanerChoreRunning");
806
807    boolean prevValue =
808      master.getLogCleaner().getEnabled() && master.getHFileCleaner().getEnabled();
809    master.getLogCleaner().setEnabled(req.getOn());
810    master.getHFileCleaner().setEnabled(req.getOn());
811    return SetCleanerChoreRunningResponse.newBuilder().setPrevValue(prevValue).build();
812  }
813
814  @Override
815  public EnableTableResponse enableTable(RpcController controller,
816      EnableTableRequest request) throws ServiceException {
817    try {
818      long procId = master.enableTable(
819        ProtobufUtil.toTableName(request.getTableName()),
820        request.getNonceGroup(),
821        request.getNonce());
822      return EnableTableResponse.newBuilder().setProcId(procId).build();
823    } catch (IOException ioe) {
824      throw new ServiceException(ioe);
825    }
826  }
827
  /**
   * Merges the regions named in the request via a master procedure. Validates every
   * region specifier first; only if all resolve is the merge procedure submitted.
   * @throws ServiceException wrapping UnknownRegionException if any encoded region name
   *     is unknown, or any IOException from procedure submission
   */
  @Override
  public MergeTableRegionsResponse mergeTableRegions(
      RpcController c, MergeTableRegionsRequest request) throws ServiceException {
    try {
      master.checkInitialized();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }

    RegionStates regionStates = master.getAssignmentManager().getRegionStates();

    // Phase 1: resolve every specifier to a RegionInfo, failing fast on unknown regions.
    RegionInfo[] regionsToMerge = new RegionInfo[request.getRegionCount()];
    for (int i = 0; i < request.getRegionCount(); i++) {
      final byte[] encodedNameOfRegion = request.getRegion(i).getValue().toByteArray();
      // Only ENCODED_REGION_NAME specifiers are expected; others are logged but the
      // bytes are still treated as an encoded name below.
      if (request.getRegion(i).getType() != RegionSpecifierType.ENCODED_REGION_NAME) {
        LOG.warn("MergeRegions specifier type: expected: "
          + RegionSpecifierType.ENCODED_REGION_NAME + " actual: region " + i + " ="
          + request.getRegion(i).getType());
      }
      RegionState regionState = regionStates.getRegionState(Bytes.toString(encodedNameOfRegion));
      if (regionState == null) {
        throw new ServiceException(
          new UnknownRegionException(Bytes.toStringBinary(encodedNameOfRegion)));
      }
      regionsToMerge[i] = regionState.getRegion();
    }

    // Phase 2: submit the merge procedure.
    try {
      long procId = master.mergeRegions(
        regionsToMerge,
        request.getForcible(),
        request.getNonceGroup(),
        request.getNonce());
      return MergeTableRegionsResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }
866
867  @Override
868  public SplitTableRegionResponse splitRegion(final RpcController controller,
869      final SplitTableRegionRequest request) throws ServiceException {
870    try {
871      long procId = master.splitRegion(
872        ProtobufUtil.toRegionInfo(request.getRegionInfo()),
873        request.hasSplitRow() ? request.getSplitRow().toByteArray() : null,
874        request.getNonceGroup(),
875        request.getNonce());
876      return SplitTableRegionResponse.newBuilder().setProcId(procId).build();
877    } catch (IOException ie) {
878      throw new ServiceException(ie);
879    }
880  }
881
  /**
   * Invokes a method on a registered master coprocessor endpoint (admin-only): looks up
   * the service by name, decodes the request message, calls the method synchronously via
   * a ServerRpcController, and returns the merged response.
   * @throws ServiceException wrapping UnknownProtocolException when the service is not
   *     registered, or any IOException raised by the endpoint call
   */
  @Override
  public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller,
      final ClientProtos.CoprocessorServiceRequest request) throws ServiceException {
    rpcPreCheck("execMasterService");
    try {
      ServerRpcController execController = new ServerRpcController();
      ClientProtos.CoprocessorServiceCall call = request.getCall();
      String serviceName = call.getServiceName();
      String methodName = call.getMethodName();
      if (!master.coprocessorServiceHandlers.containsKey(serviceName)) {
        throw new UnknownProtocolException(null,
          "No registered Master Coprocessor Endpoint found for " + serviceName +
          ". Has it been enabled?");
      }

      // Coprocessor endpoints use the non-shaded com.google.protobuf API, hence the
      // fully-qualified names throughout this method.
      com.google.protobuf.Service service = master.coprocessorServiceHandlers.get(serviceName);
      com.google.protobuf.Descriptors.ServiceDescriptor serviceDesc = service.getDescriptorForType();
      com.google.protobuf.Descriptors.MethodDescriptor methodDesc =
          CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc);

      com.google.protobuf.Message execRequest =
          CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest());
      final com.google.protobuf.Message.Builder responseBuilder =
          service.getResponsePrototype(methodDesc).newBuilderForType();
      // callMethod delivers the result through this callback; the builder accumulates it.
      service.callMethod(methodDesc, execController, execRequest,
        (message) -> {
          if (message != null) {
            responseBuilder.mergeFrom(message);
          }
        });
      com.google.protobuf.Message execResult = responseBuilder.build();
      // Endpoint failures are surfaced via the controller rather than thrown directly.
      if (execController.getFailedOn() != null) {
        throw execController.getFailedOn();
      }
      return CoprocessorRpcUtils.getResponse(execResult, HConstants.EMPTY_BYTE_ARRAY);
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }
921
922  /**
923   * Triggers an asynchronous attempt to run a distributed procedure.
924   * {@inheritDoc}
925   */
926  @Override
927  public ExecProcedureResponse execProcedure(RpcController controller,
928      ExecProcedureRequest request) throws ServiceException {
929    try {
930      master.checkInitialized();
931      ProcedureDescription desc = request.getProcedure();
932      MasterProcedureManager mpm = master.getMasterProcedureManagerHost().getProcedureManager(
933        desc.getSignature());
934      if (mpm == null) {
935        throw new ServiceException(new DoNotRetryIOException("The procedure is not registered: "
936          + desc.getSignature()));
937      }
938      LOG.info(master.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
939      mpm.checkPermissions(desc, getAccessChecker(), RpcServer.getRequestUser().orElse(null));
940      mpm.execProcedure(desc);
941      // send back the max amount of time the client should wait for the procedure
942      // to complete
943      long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME;
944      return ExecProcedureResponse.newBuilder().setExpectedTimeout(
945        waitTime).build();
946    } catch (ForeignException e) {
947      throw new ServiceException(e.getCause());
948    } catch (IOException e) {
949      throw new ServiceException(e);
950    }
951  }
952
953  /**
954   * Triggers a synchronous attempt to run a distributed procedure and sets
955   * return data in response.
956   * {@inheritDoc}
957   */
958  @Override
959  public ExecProcedureResponse execProcedureWithRet(RpcController controller,
960      ExecProcedureRequest request) throws ServiceException {
961    rpcPreCheck("execProcedureWithRet");
962    try {
963      ProcedureDescription desc = request.getProcedure();
964      MasterProcedureManager mpm =
965        master.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
966      if (mpm == null) {
967        throw new ServiceException("The procedure is not registered: " + desc.getSignature());
968      }
969      LOG.info(master.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
970      byte[] data = mpm.execProcedureWithRet(desc);
971      ExecProcedureResponse.Builder builder = ExecProcedureResponse.newBuilder();
972      // set return data if available
973      if (data != null) {
974        builder.setReturnData(UnsafeByteOperations.unsafeWrap(data));
975      }
976      return builder.build();
977    } catch (IOException e) {
978      throw new ServiceException(e);
979    }
980  }
981
982  @Override
983  public GetClusterStatusResponse getClusterStatus(RpcController controller,
984      GetClusterStatusRequest req) throws ServiceException {
985    GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
986    try {
987      // We used to check if Master was up at this point but let this call proceed even if
988      // Master is initializing... else we shut out stuff like hbck2 tool from making progress
989      // since it queries this method to figure cluster version. hbck2 wants to be able to work
990      // against Master even if it is 'initializing' so it can do fixup.
991      response.setClusterStatus(ClusterMetricsBuilder.toClusterStatus(
992        master.getClusterMetrics(ClusterMetricsBuilder.toOptions(req.getOptionsList()))));
993    } catch (IOException e) {
994      throw new ServiceException(e);
995    }
996    return response.build();
997  }
998
999  /**
1000   * List the currently available/stored snapshots. Any in-progress snapshots are ignored
1001   */
1002  @Override
1003  public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller,
1004      GetCompletedSnapshotsRequest request) throws ServiceException {
1005    try {
1006      master.checkInitialized();
1007      GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder();
1008      List<SnapshotDescription> snapshots = master.snapshotManager.getCompletedSnapshots();
1009
1010      // convert to protobuf
1011      for (SnapshotDescription snapshot : snapshots) {
1012        builder.addSnapshots(snapshot);
1013      }
1014      return builder.build();
1015    } catch (IOException e) {
1016      throw new ServiceException(e);
1017    }
1018  }
1019
  /**
   * Lists the names of all namespaces known to the master.
   * @throws ServiceException wrapping any IOException from the namespace lookup
   */
  @Override
  public ListNamespacesResponse listNamespaces(
      RpcController controller, ListNamespacesRequest request)
      throws ServiceException {
    try {
      return ListNamespacesResponse.newBuilder()
        .addAllNamespaceName(master.listNamespaces())
        .build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }
1032
1033  @Override
1034  public GetNamespaceDescriptorResponse getNamespaceDescriptor(
1035      RpcController controller, GetNamespaceDescriptorRequest request)
1036      throws ServiceException {
1037    try {
1038      return GetNamespaceDescriptorResponse.newBuilder()
1039        .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(
1040            master.getNamespace(request.getNamespaceName())))
1041        .build();
1042    } catch (IOException e) {
1043      throw new ServiceException(e);
1044    }
1045  }
1046
1047  /**
1048   * Get the number of regions of the table that have been updated by the alter.
1049   *
1050   * @return Pair indicating the number of regions updated Pair.getFirst is the
1051   *         regions that are yet to be updated Pair.getSecond is the total number
1052   *         of regions of the table
1053   * @throws ServiceException
1054   */
1055  @Override
1056  public GetSchemaAlterStatusResponse getSchemaAlterStatus(
1057      RpcController controller, GetSchemaAlterStatusRequest req) throws ServiceException {
1058    // TODO: currently, we query using the table name on the client side. this
1059    // may overlap with other table operations or the table operation may
1060    // have completed before querying this API. We need to refactor to a
1061    // transaction system in the future to avoid these ambiguities.
1062    TableName tableName = ProtobufUtil.toTableName(req.getTableName());
1063
1064    try {
1065      master.checkInitialized();
1066      Pair<Integer,Integer> pair = master.getAssignmentManager().getReopenStatus(tableName);
1067      GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder();
1068      ret.setYetToUpdateRegions(pair.getFirst());
1069      ret.setTotalRegions(pair.getSecond());
1070      return ret.build();
1071    } catch (IOException ioe) {
1072      throw new ServiceException(ioe);
1073    }
1074  }
1075
1076  /**
1077   * Get list of TableDescriptors for requested tables.
1078   * @param c Unused (set to null).
1079   * @param req GetTableDescriptorsRequest that contains:
1080   *     - tableNames: requested tables, or if empty, all are requested.
1081   * @return GetTableDescriptorsResponse
1082   * @throws ServiceException
1083   */
1084  @Override
1085  public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
1086      GetTableDescriptorsRequest req) throws ServiceException {
1087    try {
1088      master.checkInitialized();
1089
1090      final String regex = req.hasRegex() ? req.getRegex() : null;
1091      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
1092      List<TableName> tableNameList = null;
1093      if (req.getTableNamesCount() > 0) {
1094        tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
1095        for (HBaseProtos.TableName tableNamePB: req.getTableNamesList()) {
1096          tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
1097        }
1098      }
1099
1100      List<TableDescriptor> descriptors = master.listTableDescriptors(namespace, regex,
1101          tableNameList, req.getIncludeSysTables());
1102
1103      GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
1104      if (descriptors != null && descriptors.size() > 0) {
1105        // Add the table descriptors to the response
1106        for (TableDescriptor htd: descriptors) {
1107          builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
1108        }
1109      }
1110      return builder.build();
1111    } catch (IOException ioe) {
1112      throw new ServiceException(ioe);
1113    }
1114  }
1115
1116  /**
1117   * Get list of userspace table names
1118   * @param controller Unused (set to null).
1119   * @param req GetTableNamesRequest
1120   * @return GetTableNamesResponse
1121   * @throws ServiceException
1122   */
1123  @Override
1124  public GetTableNamesResponse getTableNames(RpcController controller,
1125      GetTableNamesRequest req) throws ServiceException {
1126    try {
1127      master.checkServiceStarted();
1128
1129      final String regex = req.hasRegex() ? req.getRegex() : null;
1130      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
1131      List<TableName> tableNames = master.listTableNames(namespace, regex,
1132          req.getIncludeSysTables());
1133
1134      GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
1135      if (tableNames != null && tableNames.size() > 0) {
1136        // Add the table names to the response
1137        for (TableName table: tableNames) {
1138          builder.addTableNames(ProtobufUtil.toProtoTableName(table));
1139        }
1140      }
1141      return builder.build();
1142    } catch (IOException e) {
1143      throw new ServiceException(e);
1144    }
1145  }
1146
1147  @Override
1148  public GetTableStateResponse getTableState(RpcController controller,
1149      GetTableStateRequest request) throws ServiceException {
1150    try {
1151      master.checkServiceStarted();
1152      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
1153      TableState ts = master.getTableStateManager().getTableState(tableName);
1154      GetTableStateResponse.Builder builder = GetTableStateResponse.newBuilder();
1155      builder.setTableState(ts.convert());
1156      return builder.build();
1157    } catch (IOException e) {
1158      throw new ServiceException(e);
1159    }
1160  }
1161
1162  @Override
1163  public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
1164      IsCatalogJanitorEnabledRequest req) throws ServiceException {
1165    return IsCatalogJanitorEnabledResponse.newBuilder().setValue(
1166      master.isCatalogJanitorEnabled()).build();
1167  }
1168
1169  @Override
1170  public IsCleanerChoreEnabledResponse isCleanerChoreEnabled(RpcController c,
1171                                                             IsCleanerChoreEnabledRequest req)
1172    throws ServiceException {
1173    return IsCleanerChoreEnabledResponse.newBuilder().setValue(master.isCleanerChoreEnabled())
1174                                        .build();
1175  }
1176
1177  @Override
1178  public IsMasterRunningResponse isMasterRunning(RpcController c,
1179      IsMasterRunningRequest req) throws ServiceException {
1180    try {
1181      master.checkServiceStarted();
1182      return IsMasterRunningResponse.newBuilder().setIsMasterRunning(
1183        !master.isStopped()).build();
1184    } catch (IOException e) {
1185      throw new ServiceException(e);
1186    }
1187  }
1188
1189  /**
1190   * Checks if the specified procedure is done.
1191   * @return true if the procedure is done, false if the procedure is in the process of completing
1192   * @throws ServiceException if invalid procedure or failed procedure with progress failure reason.
1193   */
1194  @Override
1195  public IsProcedureDoneResponse isProcedureDone(RpcController controller,
1196      IsProcedureDoneRequest request) throws ServiceException {
1197    try {
1198      master.checkInitialized();
1199      ProcedureDescription desc = request.getProcedure();
1200      MasterProcedureManager mpm = master.getMasterProcedureManagerHost().getProcedureManager(
1201        desc.getSignature());
1202      if (mpm == null) {
1203        throw new ServiceException("The procedure is not registered: "
1204          + desc.getSignature());
1205      }
1206      LOG.debug("Checking to see if procedure from request:"
1207        + desc.getSignature() + " is done");
1208
1209      IsProcedureDoneResponse.Builder builder =
1210        IsProcedureDoneResponse.newBuilder();
1211      boolean done = mpm.isProcedureDone(desc);
1212      builder.setDone(done);
1213      return builder.build();
1214    } catch (ForeignException e) {
1215      throw new ServiceException(e.getCause());
1216    } catch (IOException e) {
1217      throw new ServiceException(e);
1218    }
1219  }
1220
1221  /**
1222   * Checks if the specified snapshot is done.
1223   * @return true if the snapshot is in file system ready to use,
1224   *     false if the snapshot is in the process of completing
1225   * @throws ServiceException wrapping UnknownSnapshotException if invalid snapshot, or
1226   *     a wrapped HBaseSnapshotException with progress failure reason.
1227   */
1228  @Override
1229  public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
1230      IsSnapshotDoneRequest request) throws ServiceException {
1231    LOG.debug("Checking to see if snapshot from request:" +
1232      ClientSnapshotDescriptionUtils.toString(request.getSnapshot()) + " is done");
1233    try {
1234      master.checkInitialized();
1235      IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
1236      boolean done = master.snapshotManager.isSnapshotDone(request.getSnapshot());
1237      builder.setDone(done);
1238      return builder.build();
1239    } catch (ForeignException e) {
1240      throw new ServiceException(e.getCause());
1241    } catch (IOException e) {
1242      throw new ServiceException(e);
1243    }
1244  }
1245
  /**
   * Returns the state (and, when finished, the result/exception) of the procedure with the
   * requested pid. Note that a finished procedure's cached result is evicted once returned,
   * so a subsequent call for the same pid may report NOT_FOUND.
   */
  @Override
  public GetProcedureResultResponse getProcedureResult(RpcController controller,
      GetProcedureResultRequest request) throws ServiceException {
    LOG.debug("Checking to see if procedure is done pid=" + request.getProcId());
    try {
      master.checkInitialized();
      GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder();
      long procId = request.getProcId();
      ProcedureExecutor<?> executor = master.getMasterProcedureExecutor();
      // May be either a retained completed-procedure record or the live procedure itself.
      Procedure<?> result = executor.getResultOrProcedure(procId);
      if (result != null) {
        builder.setSubmittedTime(result.getSubmittedTime());
        builder.setLastUpdate(result.getLastUpdate());
        if (executor.isFinished(procId)) {
          builder.setState(GetProcedureResultResponse.State.FINISHED);
          if (result.isFailed()) {
            // Unwrap the remote failure into a plain IOException for the proto payload.
            IOException exception =
                MasterProcedureUtil.unwrapRemoteIOException(result);
            builder.setException(ForeignExceptionUtil.toProtoForeignException(exception));
          }
          byte[] resultData = result.getResult();
          if (resultData != null) {
            builder.setResult(UnsafeByteOperations.unsafeWrap(resultData));
          }
          // Result delivered; drop it from the executor's completed-result cache.
          master.getMasterProcedureExecutor().removeResult(request.getProcId());
        } else {
          builder.setState(GetProcedureResultResponse.State.RUNNING);
        }
      } else {
        builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }
1282
1283  @Override
1284  public AbortProcedureResponse abortProcedure(
1285      RpcController rpcController, AbortProcedureRequest request) throws ServiceException {
1286    try {
1287      AbortProcedureResponse.Builder response = AbortProcedureResponse.newBuilder();
1288      boolean abortResult =
1289          master.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning());
1290      response.setIsProcedureAborted(abortResult);
1291      return response.build();
1292    } catch (IOException e) {
1293      throw new ServiceException(e);
1294    }
1295  }
1296
1297  @Override
1298  public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c,
1299      ListNamespaceDescriptorsRequest request) throws ServiceException {
1300    try {
1301      ListNamespaceDescriptorsResponse.Builder response =
1302        ListNamespaceDescriptorsResponse.newBuilder();
1303      for(NamespaceDescriptor ns: master.getNamespaces()) {
1304        response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns));
1305      }
1306      return response.build();
1307    } catch (IOException e) {
1308      throw new ServiceException(e);
1309    }
1310  }
1311
1312  @Override
1313  public GetProceduresResponse getProcedures(
1314      RpcController rpcController,
1315      GetProceduresRequest request) throws ServiceException {
1316    try {
1317      final GetProceduresResponse.Builder response = GetProceduresResponse.newBuilder();
1318      for (Procedure<?> p: master.getProcedures()) {
1319        response.addProcedure(ProcedureUtil.convertToProtoProcedure(p));
1320      }
1321      return response.build();
1322    } catch (IOException e) {
1323      throw new ServiceException(e);
1324    }
1325  }
1326
1327  @Override
1328  public GetLocksResponse getLocks(
1329      RpcController controller,
1330      GetLocksRequest request) throws ServiceException {
1331    try {
1332      final GetLocksResponse.Builder builder = GetLocksResponse.newBuilder();
1333
1334      for (LockedResource lockedResource: master.getLocks()) {
1335        builder.addLock(ProcedureUtil.convertToProtoLockedResource(lockedResource));
1336      }
1337
1338      return builder.build();
1339    } catch (IOException e) {
1340      throw new ServiceException(e);
1341    }
1342  }
1343
1344  @Override
1345  public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
1346      ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
1347    try {
1348      ListTableDescriptorsByNamespaceResponse.Builder b =
1349          ListTableDescriptorsByNamespaceResponse.newBuilder();
1350      for (TableDescriptor htd : master
1351          .listTableDescriptorsByNamespace(request.getNamespaceName())) {
1352        b.addTableSchema(ProtobufUtil.toTableSchema(htd));
1353      }
1354      return b.build();
1355    } catch (IOException e) {
1356      throw new ServiceException(e);
1357    }
1358  }
1359
1360  @Override
1361  public ListTableNamesByNamespaceResponse listTableNamesByNamespace(RpcController c,
1362      ListTableNamesByNamespaceRequest request) throws ServiceException {
1363    try {
1364      ListTableNamesByNamespaceResponse.Builder b =
1365        ListTableNamesByNamespaceResponse.newBuilder();
1366      for (TableName tableName: master.listTableNamesByNamespace(request.getNamespaceName())) {
1367        b.addTableName(ProtobufUtil.toProtoTableName(tableName));
1368      }
1369      return b.build();
1370    } catch (IOException e) {
1371      throw new ServiceException(e);
1372    }
1373  }
1374
1375  @Override
1376  public ModifyColumnResponse modifyColumn(RpcController controller,
1377      ModifyColumnRequest req) throws ServiceException {
1378    try {
1379      long procId = master.modifyColumn(
1380        ProtobufUtil.toTableName(req.getTableName()),
1381        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
1382        req.getNonceGroup(),
1383        req.getNonce());
1384      if (procId == -1) {
1385        // This mean operation was not performed in server, so do not set any procId
1386        return ModifyColumnResponse.newBuilder().build();
1387      } else {
1388        return ModifyColumnResponse.newBuilder().setProcId(procId).build();
1389      }
1390    } catch (IOException ioe) {
1391      throw new ServiceException(ioe);
1392    }
1393  }
1394
1395  @Override
1396  public ModifyNamespaceResponse modifyNamespace(RpcController controller,
1397      ModifyNamespaceRequest request) throws ServiceException {
1398    try {
1399      long procId = master.modifyNamespace(
1400        ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
1401        request.getNonceGroup(),
1402        request.getNonce());
1403      return ModifyNamespaceResponse.newBuilder().setProcId(procId).build();
1404    } catch (IOException e) {
1405      throw new ServiceException(e);
1406    }
1407  }
1408
1409  @Override
1410  public ModifyTableResponse modifyTable(RpcController controller,
1411      ModifyTableRequest req) throws ServiceException {
1412    try {
1413      long procId = master.modifyTable(
1414        ProtobufUtil.toTableName(req.getTableName()),
1415        ProtobufUtil.toTableDescriptor(req.getTableSchema()),
1416        req.getNonceGroup(),
1417        req.getNonce());
1418      return ModifyTableResponse.newBuilder().setProcId(procId).build();
1419    } catch (IOException ioe) {
1420      throw new ServiceException(ioe);
1421    }
1422  }
1423
  /**
   * Moves the region identified by an encoded region name to the requested destination
   * server, or to a balancer-chosen server when no destination is given.
   */
  @Override
  public MoveRegionResponse moveRegion(RpcController controller,
      MoveRegionRequest req) throws ServiceException {
    final byte [] encodedRegionName = req.getRegion().getValue().toByteArray();
    RegionSpecifierType type = req.getRegion().getType();
    // Destination is optional; null tells master.move() to pick a target itself.
    final byte [] destServerName = (req.hasDestServerName())?
      Bytes.toBytes(ProtobufUtil.toServerName(req.getDestServerName()).getServerName()):null;
    MoveRegionResponse mrr = MoveRegionResponse.newBuilder().build();

    if (type != RegionSpecifierType.ENCODED_REGION_NAME) {
      // Warn-only: the specifier value is still treated as an encoded region name below.
      LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
        + " actual: " + type);
    }

    try {
      master.checkInitialized();
      master.move(encodedRegionName, destServerName);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return mrr;
  }
1446
1447  /**
1448   * Offline specified region from master's in-memory state. It will not attempt to
1449   * reassign the region as in unassign.
1450   *
1451   * This is a special method that should be used by experts or hbck.
1452   *
1453   */
1454  @Override
1455  public OfflineRegionResponse offlineRegion(RpcController controller,
1456      OfflineRegionRequest request) throws ServiceException {
1457    try {
1458      master.checkInitialized();
1459
1460      final RegionSpecifierType type = request.getRegion().getType();
1461      if (type != RegionSpecifierType.REGION_NAME) {
1462        LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
1463          + " actual: " + type);
1464      }
1465
1466      final byte[] regionName = request.getRegion().getValue().toByteArray();
1467      final RegionInfo hri = master.getAssignmentManager().getRegionInfo(regionName);
1468      if (hri == null) {
1469        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
1470      }
1471
1472      if (master.cpHost != null) {
1473        master.cpHost.preRegionOffline(hri);
1474      }
1475      LOG.info(master.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString());
1476      master.getAssignmentManager().offlineRegion(hri);
1477      if (master.cpHost != null) {
1478        master.cpHost.postRegionOffline(hri);
1479      }
1480    } catch (IOException ioe) {
1481      throw new ServiceException(ioe);
1482    }
1483    return OfflineRegionResponse.newBuilder().build();
1484  }
1485
1486  /**
1487   * Execute Restore/Clone snapshot operation.
1488   *
1489   * <p>If the specified table exists a "Restore" is executed, replacing the table
1490   * schema and directory data with the content of the snapshot.
1491   * The table must be disabled, or a UnsupportedOperationException will be thrown.
1492   *
1493   * <p>If the table doesn't exist a "Clone" is executed, a new table is created
1494   * using the schema at the time of the snapshot, and the content of the snapshot.
1495   *
1496   * <p>The restore/clone operation does not require copying HFiles. Since HFiles
1497   * are immutable the table can point to and use the same files as the original one.
1498   */
1499  @Override
1500  public RestoreSnapshotResponse restoreSnapshot(RpcController controller,
1501      RestoreSnapshotRequest request) throws ServiceException {
1502    try {
1503      long procId = master.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(),
1504        request.getNonce(), request.getRestoreACL());
1505      return RestoreSnapshotResponse.newBuilder().setProcId(procId).build();
1506    } catch (ForeignException e) {
1507      throw new ServiceException(e.getCause());
1508    } catch (IOException e) {
1509      throw new ServiceException(e);
1510    }
1511  }
1512
1513  @Override
1514  public SetSnapshotCleanupResponse switchSnapshotCleanup(
1515      RpcController controller, SetSnapshotCleanupRequest request)
1516      throws ServiceException {
1517    try {
1518      master.checkInitialized();
1519      final boolean enabled = request.getEnabled();
1520      final boolean isSynchronous = request.hasSynchronous() && request.getSynchronous();
1521      final boolean prevSnapshotCleanupRunning = this.switchSnapshotCleanup(enabled, isSynchronous);
1522      return SetSnapshotCleanupResponse.newBuilder()
1523          .setPrevSnapshotCleanup(prevSnapshotCleanupRunning).build();
1524    } catch (IOException e) {
1525      throw new ServiceException(e);
1526    }
1527  }
1528
1529  @Override
1530  public IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(
1531      RpcController controller, IsSnapshotCleanupEnabledRequest request)
1532      throws ServiceException {
1533    try {
1534      master.checkInitialized();
1535      final boolean isSnapshotCleanupEnabled = master.snapshotCleanupTracker
1536          .isSnapshotCleanupEnabled();
1537      return IsSnapshotCleanupEnabledResponse.newBuilder()
1538          .setEnabled(isSnapshotCleanupEnabled).build();
1539    } catch (IOException e) {
1540      throw new ServiceException(e);
1541    }
1542  }
1543
1544  /**
1545   * Turn on/off snapshot auto-cleanup based on TTL
1546   *
1547   * @param enabledNewVal Set to <code>true</code> to enable, <code>false</code> to disable
1548   * @param synchronous If <code>true</code>, it waits until current snapshot cleanup is completed,
1549   *   if outstanding
1550   * @return previous snapshot auto-cleanup mode
1551   */
1552  private synchronized boolean switchSnapshotCleanup(final boolean enabledNewVal,
1553      final boolean synchronous) {
1554    final boolean oldValue = master.snapshotCleanupTracker.isSnapshotCleanupEnabled();
1555    master.switchSnapshotCleanup(enabledNewVal, synchronous);
1556    LOG.info("{} Successfully set snapshot cleanup to {}", master.getClientIdAuditPrefix(),
1557      enabledNewVal);
1558    return oldValue;
1559  }
1560
1561
  /**
   * Triggers a catalog janitor scan on demand and reports its outcome to the client.
   */
  @Override
  public RunCatalogScanResponse runCatalogScan(RpcController c,
      RunCatalogScanRequest req) throws ServiceException {
    rpcPreCheck("runCatalogScan");
    try {
      // Run the janitor scan synchronously and wrap its result in the RPC response.
      return ResponseConverter.buildRunCatalogScanResponse(
          this.master.catalogJanitorChore.scan());
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }
1573
1574  @Override
1575  public RunCleanerChoreResponse runCleanerChore(RpcController c, RunCleanerChoreRequest req)
1576    throws ServiceException {
1577    rpcPreCheck("runCleanerChore");
1578    boolean result = master.getHFileCleaner().runCleaner() && master.getLogCleaner().runCleaner();
1579    return ResponseConverter.buildRunCleanerChoreResponse(result);
1580  }
1581
1582  @Override
1583  public SetBalancerRunningResponse setBalancerRunning(RpcController c,
1584      SetBalancerRunningRequest req) throws ServiceException {
1585    try {
1586      master.checkInitialized();
1587      boolean prevValue = (req.getSynchronous())?
1588        synchronousBalanceSwitch(req.getOn()) : master.balanceSwitch(req.getOn());
1589      return SetBalancerRunningResponse.newBuilder().setPrevBalanceValue(prevValue).build();
1590    } catch (IOException ioe) {
1591      throw new ServiceException(ioe);
1592    }
1593  }
1594
1595  @Override
1596  public ShutdownResponse shutdown(RpcController controller,
1597      ShutdownRequest request) throws ServiceException {
1598    LOG.info(master.getClientIdAuditPrefix() + " shutdown");
1599    try {
1600      master.shutdown();
1601    } catch (IOException e) {
1602      LOG.error("Exception occurred in HMaster.shutdown()", e);
1603      throw new ServiceException(e);
1604    }
1605    return ShutdownResponse.newBuilder().build();
1606  }
1607
1608  /**
1609   * Triggers an asynchronous attempt to take a snapshot.
1610   * {@inheritDoc}
1611   */
1612  @Override
1613  public SnapshotResponse snapshot(RpcController controller,
1614      SnapshotRequest request) throws ServiceException {
1615    try {
1616      master.checkInitialized();
1617      master.snapshotManager.checkSnapshotSupport();
1618
1619      LOG.info(master.getClientIdAuditPrefix() + " snapshot request for:" +
1620        ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
1621      // get the snapshot information
1622      SnapshotDescription snapshot = SnapshotDescriptionUtils.validate(
1623        request.getSnapshot(), master.getConfiguration());
1624      master.snapshotManager.takeSnapshot(snapshot);
1625
1626      // send back the max amount of time the client should wait for the snapshot to complete
1627      long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(master.getConfiguration(),
1628        snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
1629      return SnapshotResponse.newBuilder().setExpectedTimeout(waitTime).build();
1630    } catch (ForeignException e) {
1631      throw new ServiceException(e.getCause());
1632    } catch (IOException e) {
1633      throw new ServiceException(e);
1634    }
1635  }
1636
1637  @Override
1638  public StopMasterResponse stopMaster(RpcController controller,
1639      StopMasterRequest request) throws ServiceException {
1640    LOG.info(master.getClientIdAuditPrefix() + " stop");
1641    try {
1642      master.stopMaster();
1643    } catch (IOException e) {
1644      LOG.error("Exception occurred while stopping master", e);
1645      throw new ServiceException(e);
1646    }
1647    return StopMasterResponse.newBuilder().build();
1648  }
1649
1650  @Override
1651  public IsInMaintenanceModeResponse isMasterInMaintenanceMode(
1652      final RpcController controller,
1653      final IsInMaintenanceModeRequest request) throws ServiceException {
1654    IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder();
1655    response.setInMaintenanceMode(master.isInMaintenanceMode());
1656    return response.build();
1657  }
1658
  /**
   * Unassigns the specified region from its current server so it can be reassigned.
   * hbase:meta gets special handling: its location is read from ZooKeeper rather than
   * from the meta table itself.
   */
  @Override
  public UnassignRegionResponse unassignRegion(RpcController controller,
      UnassignRegionRequest req) throws ServiceException {
    try {
      final byte [] regionName = req.getRegion().getValue().toByteArray();
      RegionSpecifierType type = req.getRegion().getType();
      final boolean force = req.getForce();
      UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build();

      master.checkInitialized();
      if (type != RegionSpecifierType.REGION_NAME) {
        // Warn-only: the specifier value is still treated as a full region name below.
        LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
          + " actual: " + type);
      }
      // Look the region up in hbase:meta first; the meta region itself is then overridden
      // with its ZooKeeper-tracked location, since meta has no row for itself.
      Pair<RegionInfo, ServerName> pair =
        MetaTableAccessor.getRegion(master.getConnection(), regionName);
      if (Bytes.equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName(), regionName)) {
        pair = new Pair<>(RegionInfoBuilder.FIRST_META_REGIONINFO,
          MetaTableLocator.getMetaRegionLocation(master.getZooKeeper()));
      }
      if (pair == null) {
        throw new UnknownRegionException(Bytes.toString(regionName));
      }

      RegionInfo hri = pair.getFirst();
      // Coprocessor hooks bracket the unassign; note the force flag is only passed to the
      // hooks here, not to unassign() itself -- presumably intentional, TODO confirm.
      if (master.cpHost != null) {
        master.cpHost.preUnassign(hri, force);
      }
      LOG.debug(master.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString()
          + " in current location if it is online and reassign.force=" + force);
      master.getAssignmentManager().unassign(hri);
      if (master.cpHost != null) {
        master.cpHost.postUnassign(hri, force);
      }

      return urr;
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }
1699
  /**
   * Receives region state transition reports from region servers and forwards them to
   * the assignment manager. Only requires services started, not full initialization,
   * since region servers report transitions during master startup.
   */
  @Override
  public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c,
      ReportRegionStateTransitionRequest req) throws ServiceException {
    try {
      master.checkServiceStarted();
      return master.getAssignmentManager().reportRegionStateTransition(req);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }
1710
1711  @Override
1712  public SetQuotaResponse setQuota(RpcController c, SetQuotaRequest req)
1713      throws ServiceException {
1714    try {
1715      master.checkInitialized();
1716      return master.getMasterQuotaManager().setQuota(req);
1717    } catch (Exception e) {
1718      throw new ServiceException(e);
1719    }
1720  }
1721
1722  @Override
1723  public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
1724      MajorCompactionTimestampRequest request) throws ServiceException {
1725    MajorCompactionTimestampResponse.Builder response =
1726        MajorCompactionTimestampResponse.newBuilder();
1727    try {
1728      master.checkInitialized();
1729      response.setCompactionTimestamp(master.getLastMajorCompactionTimestamp(ProtobufUtil
1730          .toTableName(request.getTableName())));
1731    } catch (IOException e) {
1732      throw new ServiceException(e);
1733    }
1734    return response.build();
1735  }
1736
1737  @Override
1738  public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
1739      RpcController controller, MajorCompactionTimestampForRegionRequest request)
1740      throws ServiceException {
1741    MajorCompactionTimestampResponse.Builder response =
1742        MajorCompactionTimestampResponse.newBuilder();
1743    try {
1744      master.checkInitialized();
1745      response.setCompactionTimestamp(master.getLastMajorCompactionTimestampForRegion(request
1746          .getRegion().getValue().toByteArray()));
1747    } catch (IOException e) {
1748      throw new ServiceException(e);
1749    }
1750    return response.build();
1751  }
1752
1753  /**
1754   * Compact a region on the master.
1755   *
1756   * @param controller the RPC controller
1757   * @param request the request
1758   * @throws ServiceException
1759   */
1760  @Override
1761  @QosPriority(priority=HConstants.ADMIN_QOS)
1762  public CompactRegionResponse compactRegion(final RpcController controller,
1763    final CompactRegionRequest request) throws ServiceException {
1764    try {
1765      master.checkInitialized();
1766      byte[] regionName = request.getRegion().getValue().toByteArray();
1767      TableName tableName = RegionInfo.getTable(regionName);
1768      // if the region is a mob region, do the mob file compaction.
1769      if (MobUtils.isMobRegionName(tableName, regionName)) {
1770        checkHFileFormatVersionForMob();
1771        return compactMob(request, tableName);
1772      } else {
1773        return super.compactRegion(controller, request);
1774      }
1775    } catch (IOException ie) {
1776      throw new ServiceException(ie);
1777    }
1778  }
1779
1780  /**
1781   * check configured hfile format version before to do compaction
1782   * @throws IOException throw IOException
1783   */
1784  private void checkHFileFormatVersionForMob() throws IOException {
1785    if (HFile.getFormatVersion(master.getConfiguration()) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
1786      LOG.error("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
1787          + " is required for MOB compaction. Compaction will not run.");
1788      throw new IOException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
1789          + " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY
1790          + " accordingly.");
1791    }
1792  }
1793
1794  /**
1795   * This method implements Admin getRegionInfo. On RegionServer, it is
1796   * able to return RegionInfo and detail. On Master, it just returns
1797   * RegionInfo. On Master it has been hijacked to return Mob detail.
1798   * Master implementation is good for querying full region name if
1799   * you only have the encoded name (useful around region replicas
1800   * for example which do not have a row in hbase:meta).
1801   */
1802  @Override
1803  @QosPriority(priority=HConstants.ADMIN_QOS)
1804  public GetRegionInfoResponse getRegionInfo(final RpcController controller,
1805    final GetRegionInfoRequest request) throws ServiceException {
1806    RegionInfo ri = null;
1807    try {
1808      ri = getRegionInfo(request.getRegion());
1809    } catch(UnknownRegionException ure) {
1810      throw new ServiceException(ure);
1811    }
1812    GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
1813    if (ri != null) {
1814      builder.setRegionInfo(ProtobufUtil.toRegionInfo(ri));
1815    } else {
1816      // Is it a MOB name? These work differently.
1817      byte [] regionName = request.getRegion().getValue().toByteArray();
1818      TableName tableName = RegionInfo.getTable(regionName);
1819      if (MobUtils.isMobRegionName(tableName, regionName)) {
1820        // a dummy region info contains the compaction state.
1821        RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName);
1822        builder.setRegionInfo(ProtobufUtil.toRegionInfo(mobRegionInfo));
1823        if (request.hasCompactionState() && request.getCompactionState()) {
1824          builder.setCompactionState(master.getMobCompactionState(tableName));
1825        }
1826      } else {
1827        // If unknown RegionInfo and not a MOB region, it is unknown.
1828        throw new ServiceException(new UnknownRegionException(Bytes.toString(regionName)));
1829      }
1830    }
1831    return builder.build();
1832  }
1833
1834  /**
1835   * Compacts the mob files in the current table.
1836   * @param request the request.
1837   * @param tableName the current table name.
1838   * @return The response of the mob file compaction.
1839   * @throws IOException
1840   */
1841  private CompactRegionResponse compactMob(final CompactRegionRequest request,
1842    TableName tableName) throws IOException {
1843    if (!master.getTableStateManager().isTableState(tableName, TableState.State.ENABLED)) {
1844      throw new DoNotRetryIOException("Table " + tableName + " is not enabled");
1845    }
1846    boolean allFiles = false;
1847    List<ColumnFamilyDescriptor> compactedColumns = new ArrayList<>();
1848    ColumnFamilyDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
1849    byte[] family = null;
1850    if (request.hasFamily()) {
1851      family = request.getFamily().toByteArray();
1852      for (ColumnFamilyDescriptor hcd : hcds) {
1853        if (Bytes.equals(family, hcd.getName())) {
1854          if (!hcd.isMobEnabled()) {
1855            LOG.error("Column family " + hcd.getNameAsString() + " is not a mob column family");
1856            throw new DoNotRetryIOException("Column family " + hcd.getNameAsString()
1857                    + " is not a mob column family");
1858          }
1859          compactedColumns.add(hcd);
1860        }
1861      }
1862    } else {
1863      for (ColumnFamilyDescriptor hcd : hcds) {
1864        if (hcd.isMobEnabled()) {
1865          compactedColumns.add(hcd);
1866        }
1867      }
1868    }
1869    if (compactedColumns.isEmpty()) {
1870      LOG.error("No mob column families are assigned in the mob compaction");
1871      throw new DoNotRetryIOException(
1872              "No mob column families are assigned in the mob compaction");
1873    }
1874    if (request.hasMajor() && request.getMajor()) {
1875      allFiles = true;
1876    }
1877    String familyLogMsg = (family != null) ? Bytes.toString(family) : "";
1878    if (LOG.isTraceEnabled()) {
1879      LOG.trace("User-triggered mob compaction requested for table: "
1880              + tableName.getNameAsString() + " for column family: " + familyLogMsg);
1881    }
1882    master.requestMobCompaction(tableName, compactedColumns, allFiles);
1883    return CompactRegionResponse.newBuilder().build();
1884  }
1885
1886  @Override
1887  public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller,
1888      IsBalancerEnabledRequest request) throws ServiceException {
1889    IsBalancerEnabledResponse.Builder response = IsBalancerEnabledResponse.newBuilder();
1890    response.setEnabled(master.isBalancerOn());
1891    return response.build();
1892  }
1893
  /**
   * Flips the split and/or merge switches named in the request to the requested value,
   * recording the previous value of each switch in the response (in request order).
   * Coprocessor pre/post hooks bracket each individual switch update.
   */
  @Override
  public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller,
    SetSplitOrMergeEnabledRequest request) throws ServiceException {
    SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder();
    try {
      master.checkInitialized();
      boolean newValue = request.getEnabled();
      for (MasterProtos.MasterSwitchType masterSwitchType: request.getSwitchTypesList()) {
        MasterSwitchType switchType = convert(masterSwitchType);
        // Record the prior value before mutating, so the client can restore it.
        boolean oldValue = master.isSplitOrMergeEnabled(switchType);
        response.addPrevValue(oldValue);
        if (master.cpHost != null) {
          master.cpHost.preSetSplitOrMergeEnabled(newValue, switchType);
        }
        // The switch state lives in ZooKeeper, hence the possible KeeperException below.
        master.getSplitOrMergeTracker().setSplitOrMergeEnabled(newValue, switchType);
        if (master.cpHost != null) {
          master.cpHost.postSetSplitOrMergeEnabled(newValue, switchType);
        }
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    } catch (KeeperException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }
1920
1921  @Override
1922  public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller,
1923    IsSplitOrMergeEnabledRequest request) throws ServiceException {
1924    IsSplitOrMergeEnabledResponse.Builder response = IsSplitOrMergeEnabledResponse.newBuilder();
1925    response.setEnabled(master.isSplitOrMergeEnabled(convert(request.getSwitchType())));
1926    return response.build();
1927  }
1928
1929  @Override
1930  public NormalizeResponse normalize(RpcController controller,
1931      NormalizeRequest request) throws ServiceException {
1932    rpcPreCheck("normalize");
1933    try {
1934      return NormalizeResponse.newBuilder().setNormalizerRan(master.normalizeRegions()).build();
1935    } catch (IOException ex) {
1936      throw new ServiceException(ex);
1937    }
1938  }
1939
1940  @Override
1941  public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller,
1942      SetNormalizerRunningRequest request) throws ServiceException {
1943    rpcPreCheck("setNormalizerRunning");
1944
1945    // Sets normalizer on/off flag in ZK.
1946    boolean prevValue = master.getRegionNormalizerTracker().isNormalizerOn();
1947    boolean newValue = request.getOn();
1948    try {
1949      master.getRegionNormalizerTracker().setNormalizerOn(newValue);
1950    } catch (KeeperException ke) {
1951      LOG.warn("Error flipping normalizer switch", ke);
1952    }
1953    LOG.info("{} set normalizerSwitch={}", master.getClientIdAuditPrefix(), newValue);
1954    return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build();
1955  }
1956
1957  @Override
1958  public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller,
1959      IsNormalizerEnabledRequest request) throws ServiceException {
1960    IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder();
1961    response.setEnabled(master.isNormalizerOn());
1962    return response.build();
1963  }
1964
1965  /**
1966   * Returns the security capabilities in effect on the cluster
1967   */
1968  @Override
1969  public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller,
1970      SecurityCapabilitiesRequest request) throws ServiceException {
1971    SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder();
1972    try {
1973      master.checkInitialized();
1974      Set<SecurityCapabilitiesResponse.Capability> capabilities = new HashSet<>();
1975      // Authentication
1976      if (User.isHBaseSecurityEnabled(master.getConfiguration())) {
1977        capabilities.add(SecurityCapabilitiesResponse.Capability.SECURE_AUTHENTICATION);
1978      } else {
1979        capabilities.add(SecurityCapabilitiesResponse.Capability.SIMPLE_AUTHENTICATION);
1980      }
1981      // A coprocessor that implements AccessControlService can provide AUTHORIZATION and
1982      // CELL_AUTHORIZATION
1983      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
1984        if (AccessChecker.isAuthorizationSupported(master.getConfiguration())) {
1985          capabilities.add(SecurityCapabilitiesResponse.Capability.AUTHORIZATION);
1986        }
1987        if (AccessController.isCellAuthorizationSupported(master.getConfiguration())) {
1988          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_AUTHORIZATION);
1989        }
1990      }
1991      // A coprocessor that implements VisibilityLabelsService can provide CELL_VISIBILITY.
1992      if (master.cpHost != null && hasVisibilityLabelsServiceCoprocessor(master.cpHost)) {
1993        if (VisibilityController.isCellAuthorizationSupported(master.getConfiguration())) {
1994          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_VISIBILITY);
1995        }
1996      }
1997      response.addAllCapabilities(capabilities);
1998    } catch (IOException e) {
1999      throw new ServiceException(e);
2000    }
2001    return response.build();
2002  }
2003
2004  /**
2005   * Determines if there is a MasterCoprocessor deployed which implements
2006   * {@link org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService.Interface}.
2007   */
2008  boolean hasAccessControlServiceCoprocessor(MasterCoprocessorHost cpHost) {
2009    return checkCoprocessorWithService(
2010        cpHost.findCoprocessors(MasterCoprocessor.class), AccessControlService.Interface.class);
2011  }
2012
2013  /**
2014   * Determines if there is a MasterCoprocessor deployed which implements
2015   * {@link org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService.Interface}.
2016   */
2017  boolean hasVisibilityLabelsServiceCoprocessor(MasterCoprocessorHost cpHost) {
2018    return checkCoprocessorWithService(
2019        cpHost.findCoprocessors(MasterCoprocessor.class),
2020        VisibilityLabelsService.Interface.class);
2021  }
2022
2023  /**
2024   * Determines if there is a coprocessor implementation in the provided argument which extends
2025   * or implements the provided {@code service}.
2026   */
2027  boolean checkCoprocessorWithService(
2028      List<MasterCoprocessor> coprocessorsToCheck, Class<?> service) {
2029    if (coprocessorsToCheck == null || coprocessorsToCheck.isEmpty()) {
2030      return false;
2031    }
2032    for (MasterCoprocessor cp : coprocessorsToCheck) {
2033      if (service.isAssignableFrom(cp.getClass())) {
2034        return true;
2035      }
2036    }
2037    return false;
2038  }
2039
2040  private MasterSwitchType convert(MasterProtos.MasterSwitchType switchType) {
2041    switch (switchType) {
2042      case SPLIT:
2043        return MasterSwitchType.SPLIT;
2044      case MERGE:
2045        return MasterSwitchType.MERGE;
2046      default:
2047        break;
2048    }
2049    return null;
2050  }
2051
2052  @Override
2053  public AddReplicationPeerResponse addReplicationPeer(RpcController controller,
2054      AddReplicationPeerRequest request) throws ServiceException {
2055    try {
2056      long procId = master.addReplicationPeer(request.getPeerId(),
2057        ReplicationPeerConfigUtil.convert(request.getPeerConfig()),
2058        request.getPeerState().getState().equals(ReplicationState.State.ENABLED));
2059      return AddReplicationPeerResponse.newBuilder().setProcId(procId).build();
2060    } catch (ReplicationException | IOException e) {
2061      throw new ServiceException(e);
2062    }
2063  }
2064
2065  @Override
2066  public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller,
2067      RemoveReplicationPeerRequest request) throws ServiceException {
2068    try {
2069      long procId = master.removeReplicationPeer(request.getPeerId());
2070      return RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build();
2071    } catch (ReplicationException | IOException e) {
2072      throw new ServiceException(e);
2073    }
2074  }
2075
2076  @Override
2077  public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller,
2078      EnableReplicationPeerRequest request) throws ServiceException {
2079    try {
2080      long procId = master.enableReplicationPeer(request.getPeerId());
2081      return EnableReplicationPeerResponse.newBuilder().setProcId(procId).build();
2082    } catch (ReplicationException | IOException e) {
2083      throw new ServiceException(e);
2084    }
2085  }
2086
2087  @Override
2088  public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller,
2089      DisableReplicationPeerRequest request) throws ServiceException {
2090    try {
2091      long procId = master.disableReplicationPeer(request.getPeerId());
2092      return DisableReplicationPeerResponse.newBuilder().setProcId(procId).build();
2093    } catch (ReplicationException | IOException e) {
2094      throw new ServiceException(e);
2095    }
2096  }
2097
2098  @Override
2099  public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller,
2100      GetReplicationPeerConfigRequest request) throws ServiceException {
2101    GetReplicationPeerConfigResponse.Builder response = GetReplicationPeerConfigResponse
2102        .newBuilder();
2103    try {
2104      String peerId = request.getPeerId();
2105      ReplicationPeerConfig peerConfig = master.getReplicationPeerConfig(peerId);
2106      response.setPeerId(peerId);
2107      response.setPeerConfig(ReplicationPeerConfigUtil.convert(peerConfig));
2108    } catch (ReplicationException | IOException e) {
2109      throw new ServiceException(e);
2110    }
2111    return response.build();
2112  }
2113
2114  @Override
2115  public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(RpcController controller,
2116      UpdateReplicationPeerConfigRequest request) throws ServiceException {
2117    try {
2118      long procId = master.updateReplicationPeerConfig(request.getPeerId(),
2119        ReplicationPeerConfigUtil.convert(request.getPeerConfig()));
2120      return UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build();
2121    } catch (ReplicationException | IOException e) {
2122      throw new ServiceException(e);
2123    }
2124  }
2125
2126  @Override
2127  public ListReplicationPeersResponse listReplicationPeers(RpcController controller,
2128      ListReplicationPeersRequest request) throws ServiceException {
2129    ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder();
2130    try {
2131      List<ReplicationPeerDescription> peers = master
2132          .listReplicationPeers(request.hasRegex() ? request.getRegex() : null);
2133      for (ReplicationPeerDescription peer : peers) {
2134        response.addPeerDesc(ReplicationPeerConfigUtil.toProtoReplicationPeerDescription(peer));
2135      }
2136    } catch (ReplicationException | IOException e) {
2137      throw new ServiceException(e);
2138    }
2139    return response.build();
2140  }
2141
2142  @Override
2143  public ListDecommissionedRegionServersResponse listDecommissionedRegionServers(
2144      RpcController controller, ListDecommissionedRegionServersRequest request)
2145      throws ServiceException {
2146    ListDecommissionedRegionServersResponse.Builder response =
2147        ListDecommissionedRegionServersResponse.newBuilder();
2148    try {
2149      master.checkInitialized();
2150      if (master.cpHost != null) {
2151        master.cpHost.preListDecommissionedRegionServers();
2152      }
2153      List<ServerName> servers = master.listDecommissionedRegionServers();
2154      response.addAllServerName((servers.stream().map(server -> ProtobufUtil.toServerName(server)))
2155          .collect(Collectors.toList()));
2156      if (master.cpHost != null) {
2157        master.cpHost.postListDecommissionedRegionServers();
2158      }
2159    } catch (IOException io) {
2160      throw new ServiceException(io);
2161    }
2162
2163    return response.build();
2164  }
2165
2166  @Override
2167  public DecommissionRegionServersResponse decommissionRegionServers(RpcController controller,
2168      DecommissionRegionServersRequest request) throws ServiceException {
2169    try {
2170      master.checkInitialized();
2171      List<ServerName> servers = request.getServerNameList().stream()
2172          .map(pbServer -> ProtobufUtil.toServerName(pbServer)).collect(Collectors.toList());
2173      boolean offload = request.getOffload();
2174      if (master.cpHost != null) {
2175        master.cpHost.preDecommissionRegionServers(servers, offload);
2176      }
2177      master.decommissionRegionServers(servers, offload);
2178      if (master.cpHost != null) {
2179        master.cpHost.postDecommissionRegionServers(servers, offload);
2180      }
2181    } catch (IOException io) {
2182      throw new ServiceException(io);
2183    }
2184
2185    return DecommissionRegionServersResponse.newBuilder().build();
2186  }
2187
2188  @Override
2189  public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller,
2190      RecommissionRegionServerRequest request) throws ServiceException {
2191    try {
2192      master.checkInitialized();
2193      ServerName server = ProtobufUtil.toServerName(request.getServerName());
2194      List<byte[]> encodedRegionNames = request.getRegionList().stream()
2195          .map(regionSpecifier -> regionSpecifier.getValue().toByteArray())
2196          .collect(Collectors.toList());
2197      if (master.cpHost != null) {
2198        master.cpHost.preRecommissionRegionServer(server, encodedRegionNames);
2199      }
2200      master.recommissionRegionServer(server, encodedRegionNames);
2201      if (master.cpHost != null) {
2202        master.cpHost.postRecommissionRegionServer(server, encodedRegionNames);
2203      }
2204    } catch (IOException io) {
2205      throw new ServiceException(io);
2206    }
2207
2208    return RecommissionRegionServerResponse.newBuilder().build();
2209  }
2210
  /**
   * Queues a lock-acquisition procedure for a set of regions, a table, or a namespace
   * (exactly one must be specified) and returns the procedure id. The lock is held by a
   * {@code LockProcedure}; the client keeps it alive via {@code lockHeartbeat}.
   */
  @Override
  public LockResponse requestLock(RpcController controller, final LockRequest request)
      throws ServiceException {
    try {
      if (request.getDescription().isEmpty()) {
        throw new IllegalArgumentException("Empty description");
      }
      NonceProcedureRunnable npr;
      LockType type = LockType.valueOf(request.getLockType().name());
      if (request.getRegionInfoCount() > 0) {
        // Region-level lock: all listed regions are locked by a single procedure.
        final RegionInfo[] regionInfos = new RegionInfo[request.getRegionInfoCount()];
        for (int i = 0; i < request.getRegionInfoCount(); ++i) {
          regionInfos[i] = ProtobufUtil.toRegionInfo(request.getRegionInfo(i));
        }
        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
          @Override
          protected void run() throws IOException {
            setProcId(master.getLockManager().remoteLocks().requestRegionsLock(regionInfos,
                request.getDescription(), getNonceKey()));
          }

          @Override
          protected String getDescription() {
            return "RequestLock";
          }
        };
      } else if (request.hasTableName()) {
        // Table-level lock.
        final TableName tableName = ProtobufUtil.toTableName(request.getTableName());
        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
          @Override
          protected void run() throws IOException {
            setProcId(master.getLockManager().remoteLocks().requestTableLock(tableName, type,
                request.getDescription(), getNonceKey()));
          }

          @Override
          protected String getDescription() {
            return "RequestLock";
          }
        };
      } else if (request.hasNamespace()) {
        // Namespace-level lock.
        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
          @Override
          protected void run() throws IOException {
            setProcId(master.getLockManager().remoteLocks().requestNamespaceLock(
                request.getNamespace(), type, request.getDescription(), getNonceKey()));
          }

          @Override
          protected String getDescription() {
            return "RequestLock";
          }
        };
      } else {
        throw new IllegalArgumentException("one of table/namespace/region should be specified");
      }
      // Submit through the nonce-aware helper so retried RPCs reuse the same procedure.
      long procId = MasterProcedureUtil.submitProcedure(npr);
      return LockResponse.newBuilder().setProcId(procId).build();
    } catch (IllegalArgumentException e) {
      // Malformed request: signal the client not to retry.
      LOG.warn("Exception when queuing lock", e);
      throw new ServiceException(new DoNotRetryIOException(e));
    } catch (IOException e) {
      LOG.warn("Exception when queuing lock", e);
      throw new ServiceException(e);
    }
  }
2277
2278  /**
2279   * @return LOCKED, if procedure is found and it has the lock; else UNLOCKED.
2280   * @throws ServiceException if given proc id is found but it is not a LockProcedure.
2281   */
2282  @Override
2283  public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request)
2284      throws ServiceException {
2285    try {
2286      if (master.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(),
2287          request.getKeepAlive())) {
2288        return LockHeartbeatResponse.newBuilder().setTimeoutMs(
2289            master.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF,
2290                LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS))
2291            .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).build();
2292      } else {
2293        return LockHeartbeatResponse.newBuilder()
2294            .setLockStatus(LockHeartbeatResponse.LockStatus.UNLOCKED).build();
2295      }
2296    } catch (IOException e) {
2297      throw new ServiceException(e);
2298    }
2299  }
2300
2301  @Override
2302  public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller,
2303      RegionSpaceUseReportRequest request) throws ServiceException {
2304    try {
2305      master.checkInitialized();
2306      if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) {
2307        return RegionSpaceUseReportResponse.newBuilder().build();
2308      }
2309      MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
2310      if (quotaManager != null) {
2311        final long now = EnvironmentEdgeManager.currentTime();
2312        for (RegionSpaceUse report : request.getSpaceUseList()) {
2313          quotaManager.addRegionSize(ProtobufUtil.toRegionInfo(report.getRegionInfo()),
2314            report.getRegionSize(), now);
2315        }
2316      } else {
2317        LOG.debug("Received region space usage report but HMaster is not ready to process it, "
2318            + "skipping");
2319      }
2320      return RegionSpaceUseReportResponse.newBuilder().build();
2321    } catch (Exception e) {
2322      throw new ServiceException(e);
2323    }
2324  }
2325
2326  @Override
2327  public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(
2328      RpcController controller, GetSpaceQuotaRegionSizesRequest request) throws ServiceException {
2329    try {
2330      master.checkInitialized();
2331      MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
2332      GetSpaceQuotaRegionSizesResponse.Builder builder =
2333          GetSpaceQuotaRegionSizesResponse.newBuilder();
2334      if (quotaManager != null) {
2335        Map<RegionInfo,Long> regionSizes = quotaManager.snapshotRegionSizes();
2336        Map<TableName,Long> regionSizesByTable = new HashMap<>();
2337        // Translate hregioninfo+long -> tablename+long
2338        for (Entry<RegionInfo,Long> entry : regionSizes.entrySet()) {
2339          final TableName tableName = entry.getKey().getTable();
2340          Long prevSize = regionSizesByTable.get(tableName);
2341          if (prevSize == null) {
2342            prevSize = 0L;
2343          }
2344          regionSizesByTable.put(tableName, prevSize + entry.getValue());
2345        }
2346        // Serialize them into the protobuf
2347        for (Entry<TableName,Long> tableSize : regionSizesByTable.entrySet()) {
2348          builder.addSizes(RegionSizes.newBuilder()
2349              .setTableName(ProtobufUtil.toProtoTableName(tableSize.getKey()))
2350              .setSize(tableSize.getValue()).build());
2351        }
2352        return builder.build();
2353      } else {
2354        LOG.debug("Received space quota region size report but HMaster is not ready to process it,"
2355            + "skipping");
2356      }
2357      return builder.build();
2358    } catch (Exception e) {
2359      throw new ServiceException(e);
2360    }
2361  }
2362
2363  @Override
2364  public GetQuotaStatesResponse getQuotaStates(
2365      RpcController controller, GetQuotaStatesRequest request) throws ServiceException {
2366    try {
2367      master.checkInitialized();
2368      QuotaObserverChore quotaChore = this.master.getQuotaObserverChore();
2369      GetQuotaStatesResponse.Builder builder = GetQuotaStatesResponse.newBuilder();
2370      if (quotaChore != null) {
2371        // The "current" view of all tables with quotas
2372        Map<TableName, SpaceQuotaSnapshot> tableSnapshots = quotaChore.getTableQuotaSnapshots();
2373        for (Entry<TableName, SpaceQuotaSnapshot> entry : tableSnapshots.entrySet()) {
2374          builder.addTableSnapshots(
2375              TableQuotaSnapshot.newBuilder()
2376                  .setTableName(ProtobufUtil.toProtoTableName(entry.getKey()))
2377                  .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
2378        }
2379        // The "current" view of all namespaces with quotas
2380        Map<String, SpaceQuotaSnapshot> nsSnapshots = quotaChore.getNamespaceQuotaSnapshots();
2381        for (Entry<String, SpaceQuotaSnapshot> entry : nsSnapshots.entrySet()) {
2382          builder.addNsSnapshots(
2383              NamespaceQuotaSnapshot.newBuilder()
2384                  .setNamespace(entry.getKey())
2385                  .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
2386        }
2387        return builder.build();
2388      }
2389      return builder.build();
2390    } catch (Exception e) {
2391      throw new ServiceException(e);
2392    }
2393  }
2394
2395  @Override
2396  public ClearDeadServersResponse clearDeadServers(RpcController controller,
2397      ClearDeadServersRequest request) throws ServiceException {
2398    LOG.debug(master.getClientIdAuditPrefix() + " clear dead region servers.");
2399    ClearDeadServersResponse.Builder response = ClearDeadServersResponse.newBuilder();
2400    try {
2401      master.checkInitialized();
2402      if (master.cpHost != null) {
2403        master.cpHost.preClearDeadServers();
2404      }
2405
2406      if (master.getServerManager().areDeadServersInProgress()) {
2407        LOG.debug("Some dead server is still under processing, won't clear the dead server list");
2408        response.addAllServerName(request.getServerNameList());
2409      } else {
2410        for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
2411          if (!master.getServerManager().getDeadServers()
2412                  .removeDeadServer(ProtobufUtil.toServerName(pbServer))) {
2413            response.addServerName(pbServer);
2414          }
2415        }
2416      }
2417
2418      if (master.cpHost != null) {
2419        master.cpHost.postClearDeadServers(
2420            ProtobufUtil.toServerNameList(request.getServerNameList()),
2421            ProtobufUtil.toServerNameList(response.getServerNameList()));
2422      }
2423    } catch (IOException io) {
2424      throw new ServiceException(io);
2425    }
2426    return response.build();
2427  }
2428
2429  @Override
2430  public ReportProcedureDoneResponse reportProcedureDone(RpcController controller,
2431      ReportProcedureDoneRequest request) throws ServiceException {
2432    request.getResultList().forEach(result -> {
2433      if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) {
2434        master.remoteProcedureCompleted(result.getProcId());
2435      } else {
2436        master.remoteProcedureFailed(result.getProcId(),
2437          RemoteProcedureException.fromProto(result.getError()));
2438      }
2439    });
2440    return ReportProcedureDoneResponse.getDefaultInstance();
2441  }
2442 
2443  // HBCK Services
2444
2445  @Override
2446  public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest req)
2447      throws ServiceException {
2448    rpcPreCheck("runHbckChore");
2449    LOG.info("{} request HBCK chore to run", master.getClientIdAuditPrefix());
2450    HbckChore hbckChore = master.getHbckChore();
2451    boolean ran = hbckChore.runChore();
2452    return RunHbckChoreResponse.newBuilder().setRan(ran).build();
2453  }
2454
2455  /**
2456   * Update state of the table in meta only. This is required by hbck in some situations to cleanup
2457   * stuck assign/ unassign regions procedures for the table.
2458   *
2459   * @return previous state of the table
2460   */
2461  @Override
2462  public GetTableStateResponse setTableStateInMeta(RpcController controller,
2463      SetTableStateInMetaRequest request) throws ServiceException {
2464    TableName tn = ProtobufUtil.toTableName(request.getTableName());
2465    try {
2466      TableState prevState = this.master.getTableStateManager().getTableState(tn);
2467      TableState newState = TableState.convert(tn, request.getTableState());
2468      LOG.info("{} set table={} state from {} to {}", master.getClientIdAuditPrefix(),
2469          tn, prevState.getState(), newState.getState());
2470      this.master.getTableStateManager().setTableState(tn, newState.getState());
2471      return GetTableStateResponse.newBuilder().setTableState(prevState.convert()).build();
2472    } catch (Exception e) {
2473      throw new ServiceException(e);
2474    }
2475  }
2476
2477  /**
2478   * Update state of the region in meta only. This is required by hbck in some situations to cleanup
2479   * stuck assign/ unassign regions procedures for the table.
2480   *
2481   * @return previous states of the regions
2482   */
2483  @Override
2484  public SetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller,
2485    SetRegionStateInMetaRequest request) throws ServiceException {
2486    SetRegionStateInMetaResponse.Builder builder = SetRegionStateInMetaResponse.newBuilder();
2487    try {
2488      for (RegionSpecifierAndState s : request.getStatesList()) {
2489        RegionSpecifier spec = s.getRegionSpecifier();
2490        String encodedName;
2491        if (spec.getType() == RegionSpecifierType.ENCODED_REGION_NAME) {
2492          encodedName = spec.getValue().toStringUtf8();
2493        } else {
2494          // TODO: actually, a full region name can save a lot on meta scan, improve later.
2495          encodedName = RegionInfo.encodeRegionName(spec.getValue().toByteArray());
2496        }
2497        RegionInfo info = this.master.getAssignmentManager().loadRegionFromMeta(encodedName);
2498        LOG.trace("region info loaded from meta table: {}", info);
2499        RegionState prevState =
2500          this.master.getAssignmentManager().getRegionStates().getRegionState(info);
2501        RegionState.State newState = RegionState.State.convert(s.getState());
2502        LOG.info("{} set region={} state from {} to {}", master.getClientIdAuditPrefix(), info,
2503          prevState.getState(), newState);
2504        Put metaPut = MetaTableAccessor.makePutFromRegionInfo(info, System.currentTimeMillis());
2505        metaPut.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
2506          Bytes.toBytes(newState.name()));
2507        List<Put> putList = new ArrayList<>();
2508        putList.add(metaPut);
2509        MetaTableAccessor.putsToMetaTable(this.master.getConnection(), putList);
2510        // Loads from meta again to refresh AM cache with the new region state
2511        this.master.getAssignmentManager().loadRegionFromMeta(encodedName);
2512        builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec)
2513          .setState(prevState.getState().convert()));
2514      }
2515    } catch (Exception e) {
2516      throw new ServiceException(e);
2517    }
2518    return builder.build();
2519  }
2520
2521  /**
2522   * Get RegionInfo from Master using content of RegionSpecifier as key.
2523   * @return RegionInfo found by decoding <code>rs</code> or null if none found
2524   */
2525  private RegionInfo getRegionInfo(HBaseProtos.RegionSpecifier rs) throws UnknownRegionException {
2526    RegionInfo ri = null;
2527    switch(rs.getType()) {
2528      case REGION_NAME:
2529        final byte[] regionName = rs.getValue().toByteArray();
2530        ri = this.master.getAssignmentManager().getRegionInfo(regionName);
2531        break;
2532      case ENCODED_REGION_NAME:
2533        String encodedRegionName = Bytes.toString(rs.getValue().toByteArray());
2534        RegionState regionState = this.master.getAssignmentManager().getRegionStates().
2535            getRegionState(encodedRegionName);
2536        ri = regionState == null ?
2537          this.master.getAssignmentManager().loadRegionFromMeta(encodedRegionName) :
2538            regionState.getRegion();
2539        break;
2540      default:
2541        break;
2542    }
2543    return ri;
2544  }
2545
2546  /**
2547   * A 'raw' version of assign that does bulk and skirts Master state checks (assigns can be made
2548   * during Master startup). For use by Hbck2.
2549   */
2550  @Override
2551  public MasterProtos.AssignsResponse assigns(RpcController controller,
2552      MasterProtos.AssignsRequest request)
2553    throws ServiceException {
2554    if (this.master.getMasterProcedureExecutor() == null) {
2555      throw new ServiceException("Master's ProcedureExecutor not initialized; retry later");
2556    }
2557    MasterProtos.AssignsResponse.Builder responseBuilder =
2558        MasterProtos.AssignsResponse.newBuilder();
2559    try {
2560      boolean override = request.getOverride();
2561      LOG.info("{} assigns, override={}", master.getClientIdAuditPrefix(), override);
2562      for (HBaseProtos.RegionSpecifier rs: request.getRegionList()) {
2563        RegionInfo ri = getRegionInfo(rs);
2564        if (ri == null) {
2565          LOG.info("Unknown={}", rs);
2566          responseBuilder.addPid(Procedure.NO_PROC_ID);
2567          continue;
2568        }
2569        responseBuilder.addPid(this.master.getMasterProcedureExecutor().submitProcedure(this.master
2570            .getAssignmentManager().createOneAssignProcedure(ri, override)));
2571      }
2572      return responseBuilder.build();
2573    } catch (IOException ioe) {
2574      throw new ServiceException(ioe);
2575    }
2576  }
2577
2578  /**
2579   * A 'raw' version of unassign that does bulk and skirts Master state checks (unassigns can be
2580   * made during Master startup). For use by Hbck2.
2581   */
2582  @Override
2583  public MasterProtos.UnassignsResponse unassigns(RpcController controller,
2584      MasterProtos.UnassignsRequest request)
2585      throws ServiceException {
2586    if (this.master.getMasterProcedureExecutor() == null) {
2587      throw new ServiceException("Master's ProcedureExecutor not initialized; retry later");
2588    }
2589    MasterProtos.UnassignsResponse.Builder responseBuilder =
2590        MasterProtos.UnassignsResponse.newBuilder();
2591    try {
2592      boolean override = request.getOverride();
2593      LOG.info("{} unassigns, override={}", master.getClientIdAuditPrefix(), override);
2594      for (HBaseProtos.RegionSpecifier rs: request.getRegionList()) {
2595        RegionInfo ri = getRegionInfo(rs);
2596        if (ri == null) {
2597          LOG.info("Unknown={}", rs);
2598          responseBuilder.addPid(Procedure.NO_PROC_ID);
2599          continue;
2600        }
2601        responseBuilder.addPid(this.master.getMasterProcedureExecutor().submitProcedure(this.master
2602            .getAssignmentManager().createOneUnassignProcedure(ri, override)));
2603      }
2604      return responseBuilder.build();
2605    } catch (IOException ioe) {
2606      throw new ServiceException(ioe);
2607    }
2608  }
2609
2610  /**
2611   * Bypass specified procedure to completion. Procedure is marked completed but no actual work
2612   * is done from the current state/ step onwards. Parents of the procedure are also marked for
2613   * bypass.
2614   *
2615   * NOTE: this is a dangerous operation and may be used to unstuck buggy procedures. This may
2616   * leave system in inconherent state. This may need to be followed by some cleanup steps/
2617   * actions by operator.
2618   *
2619   * @return BypassProcedureToCompletionResponse indicating success or failure
2620   */
2621  @Override
2622  public MasterProtos.BypassProcedureResponse bypassProcedure(RpcController controller,
2623      MasterProtos.BypassProcedureRequest request) throws ServiceException {
2624    try {
2625      LOG.info("{} bypass procedures={}, waitTime={}, override={}, recursive={}",
2626          master.getClientIdAuditPrefix(), request.getProcIdList(), request.getWaitTime(),
2627          request.getOverride(), request.getRecursive());
2628      List<Boolean> ret =
2629          master.getMasterProcedureExecutor().bypassProcedure(request.getProcIdList(),
2630          request.getWaitTime(), request.getOverride(), request.getRecursive());
2631      return MasterProtos.BypassProcedureResponse.newBuilder().addAllBypassed(ret).build();
2632    } catch (IOException e) {
2633      throw new ServiceException(e);
2634    }
2635  }
2636
2637  @Override
2638  public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProcedure(
2639      RpcController controller, MasterProtos.ScheduleServerCrashProcedureRequest request)
2640      throws ServiceException {
2641    List<Long> pids = new ArrayList<>();
2642    for (HBaseProtos.ServerName sn: request.getServerNameList()) {
2643      ServerName serverName = ProtobufUtil.toServerName(sn);
2644      LOG.info("{} schedule ServerCrashProcedure for {}",
2645          this.master.getClientIdAuditPrefix(), serverName);
2646      if (shouldSubmitSCP(serverName)) {
2647        pids.add(this.master.getServerManager().expireServer(serverName, true));
2648      } else {
2649        pids.add(Procedure.NO_PROC_ID);
2650      }
2651    }
2652    return MasterProtos.ScheduleServerCrashProcedureResponse.newBuilder().addAllPid(pids).build();
2653  }
2654
2655  @Override
2656  public FixMetaResponse fixMeta(RpcController controller, FixMetaRequest request)
2657      throws ServiceException {
2658    try {
2659      MetaFixer mf = new MetaFixer(this.master);
2660      mf.fix();
2661      return FixMetaResponse.newBuilder().build();
2662    } catch (IOException ioe) {
2663      throw new ServiceException(ioe);
2664    }
2665  }
2666
2667  @Override
2668  public SwitchRpcThrottleResponse switchRpcThrottle(RpcController controller,
2669      SwitchRpcThrottleRequest request) throws ServiceException {
2670    try {
2671      master.checkInitialized();
2672      return master.getMasterQuotaManager().switchRpcThrottle(request);
2673    } catch (Exception e) {
2674      throw new ServiceException(e);
2675    }
2676  }
2677
2678  @Override
2679  public MasterProtos.IsRpcThrottleEnabledResponse isRpcThrottleEnabled(RpcController controller,
2680      MasterProtos.IsRpcThrottleEnabledRequest request) throws ServiceException {
2681    try {
2682      master.checkInitialized();
2683      return master.getMasterQuotaManager().isRpcThrottleEnabled(request);
2684    } catch (Exception e) {
2685      throw new ServiceException(e);
2686    }
2687  }
2688
2689  @Override
2690  public SwitchExceedThrottleQuotaResponse switchExceedThrottleQuota(RpcController controller,
2691      SwitchExceedThrottleQuotaRequest request) throws ServiceException {
2692    try {
2693      master.checkInitialized();
2694      return master.getMasterQuotaManager().switchExceedThrottleQuota(request);
2695    } catch (Exception e) {
2696      throw new ServiceException(e);
2697    }
2698  }
2699
2700  @Override
2701  public FileArchiveNotificationResponse reportFileArchival(RpcController controller,
2702      FileArchiveNotificationRequest request) throws ServiceException {
2703    try {
2704      master.checkInitialized();
2705      if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) {
2706        return FileArchiveNotificationResponse.newBuilder().build();
2707      }
2708      master.getMasterQuotaManager().processFileArchivals(request, master.getConnection(),
2709          master.getConfiguration(), master.getFileSystem());
2710      return FileArchiveNotificationResponse.newBuilder().build();
2711    } catch (Exception e) {
2712      throw new ServiceException(e);
2713    }
2714  }
2715
2716  @Override
2717  public GrantResponse grant(RpcController controller, GrantRequest request)
2718      throws ServiceException {
2719    try {
2720      master.checkInitialized();
2721      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
2722        final UserPermission perm =
2723            ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
2724        boolean mergeExistingPermissions = request.getMergeExistingPermissions();
2725        master.cpHost.preGrant(perm, mergeExistingPermissions);
2726        try (Table table = master.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
2727          PermissionStorage.addUserPermission(getConfiguration(), perm, table,
2728            mergeExistingPermissions);
2729        }
2730        master.cpHost.postGrant(perm, mergeExistingPermissions);
2731        User caller = RpcServer.getRequestUser().orElse(null);
2732        if (AUDITLOG.isTraceEnabled()) {
2733          // audit log should store permission changes in addition to auth results
2734          String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
2735          AUDITLOG.trace("User {} (remote address: {}) granted permission {}", caller,
2736            remoteAddress, perm);
2737        }
2738        return GrantResponse.getDefaultInstance();
2739      } else {
2740        throw new DoNotRetryIOException(
2741            new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2742      }
2743    } catch (IOException ioe) {
2744      throw new ServiceException(ioe);
2745    }
2746  }
2747
2748  @Override
2749  public RevokeResponse revoke(RpcController controller, RevokeRequest request)
2750      throws ServiceException {
2751    try {
2752      master.checkInitialized();
2753      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
2754        final UserPermission userPermission =
2755            ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
2756        master.cpHost.preRevoke(userPermission);
2757        try (Table table = master.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
2758          PermissionStorage.removeUserPermission(master.getConfiguration(), userPermission, table);
2759        }
2760        master.cpHost.postRevoke(userPermission);
2761        User caller = RpcServer.getRequestUser().orElse(null);
2762        if (AUDITLOG.isTraceEnabled()) {
2763          // audit log should record all permission changes
2764          String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
2765          AUDITLOG.trace("User {} (remote address: {}) revoked permission {}", caller,
2766            remoteAddress, userPermission);
2767        }
2768        return RevokeResponse.getDefaultInstance();
2769      } else {
2770        throw new DoNotRetryIOException(
2771            new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2772      }
2773    } catch (IOException ioe) {
2774      throw new ServiceException(ioe);
2775    }
2776  }
2777
2778  @Override
2779  public GetUserPermissionsResponse getUserPermissions(RpcController controller,
2780      GetUserPermissionsRequest request) throws ServiceException {
2781    try {
2782      master.checkInitialized();
2783      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
2784        final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null;
2785        String namespace =
2786            request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null;
2787        TableName table =
2788            request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
2789        byte[] cf = request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null;
2790        byte[] cq =
2791            request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null;
2792        Type permissionType = request.hasType() ? request.getType() : null;
2793        master.getMasterCoprocessorHost().preGetUserPermissions(userName, namespace, table, cf, cq);
2794
2795        List<UserPermission> perms = null;
2796        if (permissionType == Type.Table) {
2797          boolean filter = (cf != null || userName != null) ? true : false;
2798          perms = PermissionStorage.getUserTablePermissions(master.getConfiguration(), table, cf,
2799            cq, userName, filter);
2800        } else if (permissionType == Type.Namespace) {
2801          perms = PermissionStorage.getUserNamespacePermissions(master.getConfiguration(),
2802            namespace, userName, userName != null ? true : false);
2803        } else {
2804          perms = PermissionStorage.getUserPermissions(master.getConfiguration(), null, null, null,
2805            userName, userName != null ? true : false);
2806          // Skip super users when filter user is specified
2807          if (userName == null) {
2808            // Adding superusers explicitly to the result set as PermissionStorage do not store
2809            // them. Also using acl as table name to be inline with the results of global admin and
2810            // will help in avoiding any leakage of information about being superusers.
2811            for (String user : Superusers.getSuperUsers()) {
2812              perms.add(new UserPermission(user,
2813                  Permission.newBuilder().withActions(Action.values()).build()));
2814            }
2815          }
2816        }
2817
2818        master.getMasterCoprocessorHost().postGetUserPermissions(userName, namespace, table, cf,
2819          cq);
2820        AccessControlProtos.GetUserPermissionsResponse response =
2821            ShadedAccessControlUtil.buildGetUserPermissionsResponse(perms);
2822        return response;
2823      } else {
2824        throw new DoNotRetryIOException(
2825            new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2826      }
2827    } catch (IOException ioe) {
2828      throw new ServiceException(ioe);
2829    }
2830  }
2831
2832  @Override
2833  public HasUserPermissionsResponse hasUserPermissions(RpcController controller,
2834      HasUserPermissionsRequest request) throws ServiceException {
2835    try {
2836      master.checkInitialized();
2837      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
2838        User caller = RpcServer.getRequestUser().orElse(null);
2839        String userName =
2840            request.hasUserName() ? request.getUserName().toStringUtf8() : caller.getShortName();
2841        List<Permission> permissions = new ArrayList<>();
2842        for (int i = 0; i < request.getPermissionCount(); i++) {
2843          permissions.add(ShadedAccessControlUtil.toPermission(request.getPermission(i)));
2844        }
2845        master.getMasterCoprocessorHost().preHasUserPermissions(userName, permissions);
2846        if (!caller.getShortName().equals(userName)) {
2847          List<String> groups = AccessChecker.getUserGroups(userName);
2848          caller = new InputUser(userName, groups.toArray(new String[groups.size()]));
2849        }
2850        List<Boolean> hasUserPermissions = new ArrayList<>();
2851        if (getAccessChecker() != null) {
2852          for (Permission permission : permissions) {
2853            boolean hasUserPermission =
2854                getAccessChecker().hasUserPermission(caller, "hasUserPermissions", permission);
2855            hasUserPermissions.add(hasUserPermission);
2856          }
2857        } else {
2858          for (int i = 0; i < permissions.size(); i++) {
2859            hasUserPermissions.add(true);
2860          }
2861        }
2862        master.getMasterCoprocessorHost().postHasUserPermissions(userName, permissions);
2863        HasUserPermissionsResponse.Builder builder =
2864            HasUserPermissionsResponse.newBuilder().addAllHasUserPermission(hasUserPermissions);
2865        return builder.build();
2866      } else {
2867        throw new DoNotRetryIOException(
2868            new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
2869      }
2870    } catch (IOException ioe) {
2871      throw new ServiceException(ioe);
2872    }
2873  }
2874
2875  private boolean containMetaWals(ServerName serverName) throws IOException {
2876    Path logDir = new Path(master.getWALRootDir(),
2877        AbstractFSWALProvider.getWALDirectoryName(serverName.toString()));
2878    Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
2879    Path checkDir = master.getFileSystem().exists(splitDir) ? splitDir : logDir;
2880    try {
2881      return master.getFileSystem().listStatus(checkDir, META_FILTER).length > 0;
2882    } catch (FileNotFoundException fnfe) {
2883      // If no files, then we don't contain metas; was failing schedule of
2884      // SCP because this was FNFE'ing when no server dirs ('Unknown Server').
2885      LOG.warn("No dir for WALs for {}; continuing", serverName.toString());
2886      return false;
2887    }
2888  }
2889
2890  private boolean shouldSubmitSCP(ServerName serverName) {
2891    // check if there is already a SCP of this server running
2892    List<Procedure<MasterProcedureEnv>> procedures =
2893        master.getMasterProcedureExecutor().getProcedures();
2894    for (Procedure<MasterProcedureEnv> procedure : procedures) {
2895      if (procedure instanceof ServerCrashProcedure) {
2896        if (serverName.compareTo(((ServerCrashProcedure) procedure).getServerName()) == 0
2897            && !procedure.isFinished()) {
2898          LOG.info("there is already a SCP of this server {} running, pid {}", serverName,
2899            procedure.getProcId());
2900          return false;
2901        }
2902      }
2903    }
2904    return true;
2905  }
2906
2907  @Override
2908  public GetClusterIdResponse getClusterId(RpcController rpcController, GetClusterIdRequest request)
2909      throws ServiceException {
2910    GetClusterIdResponse.Builder resp = GetClusterIdResponse.newBuilder();
2911    String clusterId = master.getClusterId();
2912    if (clusterId != null) {
2913      resp.setClusterId(clusterId);
2914    }
2915    return resp.build();
2916  }
2917
2918  @Override
2919  public GetActiveMasterResponse getActiveMaster(RpcController rpcController,
2920      GetActiveMasterRequest request) throws ServiceException {
2921    GetActiveMasterResponse.Builder resp = GetActiveMasterResponse.newBuilder();
2922    Optional<ServerName> serverName = master.getActiveMaster();
2923    serverName.ifPresent(name -> resp.setServerName(ProtobufUtil.toServerName(name)));
2924    return resp.build();
2925  }
2926
2927  @Override
2928  public GetMetaRegionLocationsResponse getMetaRegionLocations(RpcController rpcController,
2929      GetMetaRegionLocationsRequest request) throws ServiceException {
2930    GetMetaRegionLocationsResponse.Builder response = GetMetaRegionLocationsResponse.newBuilder();
2931    Optional<List<HRegionLocation>> metaLocations =
2932        master.getMetaRegionLocationCache().getMetaRegionLocations();
2933    metaLocations.ifPresent(hRegionLocations -> hRegionLocations.forEach(
2934      location -> response.addMetaLocations(ProtobufUtil.toRegionLocation(location))));
2935    return response.build();
2936  }
2937}