001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.master; 019 020import java.io.IOException; 021import java.lang.reflect.InvocationTargetException; 022import java.lang.reflect.Method; 023import java.net.InetAddress; 024import java.util.ArrayList; 025import java.util.Collections; 026import java.util.HashMap; 027import java.util.HashSet; 028import java.util.List; 029import java.util.Map; 030import java.util.Map.Entry; 031import java.util.Optional; 032import java.util.Set; 033import java.util.concurrent.CompletableFuture; 034import java.util.concurrent.ExecutionException; 035import java.util.concurrent.ThreadLocalRandom; 036import java.util.stream.Collectors; 037import org.apache.hadoop.conf.Configuration; 038import org.apache.hadoop.hbase.ClusterMetricsBuilder; 039import org.apache.hadoop.hbase.DoNotRetryIOException; 040import org.apache.hadoop.hbase.HBaseRpcServicesBase; 041import org.apache.hadoop.hbase.HConstants; 042import org.apache.hadoop.hbase.MasterNotRunningException; 043import org.apache.hadoop.hbase.MetaTableAccessor; 044import org.apache.hadoop.hbase.NamespaceDescriptor; 045import org.apache.hadoop.hbase.ServerMetrics; 046import 
org.apache.hadoop.hbase.ServerMetricsBuilder; 047import org.apache.hadoop.hbase.ServerName; 048import org.apache.hadoop.hbase.TableName; 049import org.apache.hadoop.hbase.UnknownRegionException; 050import org.apache.hadoop.hbase.client.BalanceRequest; 051import org.apache.hadoop.hbase.client.BalanceResponse; 052import org.apache.hadoop.hbase.client.MasterSwitchType; 053import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; 054import org.apache.hadoop.hbase.client.RegionInfo; 055import org.apache.hadoop.hbase.client.Table; 056import org.apache.hadoop.hbase.client.TableDescriptor; 057import org.apache.hadoop.hbase.client.TableState; 058import org.apache.hadoop.hbase.client.VersionInfoUtil; 059import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; 060import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; 061import org.apache.hadoop.hbase.errorhandling.ForeignException; 062import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; 063import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; 064import org.apache.hadoop.hbase.ipc.PriorityFunction; 065import org.apache.hadoop.hbase.ipc.QosPriority; 066import org.apache.hadoop.hbase.ipc.RpcServer; 067import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; 068import org.apache.hadoop.hbase.ipc.ServerRpcController; 069import org.apache.hadoop.hbase.master.assignment.AssignmentManager; 070import org.apache.hadoop.hbase.master.assignment.RegionStateNode; 071import org.apache.hadoop.hbase.master.assignment.RegionStates; 072import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; 073import org.apache.hadoop.hbase.master.hbck.HbckChore; 074import org.apache.hadoop.hbase.master.janitor.MetaFixer; 075import org.apache.hadoop.hbase.master.locking.LockProcedure; 076import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; 077import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; 078import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable; 079import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; 080import org.apache.hadoop.hbase.master.replication.AbstractPeerNoLockProcedure; 081import org.apache.hadoop.hbase.mob.MobUtils; 082import org.apache.hadoop.hbase.namequeues.BalancerDecisionDetails; 083import org.apache.hadoop.hbase.namequeues.BalancerRejectionDetails; 084import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; 085import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest; 086import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse; 087import org.apache.hadoop.hbase.net.Address; 088import org.apache.hadoop.hbase.procedure.MasterProcedureManager; 089import org.apache.hadoop.hbase.procedure2.LockType; 090import org.apache.hadoop.hbase.procedure2.LockedResource; 091import org.apache.hadoop.hbase.procedure2.Procedure; 092import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; 093import org.apache.hadoop.hbase.procedure2.ProcedureUtil; 094import org.apache.hadoop.hbase.procedure2.RemoteProcedureException; 095import org.apache.hadoop.hbase.quotas.MasterQuotaManager; 096import org.apache.hadoop.hbase.quotas.QuotaObserverChore; 097import org.apache.hadoop.hbase.quotas.QuotaUtil; 098import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot; 099import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory; 100import org.apache.hadoop.hbase.replication.ReplicationException; 101import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; 102import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; 103import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; 104import org.apache.hadoop.hbase.rsgroup.RSGroupUtil; 105import org.apache.hadoop.hbase.security.Superusers; 106import org.apache.hadoop.hbase.security.User; 107import org.apache.hadoop.hbase.security.access.AccessChecker; 108import 
org.apache.hadoop.hbase.security.access.AccessChecker.InputUser; 109import org.apache.hadoop.hbase.security.access.AccessController; 110import org.apache.hadoop.hbase.security.access.Permission; 111import org.apache.hadoop.hbase.security.access.Permission.Action; 112import org.apache.hadoop.hbase.security.access.PermissionStorage; 113import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil; 114import org.apache.hadoop.hbase.security.access.UserPermission; 115import org.apache.hadoop.hbase.security.visibility.VisibilityController; 116import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; 117import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; 118import org.apache.hadoop.hbase.util.Bytes; 119import org.apache.hadoop.hbase.util.DNS; 120import org.apache.hadoop.hbase.util.DNS.ServerType; 121import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; 122import org.apache.hadoop.hbase.util.ForeignExceptionUtil; 123import org.apache.hadoop.hbase.util.Pair; 124import org.apache.hadoop.hbase.zookeeper.ZKWatcher; 125import org.apache.yetus.audience.InterfaceAudience; 126import org.slf4j.Logger; 127import org.slf4j.LoggerFactory; 128 129import org.apache.hbase.thirdparty.com.google.common.collect.Sets; 130import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; 131import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; 132import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.ServiceDescriptor; 133import org.apache.hbase.thirdparty.com.google.protobuf.Message; 134import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; 135import org.apache.hbase.thirdparty.com.google.protobuf.Service; 136import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; 137import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; 138 139import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; 140import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; 141import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos; 142import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.AccessControlService; 143import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsRequest; 144import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse; 145import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest; 146import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantResponse; 147import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest; 148import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsResponse; 149import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type; 150import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest; 151import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeResponse; 152import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; 153import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest; 154import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse; 155import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest; 156import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse; 157import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; 158import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; 159import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; 160import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; 161import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest; 162import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse; 163import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest; 164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; 165import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; 166import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; 167import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest; 168import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse; 169import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; 170import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; 171import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; 172import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; 173import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest; 174import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse; 175import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest; 176import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse; 177import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest; 178import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse; 179import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest; 180import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse; 
181import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; 182import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; 183import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest; 184import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse; 185import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest; 186import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse; 187import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; 188import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; 189import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest; 190import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse; 191import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; 192import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; 193import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; 194import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; 195import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair; 196import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; 197import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; 198import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; 199import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; 200import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest; 201import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse; 202import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest; 203import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse; 204import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService; 205import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; 206import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; 207import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; 208import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; 209import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; 210import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; 211import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; 212import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest; 213import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse; 214import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; 215import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; 216import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; 217import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; 218import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest; 219import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse; 220import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; 221import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; 222import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; 223import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; 224import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; 225import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse; 226import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; 227import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; 228import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; 229import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; 230import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest; 231import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse; 232import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; 233import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; 234import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; 235import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; 236import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaRequest; 237import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaResponse; 238import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreRequest; 239import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreResponse; 240import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableRequest; 241import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableResponse; 242import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest; 243import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse; 
244import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; 245import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; 246import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest; 247import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse; 248import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; 249import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; 250import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; 251import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; 252import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest; 253import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse; 254import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; 255import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; 256import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; 257import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; 258import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; 259import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse; 260import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest; 261import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse; 262import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService; 263import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; 264import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; 265import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest; 266import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse; 267import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest; 268import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse; 269import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest; 270import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse; 271import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest; 272import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse; 273import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest; 274import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse; 275import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest; 276import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; 277import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest; 278import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse; 279import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; 280import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; 281import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest; 282import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse; 283import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest; 284import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse; 285import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; 286import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse; 287import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesRequest; 288import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesResponse; 289import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest; 290import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse; 291import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByStateRequest; 292import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByStateResponse; 293import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest; 294import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse; 295import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByStateRequest; 296import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByStateResponse; 297import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest; 298import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest; 299import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse; 300import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; 301import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; 302import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse; 303import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest; 304import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse; 305import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerRequest; 306import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerResponse; 307import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest; 308import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse; 309import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest; 310import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse; 311import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerRequest; 312import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerResponse; 313import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest; 314import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse; 315import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest; 316import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse; 317import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest; 318import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse; 319import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest; 320import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse; 321import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState; 322import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; 323import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; 324import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest; 325import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse; 326import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest; 327import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse; 328import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreRequest; 329import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreResponse; 330import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest; 331import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse; 332import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest; 333import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse; 334import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest; 335import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse; 336import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; 337import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse; 338import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; 339import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse; 340import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest; 341import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaResponse; 342import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest; 343import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse; 344import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest; 345import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse; 346import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest; 347import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest; 348import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse; 349import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest; 350import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse; 351import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest; 352import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse; 353import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest; 354import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse; 355import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaRequest; 356import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaResponse; 357import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleRequest; 358import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleResponse; 359import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; 360import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; 361import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; 362import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse; 363import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest; 364import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse; 365import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.NamespaceQuotaSnapshot; 366import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.TableQuotaSnapshot; 367import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest; 368import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse; 369import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes; 370import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest; 371import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse; 372import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest; 373import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse; 374import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest; 375import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse; 376import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetConfiguredNamespacesAndTablesInRSGroupRequest; 377import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetConfiguredNamespacesAndTablesInRSGroupResponse; 378import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest; 379import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse; 380import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest; 381import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse; 382import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest; 383import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse; 384import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest; 385import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse; 386import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListTablesInRSGroupRequest; 387import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListTablesInRSGroupResponse; 388import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersRequest; 389import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersResponse; 390import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest; 391import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse; 392import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest; 393import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse; 394import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RenameRSGroupRequest; 395import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RenameRSGroupResponse; 396import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigRequest; 397import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigResponse; 398import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RecentLogs; 399import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos; 400import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest; 401import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse; 402import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; 403import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; 404import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLiveRegionServersRequest; 405import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLiveRegionServersResponse; 406import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; 407import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse; 408import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; 409import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; 410import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; 411import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse; 412import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest; 413import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse; 414import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult; 415import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest; 416import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse; 417import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest; 418import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse; 419import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; 420import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; 421import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ClientMetaService; 422import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest; 423import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse; 424import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest; 425import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse; 426import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest; 427import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse; 428import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest; 429import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; 430import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerModificationProceduresRequest; 431import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerModificationProceduresResponse; 432import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerStateRequest; 433import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerStateResponse; 434import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.IsReplicationPeerModificationEnabledRequest; 435import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.IsReplicationPeerModificationEnabledResponse; 436import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest; 437import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse; 438import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest; 439import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse; 440import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerModificationSwitchRequest; 441import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerModificationSwitchResponse; 442import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState; 443import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateRequest; 444import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse; 445import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; 446import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse; 447import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; 448import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService; 449 450/** 451 * Implements the master RPC services. 
 */
@InterfaceAudience.Private
public class MasterRpcServices extends HBaseRpcServicesBase<HMaster>
  implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
  LockService.BlockingInterface, HbckService.BlockingInterface {

  private static final Logger LOG = LoggerFactory.getLogger(MasterRpcServices.class.getName());
  // Separate audit logger so security-relevant events can be routed/filtered independently.
  private static final Logger AUDITLOG =
    LoggerFactory.getLogger("SecurityLogger." + MasterRpcServices.class.getName());

  /** RPC scheduler to use for the master. */
  public static final String MASTER_RPC_SCHEDULER_FACTORY_CLASS =
    "hbase.master.rpc.scheduler.factory.class";

  /**
   * @return Subset of configuration to pass initializing regionservers: e.g. the filesystem to use
   *         and root directory to use.
   */
  private RegionServerStartupResponse.Builder createConfigurationSubset() {
    RegionServerStartupResponse.Builder resp =
      addConfig(RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR);
    resp = addConfig(resp, "fs.defaultFS");
    return addConfig(resp, "hbase.master.info.port");
  }

  /**
   * Copies one key/value pair from this master's configuration into the startup response builder.
   * @param resp builder being populated for the regionserver
   * @param key  configuration key whose current value is sent to the regionserver
   * @return the same builder, for chaining
   */
  private RegionServerStartupResponse.Builder
    addConfig(final RegionServerStartupResponse.Builder resp, final String key) {
    NameStringPair.Builder entry =
      NameStringPair.newBuilder().setName(key).setValue(server.getConfiguration().get(key));
    resp.addMapEntries(entry.build());
    return resp;
  }

  /**
   * Constructs the RPC services for the given master.
   * @param m the master this service fronts
   * @throws IOException if the underlying RPC services fail to initialize
   */
  public MasterRpcServices(HMaster m) throws IOException {
    super(m, m.getProcessName());
  }

  @Override
  protected boolean defaultReservoirEnabled() {
    // Master handles mostly small admin/meta RPCs; byte-buffer reservoir is off by default.
    return false;
  }

  @Override
  protected ServerType getDNSServerType() {
    return DNS.ServerType.MASTER;
  }

  @Override
  protected String getHostname(Configuration conf, String defaultHostname) {
    // Allow operators to pin the RPC bind hostname; otherwise use the resolved default.
    return conf.get("hbase.master.ipc.address", defaultHostname);
  }

  @Override
  protected String getPortConfigName() {
    return HConstants.MASTER_PORT;
  }

  @Override
  protected int getDefaultPort() {
    return HConstants.DEFAULT_MASTER_PORT;
  }

  @Override
  protected Class<?> getRpcSchedulerFactoryClass(Configuration conf) {
    // Pluggable scheduler factory; falls back to the simple scheduler when unconfigured.
    return conf.getClass(MASTER_RPC_SCHEDULER_FACTORY_CLASS, SimpleRpcSchedulerFactory.class);
  }

  @Override
  protected PriorityFunction createPriority() {
    return new MasterAnnotationReadingPriorityFunction(this);
  }

  /**
   * Checks for the following pre-checks in order:
   * <ol>
   * <li>Master is initialized</li>
   * <li>Rpc caller has admin permissions</li>
   * </ol>
   * @param requestName name of rpc request. Used in reporting failures to provide context.
   * @throws ServiceException If any of the above listed pre-check fails.
   */
  private void rpcPreCheck(String requestName) throws ServiceException {
    try {
      server.checkInitialized();
      requirePermission(requestName, Permission.Action.ADMIN);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Whether a balancer-switch flip waits for in-flight balancer work (SYNC) or not (ASYNC). */
  enum BalanceSwitchMode {
    SYNC,
    ASYNC
  }

  /**
   * Assigns balancer switch according to BalanceSwitchMode
   * @param b    new balancer switch
   * @param mode BalanceSwitchMode
   * @return old balancer switch
   */
  boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException {
    boolean oldValue = server.loadBalancerStateStore.get();
    boolean newValue = b;
    try {
      if (server.cpHost != null) {
        server.cpHost.preBalanceSwitch(newValue);
      }
      if (mode == BalanceSwitchMode.SYNC) {
        // SYNC: take the balancer's monitor so the flip is serialized with a running balance.
        synchronized (server.getLoadBalancer()) {
          server.loadBalancerStateStore.set(newValue);
        }
      } else {
        server.loadBalancerStateStore.set(newValue);
      }
      LOG.info(server.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue);
      if (server.cpHost != null) {
        server.cpHost.postBalanceSwitch(oldValue, newValue);
      }
      server.getLoadBalancer().updateBalancerStatus(newValue);
    } catch (IOException ioe) {
      // NOTE(review): failures are logged and swallowed; the caller still gets the old value.
      LOG.warn("Error flipping balance switch", ioe);
    }
    return oldValue;
  }

  /** Flips the balancer switch synchronously; returns the previous value. */
  boolean synchronousBalanceSwitch(final boolean b) throws IOException {
    return switchBalancer(b, BalanceSwitchMode.SYNC);
  }

  /** Returns list of blocking services and their security info classes that this server supports */
  @Override
  protected List<BlockingServiceAndInterface> getServices() {
    List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
    bssi.add(new BlockingServiceAndInterface(MasterService.newReflectiveBlockingService(this),
      MasterService.BlockingInterface.class));
    bssi.add(
      new BlockingServiceAndInterface(RegionServerStatusService.newReflectiveBlockingService(this),
        RegionServerStatusService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this),
      LockService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(HbckService.newReflectiveBlockingService(this),
      HbckService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this),
      ClientMetaService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(AdminService.newReflectiveBlockingService(this),
      AdminService.BlockingInterface.class));
    return bssi;
  }

  /** Starts the RPC services using the given ZooKeeper watcher. */
  void start(ZKWatcher zkWatcher) {
    internalStart(zkWatcher);
  }

  /** Stops the RPC services. */
  void stop() {
    internalStop();
  }

  /**
   * Returns the last flushed sequence id for the region named in the request. Only requires the
   * services to be started (not full master initialization).
   */
  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller,
    GetLastFlushedSequenceIdRequest request) throws ServiceException {
    try {
      server.checkServiceStarted();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    byte[] encodedRegionName = request.getRegionName().toByteArray();
    RegionStoreSequenceIds ids =
      server.getServerManager().getLastFlushedSequenceId(encodedRegionName);
    return ResponseConverter.buildGetLastFlushedSequenceIdResponse(ids);
  }

  /**
   * Periodic load report from a regionserver: records the new server metrics, updates the
   * assignment manager's view of online regions, and bumps master request metrics by the delta
   * against the previously recorded load.
   */
  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public RegionServerReportResponse regionServerReport(RpcController controller,
    RegionServerReportRequest request) throws ServiceException {
    try {
      server.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      ClusterStatusProtos.ServerLoad sl = request.getLoad();
      ServerName serverName = ProtobufUtil.toServerName(request.getServer());
      ServerMetrics oldLoad = server.getServerManager().getLoad(serverName);
      ServerMetrics newLoad =
        ServerMetricsBuilder.toServerMetrics(serverName, versionNumber, version, sl);
      server.getServerManager().regionServerReport(serverName, newLoad);
      server.getAssignmentManager().reportOnlineRegions(serverName,
        newLoad.getRegionMetrics().keySet());
      if (sl != null && server.metricsMaster != null) {
        // Up our metrics.
        server.metricsMaster.incrementRequests(
          sl.getTotalNumberOfRequests() - (oldLoad != null ? oldLoad.getRequestCount() : 0));
        server.metricsMaster.incrementReadRequests(
          sl.getReadRequestsCount() - (oldLoad != null ? oldLoad.getReadRequestsCount() : 0));
        server.metricsMaster.incrementWriteRequests(
          sl.getWriteRequestsCount() - (oldLoad != null ? oldLoad.getWriteRequestsCount() : 0));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return RegionServerReportResponse.newBuilder().build();
  }

  /**
   * First contact from a starting regionserver: registers it with the server manager and returns
   * the configuration subset it needs (filesystem, root dir, info port) plus the hostname the
   * master saw it connect from.
   */
  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public RegionServerStartupResponse regionServerStartup(RpcController controller,
    RegionServerStartupRequest request) throws ServiceException {
    // Register with server manager
    try {
      server.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      InetAddress ia = server.getRemoteInetAddress(request.getPort(), request.getServerStartCode());
      // if regionserver passed hostname to use,
      // then use it instead of doing a reverse DNS lookup
      ServerName rs =
        server.getServerManager().regionServerStartup(request, versionNumber, version, ia);

      // Send back some config info
      RegionServerStartupResponse.Builder resp = createConfigurationSubset();
      NameStringPair.Builder entry = NameStringPair.newBuilder()
        .setName(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER).setValue(rs.getHostname());
      resp.addMapEntries(entry.build());

      return resp.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Records a fatal error reported by a regionserver: logs it and appends it to the master's
   * in-memory list of regionserver fatals.
   */
  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public ReportRSFatalErrorResponse reportRSFatalError(RpcController controller,
    ReportRSFatalErrorRequest request) throws ServiceException {
    String errorText = request.getErrorMessage();
    ServerName sn = ProtobufUtil.toServerName(request.getServer());
    String msg = sn + " reported a fatal error:\n" + errorText;
    LOG.warn(msg);
    server.rsFatals.add(msg);
    return ReportRSFatalErrorResponse.newBuilder().build();
  }
704 705 @Override 706 public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req) 707 throws ServiceException { 708 try { 709 long procId = server.addColumn(ProtobufUtil.toTableName(req.getTableName()), 710 ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(), 711 req.getNonce()); 712 if (procId == -1) { 713 // This mean operation was not performed in server, so do not set any procId 714 return AddColumnResponse.newBuilder().build(); 715 } else { 716 return AddColumnResponse.newBuilder().setProcId(procId).build(); 717 } 718 } catch (IOException ioe) { 719 throw new ServiceException(ioe); 720 } 721 } 722 723 @Override 724 public AssignRegionResponse assignRegion(RpcController controller, AssignRegionRequest req) 725 throws ServiceException { 726 try { 727 server.checkInitialized(); 728 729 final RegionSpecifierType type = req.getRegion().getType(); 730 if (type != RegionSpecifierType.REGION_NAME) { 731 LOG.warn("assignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME 732 + " actual: " + type); 733 } 734 735 final byte[] regionName = req.getRegion().getValue().toByteArray(); 736 final RegionInfo regionInfo = server.getAssignmentManager().getRegionInfo(regionName); 737 if (regionInfo == null) { 738 throw new UnknownRegionException(Bytes.toStringBinary(regionName)); 739 } 740 741 final AssignRegionResponse arr = AssignRegionResponse.newBuilder().build(); 742 if (server.cpHost != null) { 743 server.cpHost.preAssign(regionInfo); 744 } 745 LOG.info(server.getClientIdAuditPrefix() + " assign " + regionInfo.getRegionNameAsString()); 746 server.getAssignmentManager().assign(regionInfo); 747 if (server.cpHost != null) { 748 server.cpHost.postAssign(regionInfo); 749 } 750 return arr; 751 } catch (IOException ioe) { 752 throw new ServiceException(ioe); 753 } 754 } 755 756 @Override 757 public MasterProtos.BalanceResponse balance(RpcController controller, 758 MasterProtos.BalanceRequest request) throws 
ServiceException { 759 try { 760 return ProtobufUtil.toBalanceResponse(server.balance(ProtobufUtil.toBalanceRequest(request))); 761 } catch (IOException ex) { 762 throw new ServiceException(ex); 763 } 764 } 765 766 @Override 767 public CreateNamespaceResponse createNamespace(RpcController controller, 768 CreateNamespaceRequest request) throws ServiceException { 769 try { 770 long procId = 771 server.createNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()), 772 request.getNonceGroup(), request.getNonce()); 773 return CreateNamespaceResponse.newBuilder().setProcId(procId).build(); 774 } catch (IOException e) { 775 throw new ServiceException(e); 776 } 777 } 778 779 @Override 780 public CreateTableResponse createTable(RpcController controller, CreateTableRequest req) 781 throws ServiceException { 782 TableDescriptor tableDescriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema()); 783 byte[][] splitKeys = ProtobufUtil.getSplitKeysArray(req); 784 try { 785 long procId = 786 server.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce()); 787 LOG.info(server.getClientIdAuditPrefix() + " procedure request for creating table: " 788 + req.getTableSchema().getTableName() + " procId is: " + procId); 789 return CreateTableResponse.newBuilder().setProcId(procId).build(); 790 } catch (IOException ioe) { 791 throw new ServiceException(ioe); 792 } 793 } 794 795 @Override 796 public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req) 797 throws ServiceException { 798 try { 799 long procId = server.deleteColumn(ProtobufUtil.toTableName(req.getTableName()), 800 req.getColumnName().toByteArray(), req.getNonceGroup(), req.getNonce()); 801 if (procId == -1) { 802 // This mean operation was not performed in server, so do not set any procId 803 return DeleteColumnResponse.newBuilder().build(); 804 } else { 805 return DeleteColumnResponse.newBuilder().setProcId(procId).build(); 806 } 807 } catch 
(IOException ioe) { 808 throw new ServiceException(ioe); 809 } 810 } 811 812 @Override 813 public DeleteNamespaceResponse deleteNamespace(RpcController controller, 814 DeleteNamespaceRequest request) throws ServiceException { 815 try { 816 long procId = server.deleteNamespace(request.getNamespaceName(), request.getNonceGroup(), 817 request.getNonce()); 818 return DeleteNamespaceResponse.newBuilder().setProcId(procId).build(); 819 } catch (IOException e) { 820 throw new ServiceException(e); 821 } 822 } 823 824 /** 825 * Execute Delete Snapshot operation. 826 * @return DeleteSnapshotResponse (a protobuf wrapped void) if the snapshot existed and was 827 * deleted properly. 828 * @throws ServiceException wrapping SnapshotDoesNotExistException if specified snapshot did not 829 * exist. 830 */ 831 @Override 832 public DeleteSnapshotResponse deleteSnapshot(RpcController controller, 833 DeleteSnapshotRequest request) throws ServiceException { 834 try { 835 server.checkInitialized(); 836 server.snapshotManager.checkSnapshotSupport(); 837 838 LOG.info(server.getClientIdAuditPrefix() + " delete " + request.getSnapshot()); 839 server.snapshotManager.deleteSnapshot(request.getSnapshot()); 840 return DeleteSnapshotResponse.newBuilder().build(); 841 } catch (IOException e) { 842 throw new ServiceException(e); 843 } 844 } 845 846 @Override 847 public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request) 848 throws ServiceException { 849 try { 850 long procId = server.deleteTable(ProtobufUtil.toTableName(request.getTableName()), 851 request.getNonceGroup(), request.getNonce()); 852 return DeleteTableResponse.newBuilder().setProcId(procId).build(); 853 } catch (IOException ioe) { 854 throw new ServiceException(ioe); 855 } 856 } 857 858 @Override 859 public TruncateTableResponse truncateTable(RpcController controller, TruncateTableRequest request) 860 throws ServiceException { 861 try { 862 long procId = 
server.truncateTable(ProtobufUtil.toTableName(request.getTableName()), 863 request.getPreserveSplits(), request.getNonceGroup(), request.getNonce()); 864 return TruncateTableResponse.newBuilder().setProcId(procId).build(); 865 } catch (IOException ioe) { 866 throw new ServiceException(ioe); 867 } 868 } 869 870 @Override 871 public DisableTableResponse disableTable(RpcController controller, DisableTableRequest request) 872 throws ServiceException { 873 try { 874 long procId = server.disableTable(ProtobufUtil.toTableName(request.getTableName()), 875 request.getNonceGroup(), request.getNonce()); 876 return DisableTableResponse.newBuilder().setProcId(procId).build(); 877 } catch (IOException ioe) { 878 throw new ServiceException(ioe); 879 } 880 } 881 882 @Override 883 public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c, 884 EnableCatalogJanitorRequest req) throws ServiceException { 885 rpcPreCheck("enableCatalogJanitor"); 886 return EnableCatalogJanitorResponse.newBuilder() 887 .setPrevValue(server.catalogJanitorChore.setEnabled(req.getEnable())).build(); 888 } 889 890 @Override 891 public SetCleanerChoreRunningResponse setCleanerChoreRunning(RpcController c, 892 SetCleanerChoreRunningRequest req) throws ServiceException { 893 rpcPreCheck("setCleanerChoreRunning"); 894 895 boolean prevValue = 896 server.getLogCleaner().getEnabled() && server.getHFileCleaner().getEnabled(); 897 server.getLogCleaner().setEnabled(req.getOn()); 898 for (HFileCleaner hFileCleaner : server.getHFileCleaners()) { 899 hFileCleaner.setEnabled(req.getOn()); 900 } 901 return SetCleanerChoreRunningResponse.newBuilder().setPrevValue(prevValue).build(); 902 } 903 904 @Override 905 public EnableTableResponse enableTable(RpcController controller, EnableTableRequest request) 906 throws ServiceException { 907 try { 908 long procId = server.enableTable(ProtobufUtil.toTableName(request.getTableName()), 909 request.getNonceGroup(), request.getNonce()); 910 return 
EnableTableResponse.newBuilder().setProcId(procId).build(); 911 } catch (IOException ioe) { 912 throw new ServiceException(ioe); 913 } 914 } 915 916 @Override 917 public MergeTableRegionsResponse mergeTableRegions(RpcController c, 918 MergeTableRegionsRequest request) throws ServiceException { 919 try { 920 server.checkInitialized(); 921 } catch (IOException ioe) { 922 throw new ServiceException(ioe); 923 } 924 925 RegionStates regionStates = server.getAssignmentManager().getRegionStates(); 926 927 RegionInfo[] regionsToMerge = new RegionInfo[request.getRegionCount()]; 928 for (int i = 0; i < request.getRegionCount(); i++) { 929 final byte[] encodedNameOfRegion = request.getRegion(i).getValue().toByteArray(); 930 if (request.getRegion(i).getType() != RegionSpecifierType.ENCODED_REGION_NAME) { 931 LOG.warn("MergeRegions specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME 932 + " actual: region " + i + " =" + request.getRegion(i).getType()); 933 } 934 RegionState regionState = regionStates.getRegionState(Bytes.toString(encodedNameOfRegion)); 935 if (regionState == null) { 936 throw new ServiceException( 937 new UnknownRegionException(Bytes.toStringBinary(encodedNameOfRegion))); 938 } 939 regionsToMerge[i] = regionState.getRegion(); 940 } 941 942 try { 943 long procId = server.mergeRegions(regionsToMerge, request.getForcible(), 944 request.getNonceGroup(), request.getNonce()); 945 return MergeTableRegionsResponse.newBuilder().setProcId(procId).build(); 946 } catch (IOException ioe) { 947 throw new ServiceException(ioe); 948 } 949 } 950 951 @Override 952 public SplitTableRegionResponse splitRegion(final RpcController controller, 953 final SplitTableRegionRequest request) throws ServiceException { 954 try { 955 long procId = server.splitRegion(ProtobufUtil.toRegionInfo(request.getRegionInfo()), 956 request.hasSplitRow() ? 
request.getSplitRow().toByteArray() : null, request.getNonceGroup(), 957 request.getNonce()); 958 return SplitTableRegionResponse.newBuilder().setProcId(procId).build(); 959 } catch (IOException ie) { 960 throw new ServiceException(ie); 961 } 962 } 963 964 @Override 965 public MasterProtos.TruncateRegionResponse truncateRegion(RpcController controller, 966 final MasterProtos.TruncateRegionRequest request) throws ServiceException { 967 try { 968 long procId = server.truncateRegion(ProtobufUtil.toRegionInfo(request.getRegionInfo()), 969 request.getNonceGroup(), request.getNonce()); 970 return MasterProtos.TruncateRegionResponse.newBuilder().setProcId(procId).build(); 971 } catch (IOException ie) { 972 throw new ServiceException(ie); 973 } 974 } 975 976 @Override 977 public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller, 978 final ClientProtos.CoprocessorServiceRequest request) throws ServiceException { 979 rpcPreCheck("execMasterService"); 980 try { 981 ServerRpcController execController = new ServerRpcController(); 982 ClientProtos.CoprocessorServiceCall call = request.getCall(); 983 String serviceName = call.getServiceName(); 984 String methodName = call.getMethodName(); 985 if (!server.coprocessorServiceHandlers.containsKey(serviceName)) { 986 throw new UnknownProtocolException(null, 987 "No registered Master Coprocessor Endpoint found for " + serviceName 988 + ". 
Has it been enabled?"); 989 } 990 991 Service service = server.coprocessorServiceHandlers.get(serviceName); 992 ServiceDescriptor serviceDesc = service.getDescriptorForType(); 993 MethodDescriptor methodDesc = 994 CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc); 995 996 Message execRequest = CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest()); 997 final Message.Builder responseBuilder = 998 service.getResponsePrototype(methodDesc).newBuilderForType(); 999 service.callMethod(methodDesc, execController, execRequest, (message) -> { 1000 if (message != null) { 1001 responseBuilder.mergeFrom(message); 1002 } 1003 }); 1004 Message execResult = responseBuilder.build(); 1005 if (execController.getFailedOn() != null) { 1006 throw execController.getFailedOn(); 1007 } 1008 1009 String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse(""); 1010 User caller = RpcServer.getRequestUser().orElse(null); 1011 AUDITLOG.info("User {} (remote address: {}) master service request for {}.{}", caller, 1012 remoteAddress, serviceName, methodName); 1013 1014 return CoprocessorRpcUtils.getResponse(execResult, HConstants.EMPTY_BYTE_ARRAY); 1015 } catch (IOException ie) { 1016 throw new ServiceException(ie); 1017 } 1018 } 1019 1020 /** 1021 * Triggers an asynchronous attempt to run a distributed procedure. 
{@inheritDoc} 1022 */ 1023 @Override 1024 public ExecProcedureResponse execProcedure(RpcController controller, ExecProcedureRequest request) 1025 throws ServiceException { 1026 try { 1027 server.checkInitialized(); 1028 ProcedureDescription desc = request.getProcedure(); 1029 MasterProcedureManager mpm = 1030 server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature()); 1031 if (mpm == null) { 1032 throw new ServiceException( 1033 new DoNotRetryIOException("The procedure is not registered: " + desc.getSignature())); 1034 } 1035 LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature()); 1036 mpm.checkPermissions(desc, getAccessChecker(), RpcServer.getRequestUser().orElse(null)); 1037 mpm.execProcedure(desc); 1038 // send back the max amount of time the client should wait for the procedure 1039 // to complete 1040 long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME; 1041 return ExecProcedureResponse.newBuilder().setExpectedTimeout(waitTime).build(); 1042 } catch (ForeignException e) { 1043 throw new ServiceException(e.getCause()); 1044 } catch (IOException e) { 1045 throw new ServiceException(e); 1046 } 1047 } 1048 1049 /** 1050 * Triggers a synchronous attempt to run a distributed procedure and sets return data in response. 
1051 * {@inheritDoc} 1052 */ 1053 @Override 1054 public ExecProcedureResponse execProcedureWithRet(RpcController controller, 1055 ExecProcedureRequest request) throws ServiceException { 1056 rpcPreCheck("execProcedureWithRet"); 1057 try { 1058 ProcedureDescription desc = request.getProcedure(); 1059 MasterProcedureManager mpm = 1060 server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature()); 1061 if (mpm == null) { 1062 throw new ServiceException("The procedure is not registered: " + desc.getSignature()); 1063 } 1064 LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature()); 1065 byte[] data = mpm.execProcedureWithRet(desc); 1066 ExecProcedureResponse.Builder builder = ExecProcedureResponse.newBuilder(); 1067 // set return data if available 1068 if (data != null) { 1069 builder.setReturnData(UnsafeByteOperations.unsafeWrap(data)); 1070 } 1071 return builder.build(); 1072 } catch (IOException e) { 1073 throw new ServiceException(e); 1074 } 1075 } 1076 1077 @Override 1078 public GetClusterStatusResponse getClusterStatus(RpcController controller, 1079 GetClusterStatusRequest req) throws ServiceException { 1080 GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder(); 1081 try { 1082 // We used to check if Master was up at this point but let this call proceed even if 1083 // Master is initializing... else we shut out stuff like hbck2 tool from making progress 1084 // since it queries this method to figure cluster version. hbck2 wants to be able to work 1085 // against Master even if it is 'initializing' so it can do fixup. 1086 response.setClusterStatus(ClusterMetricsBuilder.toClusterStatus( 1087 server.getClusterMetrics(ClusterMetricsBuilder.toOptions(req.getOptionsList())))); 1088 } catch (IOException e) { 1089 throw new ServiceException(e); 1090 } 1091 return response.build(); 1092 } 1093 1094 /** 1095 * List the currently available/stored snapshots. 
Any in-progress snapshots are ignored 1096 */ 1097 @Override 1098 public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller, 1099 GetCompletedSnapshotsRequest request) throws ServiceException { 1100 try { 1101 server.checkInitialized(); 1102 GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder(); 1103 List<SnapshotDescription> snapshots = server.snapshotManager.getCompletedSnapshots(); 1104 1105 // convert to protobuf 1106 for (SnapshotDescription snapshot : snapshots) { 1107 builder.addSnapshots(snapshot); 1108 } 1109 return builder.build(); 1110 } catch (IOException e) { 1111 throw new ServiceException(e); 1112 } 1113 } 1114 1115 @Override 1116 public ListNamespacesResponse listNamespaces(RpcController controller, 1117 ListNamespacesRequest request) throws ServiceException { 1118 try { 1119 return ListNamespacesResponse.newBuilder().addAllNamespaceName(server.listNamespaces()) 1120 .build(); 1121 } catch (IOException e) { 1122 throw new ServiceException(e); 1123 } 1124 } 1125 1126 @Override 1127 public GetNamespaceDescriptorResponse getNamespaceDescriptor(RpcController controller, 1128 GetNamespaceDescriptorRequest request) throws ServiceException { 1129 try { 1130 return GetNamespaceDescriptorResponse.newBuilder() 1131 .setNamespaceDescriptor( 1132 ProtobufUtil.toProtoNamespaceDescriptor(server.getNamespace(request.getNamespaceName()))) 1133 .build(); 1134 } catch (IOException e) { 1135 throw new ServiceException(e); 1136 } 1137 } 1138 1139 /** 1140 * Get the number of regions of the table that have been updated by the alter. 
   * @return Pair indicating the number of regions updated Pair.getFirst is the regions that are yet
   *         to be updated Pair.getSecond is the total number of regions of the table
   */
  @Override
  public GetSchemaAlterStatusResponse getSchemaAlterStatus(RpcController controller,
    GetSchemaAlterStatusRequest req) throws ServiceException {
    // TODO: currently, we query using the table name on the client side. this
    // may overlap with other table operations or the table operation may
    // have completed before querying this API. We need to refactor to a
    // transaction system in the future to avoid these ambiguities.
    TableName tableName = ProtobufUtil.toTableName(req.getTableName());

    try {
      server.checkInitialized();
      // Reopen status is (regions still to reopen, total regions) for this table.
      Pair<Integer, Integer> pair = server.getAssignmentManager().getReopenStatus(tableName);
      GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder();
      ret.setYetToUpdateRegions(pair.getFirst());
      ret.setTotalRegions(pair.getSecond());
      return ret.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Get list of TableDescriptors for requested tables.
   * @param c   Unused (set to null).
   * @param req GetTableDescriptorsRequest that contains: - tableNames: requested tables, or if
   *            empty, all are requested.
   */
  @Override
  public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
    GetTableDescriptorsRequest req) throws ServiceException {
    try {
      server.checkInitialized();

      // Optional regex/namespace filters; null means "no filter".
      final String regex = req.hasRegex() ? req.getRegex() : null;
      final String namespace = req.hasNamespace() ?
        req.getNamespace() : null;
      List<TableName> tableNameList = null;
      if (req.getTableNamesCount() > 0) {
        tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
        for (HBaseProtos.TableName tableNamePB : req.getTableNamesList()) {
          tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
        }
      }

      List<TableDescriptor> descriptors =
        server.listTableDescriptors(namespace, regex, tableNameList, req.getIncludeSysTables());

      GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
      if (descriptors != null && descriptors.size() > 0) {
        // Add the table descriptors to the response
        for (TableDescriptor htd : descriptors) {
          builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
        }
      }
      return builder.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Returns descriptors for all tables currently in the requested state (enabled/disabled). */
  @Override
  public ListTableDescriptorsByStateResponse listTableDescriptorsByState(RpcController controller,
    ListTableDescriptorsByStateRequest request) throws ServiceException {
    try {
      server.checkInitialized();
      List<TableDescriptor> descriptors = server.listTableDescriptors(null, null, null, false);

      ListTableDescriptorsByStateResponse.Builder builder =
        ListTableDescriptorsByStateResponse.newBuilder();
      if (descriptors != null && descriptors.size() > 0) {
        // Add the table descriptors to the response
        TableState.State state =
          request.getIsEnabled() ?
TableState.State.ENABLED : TableState.State.DISABLED;
        for (TableDescriptor htd : descriptors) {
          if (server.getTableStateManager().isTableState(htd.getTableName(), state)) {
            builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
          }
        }
      }
      return builder.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Get list of userspace table names
   * @param controller Unused (set to null).
   * @param req        GetTableNamesRequest
   */
  @Override
  public GetTableNamesResponse getTableNames(RpcController controller, GetTableNamesRequest req)
    throws ServiceException {
    try {
      server.checkServiceStarted();

      // Optional regex/namespace filters; null means "no filter".
      final String regex = req.hasRegex() ? req.getRegex() : null;
      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
      List<TableName> tableNames =
        server.listTableNames(namespace, regex, req.getIncludeSysTables());

      GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
      if (tableNames != null && tableNames.size() > 0) {
        // Add the table names to the response
        for (TableName table : tableNames) {
          builder.addTableNames(ProtobufUtil.toProtoTableName(table));
        }
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Returns names of all tables currently in the requested state (enabled/disabled). */
  @Override
  public ListTableNamesByStateResponse listTableNamesByState(RpcController controller,
    ListTableNamesByStateRequest request) throws ServiceException {
    try {
      server.checkServiceStarted();
      List<TableName> tableNames = server.listTableNames(null, null, false);
      ListTableNamesByStateResponse.Builder builder = ListTableNamesByStateResponse.newBuilder();
      if (tableNames != null && tableNames.size() > 0) {
        // Add the disabled table names to the response
        TableState.State state =
          request.getIsEnabled() ?
TableState.State.ENABLED : TableState.State.DISABLED;
        for (TableName table : tableNames) {
          if (server.getTableStateManager().isTableState(table, state)) {
            builder.addTableNames(ProtobufUtil.toProtoTableName(table));
          }
        }
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Returns the current state of the named table as tracked by the table state manager. */
  @Override
  public GetTableStateResponse getTableState(RpcController controller, GetTableStateRequest request)
    throws ServiceException {
    try {
      server.checkServiceStarted();
      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
      TableState ts = server.getTableStateManager().getTableState(tableName);
      GetTableStateResponse.Builder builder = GetTableStateResponse.newBuilder();
      builder.setTableState(ts.convert());
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Reports whether the catalog janitor chore is currently enabled. */
  @Override
  public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
    IsCatalogJanitorEnabledRequest req) throws ServiceException {
    return IsCatalogJanitorEnabledResponse.newBuilder().setValue(server.isCatalogJanitorEnabled())
      .build();
  }

  /** Reports whether the cleaner chore is currently enabled. */
  @Override
  public IsCleanerChoreEnabledResponse isCleanerChoreEnabled(RpcController c,
    IsCleanerChoreEnabledRequest req) throws ServiceException {
    return IsCleanerChoreEnabledResponse.newBuilder().setValue(server.isCleanerChoreEnabled())
      .build();
  }

  /** Reports whether this master's service has started and a stop has not been requested. */
  @Override
  public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req)
    throws ServiceException {
    try {
      server.checkServiceStarted();
      return IsMasterRunningResponse.newBuilder().setIsMasterRunning(!server.isStopped()).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Checks if the specified procedure is done.
   * @return true if the procedure is done, false if the procedure is in the process of completing
   * @throws ServiceException if invalid procedure or failed procedure with progress failure reason.
   */
  @Override
  public IsProcedureDoneResponse isProcedureDone(RpcController controller,
    IsProcedureDoneRequest request) throws ServiceException {
    try {
      server.checkInitialized();
      ProcedureDescription desc = request.getProcedure();
      // Look up the manager registered for this procedure signature; fail fast if absent.
      MasterProcedureManager mpm =
        server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
      if (mpm == null) {
        throw new ServiceException("The procedure is not registered: " + desc.getSignature());
      }
      LOG.debug("Checking to see if procedure from request:" + desc.getSignature() + " is done");

      IsProcedureDoneResponse.Builder builder = IsProcedureDoneResponse.newBuilder();
      boolean done = mpm.isProcedureDone(desc);
      builder.setDone(done);
      return builder.build();
    } catch (ForeignException e) {
      // Surface the remote/foreign cause, not the wrapper, to the client.
      throw new ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Checks if the specified snapshot is done.
   * @return true if the snapshot is in file system ready to use, false if the snapshot is in the
   *         process of completing
   * @throws ServiceException wrapping UnknownSnapshotException if invalid snapshot, or a wrapped
   *           HBaseSnapshotException with progress failure reason.
   */
  @Override
  public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
    IsSnapshotDoneRequest request) throws ServiceException {
    LOG.debug("Checking to see if snapshot from request:"
      + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()) + " is done");
    try {
      server.checkInitialized();
      IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
      boolean done = server.snapshotManager.isSnapshotDone(request.getSnapshot());
      builder.setDone(done);
      return builder.build();
    } catch (ForeignException e) {
      // Surface the remote/foreign cause, not the wrapper, to the client.
      throw new ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Returns the state and (if finished) the result of the procedure with the requested id. */
  @Override
  public GetProcedureResultResponse getProcedureResult(RpcController controller,
    GetProcedureResultRequest request) throws ServiceException {
    LOG.debug("Checking to see if procedure is done pid=" + request.getProcId());
    try {
      server.checkInitialized();
      GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder();
      long procId = request.getProcId();
      ProcedureExecutor<?> executor = server.getMasterProcedureExecutor();
      Procedure<?> result = executor.getResultOrProcedure(procId);
      if (result != null) {
        builder.setSubmittedTime(result.getSubmittedTime());
        builder.setLastUpdate(result.getLastUpdate());
        if (executor.isFinished(procId)) {
          builder.setState(GetProcedureResultResponse.State.FINISHED);
          if (result.isFailed()) {
            IOException exception = MasterProcedureUtil.unwrapRemoteIOException(result);
            builder.setException(ForeignExceptionUtil.toProtoForeignException(exception));
          }
          byte[] resultData = result.getResult();
          if (resultData != null) {
            builder.setResult(UnsafeByteOperations.unsafeWrap(resultData));
          }
          // Result has been delivered to the client; evict it from the executor.
          server.getMasterProcedureExecutor().removeResult(request.getProcId());
        } else {
          builder.setState(GetProcedureResultResponse.State.RUNNING);
        }
      } else {
        builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Requests abort of the procedure with the given id; reports whether the abort took effect. */
  @Override
  public AbortProcedureResponse abortProcedure(RpcController rpcController,
    AbortProcedureRequest request) throws ServiceException {
    try {
      AbortProcedureResponse.Builder response = AbortProcedureResponse.newBuilder();
      boolean abortResult =
        server.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning());
      response.setIsProcedureAborted(abortResult);
      return response.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Lists descriptors for every namespace known to this master. */
  @Override
  public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c,
    ListNamespaceDescriptorsRequest request) throws ServiceException {
    try {
      ListNamespaceDescriptorsResponse.Builder response =
        ListNamespaceDescriptorsResponse.newBuilder();
      for (NamespaceDescriptor ns : server.getNamespaces()) {
        response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns));
      }
      return response.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Lists all procedures currently known to the master. */
  @Override
  public GetProceduresResponse getProcedures(RpcController rpcController,
    GetProceduresRequest request) throws ServiceException {
    try {
      final GetProceduresResponse.Builder response = GetProceduresResponse.newBuilder();
      for (Procedure<?> p : server.getProcedures()) {
        response.addProcedure(ProcedureUtil.convertToProtoProcedure(p));
      }
      return response.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Lists all locks currently held or queued in the master. */
  @Override
  public GetLocksResponse getLocks(RpcController controller, GetLocksRequest request)
    throws ServiceException {
    try {
      final GetLocksResponse.Builder builder = GetLocksResponse.newBuilder();

      for (LockedResource lockedResource : server.getLocks()) {
        builder.addLock(ProcedureUtil.convertToProtoLockedResource(lockedResource));
      }

      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Lists the table descriptors of every table in the given namespace. */
  @Override
  public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
    ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
    try {
      ListTableDescriptorsByNamespaceResponse.Builder b =
        ListTableDescriptorsByNamespaceResponse.newBuilder();
      for (TableDescriptor htd : server
        .listTableDescriptorsByNamespace(request.getNamespaceName())) {
        b.addTableSchema(ProtobufUtil.toTableSchema(htd));
      }
      return b.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Lists the names of every table in the given namespace. */
  @Override
  public ListTableNamesByNamespaceResponse listTableNamesByNamespace(RpcController c,
    ListTableNamesByNamespaceRequest request) throws ServiceException {
    try {
      ListTableNamesByNamespaceResponse.Builder b = ListTableNamesByNamespaceResponse.newBuilder();
      for (TableName tableName : server.listTableNamesByNamespace(request.getNamespaceName())) {
        b.addTableName(ProtobufUtil.toProtoTableName(tableName));
      }
      return b.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Modifies a column family of an existing table; returns the procedure id when one was run. */
  @Override
  public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req)
    throws ServiceException {
    try {
      long procId = server.modifyColumn(ProtobufUtil.toTableName(req.getTableName()),
        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(),
        req.getNonce());
      if (procId == -1) {
        // This mean operation was not performed in server, so do not set any procId
        return
 ModifyColumnResponse.newBuilder().build();
      } else {
        return ModifyColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Changes the store file tracker implementation used by a single column family. */
  @Override
  public ModifyColumnStoreFileTrackerResponse modifyColumnStoreFileTracker(RpcController controller,
    ModifyColumnStoreFileTrackerRequest req) throws ServiceException {
    try {
      long procId =
        server.modifyColumnStoreFileTracker(ProtobufUtil.toTableName(req.getTableName()),
          req.getFamily().toByteArray(), req.getDstSft(), req.getNonceGroup(), req.getNonce());
      return ModifyColumnStoreFileTrackerResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Applies the namespace descriptor carried in the request to an existing namespace. */
  @Override
  public ModifyNamespaceResponse modifyNamespace(RpcController controller,
    ModifyNamespaceRequest request) throws ServiceException {
    try {
      long procId =
        server.modifyNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
          request.getNonceGroup(), request.getNonce());
      return ModifyNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Applies a new table descriptor to an existing table. */
  @Override
  public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req)
    throws ServiceException {
    try {
      long procId = server.modifyTable(ProtobufUtil.toTableName(req.getTableName()),
        ProtobufUtil.toTableDescriptor(req.getTableSchema()), req.getNonceGroup(), req.getNonce(),
        req.getReopenRegions());
      return ModifyTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Changes the store file tracker implementation used by a whole table. */
  @Override
  public ModifyTableStoreFileTrackerResponse modifyTableStoreFileTracker(RpcController controller,
    ModifyTableStoreFileTrackerRequest req) throws
 ServiceException {
    try {
      long procId = server.modifyTableStoreFileTracker(ProtobufUtil.toTableName(req.getTableName()),
        req.getDstSft(), req.getNonceGroup(), req.getNonce());
      return ModifyTableStoreFileTrackerResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Moves a region (identified by its encoded name) to an optional destination server. */
  @Override
  public MoveRegionResponse moveRegion(RpcController controller, MoveRegionRequest req)
    throws ServiceException {
    final byte[] encodedRegionName = req.getRegion().getValue().toByteArray();
    RegionSpecifierType type = req.getRegion().getType();
    // Destination is optional; null lets the master pick a target server.
    final byte[] destServerName = (req.hasDestServerName())
      ? Bytes.toBytes(ProtobufUtil.toServerName(req.getDestServerName()).getServerName())
      : null;
    MoveRegionResponse mrr = MoveRegionResponse.newBuilder().build();

    // Warn (but proceed) when the caller used a specifier type other than the expected one.
    if (type != RegionSpecifierType.ENCODED_REGION_NAME) {
      LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
        + " actual: " + type);
    }

    try {
      server.checkInitialized();
      server.move(encodedRegionName, destServerName);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return mrr;
  }

  /**
   * Offline specified region from master's in-memory state. It will not attempt to reassign the
   * region as in unassign. This is a special method that should be used by experts or hbck.
1595 */ 1596 @Override 1597 public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request) 1598 throws ServiceException { 1599 try { 1600 server.checkInitialized(); 1601 1602 final RegionSpecifierType type = request.getRegion().getType(); 1603 if (type != RegionSpecifierType.REGION_NAME) { 1604 LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME 1605 + " actual: " + type); 1606 } 1607 1608 final byte[] regionName = request.getRegion().getValue().toByteArray(); 1609 final RegionInfo hri = server.getAssignmentManager().getRegionInfo(regionName); 1610 if (hri == null) { 1611 throw new UnknownRegionException(Bytes.toStringBinary(regionName)); 1612 } 1613 1614 if (server.cpHost != null) { 1615 server.cpHost.preRegionOffline(hri); 1616 } 1617 LOG.info(server.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString()); 1618 server.getAssignmentManager().offlineRegion(hri); 1619 if (server.cpHost != null) { 1620 server.cpHost.postRegionOffline(hri); 1621 } 1622 } catch (IOException ioe) { 1623 throw new ServiceException(ioe); 1624 } 1625 return OfflineRegionResponse.newBuilder().build(); 1626 } 1627 1628 /** 1629 * Execute Restore/Clone snapshot operation. 1630 * <p> 1631 * If the specified table exists a "Restore" is executed, replacing the table schema and directory 1632 * data with the content of the snapshot. The table must be disabled, or a 1633 * UnsupportedOperationException will be thrown. 1634 * <p> 1635 * If the table doesn't exist a "Clone" is executed, a new table is created using the schema at 1636 * the time of the snapshot, and the content of the snapshot. 1637 * <p> 1638 * The restore/clone operation does not require copying HFiles. Since HFiles are immutable the 1639 * table can point to and use the same files as the original one. 
   */
  @Override
  public RestoreSnapshotResponse restoreSnapshot(RpcController controller,
    RestoreSnapshotRequest request) throws ServiceException {
    try {
      long procId = server.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(),
        request.getNonce(), request.getRestoreACL(), request.getCustomSFT());
      return RestoreSnapshotResponse.newBuilder().setProcId(procId).build();
    } catch (ForeignException e) {
      // Surface the remote/foreign cause, not the wrapper, to the client.
      throw new ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** RPC entry point for toggling TTL-based snapshot auto-cleanup on or off. */
  @Override
  public SetSnapshotCleanupResponse switchSnapshotCleanup(RpcController controller,
    SetSnapshotCleanupRequest request) throws ServiceException {
    try {
      server.checkInitialized();
      final boolean enabled = request.getEnabled();
      final boolean isSynchronous = request.hasSynchronous() && request.getSynchronous();
      final boolean prevSnapshotCleanupRunning = this.switchSnapshotCleanup(enabled, isSynchronous);
      return SetSnapshotCleanupResponse.newBuilder()
        .setPrevSnapshotCleanup(prevSnapshotCleanupRunning).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Reports whether TTL-based snapshot auto-cleanup is currently enabled. */
  @Override
  public IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(RpcController controller,
    IsSnapshotCleanupEnabledRequest request) throws ServiceException {
    try {
      server.checkInitialized();
      final boolean isSnapshotCleanupEnabled = server.snapshotCleanupStateStore.get();
      return IsSnapshotCleanupEnabledResponse.newBuilder().setEnabled(isSnapshotCleanupEnabled)
        .build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Turn on/off snapshot auto-cleanup based on TTL
   * @param enabledNewVal Set to <code>true</code> to enable, <code>false</code> to disable
   * @param synchronous   If <code>true</code>, it waits until current snapshot cleanup is
1687 * completed, if outstanding 1688 * @return previous snapshot auto-cleanup mode 1689 */ 1690 private synchronized boolean switchSnapshotCleanup(final boolean enabledNewVal, 1691 final boolean synchronous) throws IOException { 1692 final boolean oldValue = server.snapshotCleanupStateStore.get(); 1693 server.switchSnapshotCleanup(enabledNewVal, synchronous); 1694 LOG.info("{} Successfully set snapshot cleanup to {}", server.getClientIdAuditPrefix(), 1695 enabledNewVal); 1696 return oldValue; 1697 } 1698 1699 @Override 1700 public RunCatalogScanResponse runCatalogScan(RpcController c, RunCatalogScanRequest req) 1701 throws ServiceException { 1702 rpcPreCheck("runCatalogScan"); 1703 try { 1704 return ResponseConverter.buildRunCatalogScanResponse(this.server.catalogJanitorChore.scan()); 1705 } catch (IOException ioe) { 1706 throw new ServiceException(ioe); 1707 } 1708 } 1709 1710 @Override 1711 public RunCleanerChoreResponse runCleanerChore(RpcController c, RunCleanerChoreRequest req) 1712 throws ServiceException { 1713 rpcPreCheck("runCleanerChore"); 1714 try { 1715 CompletableFuture<Boolean> fileCleanerFuture = server.getHFileCleaner().triggerCleanerNow(); 1716 CompletableFuture<Boolean> logCleanerFuture = server.getLogCleaner().triggerCleanerNow(); 1717 boolean result = fileCleanerFuture.get() && logCleanerFuture.get(); 1718 return ResponseConverter.buildRunCleanerChoreResponse(result); 1719 } catch (InterruptedException e) { 1720 throw new ServiceException(e); 1721 } catch (ExecutionException e) { 1722 throw new ServiceException(e.getCause()); 1723 } 1724 } 1725 1726 @Override 1727 public SetBalancerRunningResponse setBalancerRunning(RpcController c, 1728 SetBalancerRunningRequest req) throws ServiceException { 1729 try { 1730 server.checkInitialized(); 1731 boolean prevValue = (req.getSynchronous()) 1732 ? 
 synchronousBalanceSwitch(req.getOn())
        : server.balanceSwitch(req.getOn());
      return SetBalancerRunningResponse.newBuilder().setPrevBalanceValue(prevValue).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Initiates cluster shutdown via HMaster.shutdown(). */
  @Override
  public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request)
    throws ServiceException {
    LOG.info(server.getClientIdAuditPrefix() + " shutdown");
    try {
      server.shutdown();
    } catch (IOException e) {
      LOG.error("Exception occurred in HMaster.shutdown()", e);
      throw new ServiceException(e);
    }
    return ShutdownResponse.newBuilder().build();
  }

  /**
   * Triggers an asynchronous attempt to take a snapshot. {@inheritDoc}
   */
  @Override
  public SnapshotResponse snapshot(RpcController controller, SnapshotRequest request)
    throws ServiceException {
    try {
      server.checkInitialized();
      server.snapshotManager.checkSnapshotSupport();

      LOG.info(server.getClientIdAuditPrefix() + " snapshot request for:"
        + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
      // get the snapshot information
      SnapshotDescription snapshot =
        SnapshotDescriptionUtils.validate(request.getSnapshot(), server.getConfiguration());
      // send back the max amount of time the client should wait for the snapshot to complete
      long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(server.getConfiguration(),
        snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);

      SnapshotResponse.Builder builder = SnapshotResponse.newBuilder().setExpectedTimeout(waitTime);

      // If there is nonce group and nonce in the snapshot request, then the client can
      // handle snapshot procedure procId. And if enable the snapshot procedure, we
      // will do the snapshot work with proc-v2, otherwise we will fall back to zk proc.
      if (
        request.hasNonceGroup() && request.hasNonce()
          && server.snapshotManager.snapshotProcedureEnabled()
      ) {
        long nonceGroup = request.getNonceGroup();
        long nonce = request.getNonce();
        long procId = server.snapshotManager.takeSnapshot(snapshot, nonceGroup, nonce);
        return builder.setProcId(procId).build();
      } else {
        server.snapshotManager.takeSnapshot(snapshot);
        return builder.build();
      }
    } catch (ForeignException e) {
      // Surface the remote/foreign cause, not the wrapper, to the client.
      throw new ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Stops this master only (HMaster.stopMaster()). */
  @Override
  public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request)
    throws ServiceException {
    LOG.info(server.getClientIdAuditPrefix() + " stop");
    try {
      server.stopMaster();
    } catch (IOException e) {
      LOG.error("Exception occurred while stopping master", e);
      throw new ServiceException(e);
    }
    return StopMasterResponse.newBuilder().build();
  }

  /** Reports whether this master is currently in maintenance mode. */
  @Override
  public IsInMaintenanceModeResponse isMasterInMaintenanceMode(final RpcController controller,
    final IsInMaintenanceModeRequest request) throws ServiceException {
    IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder();
    response.setInMaintenanceMode(server.isInMaintenanceMode());
    return response.build();
  }

  /** Unassigns a region (by full region name) from its current server, if it is online. */
  @Override
  public UnassignRegionResponse unassignRegion(RpcController controller, UnassignRegionRequest req)
    throws ServiceException {
    try {
      final byte[] regionName = req.getRegion().getValue().toByteArray();
      RegionSpecifierType type = req.getRegion().getType();
      UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build();

      server.checkInitialized();
      // Warn (but proceed) when the caller used a specifier type other than the expected one.
      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
+ " actual: " + type); 1829 } 1830 RegionStateNode rsn = 1831 server.getAssignmentManager().getRegionStates().getRegionStateNodeFromName(regionName); 1832 if (rsn == null) { 1833 throw new UnknownRegionException(Bytes.toString(regionName)); 1834 } 1835 1836 RegionInfo hri = rsn.getRegionInfo(); 1837 if (server.cpHost != null) { 1838 server.cpHost.preUnassign(hri); 1839 } 1840 LOG.debug(server.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString() 1841 + " in current location if it is online"); 1842 server.getAssignmentManager().unassign(hri); 1843 if (server.cpHost != null) { 1844 server.cpHost.postUnassign(hri); 1845 } 1846 1847 return urr; 1848 } catch (IOException ioe) { 1849 throw new ServiceException(ioe); 1850 } 1851 } 1852 1853 @Override 1854 public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c, 1855 ReportRegionStateTransitionRequest req) throws ServiceException { 1856 try { 1857 server.checkServiceStarted(); 1858 for (RegionServerStatusProtos.RegionStateTransition transition : req.getTransitionList()) { 1859 long procId = 1860 transition.getProcIdCount() > 0 ? transition.getProcId(0) : Procedure.NO_PROC_ID; 1861 // -1 is less than any possible MasterActiveCode 1862 long initiatingMasterActiveTime = transition.hasInitiatingMasterActiveTime() 1863 ? 
 transition.getInitiatingMasterActiveTime()
          : -1;
        throwOnOldMaster(procId, initiatingMasterActiveTime);
      }
      return server.getAssignmentManager().reportRegionStateTransition(req);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Applies a quota-setting request via the master quota manager. */
  @Override
  public SetQuotaResponse setQuota(RpcController c, SetQuotaRequest req) throws ServiceException {
    try {
      server.checkInitialized();
      return server.getMasterQuotaManager().setQuota(req);
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  /** Returns the last major-compaction timestamp for the named table. */
  @Override
  public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
    MajorCompactionTimestampRequest request) throws ServiceException {
    MajorCompactionTimestampResponse.Builder response =
      MajorCompactionTimestampResponse.newBuilder();
    try {
      server.checkInitialized();
      response.setCompactionTimestamp(
        server.getLastMajorCompactionTimestamp(ProtobufUtil.toTableName(request.getTableName())));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  /** Returns the last major-compaction timestamp for a single region. */
  @Override
  public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
    RpcController controller, MajorCompactionTimestampForRegionRequest request)
    throws ServiceException {
    MajorCompactionTimestampResponse.Builder response =
      MajorCompactionTimestampResponse.newBuilder();
    try {
      server.checkInitialized();
      response.setCompactionTimestamp(server
        .getLastMajorCompactionTimestampForRegion(request.getRegion().getValue().toByteArray()));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  /** Reports whether the balancer is currently switched on. */
  @Override
  public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller,
    IsBalancerEnabledRequest request) throws ServiceException {
    IsBalancerEnabledResponse.Builder response = IsBalancerEnabledResponse.newBuilder();
    response.setEnabled(server.isBalancerOn());
    return response.build();
  }

  /** Enables/disables split and/or merge switches; returns each switch's previous value. */
  @Override
  public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller,
    SetSplitOrMergeEnabledRequest request) throws ServiceException {
    SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder();
    try {
      server.checkInitialized();
      boolean newValue = request.getEnabled();
      for (MasterProtos.MasterSwitchType masterSwitchType : request.getSwitchTypesList()) {
        MasterSwitchType switchType = convert(masterSwitchType);
        // Record the previous value before flipping, wrapped by coprocessor pre/post hooks.
        boolean oldValue = server.isSplitOrMergeEnabled(switchType);
        response.addPrevValue(oldValue);
        if (server.cpHost != null) {
          server.cpHost.preSetSplitOrMergeEnabled(newValue, switchType);
        }
        server.getSplitOrMergeStateStore().setSplitOrMergeEnabled(newValue, switchType);
        if (server.cpHost != null) {
          server.cpHost.postSetSplitOrMergeEnabled(newValue, switchType);
        }
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  /** Reports whether the given split/merge switch is currently enabled. */
  @Override
  public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller,
    IsSplitOrMergeEnabledRequest request) throws ServiceException {
    IsSplitOrMergeEnabledResponse.Builder response = IsSplitOrMergeEnabledResponse.newBuilder();
    response.setEnabled(server.isSplitOrMergeEnabled(convert(request.getSwitchType())));
    return response.build();
  }

  /** Runs the region normalizer once against tables matching the request's filters. */
  @Override
  public NormalizeResponse normalize(RpcController controller, NormalizeRequest request)
    throws ServiceException {
    rpcPreCheck("normalize");
    try {
      final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
        .tableNames(ProtobufUtil.toTableNameList(request.getTableNamesList()))
.regex(request.hasRegex() ? request.getRegex() : null) 1963 .namespace(request.hasNamespace() ? request.getNamespace() : null).build(); 1964 return NormalizeResponse.newBuilder() 1965 // all API requests are considered priority requests. 1966 .setNormalizerRan(server.normalizeRegions(ntfp, true)).build(); 1967 } catch (IOException ex) { 1968 throw new ServiceException(ex); 1969 } 1970 } 1971 1972 @Override 1973 public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller, 1974 SetNormalizerRunningRequest request) throws ServiceException { 1975 rpcPreCheck("setNormalizerRunning"); 1976 1977 // Sets normalizer on/off flag in ZK. 1978 // TODO: this method is totally broken in terms of atomicity of actions and values read. 1979 // 1. The contract has this RPC returning the previous value. There isn't a ZKUtil method 1980 // that lets us retrieve the previous value as part of setting a new value, so we simply 1981 // perform a read before issuing the update. Thus we have a data race opportunity, between 1982 // when the `prevValue` is read and whatever is actually overwritten. 1983 // 2. Down in `setNormalizerOn`, the call to `createAndWatch` inside of the catch clause can 1984 // itself fail in the event that the znode already exists. Thus, another data race, between 1985 // when the initial `setData` call is notified of the absence of the target znode and the 1986 // subsequent `createAndWatch`, with another client creating said node. 1987 // That said, there's supposed to be only one active master and thus there's supposed to be 1988 // only one process with the authority to modify the value. 
1989 final boolean prevValue = server.getRegionNormalizerManager().isNormalizerOn(); 1990 final boolean newValue = request.getOn(); 1991 try { 1992 server.getRegionNormalizerManager().setNormalizerOn(newValue); 1993 } catch (IOException e) { 1994 throw new ServiceException(e); 1995 } 1996 LOG.info("{} set normalizerSwitch={}", server.getClientIdAuditPrefix(), newValue); 1997 return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build(); 1998 } 1999 2000 @Override 2001 public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller, 2002 IsNormalizerEnabledRequest request) { 2003 IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder(); 2004 response.setEnabled(server.isNormalizerOn()); 2005 return response.build(); 2006 } 2007 2008 /** 2009 * Returns the security capabilities in effect on the cluster 2010 */ 2011 @Override 2012 public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller, 2013 SecurityCapabilitiesRequest request) throws ServiceException { 2014 SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder(); 2015 try { 2016 server.checkInitialized(); 2017 Set<SecurityCapabilitiesResponse.Capability> capabilities = new HashSet<>(); 2018 // Authentication 2019 if (User.isHBaseSecurityEnabled(server.getConfiguration())) { 2020 capabilities.add(SecurityCapabilitiesResponse.Capability.SECURE_AUTHENTICATION); 2021 } else { 2022 capabilities.add(SecurityCapabilitiesResponse.Capability.SIMPLE_AUTHENTICATION); 2023 } 2024 // A coprocessor that implements AccessControlService can provide AUTHORIZATION and 2025 // CELL_AUTHORIZATION 2026 if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) { 2027 if (AccessChecker.isAuthorizationSupported(server.getConfiguration())) { 2028 capabilities.add(SecurityCapabilitiesResponse.Capability.AUTHORIZATION); 2029 } 2030 if 
(AccessController.isCellAuthorizationSupported(server.getConfiguration())) { 2031 capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_AUTHORIZATION); 2032 } 2033 } 2034 // A coprocessor that implements VisibilityLabelsService can provide CELL_VISIBILITY. 2035 if (server.cpHost != null && hasVisibilityLabelsServiceCoprocessor(server.cpHost)) { 2036 if (VisibilityController.isCellAuthorizationSupported(server.getConfiguration())) { 2037 capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_VISIBILITY); 2038 } 2039 } 2040 response.addAllCapabilities(capabilities); 2041 } catch (IOException e) { 2042 throw new ServiceException(e); 2043 } 2044 return response.build(); 2045 } 2046 2047 /** 2048 * Determines if there is a MasterCoprocessor deployed which implements 2049 * {@link AccessControlService.Interface}. 2050 */ 2051 boolean hasAccessControlServiceCoprocessor(MasterCoprocessorHost cpHost) { 2052 return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class), 2053 AccessControlService.Interface.class); 2054 } 2055 2056 /** 2057 * Determines if there is a MasterCoprocessor deployed which implements 2058 * {@link VisibilityLabelsService.Interface}. 2059 */ 2060 boolean hasVisibilityLabelsServiceCoprocessor(MasterCoprocessorHost cpHost) { 2061 return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class), 2062 VisibilityLabelsService.Interface.class); 2063 } 2064 2065 /** 2066 * Determines if there is a coprocessor implementation in the provided argument which extends or 2067 * implements the provided {@code service}. 
2068 */ 2069 boolean checkCoprocessorWithService(List<MasterCoprocessor> coprocessorsToCheck, 2070 Class<?> service) { 2071 if (coprocessorsToCheck == null || coprocessorsToCheck.isEmpty()) { 2072 return false; 2073 } 2074 for (MasterCoprocessor cp : coprocessorsToCheck) { 2075 if (service.isAssignableFrom(cp.getClass())) { 2076 return true; 2077 } 2078 } 2079 return false; 2080 } 2081 2082 private MasterSwitchType convert(MasterProtos.MasterSwitchType switchType) { 2083 switch (switchType) { 2084 case SPLIT: 2085 return MasterSwitchType.SPLIT; 2086 case MERGE: 2087 return MasterSwitchType.MERGE; 2088 default: 2089 break; 2090 } 2091 return null; 2092 } 2093 2094 @Override 2095 public AddReplicationPeerResponse addReplicationPeer(RpcController controller, 2096 AddReplicationPeerRequest request) throws ServiceException { 2097 try { 2098 long procId = server.addReplicationPeer(request.getPeerId(), 2099 ReplicationPeerConfigUtil.convert(request.getPeerConfig()), 2100 request.getPeerState().getState().equals(ReplicationState.State.ENABLED)); 2101 return AddReplicationPeerResponse.newBuilder().setProcId(procId).build(); 2102 } catch (ReplicationException | IOException e) { 2103 throw new ServiceException(e); 2104 } 2105 } 2106 2107 @Override 2108 public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller, 2109 RemoveReplicationPeerRequest request) throws ServiceException { 2110 try { 2111 long procId = server.removeReplicationPeer(request.getPeerId()); 2112 return RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build(); 2113 } catch (ReplicationException | IOException e) { 2114 throw new ServiceException(e); 2115 } 2116 } 2117 2118 @Override 2119 public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller, 2120 EnableReplicationPeerRequest request) throws ServiceException { 2121 try { 2122 long procId = server.enableReplicationPeer(request.getPeerId()); 2123 return 
EnableReplicationPeerResponse.newBuilder().setProcId(procId).build(); 2124 } catch (ReplicationException | IOException e) { 2125 throw new ServiceException(e); 2126 } 2127 } 2128 2129 @Override 2130 public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller, 2131 DisableReplicationPeerRequest request) throws ServiceException { 2132 try { 2133 long procId = server.disableReplicationPeer(request.getPeerId()); 2134 return DisableReplicationPeerResponse.newBuilder().setProcId(procId).build(); 2135 } catch (ReplicationException | IOException e) { 2136 throw new ServiceException(e); 2137 } 2138 } 2139 2140 @Override 2141 public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller, 2142 GetReplicationPeerConfigRequest request) throws ServiceException { 2143 GetReplicationPeerConfigResponse.Builder response = 2144 GetReplicationPeerConfigResponse.newBuilder(); 2145 try { 2146 String peerId = request.getPeerId(); 2147 ReplicationPeerConfig peerConfig = server.getReplicationPeerConfig(peerId); 2148 response.setPeerId(peerId); 2149 response.setPeerConfig(ReplicationPeerConfigUtil.convert(peerConfig)); 2150 } catch (ReplicationException | IOException e) { 2151 throw new ServiceException(e); 2152 } 2153 return response.build(); 2154 } 2155 2156 @Override 2157 public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(RpcController controller, 2158 UpdateReplicationPeerConfigRequest request) throws ServiceException { 2159 try { 2160 long procId = server.updateReplicationPeerConfig(request.getPeerId(), 2161 ReplicationPeerConfigUtil.convert(request.getPeerConfig())); 2162 return UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build(); 2163 } catch (ReplicationException | IOException e) { 2164 throw new ServiceException(e); 2165 } 2166 } 2167 2168 @Override 2169 public TransitReplicationPeerSyncReplicationStateResponse 2170 transitReplicationPeerSyncReplicationState(RpcController controller, 
2171 TransitReplicationPeerSyncReplicationStateRequest request) throws ServiceException { 2172 try { 2173 long procId = server.transitReplicationPeerSyncReplicationState(request.getPeerId(), 2174 ReplicationPeerConfigUtil.toSyncReplicationState(request.getSyncReplicationState())); 2175 return TransitReplicationPeerSyncReplicationStateResponse.newBuilder().setProcId(procId) 2176 .build(); 2177 } catch (ReplicationException | IOException e) { 2178 throw new ServiceException(e); 2179 } 2180 } 2181 2182 @Override 2183 public ListReplicationPeersResponse listReplicationPeers(RpcController controller, 2184 ListReplicationPeersRequest request) throws ServiceException { 2185 ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder(); 2186 try { 2187 List<ReplicationPeerDescription> peers = 2188 server.listReplicationPeers(request.hasRegex() ? request.getRegex() : null); 2189 for (ReplicationPeerDescription peer : peers) { 2190 response.addPeerDesc(ReplicationPeerConfigUtil.toProtoReplicationPeerDescription(peer)); 2191 } 2192 } catch (ReplicationException | IOException e) { 2193 throw new ServiceException(e); 2194 } 2195 return response.build(); 2196 } 2197 2198 @Override 2199 public GetReplicationPeerStateResponse isReplicationPeerEnabled(RpcController controller, 2200 GetReplicationPeerStateRequest request) throws ServiceException { 2201 boolean isEnabled; 2202 try { 2203 isEnabled = server.getReplicationPeerManager().getPeerState(request.getPeerId()); 2204 } catch (ReplicationException ioe) { 2205 throw new ServiceException(ioe); 2206 } 2207 return GetReplicationPeerStateResponse.newBuilder().setIsEnabled(isEnabled).build(); 2208 } 2209 2210 @Override 2211 public ReplicationPeerModificationSwitchResponse replicationPeerModificationSwitch( 2212 RpcController controller, ReplicationPeerModificationSwitchRequest request) 2213 throws ServiceException { 2214 try { 2215 server.checkInitialized(); 2216 boolean prevValue = 
server.replicationPeerModificationSwitch(request.getOn()); 2217 return ReplicationPeerModificationSwitchResponse.newBuilder().setPreviousValue(prevValue) 2218 .build(); 2219 } catch (IOException ioe) { 2220 throw new ServiceException(ioe); 2221 } 2222 } 2223 2224 @Override 2225 public GetReplicationPeerModificationProceduresResponse getReplicationPeerModificationProcedures( 2226 RpcController controller, GetReplicationPeerModificationProceduresRequest request) 2227 throws ServiceException { 2228 try { 2229 server.checkInitialized(); 2230 GetReplicationPeerModificationProceduresResponse.Builder builder = 2231 GetReplicationPeerModificationProceduresResponse.newBuilder(); 2232 for (Procedure<?> proc : server.getProcedures()) { 2233 if (proc.isFinished()) { 2234 continue; 2235 } 2236 if (!(proc instanceof AbstractPeerNoLockProcedure)) { 2237 continue; 2238 } 2239 builder.addProcedure(ProcedureUtil.convertToProtoProcedure(proc)); 2240 } 2241 return builder.build(); 2242 } catch (IOException ioe) { 2243 throw new ServiceException(ioe); 2244 } 2245 } 2246 2247 @Override 2248 public IsReplicationPeerModificationEnabledResponse isReplicationPeerModificationEnabled( 2249 RpcController controller, IsReplicationPeerModificationEnabledRequest request) 2250 throws ServiceException { 2251 try { 2252 server.checkInitialized(); 2253 return IsReplicationPeerModificationEnabledResponse.newBuilder() 2254 .setEnabled(server.isReplicationPeerModificationEnabled()).build(); 2255 } catch (IOException ioe) { 2256 throw new ServiceException(ioe); 2257 } 2258 } 2259 2260 @Override 2261 public ListDecommissionedRegionServersResponse listDecommissionedRegionServers( 2262 RpcController controller, ListDecommissionedRegionServersRequest request) 2263 throws ServiceException { 2264 ListDecommissionedRegionServersResponse.Builder response = 2265 ListDecommissionedRegionServersResponse.newBuilder(); 2266 try { 2267 server.checkInitialized(); 2268 if (server.cpHost != null) { 2269 
server.cpHost.preListDecommissionedRegionServers(); 2270 } 2271 List<ServerName> servers = server.listDecommissionedRegionServers(); 2272 response.addAllServerName((servers.stream().map(server -> ProtobufUtil.toServerName(server))) 2273 .collect(Collectors.toList())); 2274 if (server.cpHost != null) { 2275 server.cpHost.postListDecommissionedRegionServers(); 2276 } 2277 } catch (IOException io) { 2278 throw new ServiceException(io); 2279 } 2280 2281 return response.build(); 2282 } 2283 2284 @Override 2285 public DecommissionRegionServersResponse decommissionRegionServers(RpcController controller, 2286 DecommissionRegionServersRequest request) throws ServiceException { 2287 try { 2288 server.checkInitialized(); 2289 List<ServerName> servers = request.getServerNameList().stream() 2290 .map(pbServer -> ProtobufUtil.toServerName(pbServer)).collect(Collectors.toList()); 2291 boolean offload = request.getOffload(); 2292 if (server.cpHost != null) { 2293 server.cpHost.preDecommissionRegionServers(servers, offload); 2294 } 2295 server.decommissionRegionServers(servers, offload); 2296 if (server.cpHost != null) { 2297 server.cpHost.postDecommissionRegionServers(servers, offload); 2298 } 2299 } catch (IOException io) { 2300 throw new ServiceException(io); 2301 } 2302 2303 return DecommissionRegionServersResponse.newBuilder().build(); 2304 } 2305 2306 @Override 2307 public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller, 2308 RecommissionRegionServerRequest request) throws ServiceException { 2309 try { 2310 server.checkInitialized(); 2311 ServerName sn = ProtobufUtil.toServerName(request.getServerName()); 2312 List<byte[]> encodedRegionNames = request.getRegionList().stream() 2313 .map(regionSpecifier -> regionSpecifier.getValue().toByteArray()) 2314 .collect(Collectors.toList()); 2315 if (server.cpHost != null) { 2316 server.cpHost.preRecommissionRegionServer(sn, encodedRegionNames); 2317 } 2318 server.recommissionRegionServer(sn, 
encodedRegionNames); 2319 if (server.cpHost != null) { 2320 server.cpHost.postRecommissionRegionServer(sn, encodedRegionNames); 2321 } 2322 } catch (IOException io) { 2323 throw new ServiceException(io); 2324 } 2325 2326 return RecommissionRegionServerResponse.newBuilder().build(); 2327 } 2328 2329 @Override 2330 public LockResponse requestLock(RpcController controller, final LockRequest request) 2331 throws ServiceException { 2332 try { 2333 if (request.getDescription().isEmpty()) { 2334 throw new IllegalArgumentException("Empty description"); 2335 } 2336 NonceProcedureRunnable npr; 2337 LockType type = LockType.valueOf(request.getLockType().name()); 2338 if (request.getRegionInfoCount() > 0) { 2339 final RegionInfo[] regionInfos = new RegionInfo[request.getRegionInfoCount()]; 2340 for (int i = 0; i < request.getRegionInfoCount(); ++i) { 2341 regionInfos[i] = ProtobufUtil.toRegionInfo(request.getRegionInfo(i)); 2342 } 2343 npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) { 2344 @Override 2345 protected void run() throws IOException { 2346 setProcId(server.getLockManager().remoteLocks().requestRegionsLock(regionInfos, 2347 request.getDescription(), getNonceKey())); 2348 } 2349 2350 @Override 2351 protected String getDescription() { 2352 return "RequestLock"; 2353 } 2354 }; 2355 } else if (request.hasTableName()) { 2356 final TableName tableName = ProtobufUtil.toTableName(request.getTableName()); 2357 npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) { 2358 @Override 2359 protected void run() throws IOException { 2360 setProcId(server.getLockManager().remoteLocks().requestTableLock(tableName, type, 2361 request.getDescription(), getNonceKey())); 2362 } 2363 2364 @Override 2365 protected String getDescription() { 2366 return "RequestLock"; 2367 } 2368 }; 2369 } else if (request.hasNamespace()) { 2370 npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) { 2371 
@Override 2372 protected void run() throws IOException { 2373 setProcId(server.getLockManager().remoteLocks().requestNamespaceLock( 2374 request.getNamespace(), type, request.getDescription(), getNonceKey())); 2375 } 2376 2377 @Override 2378 protected String getDescription() { 2379 return "RequestLock"; 2380 } 2381 }; 2382 } else { 2383 throw new IllegalArgumentException("one of table/namespace/region should be specified"); 2384 } 2385 long procId = MasterProcedureUtil.submitProcedure(npr); 2386 return LockResponse.newBuilder().setProcId(procId).build(); 2387 } catch (IllegalArgumentException e) { 2388 LOG.warn("Exception when queuing lock", e); 2389 throw new ServiceException(new DoNotRetryIOException(e)); 2390 } catch (IOException e) { 2391 LOG.warn("Exception when queuing lock", e); 2392 throw new ServiceException(e); 2393 } 2394 } 2395 2396 /** 2397 * @return LOCKED, if procedure is found and it has the lock; else UNLOCKED. 2398 * @throws ServiceException if given proc id is found but it is not a LockProcedure. 
2399 */ 2400 @Override 2401 public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request) 2402 throws ServiceException { 2403 try { 2404 if ( 2405 server.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(), 2406 request.getKeepAlive()) 2407 ) { 2408 return LockHeartbeatResponse.newBuilder() 2409 .setTimeoutMs(server.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF, 2410 LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS)) 2411 .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).build(); 2412 } else { 2413 return LockHeartbeatResponse.newBuilder() 2414 .setLockStatus(LockHeartbeatResponse.LockStatus.UNLOCKED).build(); 2415 } 2416 } catch (IOException e) { 2417 throw new ServiceException(e); 2418 } 2419 } 2420 2421 @Override 2422 public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller, 2423 RegionSpaceUseReportRequest request) throws ServiceException { 2424 try { 2425 server.checkInitialized(); 2426 if (!QuotaUtil.isQuotaEnabled(server.getConfiguration())) { 2427 return RegionSpaceUseReportResponse.newBuilder().build(); 2428 } 2429 MasterQuotaManager quotaManager = this.server.getMasterQuotaManager(); 2430 if (quotaManager != null) { 2431 final long now = EnvironmentEdgeManager.currentTime(); 2432 for (RegionSpaceUse report : request.getSpaceUseList()) { 2433 quotaManager.addRegionSize(ProtobufUtil.toRegionInfo(report.getRegionInfo()), 2434 report.getRegionSize(), now); 2435 } 2436 } else { 2437 LOG.debug("Received region space usage report but HMaster is not ready to process it, " 2438 + "skipping"); 2439 } 2440 return RegionSpaceUseReportResponse.newBuilder().build(); 2441 } catch (Exception e) { 2442 throw new ServiceException(e); 2443 } 2444 } 2445 2446 @Override 2447 public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(RpcController controller, 2448 GetSpaceQuotaRegionSizesRequest request) throws ServiceException { 2449 try { 2450 server.checkInitialized(); 
2451 MasterQuotaManager quotaManager = this.server.getMasterQuotaManager(); 2452 GetSpaceQuotaRegionSizesResponse.Builder builder = 2453 GetSpaceQuotaRegionSizesResponse.newBuilder(); 2454 if (quotaManager != null) { 2455 Map<RegionInfo, Long> regionSizes = quotaManager.snapshotRegionSizes(); 2456 Map<TableName, Long> regionSizesByTable = new HashMap<>(); 2457 // Translate hregioninfo+long -> tablename+long 2458 for (Entry<RegionInfo, Long> entry : regionSizes.entrySet()) { 2459 final TableName tableName = entry.getKey().getTable(); 2460 Long prevSize = regionSizesByTable.get(tableName); 2461 if (prevSize == null) { 2462 prevSize = 0L; 2463 } 2464 regionSizesByTable.put(tableName, prevSize + entry.getValue()); 2465 } 2466 // Serialize them into the protobuf 2467 for (Entry<TableName, Long> tableSize : regionSizesByTable.entrySet()) { 2468 builder.addSizes( 2469 RegionSizes.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableSize.getKey())) 2470 .setSize(tableSize.getValue()).build()); 2471 } 2472 return builder.build(); 2473 } else { 2474 LOG.debug("Received space quota region size report but HMaster is not ready to process it," 2475 + "skipping"); 2476 } 2477 return builder.build(); 2478 } catch (Exception e) { 2479 throw new ServiceException(e); 2480 } 2481 } 2482 2483 @Override 2484 public GetQuotaStatesResponse getQuotaStates(RpcController controller, 2485 GetQuotaStatesRequest request) throws ServiceException { 2486 try { 2487 server.checkInitialized(); 2488 QuotaObserverChore quotaChore = this.server.getQuotaObserverChore(); 2489 GetQuotaStatesResponse.Builder builder = GetQuotaStatesResponse.newBuilder(); 2490 if (quotaChore != null) { 2491 // The "current" view of all tables with quotas 2492 Map<TableName, SpaceQuotaSnapshot> tableSnapshots = quotaChore.getTableQuotaSnapshots(); 2493 for (Entry<TableName, SpaceQuotaSnapshot> entry : tableSnapshots.entrySet()) { 2494 builder.addTableSnapshots(TableQuotaSnapshot.newBuilder() 2495 
.setTableName(ProtobufUtil.toProtoTableName(entry.getKey())) 2496 .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build()); 2497 } 2498 // The "current" view of all namespaces with quotas 2499 Map<String, SpaceQuotaSnapshot> nsSnapshots = quotaChore.getNamespaceQuotaSnapshots(); 2500 for (Entry<String, SpaceQuotaSnapshot> entry : nsSnapshots.entrySet()) { 2501 builder.addNsSnapshots(NamespaceQuotaSnapshot.newBuilder().setNamespace(entry.getKey()) 2502 .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build()); 2503 } 2504 return builder.build(); 2505 } 2506 return builder.build(); 2507 } catch (Exception e) { 2508 throw new ServiceException(e); 2509 } 2510 } 2511 2512 @Override 2513 public ClearDeadServersResponse clearDeadServers(RpcController controller, 2514 ClearDeadServersRequest request) throws ServiceException { 2515 LOG.debug(server.getClientIdAuditPrefix() + " clear dead region servers."); 2516 ClearDeadServersResponse.Builder response = ClearDeadServersResponse.newBuilder(); 2517 try { 2518 server.checkInitialized(); 2519 if (server.cpHost != null) { 2520 server.cpHost.preClearDeadServers(); 2521 } 2522 2523 if (server.getServerManager().areDeadServersInProgress()) { 2524 LOG.debug("Some dead server is still under processing, won't clear the dead server list"); 2525 response.addAllServerName(request.getServerNameList()); 2526 } else { 2527 DeadServer deadServer = server.getServerManager().getDeadServers(); 2528 Set<Address> clearedServers = new HashSet<>(); 2529 for (HBaseProtos.ServerName pbServer : request.getServerNameList()) { 2530 ServerName serverName = ProtobufUtil.toServerName(pbServer); 2531 final boolean deadInProcess = 2532 server.getProcedures().stream().anyMatch(p -> (p instanceof ServerCrashProcedure) 2533 && ((ServerCrashProcedure) p).getServerName().equals(serverName)); 2534 if (deadInProcess) { 2535 throw new ServiceException( 2536 String.format("Dead server '%s' is not 'dead' in fact...", serverName)); 
2537 } 2538 2539 if (!deadServer.removeDeadServer(serverName)) { 2540 response.addServerName(pbServer); 2541 } else { 2542 clearedServers.add(serverName.getAddress()); 2543 } 2544 } 2545 server.getRSGroupInfoManager().removeServers(clearedServers); 2546 LOG.info("Remove decommissioned servers {} from RSGroup done", clearedServers); 2547 } 2548 2549 if (server.cpHost != null) { 2550 server.cpHost.postClearDeadServers( 2551 ProtobufUtil.toServerNameList(request.getServerNameList()), 2552 ProtobufUtil.toServerNameList(response.getServerNameList())); 2553 } 2554 } catch (IOException io) { 2555 throw new ServiceException(io); 2556 } 2557 return response.build(); 2558 } 2559 2560 @Override 2561 public ReportProcedureDoneResponse reportProcedureDone(RpcController controller, 2562 ReportProcedureDoneRequest request) throws ServiceException { 2563 // Check Masters is up and ready for duty before progressing. Remote side will keep trying. 2564 try { 2565 this.server.checkServiceStarted(); 2566 for (RemoteProcedureResult result : request.getResultList()) { 2567 // -1 is less than any possible MasterActiveCode 2568 long initiatingMasterActiveTime = 2569 result.hasInitiatingMasterActiveTime() ? 
result.getInitiatingMasterActiveTime() : -1; 2570 throwOnOldMaster(result.getProcId(), initiatingMasterActiveTime); 2571 } 2572 } catch (IOException ioe) { 2573 throw new ServiceException(ioe); 2574 } 2575 request.getResultList().forEach(result -> { 2576 if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) { 2577 server.remoteProcedureCompleted(result.getProcId()); 2578 } else { 2579 server.remoteProcedureFailed(result.getProcId(), 2580 RemoteProcedureException.fromProto(result.getError())); 2581 } 2582 }); 2583 return ReportProcedureDoneResponse.getDefaultInstance(); 2584 } 2585 2586 private void throwOnOldMaster(long procId, long initiatingMasterActiveTime) 2587 throws MasterNotRunningException { 2588 if (initiatingMasterActiveTime > server.getMasterActiveTime()) { 2589 // procedure is initiated by new active master but report received on master with older active 2590 // time 2591 LOG.warn( 2592 "Report for procId: {} and initiatingMasterAT {} received on master with activeTime {}", 2593 procId, initiatingMasterActiveTime, server.getMasterActiveTime()); 2594 throw new MasterNotRunningException("Another master is active"); 2595 } 2596 } 2597 2598 @Override 2599 public FileArchiveNotificationResponse reportFileArchival(RpcController controller, 2600 FileArchiveNotificationRequest request) throws ServiceException { 2601 try { 2602 server.checkInitialized(); 2603 if (!QuotaUtil.isQuotaEnabled(server.getConfiguration())) { 2604 return FileArchiveNotificationResponse.newBuilder().build(); 2605 } 2606 server.getMasterQuotaManager().processFileArchivals(request, server.getConnection(), 2607 server.getConfiguration(), server.getFileSystem()); 2608 return FileArchiveNotificationResponse.newBuilder().build(); 2609 } catch (Exception e) { 2610 throw new ServiceException(e); 2611 } 2612 } 2613 2614 // HBCK Services 2615 2616 @Override 2617 public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest req) 2618 throws ServiceException { 2619 
rpcPreCheck("runHbckChore"); 2620 LOG.info("{} request HBCK chore to run", server.getClientIdAuditPrefix()); 2621 HbckChore hbckChore = server.getHbckChore(); 2622 boolean ran = hbckChore.runChore(); 2623 return RunHbckChoreResponse.newBuilder().setRan(ran).build(); 2624 } 2625 2626 /** 2627 * Update state of the table in meta only. This is required by hbck in some situations to cleanup 2628 * stuck assign/ unassign regions procedures for the table. 2629 * @return previous state of the table 2630 */ 2631 @Override 2632 public GetTableStateResponse setTableStateInMeta(RpcController controller, 2633 SetTableStateInMetaRequest request) throws ServiceException { 2634 rpcPreCheck("setTableStateInMeta"); 2635 TableName tn = ProtobufUtil.toTableName(request.getTableName()); 2636 try { 2637 TableState prevState = this.server.getTableStateManager().getTableState(tn); 2638 TableState newState = TableState.convert(tn, request.getTableState()); 2639 LOG.info("{} set table={} state from {} to {}", server.getClientIdAuditPrefix(), tn, 2640 prevState.getState(), newState.getState()); 2641 this.server.getTableStateManager().setTableState(tn, newState.getState()); 2642 return GetTableStateResponse.newBuilder().setTableState(prevState.convert()).build(); 2643 } catch (Exception e) { 2644 throw new ServiceException(e); 2645 } 2646 } 2647 2648 /** 2649 * Update state of the region in meta only. This is required by hbck in some situations to cleanup 2650 * stuck assign/ unassign regions procedures for the table. 
2651 * @return previous states of the regions 2652 */ 2653 @Override 2654 public SetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller, 2655 SetRegionStateInMetaRequest request) throws ServiceException { 2656 rpcPreCheck("setRegionStateInMeta"); 2657 SetRegionStateInMetaResponse.Builder builder = SetRegionStateInMetaResponse.newBuilder(); 2658 final AssignmentManager am = server.getAssignmentManager(); 2659 try { 2660 for (RegionSpecifierAndState s : request.getStatesList()) { 2661 final RegionSpecifier spec = s.getRegionSpecifier(); 2662 final RegionInfo targetRegionInfo = getRegionInfo(spec); 2663 final RegionState.State targetState = RegionState.State.convert(s.getState()); 2664 final RegionState.State currentState = Optional.ofNullable(targetRegionInfo) 2665 .map(info -> am.getRegionStates().getRegionState(info)).map(RegionState::getState) 2666 .orElseThrow( 2667 () -> new ServiceException("No existing state known for region '" + spec + "'.")); 2668 LOG.info("{} set region={} state from {} to {}", server.getClientIdAuditPrefix(), 2669 targetRegionInfo, currentState, targetState); 2670 if (currentState == targetState) { 2671 LOG.debug("Proposed state matches current state. {}, {}", targetRegionInfo, currentState); 2672 continue; 2673 } 2674 MetaTableAccessor.updateRegionState(server.getConnection(), targetRegionInfo, targetState); 2675 // Loads from meta again to refresh AM cache with the new region state 2676 am.populateRegionStatesFromMeta(targetRegionInfo); 2677 builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec) 2678 .setState(currentState.convert())); 2679 } 2680 } catch (IOException e) { 2681 throw new ServiceException(e); 2682 } 2683 return builder.build(); 2684 } 2685 2686 /** 2687 * Get {@link RegionInfo} from Master using content of {@link RegionSpecifier} as key. 2688 * @return {@link RegionInfo} found by decoding {@code rs} or {@code null} if {@code rs} is 2689 * unknown to the master. 
2690 * @throws ServiceException If some error occurs while querying META or parsing results. 2691 */ 2692 private RegionInfo getRegionInfo(HBaseProtos.RegionSpecifier rs) throws ServiceException { 2693 // TODO: this doesn't handle MOB regions. Should it? See the public method #getRegionInfo 2694 final AssignmentManager am = server.getAssignmentManager(); 2695 final String encodedRegionName; 2696 final RegionInfo info; 2697 // first try resolving from the AM's caches. 2698 switch (rs.getType()) { 2699 case REGION_NAME: 2700 final byte[] regionName = rs.getValue().toByteArray(); 2701 encodedRegionName = RegionInfo.encodeRegionName(regionName); 2702 info = am.getRegionInfo(regionName); 2703 break; 2704 case ENCODED_REGION_NAME: 2705 encodedRegionName = rs.getValue().toStringUtf8(); 2706 info = am.getRegionInfo(encodedRegionName); 2707 break; 2708 default: 2709 throw new IllegalArgumentException("Unrecognized RegionSpecifierType " + rs.getType()); 2710 } 2711 if (info != null) { 2712 return info; 2713 } 2714 // fall back to a meta scan and check the cache again. 2715 try { 2716 am.populateRegionStatesFromMeta(encodedRegionName); 2717 } catch (IOException e) { 2718 throw new ServiceException(e); 2719 } 2720 return am.getRegionInfo(encodedRegionName); 2721 } 2722 2723 /** 2724 * @throws ServiceException If no MasterProcedureExecutor 2725 */ 2726 private void checkMasterProcedureExecutor() throws ServiceException { 2727 if (this.server.getMasterProcedureExecutor() == null) { 2728 throw new ServiceException("Master's ProcedureExecutor not initialized; retry later"); 2729 } 2730 } 2731 2732 /** 2733 * A 'raw' version of assign that does bulk and can skirt Master state checks if override is set; 2734 * i.e. assigns can be forced during Master startup or if RegionState is unclean. Used by HBCK2. 
2735 */ 2736 @Override 2737 public MasterProtos.AssignsResponse assigns(RpcController controller, 2738 MasterProtos.AssignsRequest request) throws ServiceException { 2739 checkMasterProcedureExecutor(); 2740 final ProcedureExecutor<MasterProcedureEnv> pe = server.getMasterProcedureExecutor(); 2741 final AssignmentManager am = server.getAssignmentManager(); 2742 MasterProtos.AssignsResponse.Builder responseBuilder = 2743 MasterProtos.AssignsResponse.newBuilder(); 2744 final boolean override = request.getOverride(); 2745 final boolean force = request.getForce(); 2746 LOG.info("{} assigns, override={}", server.getClientIdAuditPrefix(), override); 2747 for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) { 2748 final RegionInfo info = getRegionInfo(rs); 2749 if (info == null) { 2750 LOG.info("Unknown region {}", rs); 2751 continue; 2752 } 2753 responseBuilder.addPid(Optional.ofNullable(am.createOneAssignProcedure(info, override, force)) 2754 .map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID)); 2755 } 2756 return responseBuilder.build(); 2757 } 2758 2759 /** 2760 * A 'raw' version of unassign that does bulk and can skirt Master state checks if override is 2761 * set; i.e. unassigns can be forced during Master startup or if RegionState is unclean. Used by 2762 * HBCK2. 
2763 */ 2764 @Override 2765 public MasterProtos.UnassignsResponse unassigns(RpcController controller, 2766 MasterProtos.UnassignsRequest request) throws ServiceException { 2767 checkMasterProcedureExecutor(); 2768 final ProcedureExecutor<MasterProcedureEnv> pe = server.getMasterProcedureExecutor(); 2769 final AssignmentManager am = server.getAssignmentManager(); 2770 MasterProtos.UnassignsResponse.Builder responseBuilder = 2771 MasterProtos.UnassignsResponse.newBuilder(); 2772 final boolean override = request.getOverride(); 2773 final boolean force = request.getForce(); 2774 LOG.info("{} unassigns, override={}", server.getClientIdAuditPrefix(), override); 2775 for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) { 2776 final RegionInfo info = getRegionInfo(rs); 2777 if (info == null) { 2778 LOG.info("Unknown region {}", rs); 2779 continue; 2780 } 2781 responseBuilder 2782 .addPid(Optional.ofNullable(am.createOneUnassignProcedure(info, override, force)) 2783 .map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID)); 2784 } 2785 return responseBuilder.build(); 2786 } 2787 2788 /** 2789 * Bypass specified procedure to completion. Procedure is marked completed but no actual work is 2790 * done from the current state/ step onwards. Parents of the procedure are also marked for bypass. 2791 * NOTE: this is a dangerous operation and may be used to unstuck buggy procedures. This may leave 2792 * system in incoherent state. This may need to be followed by some cleanup steps/ actions by 2793 * operator. 
   * @return BypassProcedureToCompletionResponse indicating success or failure
   */
  @Override
  public MasterProtos.BypassProcedureResponse bypassProcedure(RpcController controller,
    MasterProtos.BypassProcedureRequest request) throws ServiceException {
    try {
      LOG.info("{} bypass procedures={}, waitTime={}, override={}, recursive={}",
        server.getClientIdAuditPrefix(), request.getProcIdList(), request.getWaitTime(),
        request.getOverride(), request.getRecursive());
      // One boolean per requested pid: whether that procedure was actually bypassed.
      List<Boolean> ret =
        server.getMasterProcedureExecutor().bypassProcedure(request.getProcIdList(),
          request.getWaitTime(), request.getOverride(), request.getRecursive());
      return MasterProtos.BypassProcedureResponse.newBuilder().addAllBypassed(ret).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Schedules a ServerCrashProcedure for each requested server, skipping servers that already
   * have an unfinished SCP (those report {@code Procedure.NO_PROC_ID}). Used by HBCK2.
   */
  @Override
  public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProcedure(
    RpcController controller, MasterProtos.ScheduleServerCrashProcedureRequest request)
    throws ServiceException {
    List<Long> pids = new ArrayList<>();
    for (HBaseProtos.ServerName sn : request.getServerNameList()) {
      ServerName serverName = ProtobufUtil.toServerName(sn);
      LOG.info("{} schedule ServerCrashProcedure for {}", this.server.getClientIdAuditPrefix(),
        serverName);
      if (shouldSubmitSCP(serverName)) {
        pids.add(this.server.getServerManager().expireServer(serverName, true));
      } else {
        pids.add(Procedure.NO_PROC_ID);
      }
    }
    return MasterProtos.ScheduleServerCrashProcedureResponse.newBuilder().addAllPid(pids).build();
  }

  /**
   * Schedules ServerCrashProcedures for every server that still holds regions (per the
   * AssignmentManager's region states) but is unknown to the ServerManager.
   */
  @Override
  public MasterProtos.ScheduleSCPsForUnknownServersResponse scheduleSCPsForUnknownServers(
    RpcController controller, MasterProtos.ScheduleSCPsForUnknownServersRequest request)
    throws ServiceException {
    List<Long> pids = new ArrayList<>();
    // Candidate set: every server name referenced by some region's current state.
    final Set<ServerName> serverNames = server.getAssignmentManager().getRegionStates()
      .getRegionStates().stream().map(RegionState::getServerName).collect(Collectors.toSet());

    final Set<ServerName> unknownServerNames = serverNames.stream()
      .filter(sn -> server.getServerManager().isServerUnknown(sn)).collect(Collectors.toSet());

    for (ServerName sn : unknownServerNames) {
      LOG.info("{} schedule ServerCrashProcedure for unknown {}",
        this.server.getClientIdAuditPrefix(), sn);
      if (shouldSubmitSCP(sn)) {
        pids.add(this.server.getServerManager().expireServer(sn, true));
      } else {
        pids.add(Procedure.NO_PROC_ID);
      }
    }
    return MasterProtos.ScheduleSCPsForUnknownServersResponse.newBuilder().addAllPid(pids).build();
  }

  /**
   * Runs the MetaFixer to repair holes/overlaps reported for hbase:meta. HBCK2 entry point.
   */
  @Override
  public FixMetaResponse fixMeta(RpcController controller, FixMetaRequest request)
    throws ServiceException {
    rpcPreCheck("fixMeta");
    try {
      MetaFixer mf = new MetaFixer(this.server);
      mf.fix();
      return FixMetaResponse.newBuilder().build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Delegates RPC throttle enable/disable to the master quota manager. */
  @Override
  public SwitchRpcThrottleResponse switchRpcThrottle(RpcController controller,
    SwitchRpcThrottleRequest request) throws ServiceException {
    try {
      server.checkInitialized();
      return server.getMasterQuotaManager().switchRpcThrottle(request);
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  /** Reports whether RPC throttling is currently enabled, per the master quota manager. */
  @Override
  public MasterProtos.IsRpcThrottleEnabledResponse isRpcThrottleEnabled(RpcController controller,
    MasterProtos.IsRpcThrottleEnabledRequest request) throws ServiceException {
    try {
      server.checkInitialized();
      return server.getMasterQuotaManager().isRpcThrottleEnabled(request);
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public SwitchExceedThrottleQuotaResponse switchExceedThrottleQuota(RpcController
controller, 2890 SwitchExceedThrottleQuotaRequest request) throws ServiceException { 2891 try { 2892 server.checkInitialized(); 2893 return server.getMasterQuotaManager().switchExceedThrottleQuota(request); 2894 } catch (Exception e) { 2895 throw new ServiceException(e); 2896 } 2897 } 2898 2899 @Override 2900 public GrantResponse grant(RpcController controller, GrantRequest request) 2901 throws ServiceException { 2902 try { 2903 server.checkInitialized(); 2904 if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) { 2905 final UserPermission perm = 2906 ShadedAccessControlUtil.toUserPermission(request.getUserPermission()); 2907 boolean mergeExistingPermissions = request.getMergeExistingPermissions(); 2908 server.cpHost.preGrant(perm, mergeExistingPermissions); 2909 try (Table table = server.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { 2910 PermissionStorage.addUserPermission(getConfiguration(), perm, table, 2911 mergeExistingPermissions); 2912 } 2913 server.cpHost.postGrant(perm, mergeExistingPermissions); 2914 return GrantResponse.getDefaultInstance(); 2915 } else { 2916 throw new DoNotRetryIOException( 2917 new UnsupportedOperationException(AccessController.class.getName() + " is not loaded")); 2918 } 2919 } catch (IOException ioe) { 2920 throw new ServiceException(ioe); 2921 } 2922 } 2923 2924 @Override 2925 public RevokeResponse revoke(RpcController controller, RevokeRequest request) 2926 throws ServiceException { 2927 try { 2928 server.checkInitialized(); 2929 if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) { 2930 final UserPermission userPermission = 2931 ShadedAccessControlUtil.toUserPermission(request.getUserPermission()); 2932 server.cpHost.preRevoke(userPermission); 2933 try (Table table = server.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { 2934 PermissionStorage.removeUserPermission(server.getConfiguration(), userPermission, table); 2935 } 2936 
server.cpHost.postRevoke(userPermission); 2937 return RevokeResponse.getDefaultInstance(); 2938 } else { 2939 throw new DoNotRetryIOException( 2940 new UnsupportedOperationException(AccessController.class.getName() + " is not loaded")); 2941 } 2942 } catch (IOException ioe) { 2943 throw new ServiceException(ioe); 2944 } 2945 } 2946 2947 @Override 2948 public GetUserPermissionsResponse getUserPermissions(RpcController controller, 2949 GetUserPermissionsRequest request) throws ServiceException { 2950 try { 2951 server.checkInitialized(); 2952 if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) { 2953 final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null; 2954 String namespace = 2955 request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null; 2956 TableName table = 2957 request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null; 2958 byte[] cf = request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null; 2959 byte[] cq = 2960 request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null; 2961 Type permissionType = request.hasType() ? request.getType() : null; 2962 server.getMasterCoprocessorHost().preGetUserPermissions(userName, namespace, table, cf, cq); 2963 2964 List<UserPermission> perms = null; 2965 if (permissionType == Type.Table) { 2966 boolean filter = (cf != null || userName != null) ? true : false; 2967 perms = PermissionStorage.getUserTablePermissions(server.getConfiguration(), table, cf, 2968 cq, userName, filter); 2969 } else if (permissionType == Type.Namespace) { 2970 perms = PermissionStorage.getUserNamespacePermissions(server.getConfiguration(), 2971 namespace, userName, userName != null ? true : false); 2972 } else { 2973 perms = PermissionStorage.getUserPermissions(server.getConfiguration(), null, null, null, 2974 userName, userName != null ? 
true : false); 2975 // Skip super users when filter user is specified 2976 if (userName == null) { 2977 // Adding superusers explicitly to the result set as PermissionStorage do not store 2978 // them. Also using acl as table name to be inline with the results of global admin and 2979 // will help in avoiding any leakage of information about being superusers. 2980 for (String user : Superusers.getSuperUsers()) { 2981 perms.add(new UserPermission(user, 2982 Permission.newBuilder().withActions(Action.values()).build())); 2983 } 2984 } 2985 } 2986 2987 server.getMasterCoprocessorHost().postGetUserPermissions(userName, namespace, table, cf, 2988 cq); 2989 AccessControlProtos.GetUserPermissionsResponse response = 2990 ShadedAccessControlUtil.buildGetUserPermissionsResponse(perms); 2991 return response; 2992 } else { 2993 throw new DoNotRetryIOException( 2994 new UnsupportedOperationException(AccessController.class.getName() + " is not loaded")); 2995 } 2996 } catch (IOException ioe) { 2997 throw new ServiceException(ioe); 2998 } 2999 } 3000 3001 @Override 3002 public HasUserPermissionsResponse hasUserPermissions(RpcController controller, 3003 HasUserPermissionsRequest request) throws ServiceException { 3004 try { 3005 server.checkInitialized(); 3006 if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) { 3007 User caller = RpcServer.getRequestUser().orElse(null); 3008 String userName = 3009 request.hasUserName() ? 
request.getUserName().toStringUtf8() : caller.getShortName(); 3010 List<Permission> permissions = new ArrayList<>(); 3011 for (int i = 0; i < request.getPermissionCount(); i++) { 3012 permissions.add(ShadedAccessControlUtil.toPermission(request.getPermission(i))); 3013 } 3014 server.getMasterCoprocessorHost().preHasUserPermissions(userName, permissions); 3015 if (!caller.getShortName().equals(userName)) { 3016 List<String> groups = AccessChecker.getUserGroups(userName); 3017 caller = new InputUser(userName, groups.toArray(new String[groups.size()])); 3018 } 3019 List<Boolean> hasUserPermissions = new ArrayList<>(); 3020 if (getAccessChecker() != null) { 3021 for (Permission permission : permissions) { 3022 boolean hasUserPermission = 3023 getAccessChecker().hasUserPermission(caller, "hasUserPermissions", permission); 3024 hasUserPermissions.add(hasUserPermission); 3025 } 3026 } else { 3027 for (int i = 0; i < permissions.size(); i++) { 3028 hasUserPermissions.add(true); 3029 } 3030 } 3031 server.getMasterCoprocessorHost().postHasUserPermissions(userName, permissions); 3032 HasUserPermissionsResponse.Builder builder = 3033 HasUserPermissionsResponse.newBuilder().addAllHasUserPermission(hasUserPermissions); 3034 return builder.build(); 3035 } else { 3036 throw new DoNotRetryIOException( 3037 new UnsupportedOperationException(AccessController.class.getName() + " is not loaded")); 3038 } 3039 } catch (IOException ioe) { 3040 throw new ServiceException(ioe); 3041 } 3042 } 3043 3044 private boolean shouldSubmitSCP(ServerName serverName) { 3045 // check if there is already a SCP of this server running 3046 List<Procedure<MasterProcedureEnv>> procedures = 3047 server.getMasterProcedureExecutor().getProcedures(); 3048 for (Procedure<MasterProcedureEnv> procedure : procedures) { 3049 if (procedure instanceof ServerCrashProcedure) { 3050 if ( 3051 serverName.compareTo(((ServerCrashProcedure) procedure).getServerName()) == 0 3052 && !procedure.isFinished() 3053 ) { 3054 
          LOG.info("there is already a SCP of this server {} running, pid {}", serverName,
            procedure.getProcId());
          return false;
        }
      }
    }
    return true;
  }

  /**
   * Returns the named rsgroup's composition, or a default (empty) response when the group does
   * not exist. Invokes the pre/post coprocessor hooks around the lookup.
   */
  @Override
  public GetRSGroupInfoResponse getRSGroupInfo(RpcController controller,
    GetRSGroupInfoRequest request) throws ServiceException {
    String groupName = request.getRSGroupName();
    LOG.info(
      server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" + groupName);
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preGetRSGroupInfo(groupName);
      }
      RSGroupInfo rsGroupInfo = server.getRSGroupInfoManager().getRSGroup(groupName);
      GetRSGroupInfoResponse resp;
      if (rsGroupInfo != null) {
        resp = GetRSGroupInfoResponse.newBuilder()
          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
      } else {
        // Unknown group: respond with the empty default rather than an error.
        resp = GetRSGroupInfoResponse.getDefaultInstance();
      }
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postGetRSGroupInfo(groupName);
      }
      return resp;
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Returns the rsgroup a table belongs to (falling back to the DEFAULT group), or a default
   * (empty) response when the table has no descriptor.
   */
  @Override
  public GetRSGroupInfoOfTableResponse getRSGroupInfoOfTable(RpcController controller,
    GetRSGroupInfoOfTableRequest request) throws ServiceException {
    TableName tableName = ProtobufUtil.toTableName(request.getTableName());
    LOG.info(
      server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName);
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName);
      }
      GetRSGroupInfoOfTableResponse resp;
      TableDescriptor td = server.getTableDescriptors().get(tableName);
      if (td == null) {
        // No descriptor means the table is unknown; reply empty rather than erroring.
        resp = GetRSGroupInfoOfTableResponse.getDefaultInstance();
      } else {
        RSGroupInfo rsGroupInfo =
          RSGroupUtil.getRSGroupInfo(server, server.getRSGroupInfoManager(), tableName)
            .orElse(server.getRSGroupInfoManager().getRSGroup(RSGroupInfo.DEFAULT_GROUP));
        resp = GetRSGroupInfoOfTableResponse.newBuilder()
          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
      }
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName);
      }
      return resp;
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Returns the rsgroup containing the given host:port, or a default (empty) response when the
   * server belongs to no group.
   */
  @Override
  public GetRSGroupInfoOfServerResponse getRSGroupInfoOfServer(RpcController controller,
    GetRSGroupInfoOfServerRequest request) throws ServiceException {
    Address hp =
      Address.fromParts(request.getServer().getHostName(), request.getServer().getPort());
    LOG.info(server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp);
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp);
      }
      RSGroupInfo rsGroupInfo = server.getRSGroupInfoManager().getRSGroupOfServer(hp);
      GetRSGroupInfoOfServerResponse resp;
      if (rsGroupInfo != null) {
        resp = GetRSGroupInfoOfServerResponse.newBuilder()
          .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build();
      } else {
        resp = GetRSGroupInfoOfServerResponse.getDefaultInstance();
      }
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp);
      }
      return resp;
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Moves the requested servers into the target rsgroup, with pre/post coprocessor hooks.
   */
  @Override
  public MoveServersResponse moveServers(RpcController controller, MoveServersRequest request)
    throws ServiceException {
    Set<Address> hostPorts = Sets.newHashSet();
    MoveServersResponse.Builder builder = MoveServersResponse.newBuilder();
    for (HBaseProtos.ServerName el :
request.getServersList()) {
      hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
    }
    LOG.info(server.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup "
      + request.getTargetGroup());
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup());
      }
      server.getRSGroupInfoManager().moveServers(hostPorts, request.getTargetGroup());
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup());
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return builder.build();
  }

  /** Creates a new, empty rsgroup with the requested name, with pre/post coprocessor hooks. */
  @Override
  public AddRSGroupResponse addRSGroup(RpcController controller, AddRSGroupRequest request)
    throws ServiceException {
    AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder();
    LOG.info(server.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName());
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName());
      }
      server.getRSGroupInfoManager().addRSGroup(new RSGroupInfo(request.getRSGroupName()));
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName());
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return builder.build();
  }

  /** Deletes the named rsgroup, with pre/post coprocessor hooks. */
  @Override
  public RemoveRSGroupResponse removeRSGroup(RpcController controller, RemoveRSGroupRequest request)
    throws ServiceException {
    RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder();
    LOG.info(server.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName());
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName());
      }
      server.getRSGroupInfoManager().removeRSGroup(request.getRSGroupName());
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName());
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return builder.build();
  }

  /**
   * Runs the balancer restricted to one rsgroup; the response reports whether a balance ran and
   * its outcome, filled in from the group manager's BalanceResponse.
   */
  @Override
  public BalanceRSGroupResponse balanceRSGroup(RpcController controller,
    BalanceRSGroupRequest request) throws ServiceException {
    BalanceRequest balanceRequest = ProtobufUtil.toBalanceRequest(request);

    // Default balanceRan=false; overwritten below from the manager's actual response.
    BalanceRSGroupResponse.Builder builder =
      BalanceRSGroupResponse.newBuilder().setBalanceRan(false);

    LOG.info(
      server.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName());
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName(),
          balanceRequest);
      }
      BalanceResponse response =
        server.getRSGroupInfoManager().balanceRSGroup(request.getRSGroupName(), balanceRequest);
      ProtobufUtil.populateBalanceRSGroupResponse(builder, response);
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(),
          balanceRequest, response);
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return builder.build();
  }

  /**
   * Lists all rsgroups, back-filling each group's table set from table descriptors for tables
   * not explicitly tracked by a group.
   */
  @Override
  public ListRSGroupInfosResponse listRSGroupInfos(RpcController controller,
    ListRSGroupInfosRequest request) throws ServiceException {
    ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder();
    LOG.info(server.getClientIdAuditPrefix() + " list rsgroup");
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preListRSGroups();
      }
List<RSGroupInfo> rsGroupInfos = server.getRSGroupInfoManager().listRSGroups().stream() 3247 .map(RSGroupInfo::new).collect(Collectors.toList()); 3248 Map<String, RSGroupInfo> name2Info = new HashMap<>(); 3249 List<TableDescriptor> needToFill = 3250 new ArrayList<>(server.getTableDescriptors().getAll().values()); 3251 for (RSGroupInfo rsGroupInfo : rsGroupInfos) { 3252 name2Info.put(rsGroupInfo.getName(), rsGroupInfo); 3253 for (TableDescriptor td : server.getTableDescriptors().getAll().values()) { 3254 if (rsGroupInfo.containsTable(td.getTableName())) { 3255 needToFill.remove(td); 3256 } 3257 } 3258 } 3259 for (TableDescriptor td : needToFill) { 3260 String groupName = td.getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP); 3261 RSGroupInfo rsGroupInfo = name2Info.get(groupName); 3262 if (rsGroupInfo != null) { 3263 rsGroupInfo.addTable(td.getTableName()); 3264 } 3265 } 3266 for (RSGroupInfo rsGroupInfo : rsGroupInfos) { 3267 // TODO: this can be done at once outside this loop, do not need to scan all every time. 
3268 builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)); 3269 } 3270 if (server.getMasterCoprocessorHost() != null) { 3271 server.getMasterCoprocessorHost().postListRSGroups(); 3272 } 3273 } catch (IOException e) { 3274 throw new ServiceException(e); 3275 } 3276 return builder.build(); 3277 } 3278 3279 @Override 3280 public RemoveServersResponse removeServers(RpcController controller, RemoveServersRequest request) 3281 throws ServiceException { 3282 RemoveServersResponse.Builder builder = RemoveServersResponse.newBuilder(); 3283 Set<Address> servers = Sets.newHashSet(); 3284 for (HBaseProtos.ServerName el : request.getServersList()) { 3285 servers.add(Address.fromParts(el.getHostName(), el.getPort())); 3286 } 3287 LOG.info( 3288 server.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " + servers); 3289 try { 3290 if (server.getMasterCoprocessorHost() != null) { 3291 server.getMasterCoprocessorHost().preRemoveServers(servers); 3292 } 3293 server.getRSGroupInfoManager().removeServers(servers); 3294 if (server.getMasterCoprocessorHost() != null) { 3295 server.getMasterCoprocessorHost().postRemoveServers(servers); 3296 } 3297 } catch (IOException e) { 3298 throw new ServiceException(e); 3299 } 3300 return builder.build(); 3301 } 3302 3303 @Override 3304 public ListTablesInRSGroupResponse listTablesInRSGroup(RpcController controller, 3305 ListTablesInRSGroupRequest request) throws ServiceException { 3306 ListTablesInRSGroupResponse.Builder builder = ListTablesInRSGroupResponse.newBuilder(); 3307 String groupName = request.getGroupName(); 3308 LOG.info(server.getClientIdAuditPrefix() + " list tables in rsgroup " + groupName); 3309 try { 3310 if (server.getMasterCoprocessorHost() != null) { 3311 server.getMasterCoprocessorHost().preListTablesInRSGroup(groupName); 3312 } 3313 RSGroupUtil.listTablesInRSGroup(server, groupName).stream() 3314 .map(ProtobufUtil::toProtoTableName).forEach(builder::addTableName); 3315 if 
(server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postListTablesInRSGroup(groupName);
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return builder.build();
  }

  /**
   * Returns the namespaces and tables EXPLICITLY configured for the given rsgroup: namespaces via
   * the group property in their descriptor, tables via the descriptor's region-server-group
   * setting. Tables placed in the group only by namespace inheritance are not included.
   */
  @Override
  public GetConfiguredNamespacesAndTablesInRSGroupResponse
    getConfiguredNamespacesAndTablesInRSGroup(RpcController controller,
      GetConfiguredNamespacesAndTablesInRSGroupRequest request) throws ServiceException {
    GetConfiguredNamespacesAndTablesInRSGroupResponse.Builder builder =
      GetConfiguredNamespacesAndTablesInRSGroupResponse.newBuilder();
    String groupName = request.getGroupName();
    LOG.info(server.getClientIdAuditPrefix() + " get configured namespaces and tables in rsgroup "
      + groupName);
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preGetConfiguredNamespacesAndTablesInRSGroup(groupName);
      }
      for (NamespaceDescriptor nd : server.getClusterSchema().getNamespaces()) {
        if (groupName.equals(nd.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP))) {
          builder.addNamespace(nd.getName());
        }
      }
      for (TableDescriptor td : server.getTableDescriptors().getAll().values()) {
        if (td.getRegionServerGroup().map(g -> g.equals(groupName)).orElse(false)) {
          builder.addTableName(ProtobufUtil.toProtoTableName(td.getTableName()));
        }
      }
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postGetConfiguredNamespacesAndTablesInRSGroup(groupName);
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return builder.build();
  }

  /** Renames an rsgroup, with pre/post coprocessor hooks. */
  @Override
  public RenameRSGroupResponse renameRSGroup(RpcController controller, RenameRSGroupRequest request)
    throws ServiceException {
    RenameRSGroupResponse.Builder builder = RenameRSGroupResponse.newBuilder();
    String oldRSGroup = request.getOldRsgroupName();
    String newRSGroup = request.getNewRsgroupName();
    LOG.info("{} rename rsgroup from {} to {} ", server.getClientIdAuditPrefix(), oldRSGroup,
      newRSGroup);
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preRenameRSGroup(oldRSGroup, newRSGroup);
      }
      server.getRSGroupInfoManager().renameRSGroup(oldRSGroup, newRSGroup);
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postRenameRSGroup(oldRSGroup, newRSGroup);
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return builder.build();
  }

  /** Replaces an rsgroup's configuration key/values, with pre/post coprocessor hooks. */
  @Override
  public UpdateRSGroupConfigResponse updateRSGroupConfig(RpcController controller,
    UpdateRSGroupConfigRequest request) throws ServiceException {
    UpdateRSGroupConfigResponse.Builder builder = UpdateRSGroupConfigResponse.newBuilder();
    String groupName = request.getGroupName();
    Map<String, String> configuration = new HashMap<>();
    request.getConfigurationList().forEach(p -> configuration.put(p.getName(), p.getValue()));
    LOG.info("{} update rsgroup {} configuration {}", server.getClientIdAuditPrefix(), groupName,
      configuration);
    try {
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().preUpdateRSGroupConfig(groupName, configuration);
      }
      server.getRSGroupInfoManager().updateRSGroupConfig(groupName, configuration);
      if (server.getMasterCoprocessorHost() != null) {
        server.getMasterCoprocessorHost().postUpdateRSGroupConfig(groupName, configuration);
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return builder.build();
  }

  /**
   * Generic log-entry fetch: the request names a protobuf request class, which is reflectively
   * parsed and dispatched to the matching balancer decision/rejection handler.
   */
  @Override
  public HBaseProtos.LogEntry getLogEntries(RpcController controller,
    HBaseProtos.LogRequest request) throws ServiceException {
    try {
      final String logClassName = request.getLogClassName();
      Class<?> logClass =
Class.forName(logClassName).asSubclass(Message.class);
      // All protobuf messages expose a static parseFrom(ByteString); resolve it reflectively.
      Method method = logClass.getMethod("parseFrom", ByteString.class);
      if (logClassName.contains("BalancerDecisionsRequest")) {
        MasterProtos.BalancerDecisionsRequest balancerDecisionsRequest =
          (MasterProtos.BalancerDecisionsRequest) method.invoke(null, request.getLogMessage());
        MasterProtos.BalancerDecisionsResponse balancerDecisionsResponse =
          getBalancerDecisions(balancerDecisionsRequest);
        return HBaseProtos.LogEntry.newBuilder()
          .setLogClassName(balancerDecisionsResponse.getClass().getName())
          .setLogMessage(balancerDecisionsResponse.toByteString()).build();
      } else if (logClassName.contains("BalancerRejectionsRequest")) {
        MasterProtos.BalancerRejectionsRequest balancerRejectionsRequest =
          (MasterProtos.BalancerRejectionsRequest) method.invoke(null, request.getLogMessage());
        MasterProtos.BalancerRejectionsResponse balancerRejectionsResponse =
          getBalancerRejections(balancerRejectionsRequest);
        return HBaseProtos.LogEntry.newBuilder()
          .setLogClassName(balancerRejectionsResponse.getClass().getName())
          .setLogMessage(balancerRejectionsResponse.toByteString()).build();
      }
    } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException
      | InvocationTargetException e) {
      LOG.error("Error while retrieving log entries.", e);
      throw new ServiceException(e);
    }
    // Reached when the named class is neither of the two supported request types.
    throw new ServiceException("Invalid request params");
  }

  /**
   * Fetches recorded balancer decisions from the in-memory named queue; empty response when no
   * recorder is configured or the queue returns nothing.
   */
  private MasterProtos.BalancerDecisionsResponse
    getBalancerDecisions(MasterProtos.BalancerDecisionsRequest request) {
    final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder();
    if (namedQueueRecorder == null) {
      return MasterProtos.BalancerDecisionsResponse.newBuilder()
        .addAllBalancerDecision(Collections.emptyList()).build();
    }
    final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest();
    namedQueueGetRequest.setNamedQueueEvent(BalancerDecisionDetails.BALANCER_DECISION_EVENT);
    namedQueueGetRequest.setBalancerDecisionsRequest(request);
    NamedQueueGetResponse namedQueueGetResponse =
      namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest);
    List<RecentLogs.BalancerDecision> balancerDecisions = namedQueueGetResponse != null
      ? namedQueueGetResponse.getBalancerDecisions()
      : Collections.emptyList();
    return MasterProtos.BalancerDecisionsResponse.newBuilder()
      .addAllBalancerDecision(balancerDecisions).build();
  }

  /**
   * Fetches recorded balancer rejections from the in-memory named queue; empty response when no
   * recorder is configured or the queue returns nothing.
   */
  private MasterProtos.BalancerRejectionsResponse
    getBalancerRejections(MasterProtos.BalancerRejectionsRequest request) {
    final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder();
    if (namedQueueRecorder == null) {
      return MasterProtos.BalancerRejectionsResponse.newBuilder()
        .addAllBalancerRejection(Collections.emptyList()).build();
    }
    final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest();
    namedQueueGetRequest.setNamedQueueEvent(BalancerRejectionDetails.BALANCER_REJECTION_EVENT);
    namedQueueGetRequest.setBalancerRejectionsRequest(request);
    NamedQueueGetResponse namedQueueGetResponse =
      namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest);
    List<RecentLogs.BalancerRejection> balancerRejections = namedQueueGetResponse != null
      ? namedQueueGetResponse.getBalancerRejections()
      : Collections.emptyList();
    return MasterProtos.BalancerRejectionsResponse.newBuilder()
      .addAllBalancerRejection(balancerRejections).build();
  }

  /**
   * Public RegionInfo lookup RPC. Unlike the private specifier helper, this also resolves MOB
   * regions (which are dummy regions carrying compaction state).
   */
  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public GetRegionInfoResponse getRegionInfo(final RpcController controller,
    final GetRegionInfoRequest request) throws ServiceException {
    final GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
    final RegionInfo info = getRegionInfo(request.getRegion());
    if (info != null) {
      builder.setRegionInfo(ProtobufUtil.toRegionInfo(info));
    } else {
      // Is it a MOB name? These work differently.
      byte[] regionName = request.getRegion().getValue().toByteArray();
      TableName tableName = RegionInfo.getTable(regionName);
      if (MobUtils.isMobRegionName(tableName, regionName)) {
        // a dummy region info contains the compaction state.
        RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName);
        builder.setRegionInfo(ProtobufUtil.toRegionInfo(mobRegionInfo));
        if (request.hasCompactionState() && request.getCompactionState()) {
          builder.setCompactionState(server.getMobCompactionState(tableName));
        }
      } else {
        // If unknown RegionInfo and not a MOB region, it is unknown.
3492 throw new ServiceException(new UnknownRegionException(Bytes.toString(regionName))); 3493 } 3494 } 3495 return builder.build(); 3496 } 3497 3498 @Override 3499 public GetStoreFileResponse getStoreFile(RpcController controller, GetStoreFileRequest request) 3500 throws ServiceException { 3501 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3502 } 3503 3504 @Override 3505 public GetOnlineRegionResponse getOnlineRegion(RpcController controller, 3506 GetOnlineRegionRequest request) throws ServiceException { 3507 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3508 } 3509 3510 @Override 3511 public OpenRegionResponse openRegion(RpcController controller, OpenRegionRequest request) 3512 throws ServiceException { 3513 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3514 } 3515 3516 @Override 3517 public WarmupRegionResponse warmupRegion(RpcController controller, WarmupRegionRequest request) 3518 throws ServiceException { 3519 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3520 } 3521 3522 @Override 3523 public CloseRegionResponse closeRegion(RpcController controller, CloseRegionRequest request) 3524 throws ServiceException { 3525 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3526 } 3527 3528 @Override 3529 public FlushRegionResponse flushRegion(RpcController controller, FlushRegionRequest request) 3530 throws ServiceException { 3531 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3532 } 3533 3534 @Override 3535 public CompactionSwitchResponse compactionSwitch(RpcController controller, 3536 CompactionSwitchRequest request) throws ServiceException { 3537 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3538 } 3539 3540 @Override 3541 public CompactRegionResponse compactRegion(RpcController controller, 
CompactRegionRequest request) 3542 throws ServiceException { 3543 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3544 } 3545 3546 @Override 3547 public ReplicateWALEntryResponse replicateWALEntry(RpcController controller, 3548 ReplicateWALEntryRequest request) throws ServiceException { 3549 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3550 } 3551 3552 @Override 3553 public ReplicateWALEntryResponse replay(RpcController controller, 3554 ReplicateWALEntryRequest request) throws ServiceException { 3555 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3556 } 3557 3558 @Override 3559 public RollWALWriterResponse rollWALWriter(RpcController controller, RollWALWriterRequest request) 3560 throws ServiceException { 3561 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3562 } 3563 3564 @Override 3565 public GetServerInfoResponse getServerInfo(RpcController controller, GetServerInfoRequest request) 3566 throws ServiceException { 3567 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3568 } 3569 3570 @Override 3571 public StopServerResponse stopServer(RpcController controller, StopServerRequest request) 3572 throws ServiceException { 3573 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3574 } 3575 3576 @Override 3577 public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller, 3578 UpdateFavoredNodesRequest request) throws ServiceException { 3579 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3580 } 3581 3582 @Override 3583 public GetRegionLoadResponse getRegionLoad(RpcController controller, GetRegionLoadRequest request) 3584 throws ServiceException { 3585 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3586 } 3587 3588 @Override 3589 public 
ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller, 3590 ClearCompactionQueuesRequest request) throws ServiceException { 3591 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3592 } 3593 3594 @Override 3595 public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller, 3596 ClearRegionBlockCacheRequest request) throws ServiceException { 3597 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3598 } 3599 3600 @Override 3601 public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(RpcController controller, 3602 GetSpaceQuotaSnapshotsRequest request) throws ServiceException { 3603 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3604 } 3605 3606 @Override 3607 public ExecuteProceduresResponse executeProcedures(RpcController controller, 3608 ExecuteProceduresRequest request) throws ServiceException { 3609 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3610 } 3611 3612 @Override 3613 public GetCachedFilesListResponse getCachedFilesList(RpcController controller, 3614 GetCachedFilesListRequest request) throws ServiceException { 3615 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3616 } 3617 3618 @Override 3619 public GetLiveRegionServersResponse getLiveRegionServers(RpcController controller, 3620 GetLiveRegionServersRequest request) throws ServiceException { 3621 List<ServerName> regionServers = new ArrayList<>(server.getLiveRegionServers()); 3622 Collections.shuffle(regionServers, ThreadLocalRandom.current()); 3623 GetLiveRegionServersResponse.Builder builder = 3624 GetLiveRegionServersResponse.newBuilder().setTotal(regionServers.size()); 3625 regionServers.stream().limit(request.getCount()).map(ProtobufUtil::toServerName) 3626 .forEach(builder::addServer); 3627 return builder.build(); 3628 } 3629 3630 @Override 3631 public 
ReplicateWALEntryResponse replicateToReplica(RpcController controller, 3632 ReplicateWALEntryRequest request) throws ServiceException { 3633 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3634 } 3635 3636 @Override 3637 public FlushMasterStoreResponse flushMasterStore(RpcController controller, 3638 FlushMasterStoreRequest request) throws ServiceException { 3639 rpcPreCheck("flushMasterStore"); 3640 try { 3641 server.flushMasterStore(); 3642 } catch (IOException ioe) { 3643 throw new ServiceException(ioe); 3644 } 3645 return FlushMasterStoreResponse.newBuilder().build(); 3646 } 3647 3648 @Override 3649 public FlushTableResponse flushTable(RpcController controller, FlushTableRequest req) 3650 throws ServiceException { 3651 TableName tableName = ProtobufUtil.toTableName(req.getTableName()); 3652 List<byte[]> columnFamilies = req.getColumnFamilyCount() > 0 3653 ? req.getColumnFamilyList().stream().filter(cf -> !cf.isEmpty()).map(ByteString::toByteArray) 3654 .collect(Collectors.toList()) 3655 : null; 3656 try { 3657 long procId = 3658 server.flushTable(tableName, columnFamilies, req.getNonceGroup(), req.getNonce()); 3659 return FlushTableResponse.newBuilder().setProcId(procId).build(); 3660 } catch (IOException ioe) { 3661 throw new ServiceException(ioe); 3662 } 3663 } 3664}