/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseRpcServicesBase;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.client.VersionInfoUtil;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.PriorityFunction;
import org.apache.hadoop.hbase.ipc.QosPriority;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.hbck.HbckChore;
import org.apache.hadoop.hbase.master.janitor.MetaFixer;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.master.replication.AbstractPeerNoLockProcedure;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.namequeues.BalancerDecisionDetails;
import org.apache.hadoop.hbase.namequeues.BalancerRejectionDetails;
import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest;
import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
import org.apache.hadoop.hbase.procedure2.LockType;
import org.apache.hadoop.hbase.procedure2.LockedResource;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.AccessChecker.InputUser;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.security.access.PermissionStorage;
import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.security.visibility.VisibilityController;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.DNS;
import org.apache.hadoop.hbase.util.DNS.ServerType;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.ServiceDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
import org.apache.hbase.thirdparty.com.google.protobuf.Service;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetCachedFilesListResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.NamespaceQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.TableQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetConfiguredNamespacesAndTablesInRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetConfiguredNamespacesAndTablesInRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListTablesInRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListTablesInRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RenameRSGroupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RenameRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RecentLogs;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLiveRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLiveRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ClientMetaService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerModificationProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerModificationProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.IsReplicationPeerModificationEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.IsReplicationPeerModificationEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerModificationSwitchRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerModificationSwitchResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;

/**
 * Implements the master RPC services.
 */
@InterfaceAudience.Private
public class MasterRpcServices extends HBaseRpcServicesBase<HMaster>
  implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
  LockService.BlockingInterface, HbckService.BlockingInterface {

  private static final Logger LOG = LoggerFactory.getLogger(MasterRpcServices.class.getName());
  private static final Logger AUDITLOG =
    LoggerFactory.getLogger("SecurityLogger." + MasterRpcServices.class.getName());

  /** RPC scheduler to use for the master. */
  public static final String MASTER_RPC_SCHEDULER_FACTORY_CLASS =
    "hbase.master.rpc.scheduler.factory.class";

  /**
   * @return Subset of configuration to pass to initializing regionservers: e.g. the filesystem to
   *         use and root directory to use.
   */
  private RegionServerStartupResponse.Builder createConfigurationSubset() {
    RegionServerStartupResponse.Builder resp =
      addConfig(RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR);
    resp = addConfig(resp, "fs.defaultFS");
    return addConfig(resp, "hbase.master.info.port");
  }

  private RegionServerStartupResponse.Builder
    addConfig(final RegionServerStartupResponse.Builder resp, final String key) {
    NameStringPair.Builder entry =
      NameStringPair.newBuilder().setName(key).setValue(server.getConfiguration().get(key));
    resp.addMapEntries(entry.build());
    return resp;
  }

  public MasterRpcServices(HMaster m) throws IOException {
    super(m, m.getProcessName());
  }

  @Override
  protected boolean defaultReservoirEnabled() {
    return false;
  }

  @Override
  protected ServerType getDNSServerType() {
    return DNS.ServerType.MASTER;
  }

  @Override
  protected String getHostname(Configuration conf, String defaultHostname) {
    return conf.get("hbase.master.ipc.address", defaultHostname);
  }

  @Override
  protected String getPortConfigName() {
    return HConstants.MASTER_PORT;
  }

  @Override
  protected int getDefaultPort() {
    return HConstants.DEFAULT_MASTER_PORT;
  }

  @Override
  protected Class<?> getRpcSchedulerFactoryClass(Configuration conf) {
    return conf.getClass(MASTER_RPC_SCHEDULER_FACTORY_CLASS, SimpleRpcSchedulerFactory.class);
  }
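  // A minimal configuration sketch (not part of this class) of how the key above is consumed by
  // getRpcSchedulerFactoryClass(conf): when unset, SimpleRpcSchedulerFactory is used. The factory
  // class name below is illustrative only, not a class shipped with HBase.
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   conf.set(MASTER_RPC_SCHEDULER_FACTORY_CLASS,
  //     "org.example.CustomRpcSchedulerFactory"); // hypothetical RpcSchedulerFactory impl
  //   // The master then instantiates this factory when building its RPC scheduler.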
  @Override
  protected PriorityFunction createPriority() {
    return new MasterAnnotationReadingPriorityFunction(this);
  }

  /**
   * Performs the following pre-checks, in order:
   * <ol>
   * <li>Master is initialized</li>
   * <li>Rpc caller has admin permissions</li>
   * </ol>
   * @param requestName name of rpc request. Used in reporting failures to provide context.
   * @throws ServiceException If any of the above pre-checks fails.
   */
  private void rpcPreCheck(String requestName) throws ServiceException {
    try {
      server.checkInitialized();
      requirePermission(requestName, Permission.Action.ADMIN);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  enum BalanceSwitchMode {
    SYNC,
    ASYNC
  }

  /**
   * Sets the balancer switch according to the given BalanceSwitchMode.
   * @param b    new balancer switch value
   * @param mode BalanceSwitchMode
   * @return old balancer switch value
   */
  boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException {
    boolean oldValue = server.loadBalancerStateStore.get();
    boolean newValue = b;
    try {
      if (server.cpHost != null) {
        server.cpHost.preBalanceSwitch(newValue);
      }
      if (mode == BalanceSwitchMode.SYNC) {
        synchronized (server.getLoadBalancer()) {
          server.loadBalancerStateStore.set(newValue);
        }
      } else {
        server.loadBalancerStateStore.set(newValue);
      }
      LOG.info(server.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue);
      if (server.cpHost != null) {
        server.cpHost.postBalanceSwitch(oldValue, newValue);
      }
      server.getLoadBalancer().updateBalancerStatus(newValue);
    } catch (IOException ioe) {
      LOG.warn("Error flipping balance switch", ioe);
    }
    return oldValue;
  }

  boolean synchronousBalanceSwitch(final boolean b) throws IOException {
    return switchBalancer(b, BalanceSwitchMode.SYNC);
  }
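  // A minimal client-side sketch (not part of this class) of the call path that ends up in
  // switchBalancer() above; the Connection/Admin handles and configuration are assumed.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //     Admin admin = conn.getAdmin()) {
  //     // synchronous == true corresponds to BalanceSwitchMode.SYNC on the master side
  //     boolean previous = admin.balancerSwitch(false, true);
  //   }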
  /** Returns list of blocking services and their security info classes that this server supports */
  @Override
  protected List<BlockingServiceAndInterface> getServices() {
    List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
    bssi.add(new BlockingServiceAndInterface(MasterService.newReflectiveBlockingService(this),
      MasterService.BlockingInterface.class));
    bssi.add(
      new BlockingServiceAndInterface(RegionServerStatusService.newReflectiveBlockingService(this),
        RegionServerStatusService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this),
      LockService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(HbckService.newReflectiveBlockingService(this),
      HbckService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this),
      ClientMetaService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(AdminService.newReflectiveBlockingService(this),
      AdminService.BlockingInterface.class));
    return bssi;
  }

  void start(ZKWatcher zkWatcher) {
    internalStart(zkWatcher);
  }

  void stop() {
    internalStop();
  }

  @Override
  // The priority for all RegionServerStatusProtos RPCs is set to HIGH_QOS in
  // MasterAnnotationReadingPriorityFunction itself.
  public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller,
    GetLastFlushedSequenceIdRequest request) throws ServiceException {
    try {
      server.checkServiceStarted();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    byte[] encodedRegionName = request.getRegionName().toByteArray();
    RegionStoreSequenceIds ids =
      server.getServerManager().getLastFlushedSequenceId(encodedRegionName);
    return ResponseConverter.buildGetLastFlushedSequenceIdResponse(ids);
  }

  @Override
  public RegionServerReportResponse regionServerReport(RpcController controller,
    RegionServerReportRequest request) throws ServiceException {
    try {
      server.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      ClusterStatusProtos.ServerLoad sl = request.getLoad();
      ServerName serverName = ProtobufUtil.toServerName(request.getServer());
      ServerMetrics oldLoad = server.getServerManager().getLoad(serverName);
      ServerMetrics newLoad =
        ServerMetricsBuilder.toServerMetrics(serverName, versionNumber, version, sl);
      server.getServerManager().regionServerReport(serverName, newLoad);
      server.getAssignmentManager().reportOnlineRegions(serverName,
        newLoad.getRegionMetrics().keySet());
      if (sl != null && server.metricsMaster != null) {
        // Up our metrics.
        server.metricsMaster.incrementRequests(
          sl.getTotalNumberOfRequests() - (oldLoad != null ? oldLoad.getRequestCount() : 0));
        server.metricsMaster.incrementReadRequests(
          sl.getReadRequestsCount() - (oldLoad != null ? oldLoad.getReadRequestsCount() : 0));
        server.metricsMaster.incrementWriteRequests(
          sl.getWriteRequestsCount() - (oldLoad != null ? oldLoad.getWriteRequestsCount() : 0));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return RegionServerReportResponse.newBuilder().build();
  }

  @Override
  public RegionServerStartupResponse regionServerStartup(RpcController controller,
    RegionServerStartupRequest request) throws ServiceException {
    // Register with server manager
    try {
      server.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      InetAddress ia = server.getRemoteInetAddress(request.getPort(), request.getServerStartCode());
      // if regionserver passed hostname to use,
      // then use it instead of doing a reverse DNS lookup
      ServerName rs =
        server.getServerManager().regionServerStartup(request, versionNumber, version, ia);

      // Send back some config info
      RegionServerStartupResponse.Builder resp = createConfigurationSubset();
      NameStringPair.Builder entry = NameStringPair.newBuilder()
        .setName(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER).setValue(rs.getHostname());
      resp.addMapEntries(entry.build());

      return resp.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }
  @Override
  public ReportRSFatalErrorResponse reportRSFatalError(RpcController controller,
    ReportRSFatalErrorRequest request) throws ServiceException {
    String errorText = request.getErrorMessage();
    ServerName sn = ProtobufUtil.toServerName(request.getServer());
    String msg = sn + " reported a fatal error:\n" + errorText;
    LOG.warn(msg);
    server.rsFatals.add(msg);
    return ReportRSFatalErrorResponse.newBuilder().build();
  }

  @Override
  public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req)
    throws ServiceException {
    try {
      long procId = server.addColumn(ProtobufUtil.toTableName(req.getTableName()),
        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(),
        req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId
        return AddColumnResponse.newBuilder().build();
      } else {
        return AddColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public AssignRegionResponse assignRegion(RpcController controller, AssignRegionRequest req)
    throws ServiceException {
    try {
      server.checkInitialized();

      final RegionSpecifierType type = req.getRegion().getType();
      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("assignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
          + " actual: " + type);
      }

      final byte[] regionName = req.getRegion().getValue().toByteArray();
      final RegionInfo regionInfo = server.getAssignmentManager().getRegionInfo(regionName);
      if (regionInfo == null) {
        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
      }

      final AssignRegionResponse arr = AssignRegionResponse.newBuilder().build();
      if (server.cpHost != null) {
        server.cpHost.preAssign(regionInfo);
      }
      LOG.info(server.getClientIdAuditPrefix() + " assign " + regionInfo.getRegionNameAsString());
      server.getAssignmentManager().assign(regionInfo);
      if (server.cpHost != null) {
        server.cpHost.postAssign(regionInfo);
      }
      return arr;
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public MasterProtos.BalanceResponse balance(RpcController controller,
    MasterProtos.BalanceRequest request) throws ServiceException {
    try {
      return ProtobufUtil.toBalanceResponse(server.balance(ProtobufUtil.toBalanceRequest(request)));
    } catch (IOException ex) {
      throw new ServiceException(ex);
    }
  }

  @Override
  public CreateNamespaceResponse createNamespace(RpcController controller,
    CreateNamespaceRequest request) throws ServiceException {
    try {
      long procId =
        server.createNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
          request.getNonceGroup(), request.getNonce());
      return CreateNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
    throws ServiceException {
    TableDescriptor tableDescriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema());
    byte[][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
    try {
      long procId =
        server.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
      LOG.info(server.getClientIdAuditPrefix() + " procedure request for creating table: "
        + req.getTableSchema().getTableName() + " procId is: " + procId);
      return CreateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }
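  // A minimal client-side sketch (not part of this class) of what a CreateTableRequest carries:
  // a serialized TableDescriptor plus optional split keys. Table and family names below are
  // illustrative only.
  //
  //   TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
  //     .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
  //     .build();
  //   byte[][] splitKeys = { Bytes.toBytes("m") };
  //   admin.createTable(td, splitKeys); // blocks until the create-table procedure completes
  //   // admin.createTableAsync(td, splitKeys) instead returns a Future tied to the procId above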
  @Override
  public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req)
    throws ServiceException {
    try {
      long procId = server.deleteColumn(ProtobufUtil.toTableName(req.getTableName()),
        req.getColumnName().toByteArray(), req.getNonceGroup(), req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId
        return DeleteColumnResponse.newBuilder().build();
      } else {
        return DeleteColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public DeleteNamespaceResponse deleteNamespace(RpcController controller,
    DeleteNamespaceRequest request) throws ServiceException {
    try {
      long procId = server.deleteNamespace(request.getNamespaceName(), request.getNonceGroup(),
        request.getNonce());
      return DeleteNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Execute Delete Snapshot operation.
   * @return DeleteSnapshotResponse (a protobuf wrapped void) if the snapshot existed and was
   *         deleted properly.
   * @throws ServiceException wrapping SnapshotDoesNotExistException if specified snapshot did not
   *                          exist.
   */
  @Override
  public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
    DeleteSnapshotRequest request) throws ServiceException {
    try {
      server.checkInitialized();
      server.snapshotManager.checkSnapshotSupport();

      LOG.info(server.getClientIdAuditPrefix() + " delete " + request.getSnapshot());
      server.snapshotManager.deleteSnapshot(request.getSnapshot());
      return DeleteSnapshotResponse.newBuilder().build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request)
    throws ServiceException {
    try {
      long procId = server.deleteTable(ProtobufUtil.toTableName(request.getTableName()),
        request.getNonceGroup(), request.getNonce());
      return DeleteTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public TruncateTableResponse truncateTable(RpcController controller, TruncateTableRequest request)
    throws ServiceException {
    try {
      long procId = server.truncateTable(ProtobufUtil.toTableName(request.getTableName()),
        request.getPreserveSplits(), request.getNonceGroup(), request.getNonce());
      return TruncateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public DisableTableResponse disableTable(RpcController controller, DisableTableRequest request)
    throws ServiceException {
    try {
      long procId = server.disableTable(ProtobufUtil.toTableName(request.getTableName()),
        request.getNonceGroup(), request.getNonce());
      return DisableTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
    EnableCatalogJanitorRequest req) throws ServiceException {
    rpcPreCheck("enableCatalogJanitor");
    return EnableCatalogJanitorResponse.newBuilder()
      .setPrevValue(server.catalogJanitorChore.setEnabled(req.getEnable())).build();
  }

  @Override
  public SetCleanerChoreRunningResponse setCleanerChoreRunning(RpcController c,
    SetCleanerChoreRunningRequest req) throws ServiceException {
    rpcPreCheck("setCleanerChoreRunning");

    boolean prevValue =
      server.getLogCleaner().getEnabled() && server.getHFileCleaner().getEnabled();
    server.getLogCleaner().setEnabled(req.getOn());
    for (HFileCleaner hFileCleaner : server.getHFileCleaners()) {
      hFileCleaner.setEnabled(req.getOn());
    }
    return SetCleanerChoreRunningResponse.newBuilder().setPrevValue(prevValue).build();
  }
  @Override
  public EnableTableResponse enableTable(RpcController controller, EnableTableRequest request)
    throws ServiceException {
    try {
      long procId = server.enableTable(ProtobufUtil.toTableName(request.getTableName()),
        request.getNonceGroup(), request.getNonce());
      return EnableTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public MergeTableRegionsResponse mergeTableRegions(RpcController c,
    MergeTableRegionsRequest request) throws ServiceException {
    try {
      server.checkInitialized();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }

    RegionStates regionStates = server.getAssignmentManager().getRegionStates();

    RegionInfo[] regionsToMerge = new RegionInfo[request.getRegionCount()];
    for (int i = 0; i < request.getRegionCount(); i++) {
      final byte[] encodedNameOfRegion = request.getRegion(i).getValue().toByteArray();
      if (request.getRegion(i).getType() != RegionSpecifierType.ENCODED_REGION_NAME) {
        LOG.warn("MergeRegions specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
          + " actual: region " + i + " =" + request.getRegion(i).getType());
      }
      RegionState regionState = regionStates.getRegionState(Bytes.toString(encodedNameOfRegion));
      if (regionState == null) {
        throw new ServiceException(
          new UnknownRegionException(Bytes.toStringBinary(encodedNameOfRegion)));
      }
      regionsToMerge[i] = regionState.getRegion();
    }

    try {
      long procId = server.mergeRegions(regionsToMerge, request.getForcible(),
        request.getNonceGroup(), request.getNonce());
      return MergeTableRegionsResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public SplitTableRegionResponse splitRegion(final RpcController controller,
    final SplitTableRegionRequest request) throws ServiceException {
    try {
      long procId = server.splitRegion(ProtobufUtil.toRegionInfo(request.getRegionInfo()),
        request.hasSplitRow() ? request.getSplitRow().toByteArray() : null, request.getNonceGroup(),
        request.getNonce());
      return SplitTableRegionResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }
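  // A minimal client-side sketch (not part of this class) of a split request that lands in
  // splitRegion() above; the region name and split row are illustrative.
  //
  //   byte[] regionName = ...; // full region name of the region to split
  //   admin.splitRegionAsync(regionName, Bytes.toBytes("splitRow")).get();
  //   // Omitting the split row lets the regionserver pick a split point itself.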
request.getSplitRow().toByteArray() : null, request.getNonceGroup(), 955 request.getNonce()); 956 return SplitTableRegionResponse.newBuilder().setProcId(procId).build(); 957 } catch (IOException ie) { 958 throw new ServiceException(ie); 959 } 960 } 961 962 @Override 963 public MasterProtos.TruncateRegionResponse truncateRegion(RpcController controller, 964 final MasterProtos.TruncateRegionRequest request) throws ServiceException { 965 try { 966 long procId = server.truncateRegion(ProtobufUtil.toRegionInfo(request.getRegionInfo()), 967 request.getNonceGroup(), request.getNonce()); 968 return MasterProtos.TruncateRegionResponse.newBuilder().setProcId(procId).build(); 969 } catch (IOException ie) { 970 throw new ServiceException(ie); 971 } 972 } 973 974 @Override 975 public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller, 976 final ClientProtos.CoprocessorServiceRequest request) throws ServiceException { 977 rpcPreCheck("execMasterService"); 978 try { 979 ServerRpcController execController = new ServerRpcController(); 980 ClientProtos.CoprocessorServiceCall call = request.getCall(); 981 String serviceName = call.getServiceName(); 982 String methodName = call.getMethodName(); 983 if (!server.coprocessorServiceHandlers.containsKey(serviceName)) { 984 throw new UnknownProtocolException(null, 985 "No registered Master Coprocessor Endpoint found for " + serviceName 986 + ". Has it been enabled?"); 987 } 988 989 Service service = server.coprocessorServiceHandlers.get(serviceName); 990 ServiceDescriptor serviceDesc = service.getDescriptorForType(); 991 MethodDescriptor methodDesc = 992 CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc); 993 994 Message execRequest = CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest()); 995 final Message.Builder responseBuilder = 996 service.getResponsePrototype(methodDesc).newBuilderForType(); 997 service.callMethod(methodDesc, execController, execRequest, (message) -> { 998 if (message != null) { 999 responseBuilder.mergeFrom(message); 1000 } 1001 }); 1002 Message execResult = responseBuilder.build(); 1003 if (execController.getFailedOn() != null) { 1004 throw execController.getFailedOn(); 1005 } 1006 1007 String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse(""); 1008 User caller = RpcServer.getRequestUser().orElse(null); 1009 AUDITLOG.info("User {} (remote address: {}) master service request for {}.{}", caller, 1010 remoteAddress, serviceName, methodName); 1011 1012 return CoprocessorRpcUtils.getResponse(execResult, HConstants.EMPTY_BYTE_ARRAY); 1013 } catch (IOException ie) { 1014 throw new ServiceException(ie); 1015 } 1016 } 1017 1018 /** 1019 * Triggers an asynchronous attempt to run a distributed procedure. 
{@inheritDoc} 1020 */ 1021 @Override 1022 public ExecProcedureResponse execProcedure(RpcController controller, ExecProcedureRequest request) 1023 throws ServiceException { 1024 try { 1025 server.checkInitialized(); 1026 ProcedureDescription desc = request.getProcedure(); 1027 MasterProcedureManager mpm = 1028 server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature()); 1029 if (mpm == null) { 1030 throw new ServiceException( 1031 new DoNotRetryIOException("The procedure is not registered: " + desc.getSignature())); 1032 } 1033 LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature()); 1034 mpm.checkPermissions(desc, getAccessChecker(), RpcServer.getRequestUser().orElse(null)); 1035 mpm.execProcedure(desc); 1036 // send back the max amount of time the client should wait for the procedure 1037 // to complete 1038 long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME; 1039 return ExecProcedureResponse.newBuilder().setExpectedTimeout(waitTime).build(); 1040 } catch (ForeignException e) { 1041 throw new ServiceException(e.getCause()); 1042 } catch (IOException e) { 1043 throw new ServiceException(e); 1044 } 1045 } 1046 1047 /** 1048 * Triggers a synchronous attempt to run a distributed procedure and sets return data in response. 1049 * {@inheritDoc} 1050 */ 1051 @Override 1052 public ExecProcedureResponse execProcedureWithRet(RpcController controller, 1053 ExecProcedureRequest request) throws ServiceException { 1054 rpcPreCheck("execProcedureWithRet"); 1055 try { 1056 ProcedureDescription desc = request.getProcedure(); 1057 MasterProcedureManager mpm = 1058 server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature()); 1059 if (mpm == null) { 1060 throw new ServiceException("The procedure is not registered: " + desc.getSignature()); 1061 } 1062 LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature()); 1063 byte[] data = mpm.execProcedureWithRet(desc); 1064 ExecProcedureResponse.Builder builder = ExecProcedureResponse.newBuilder(); 1065 // set return data if available 1066 if (data != null) { 1067 builder.setReturnData(UnsafeByteOperations.unsafeWrap(data)); 1068 } 1069 return builder.build(); 1070 } catch (IOException e) { 1071 throw new ServiceException(e); 1072 } 1073 } 1074 1075 @Override 1076 public GetClusterStatusResponse getClusterStatus(RpcController controller, 1077 GetClusterStatusRequest req) throws ServiceException { 1078 GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder(); 1079 try { 1080 // We used to check if Master was up at this point but let this call proceed even if 1081 // Master is initializing... else we shut out stuff like hbck2 tool from making progress 1082 // since it queries this method to figure cluster version. hbck2 wants to be able to work 1083 // against Master even if it is 'initializing' so it can do fixup. 1084 response.setClusterStatus(ClusterMetricsBuilder.toClusterStatus( 1085 server.getClusterMetrics(ClusterMetricsBuilder.toOptions(req.getOptionsList())))); 1086 } catch (IOException e) { 1087 throw new ServiceException(e); 1088 } 1089 return response.build(); 1090 } 1091 1092 /** 1093 * List the currently available/stored snapshots. 
Any in-progress snapshots are ignored 1094 */ 1095 @Override 1096 public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller, 1097 GetCompletedSnapshotsRequest request) throws ServiceException { 1098 try { 1099 server.checkInitialized(); 1100 GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder(); 1101 List<SnapshotDescription> snapshots = server.snapshotManager.getCompletedSnapshots(); 1102 1103 // convert to protobuf 1104 for (SnapshotDescription snapshot : snapshots) { 1105 builder.addSnapshots(snapshot); 1106 } 1107 return builder.build(); 1108 } catch (IOException e) { 1109 throw new ServiceException(e); 1110 } 1111 } 1112 1113 @Override 1114 public ListNamespacesResponse listNamespaces(RpcController controller, 1115 ListNamespacesRequest request) throws ServiceException { 1116 try { 1117 return ListNamespacesResponse.newBuilder().addAllNamespaceName(server.listNamespaces()) 1118 .build(); 1119 } catch (IOException e) { 1120 throw new ServiceException(e); 1121 } 1122 } 1123 1124 @Override 1125 public GetNamespaceDescriptorResponse getNamespaceDescriptor(RpcController controller, 1126 GetNamespaceDescriptorRequest request) throws ServiceException { 1127 try { 1128 return GetNamespaceDescriptorResponse.newBuilder() 1129 .setNamespaceDescriptor( 1130 ProtobufUtil.toProtoNamespaceDescriptor(server.getNamespace(request.getNamespaceName()))) 1131 .build(); 1132 } catch (IOException e) { 1133 throw new ServiceException(e); 1134 } 1135 } 1136 1137 /** 1138 * Get the number of regions of the table that have been updated by the alter. 1139 * @return Pair indicating the number of regions updated Pair.getFirst is the regions that are yet 1140 * to be updated Pair.getSecond is the total number of regions of the table 1141 */ 1142 @Override 1143 public GetSchemaAlterStatusResponse getSchemaAlterStatus(RpcController controller, 1144 GetSchemaAlterStatusRequest req) throws ServiceException { 1145 // TODO: currently, we query using the table name on the client side. this 1146 // may overlap with other table operations or the table operation may 1147 // have completed before querying this API. We need to refactor to a 1148 // transaction system in the future to avoid these ambiguities. 1149 TableName tableName = ProtobufUtil.toTableName(req.getTableName()); 1150 1151 try { 1152 server.checkInitialized(); 1153 Pair<Integer, Integer> pair = server.getAssignmentManager().getReopenStatus(tableName); 1154 GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder(); 1155 ret.setYetToUpdateRegions(pair.getFirst()); 1156 ret.setTotalRegions(pair.getSecond()); 1157 return ret.build(); 1158 } catch (IOException ioe) { 1159 throw new ServiceException(ioe); 1160 } 1161 } 1162 1163 /** 1164 * Get list of TableDescriptors for requested tables. 1165 * @param c Unused (set to null). 1166 * @param req GetTableDescriptorsRequest that contains: - tableNames: requested tables, or if 1167 * empty, all are requested. 1168 */ 1169 @Override 1170 public GetTableDescriptorsResponse getTableDescriptors(RpcController c, 1171 GetTableDescriptorsRequest req) throws ServiceException { 1172 try { 1173 server.checkInitialized(); 1174 1175 final String regex = req.hasRegex() ? req.getRegex() : null; 1176 final String namespace = req.hasNamespace() ? 
req.getNamespace() : null; 1177 List<TableName> tableNameList = null; 1178 if (req.getTableNamesCount() > 0) { 1179 tableNameList = new ArrayList<TableName>(req.getTableNamesCount()); 1180 for (HBaseProtos.TableName tableNamePB : req.getTableNamesList()) { 1181 tableNameList.add(ProtobufUtil.toTableName(tableNamePB)); 1182 } 1183 } 1184 1185 List<TableDescriptor> descriptors = 1186 server.listTableDescriptors(namespace, regex, tableNameList, req.getIncludeSysTables()); 1187 1188 GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder(); 1189 if (descriptors != null && descriptors.size() > 0) { 1190 // Add the table descriptors to the response 1191 for (TableDescriptor htd : descriptors) { 1192 builder.addTableSchema(ProtobufUtil.toTableSchema(htd)); 1193 } 1194 } 1195 return builder.build(); 1196 } catch (IOException ioe) { 1197 throw new ServiceException(ioe); 1198 } 1199 } 1200 1201 @Override 1202 public ListTableDescriptorsByStateResponse listTableDescriptorsByState(RpcController controller, 1203 ListTableDescriptorsByStateRequest request) throws ServiceException { 1204 try { 1205 server.checkInitialized(); 1206 List<TableDescriptor> descriptors = server.listTableDescriptors(null, null, null, false); 1207 1208 ListTableDescriptorsByStateResponse.Builder builder = 1209 ListTableDescriptorsByStateResponse.newBuilder(); 1210 if (descriptors != null && descriptors.size() > 0) { 1211 // Add the table descriptors to the response 1212 TableState.State state = 1213 request.getIsEnabled() ? TableState.State.ENABLED : TableState.State.DISABLED; 1214 for (TableDescriptor htd : descriptors) { 1215 if (server.getTableStateManager().isTableState(htd.getTableName(), state)) { 1216 builder.addTableSchema(ProtobufUtil.toTableSchema(htd)); 1217 } 1218 } 1219 } 1220 return builder.build(); 1221 } catch (IOException ioe) { 1222 throw new ServiceException(ioe); 1223 } 1224 } 1225 1226 /** 1227 * Get list of userspace table names 1228 * @param controller Unused (set to null). 1229 * @param req GetTableNamesRequest 1230 */ 1231 @Override 1232 public GetTableNamesResponse getTableNames(RpcController controller, GetTableNamesRequest req) 1233 throws ServiceException { 1234 try { 1235 server.checkServiceStarted(); 1236 1237 final String regex = req.hasRegex() ? req.getRegex() : null; 1238 final String namespace = req.hasNamespace() ? req.getNamespace() : null; 1239 List<TableName> tableNames = 1240 server.listTableNames(namespace, regex, req.getIncludeSysTables()); 1241 1242 GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder(); 1243 if (tableNames != null && tableNames.size() > 0) { 1244 // Add the table names to the response 1245 for (TableName table : tableNames) { 1246 builder.addTableNames(ProtobufUtil.toProtoTableName(table)); 1247 } 1248 } 1249 return builder.build(); 1250 } catch (IOException e) { 1251 throw new ServiceException(e); 1252 } 1253 } 1254 1255 @Override 1256 public ListTableNamesByStateResponse listTableNamesByState(RpcController controller, 1257 ListTableNamesByStateRequest request) throws ServiceException { 1258 try { 1259 server.checkServiceStarted(); 1260 List<TableName> tableNames = server.listTableNames(null, null, false); 1261 ListTableNamesByStateResponse.Builder builder = ListTableNamesByStateResponse.newBuilder(); 1262 if (tableNames != null && tableNames.size() > 0) { 1263 // Add the disabled table names to the response 1264 TableState.State state = 1265 request.getIsEnabled() ? 
TableState.State.ENABLED : TableState.State.DISABLED; 1266 for (TableName table : tableNames) { 1267 if (server.getTableStateManager().isTableState(table, state)) { 1268 builder.addTableNames(ProtobufUtil.toProtoTableName(table)); 1269 } 1270 } 1271 } 1272 return builder.build(); 1273 } catch (IOException e) { 1274 throw new ServiceException(e); 1275 } 1276 } 1277 1278 @Override 1279 public GetTableStateResponse getTableState(RpcController controller, GetTableStateRequest request) 1280 throws ServiceException { 1281 try { 1282 server.checkServiceStarted(); 1283 TableName tableName = ProtobufUtil.toTableName(request.getTableName()); 1284 TableState ts = server.getTableStateManager().getTableState(tableName); 1285 GetTableStateResponse.Builder builder = GetTableStateResponse.newBuilder(); 1286 builder.setTableState(ts.convert()); 1287 return builder.build(); 1288 } catch (IOException e) { 1289 throw new ServiceException(e); 1290 } 1291 } 1292 1293 @Override 1294 public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c, 1295 IsCatalogJanitorEnabledRequest req) throws ServiceException { 1296 return IsCatalogJanitorEnabledResponse.newBuilder().setValue(server.isCatalogJanitorEnabled()) 1297 .build(); 1298 } 1299 1300 @Override 1301 public IsCleanerChoreEnabledResponse isCleanerChoreEnabled(RpcController c, 1302 IsCleanerChoreEnabledRequest req) throws ServiceException { 1303 return IsCleanerChoreEnabledResponse.newBuilder().setValue(server.isCleanerChoreEnabled()) 1304 .build(); 1305 } 1306 1307 @Override 1308 public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req) 1309 throws ServiceException { 1310 try { 1311 server.checkServiceStarted(); 1312 return IsMasterRunningResponse.newBuilder().setIsMasterRunning(!server.isStopped()).build(); 1313 } catch (IOException e) { 1314 throw new ServiceException(e); 1315 } 1316 } 1317 1318 /** 1319 * Checks if the specified procedure is done. 1320 * @return true if the procedure is done, false if the procedure is in the process of completing 1321 * @throws ServiceException if invalid procedure or failed procedure with progress failure reason. 1322 */ 1323 @Override 1324 public IsProcedureDoneResponse isProcedureDone(RpcController controller, 1325 IsProcedureDoneRequest request) throws ServiceException { 1326 try { 1327 server.checkInitialized(); 1328 ProcedureDescription desc = request.getProcedure(); 1329 MasterProcedureManager mpm = 1330 server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature()); 1331 if (mpm == null) { 1332 throw new ServiceException("The procedure is not registered: " + desc.getSignature()); 1333 } 1334 LOG.debug("Checking to see if procedure from request:" + desc.getSignature() + " is done"); 1335 1336 IsProcedureDoneResponse.Builder builder = IsProcedureDoneResponse.newBuilder(); 1337 boolean done = mpm.isProcedureDone(desc); 1338 builder.setDone(done); 1339 return builder.build(); 1340 } catch (ForeignException e) { 1341 throw new ServiceException(e.getCause()); 1342 } catch (IOException e) { 1343 throw new ServiceException(e); 1344 } 1345 } 1346 1347 /** 1348 * Checks if the specified snapshot is done. 1349 * @return true if the snapshot is in file system ready to use, false if the snapshot is in the 1350 * process of completing 1351 * @throws ServiceException wrapping UnknownSnapshotException if invalid snapshot, or a wrapped 1352 * HBaseSnapshotException with progress failure reason. 
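   * <p>
   * Client-side sketch (an assumption, not taken from this file): completion is normally polled
   * through the public Admin API, e.g. {@code admin.isSnapshotFinished(snapshotDescription)},
   * which ends up funnelling into this RPC.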
1353 */ 1354 @Override 1355 public IsSnapshotDoneResponse isSnapshotDone(RpcController controller, 1356 IsSnapshotDoneRequest request) throws ServiceException { 1357 LOG.debug("Checking to see if snapshot from request:" 1358 + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()) + " is done"); 1359 try { 1360 server.checkInitialized(); 1361 IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder(); 1362 boolean done = server.snapshotManager.isSnapshotDone(request.getSnapshot()); 1363 builder.setDone(done); 1364 return builder.build(); 1365 } catch (ForeignException e) { 1366 throw new ServiceException(e.getCause()); 1367 } catch (IOException e) { 1368 throw new ServiceException(e); 1369 } 1370 } 1371 1372 @Override 1373 public GetProcedureResultResponse getProcedureResult(RpcController controller, 1374 GetProcedureResultRequest request) throws ServiceException { 1375 LOG.debug("Checking to see if procedure is done pid=" + request.getProcId()); 1376 try { 1377 server.checkInitialized(); 1378 GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder(); 1379 long procId = request.getProcId(); 1380 ProcedureExecutor<?> executor = server.getMasterProcedureExecutor(); 1381 Procedure<?> result = executor.getResultOrProcedure(procId); 1382 if (result != null) { 1383 builder.setSubmittedTime(result.getSubmittedTime()); 1384 builder.setLastUpdate(result.getLastUpdate()); 1385 if (executor.isFinished(procId)) { 1386 builder.setState(GetProcedureResultResponse.State.FINISHED); 1387 if (result.isFailed()) { 1388 IOException exception = MasterProcedureUtil.unwrapRemoteIOException(result); 1389 builder.setException(ForeignExceptionUtil.toProtoForeignException(exception)); 1390 } 1391 byte[] resultData = result.getResult(); 1392 if (resultData != null) { 1393 builder.setResult(UnsafeByteOperations.unsafeWrap(resultData)); 1394 } 1395 server.getMasterProcedureExecutor().removeResult(request.getProcId()); 1396 } else { 1397 builder.setState(GetProcedureResultResponse.State.RUNNING); 1398 } 1399 } else { 1400 builder.setState(GetProcedureResultResponse.State.NOT_FOUND); 1401 } 1402 return builder.build(); 1403 } catch (IOException e) { 1404 throw new ServiceException(e); 1405 } 1406 } 1407 1408 @Override 1409 public AbortProcedureResponse abortProcedure(RpcController rpcController, 1410 AbortProcedureRequest request) throws ServiceException { 1411 try { 1412 AbortProcedureResponse.Builder response = AbortProcedureResponse.newBuilder(); 1413 boolean abortResult = 1414 server.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning()); 1415 response.setIsProcedureAborted(abortResult); 1416 return response.build(); 1417 } catch (IOException e) { 1418 throw new ServiceException(e); 1419 } 1420 } 1421 1422 @Override 1423 public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c, 1424 ListNamespaceDescriptorsRequest request) throws ServiceException { 1425 try { 1426 ListNamespaceDescriptorsResponse.Builder response = 1427 ListNamespaceDescriptorsResponse.newBuilder(); 1428 for (NamespaceDescriptor ns : server.getNamespaces()) { 1429 response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns)); 1430 } 1431 return response.build(); 1432 } catch (IOException e) { 1433 throw new ServiceException(e); 1434 } 1435 } 1436 1437 @Override 1438 public GetProceduresResponse getProcedures(RpcController rpcController, 1439 GetProceduresRequest request) throws ServiceException { 1440 try { 1441 final 
GetProceduresResponse.Builder response = GetProceduresResponse.newBuilder(); 1442 for (Procedure<?> p : server.getProcedures()) { 1443 response.addProcedure(ProcedureUtil.convertToProtoProcedure(p)); 1444 } 1445 return response.build(); 1446 } catch (IOException e) { 1447 throw new ServiceException(e); 1448 } 1449 } 1450 1451 @Override 1452 public GetLocksResponse getLocks(RpcController controller, GetLocksRequest request) 1453 throws ServiceException { 1454 try { 1455 final GetLocksResponse.Builder builder = GetLocksResponse.newBuilder(); 1456 1457 for (LockedResource lockedResource : server.getLocks()) { 1458 builder.addLock(ProcedureUtil.convertToProtoLockedResource(lockedResource)); 1459 } 1460 1461 return builder.build(); 1462 } catch (IOException e) { 1463 throw new ServiceException(e); 1464 } 1465 } 1466 1467 @Override 1468 public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c, 1469 ListTableDescriptorsByNamespaceRequest request) throws ServiceException { 1470 try { 1471 ListTableDescriptorsByNamespaceResponse.Builder b = 1472 ListTableDescriptorsByNamespaceResponse.newBuilder(); 1473 for (TableDescriptor htd : server 1474 .listTableDescriptorsByNamespace(request.getNamespaceName())) { 1475 b.addTableSchema(ProtobufUtil.toTableSchema(htd)); 1476 } 1477 return b.build(); 1478 } catch (IOException e) { 1479 throw new ServiceException(e); 1480 } 1481 } 1482 1483 @Override 1484 public ListTableNamesByNamespaceResponse listTableNamesByNamespace(RpcController c, 1485 ListTableNamesByNamespaceRequest request) throws ServiceException { 1486 try { 1487 ListTableNamesByNamespaceResponse.Builder b = ListTableNamesByNamespaceResponse.newBuilder(); 1488 for (TableName tableName : server.listTableNamesByNamespace(request.getNamespaceName())) { 1489 b.addTableName(ProtobufUtil.toProtoTableName(tableName)); 1490 } 1491 return b.build(); 1492 } catch (IOException e) { 1493 throw new ServiceException(e); 1494 } 1495 } 1496 1497 @Override 1498 public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req) 1499 throws ServiceException { 1500 try { 1501 long procId = server.modifyColumn(ProtobufUtil.toTableName(req.getTableName()), 1502 ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(), 1503 req.getNonce()); 1504 if (procId == -1) { 1505 // This mean operation was not performed in server, so do not set any procId 1506 return ModifyColumnResponse.newBuilder().build(); 1507 } else { 1508 return ModifyColumnResponse.newBuilder().setProcId(procId).build(); 1509 } 1510 } catch (IOException ioe) { 1511 throw new ServiceException(ioe); 1512 } 1513 } 1514 1515 @Override 1516 public ModifyColumnStoreFileTrackerResponse modifyColumnStoreFileTracker(RpcController controller, 1517 ModifyColumnStoreFileTrackerRequest req) throws ServiceException { 1518 try { 1519 long procId = 1520 server.modifyColumnStoreFileTracker(ProtobufUtil.toTableName(req.getTableName()), 1521 req.getFamily().toByteArray(), req.getDstSft(), req.getNonceGroup(), req.getNonce()); 1522 return ModifyColumnStoreFileTrackerResponse.newBuilder().setProcId(procId).build(); 1523 } catch (IOException ioe) { 1524 throw new ServiceException(ioe); 1525 } 1526 } 1527 1528 @Override 1529 public ModifyNamespaceResponse modifyNamespace(RpcController controller, 1530 ModifyNamespaceRequest request) throws ServiceException { 1531 try { 1532 long procId = 1533 server.modifyNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()), 1534 
request.getNonceGroup(), request.getNonce()); 1535 return ModifyNamespaceResponse.newBuilder().setProcId(procId).build(); 1536 } catch (IOException e) { 1537 throw new ServiceException(e); 1538 } 1539 } 1540 1541 @Override 1542 public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req) 1543 throws ServiceException { 1544 try { 1545 long procId = server.modifyTable(ProtobufUtil.toTableName(req.getTableName()), 1546 ProtobufUtil.toTableDescriptor(req.getTableSchema()), req.getNonceGroup(), req.getNonce(), 1547 req.getReopenRegions()); 1548 return ModifyTableResponse.newBuilder().setProcId(procId).build(); 1549 } catch (IOException ioe) { 1550 throw new ServiceException(ioe); 1551 } 1552 } 1553 1554 @Override 1555 public ModifyTableStoreFileTrackerResponse modifyTableStoreFileTracker(RpcController controller, 1556 ModifyTableStoreFileTrackerRequest req) throws ServiceException { 1557 try { 1558 long procId = server.modifyTableStoreFileTracker(ProtobufUtil.toTableName(req.getTableName()), 1559 req.getDstSft(), req.getNonceGroup(), req.getNonce()); 1560 return ModifyTableStoreFileTrackerResponse.newBuilder().setProcId(procId).build(); 1561 } catch (IOException ioe) { 1562 throw new ServiceException(ioe); 1563 } 1564 } 1565 1566 @Override 1567 public MoveRegionResponse moveRegion(RpcController controller, MoveRegionRequest req) 1568 throws ServiceException { 1569 final byte[] encodedRegionName = req.getRegion().getValue().toByteArray(); 1570 RegionSpecifierType type = req.getRegion().getType(); 1571 final byte[] destServerName = (req.hasDestServerName()) 1572 ? Bytes.toBytes(ProtobufUtil.toServerName(req.getDestServerName()).getServerName()) 1573 : null; 1574 MoveRegionResponse mrr = MoveRegionResponse.newBuilder().build(); 1575 1576 if (type != RegionSpecifierType.ENCODED_REGION_NAME) { 1577 LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME 1578 + " actual: " + type); 1579 } 1580 1581 try { 1582 server.checkInitialized(); 1583 server.move(encodedRegionName, destServerName); 1584 } catch (IOException ioe) { 1585 throw new ServiceException(ioe); 1586 } 1587 return mrr; 1588 } 1589 1590 /** 1591 * Offline specified region from master's in-memory state. It will not attempt to reassign the 1592 * region as in unassign. This is a special method that should be used by experts or hbck. 
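   * <p>
   * Illustrative usage sketch (assumption, not part of this class): operators typically reach this
   * through HBCK2 or the public Admin API, e.g. {@code admin.offline(regionName)} with the full
   * region name bytes, rather than invoking the RPC directly.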
1593 */ 1594 @Override 1595 public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request) 1596 throws ServiceException { 1597 try { 1598 server.checkInitialized(); 1599 1600 final RegionSpecifierType type = request.getRegion().getType(); 1601 if (type != RegionSpecifierType.REGION_NAME) { 1602 LOG.warn("offlineRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME 1603 + " actual: " + type); 1604 } 1605 1606 final byte[] regionName = request.getRegion().getValue().toByteArray(); 1607 final RegionInfo hri = server.getAssignmentManager().getRegionInfo(regionName); 1608 if (hri == null) { 1609 throw new UnknownRegionException(Bytes.toStringBinary(regionName)); 1610 } 1611 1612 if (server.cpHost != null) { 1613 server.cpHost.preRegionOffline(hri); 1614 } 1615 LOG.info(server.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString()); 1616 server.getAssignmentManager().offlineRegion(hri); 1617 if (server.cpHost != null) { 1618 server.cpHost.postRegionOffline(hri); 1619 } 1620 } catch (IOException ioe) { 1621 throw new ServiceException(ioe); 1622 } 1623 return OfflineRegionResponse.newBuilder().build(); 1624 } 1625 1626 /** 1627 * Execute Restore/Clone snapshot operation. 1628 * <p> 1629 * If the specified table exists a "Restore" is executed, replacing the table schema and directory 1630 * data with the content of the snapshot. The table must be disabled, or an 1631 * UnsupportedOperationException will be thrown. 1632 * <p> 1633 * If the table doesn't exist a "Clone" is executed, a new table is created using the schema at 1634 * the time of the snapshot, and the content of the snapshot. 1635 * <p> 1636 * The restore/clone operation does not require copying HFiles. Since HFiles are immutable the 1637 * table can point to and use the same files as the original one.
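   * <p>
   * Minimal client-side sketch (assumption; table and snapshot names are illustrative, and the
   * Admin calls live in the client module, not in this class):
   *
   * <pre>
   * try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()) {
   *   admin.disableTable(TableName.valueOf("t1"));                    // restore requires a disabled table
   *   admin.restoreSnapshot("snap-of-t1");                            // existing table: restore
   *   admin.cloneSnapshot("snap-of-t1", TableName.valueOf("t1_copy")); // missing table: clone
   * }
   * </pre>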
1638 */ 1639 @Override 1640 public RestoreSnapshotResponse restoreSnapshot(RpcController controller, 1641 RestoreSnapshotRequest request) throws ServiceException { 1642 try { 1643 long procId = server.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(), 1644 request.getNonce(), request.getRestoreACL(), request.getCustomSFT()); 1645 return RestoreSnapshotResponse.newBuilder().setProcId(procId).build(); 1646 } catch (ForeignException e) { 1647 throw new ServiceException(e.getCause()); 1648 } catch (IOException e) { 1649 throw new ServiceException(e); 1650 } 1651 } 1652 1653 @Override 1654 public SetSnapshotCleanupResponse switchSnapshotCleanup(RpcController controller, 1655 SetSnapshotCleanupRequest request) throws ServiceException { 1656 try { 1657 server.checkInitialized(); 1658 final boolean enabled = request.getEnabled(); 1659 final boolean isSynchronous = request.hasSynchronous() && request.getSynchronous(); 1660 final boolean prevSnapshotCleanupRunning = this.switchSnapshotCleanup(enabled, isSynchronous); 1661 return SetSnapshotCleanupResponse.newBuilder() 1662 .setPrevSnapshotCleanup(prevSnapshotCleanupRunning).build(); 1663 } catch (IOException e) { 1664 throw new ServiceException(e); 1665 } 1666 } 1667 1668 @Override 1669 public IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(RpcController controller, 1670 IsSnapshotCleanupEnabledRequest request) throws ServiceException { 1671 try { 1672 server.checkInitialized(); 1673 final boolean isSnapshotCleanupEnabled = server.snapshotCleanupStateStore.get(); 1674 return IsSnapshotCleanupEnabledResponse.newBuilder().setEnabled(isSnapshotCleanupEnabled) 1675 .build(); 1676 } catch (IOException e) { 1677 throw new ServiceException(e); 1678 } 1679 } 1680 1681 /** 1682 * Turn on/off snapshot auto-cleanup based on TTL 1683 * @param enabledNewVal Set to <code>true</code> to enable, <code>false</code> to disable 1684 * @param synchronous If <code>true</code>, it waits until current snapshot cleanup is 1685 * completed, if outstanding 1686 * @return previous snapshot auto-cleanup mode 1687 */ 1688 private synchronized boolean switchSnapshotCleanup(final boolean enabledNewVal, 1689 final boolean synchronous) throws IOException { 1690 final boolean oldValue = server.snapshotCleanupStateStore.get(); 1691 server.switchSnapshotCleanup(enabledNewVal, synchronous); 1692 LOG.info("{} Successfully set snapshot cleanup to {}", server.getClientIdAuditPrefix(), 1693 enabledNewVal); 1694 return oldValue; 1695 } 1696 1697 @Override 1698 public RunCatalogScanResponse runCatalogScan(RpcController c, RunCatalogScanRequest req) 1699 throws ServiceException { 1700 rpcPreCheck("runCatalogScan"); 1701 try { 1702 return ResponseConverter.buildRunCatalogScanResponse(this.server.catalogJanitorChore.scan()); 1703 } catch (IOException ioe) { 1704 throw new ServiceException(ioe); 1705 } 1706 } 1707 1708 @Override 1709 public RunCleanerChoreResponse runCleanerChore(RpcController c, RunCleanerChoreRequest req) 1710 throws ServiceException { 1711 rpcPreCheck("runCleanerChore"); 1712 try { 1713 CompletableFuture<Boolean> fileCleanerFuture = server.getHFileCleaner().triggerCleanerNow(); 1714 CompletableFuture<Boolean> logCleanerFuture = server.getLogCleaner().triggerCleanerNow(); 1715 boolean result = fileCleanerFuture.get() && logCleanerFuture.get(); 1716 return ResponseConverter.buildRunCleanerChoreResponse(result); 1717 } catch (InterruptedException e) { 1718 throw new ServiceException(e); 1719 } catch (ExecutionException e) { 1720 throw new 
ServiceException(e.getCause()); 1721 } 1722 } 1723 1724 @Override 1725 public SetBalancerRunningResponse setBalancerRunning(RpcController c, 1726 SetBalancerRunningRequest req) throws ServiceException { 1727 try { 1728 server.checkInitialized(); 1729 boolean prevValue = (req.getSynchronous()) 1730 ? synchronousBalanceSwitch(req.getOn()) 1731 : server.balanceSwitch(req.getOn()); 1732 return SetBalancerRunningResponse.newBuilder().setPrevBalanceValue(prevValue).build(); 1733 } catch (IOException ioe) { 1734 throw new ServiceException(ioe); 1735 } 1736 } 1737 1738 @Override 1739 public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request) 1740 throws ServiceException { 1741 LOG.info(server.getClientIdAuditPrefix() + " shutdown"); 1742 try { 1743 server.shutdown(); 1744 } catch (IOException e) { 1745 LOG.error("Exception occurred in HMaster.shutdown()", e); 1746 throw new ServiceException(e); 1747 } 1748 return ShutdownResponse.newBuilder().build(); 1749 } 1750 1751 /** 1752 * Triggers an asynchronous attempt to take a snapshot. {@inheritDoc} 1753 */ 1754 @Override 1755 public SnapshotResponse snapshot(RpcController controller, SnapshotRequest request) 1756 throws ServiceException { 1757 try { 1758 server.checkInitialized(); 1759 server.snapshotManager.checkSnapshotSupport(); 1760 1761 LOG.info(server.getClientIdAuditPrefix() + " snapshot request for:" 1762 + ClientSnapshotDescriptionUtils.toString(request.getSnapshot())); 1763 // get the snapshot information 1764 SnapshotDescription snapshot = 1765 SnapshotDescriptionUtils.validate(request.getSnapshot(), server.getConfiguration()); 1766 // send back the max amount of time the client should wait for the snapshot to complete 1767 long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(server.getConfiguration(), 1768 snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME); 1769 1770 SnapshotResponse.Builder builder = SnapshotResponse.newBuilder().setExpectedTimeout(waitTime); 1771 1772 // If there is nonce group and nonce in the snapshot request, then the client can 1773 // handle snapshot procedure procId. And if enable the snapshot procedure, we 1774 // will do the snapshot work with proc-v2, otherwise we will fall back to zk proc. 
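      // Put differently: a nonce-aware client gets a procId back and can poll it via
      // getProcedureResult, while older clients only receive the expected-timeout hint above and
      // keep polling isSnapshotDone as before.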
1775 if ( 1776 request.hasNonceGroup() && request.hasNonce() 1777 && server.snapshotManager.snapshotProcedureEnabled() 1778 ) { 1779 long nonceGroup = request.getNonceGroup(); 1780 long nonce = request.getNonce(); 1781 long procId = server.snapshotManager.takeSnapshot(snapshot, nonceGroup, nonce); 1782 return builder.setProcId(procId).build(); 1783 } else { 1784 server.snapshotManager.takeSnapshot(snapshot); 1785 return builder.build(); 1786 } 1787 } catch (ForeignException e) { 1788 throw new ServiceException(e.getCause()); 1789 } catch (IOException e) { 1790 throw new ServiceException(e); 1791 } 1792 } 1793 1794 @Override 1795 public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request) 1796 throws ServiceException { 1797 LOG.info(server.getClientIdAuditPrefix() + " stop"); 1798 try { 1799 server.stopMaster(); 1800 } catch (IOException e) { 1801 LOG.error("Exception occurred while stopping master", e); 1802 throw new ServiceException(e); 1803 } 1804 return StopMasterResponse.newBuilder().build(); 1805 } 1806 1807 @Override 1808 public IsInMaintenanceModeResponse isMasterInMaintenanceMode(final RpcController controller, 1809 final IsInMaintenanceModeRequest request) throws ServiceException { 1810 IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder(); 1811 response.setInMaintenanceMode(server.isInMaintenanceMode()); 1812 return response.build(); 1813 } 1814 1815 @Override 1816 public UnassignRegionResponse unassignRegion(RpcController controller, UnassignRegionRequest req) 1817 throws ServiceException { 1818 try { 1819 final byte[] regionName = req.getRegion().getValue().toByteArray(); 1820 RegionSpecifierType type = req.getRegion().getType(); 1821 UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build(); 1822 1823 server.checkInitialized(); 1824 if (type != RegionSpecifierType.REGION_NAME) { 1825 LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME 1826 + " actual: " + type); 1827 } 1828 RegionStateNode rsn = 1829 server.getAssignmentManager().getRegionStates().getRegionStateNodeFromName(regionName); 1830 if (rsn == null) { 1831 throw new UnknownRegionException(Bytes.toString(regionName)); 1832 } 1833 1834 RegionInfo hri = rsn.getRegionInfo(); 1835 if (server.cpHost != null) { 1836 server.cpHost.preUnassign(hri); 1837 } 1838 LOG.debug(server.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString() 1839 + " in current location if it is online"); 1840 server.getAssignmentManager().unassign(hri); 1841 if (server.cpHost != null) { 1842 server.cpHost.postUnassign(hri); 1843 } 1844 1845 return urr; 1846 } catch (IOException ioe) { 1847 throw new ServiceException(ioe); 1848 } 1849 } 1850 1851 @Override 1852 public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c, 1853 ReportRegionStateTransitionRequest req) throws ServiceException { 1854 try { 1855 server.checkServiceStarted(); 1856 for (RegionServerStatusProtos.RegionStateTransition transition : req.getTransitionList()) { 1857 long procId = 1858 transition.getProcIdCount() > 0 ? transition.getProcId(0) : Procedure.NO_PROC_ID; 1859 // -1 is less than any possible MasterActiveCode 1860 long initiatingMasterActiveTime = transition.hasInitiatingMasterActiveTime() 1861 ? 
transition.getInitiatingMasterActiveTime() 1862 : -1; 1863 throwOnOldMaster(procId, initiatingMasterActiveTime); 1864 } 1865 return server.getAssignmentManager().reportRegionStateTransition(req); 1866 } catch (IOException ioe) { 1867 throw new ServiceException(ioe); 1868 } 1869 } 1870 1871 @Override 1872 public SetQuotaResponse setQuota(RpcController c, SetQuotaRequest req) throws ServiceException { 1873 try { 1874 server.checkInitialized(); 1875 return server.getMasterQuotaManager().setQuota(req); 1876 } catch (Exception e) { 1877 throw new ServiceException(e); 1878 } 1879 } 1880 1881 @Override 1882 public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller, 1883 MajorCompactionTimestampRequest request) throws ServiceException { 1884 MajorCompactionTimestampResponse.Builder response = 1885 MajorCompactionTimestampResponse.newBuilder(); 1886 try { 1887 server.checkInitialized(); 1888 response.setCompactionTimestamp( 1889 server.getLastMajorCompactionTimestamp(ProtobufUtil.toTableName(request.getTableName()))); 1890 } catch (IOException e) { 1891 throw new ServiceException(e); 1892 } 1893 return response.build(); 1894 } 1895 1896 @Override 1897 public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion( 1898 RpcController controller, MajorCompactionTimestampForRegionRequest request) 1899 throws ServiceException { 1900 MajorCompactionTimestampResponse.Builder response = 1901 MajorCompactionTimestampResponse.newBuilder(); 1902 try { 1903 server.checkInitialized(); 1904 response.setCompactionTimestamp(server 1905 .getLastMajorCompactionTimestampForRegion(request.getRegion().getValue().toByteArray())); 1906 } catch (IOException e) { 1907 throw new ServiceException(e); 1908 } 1909 return response.build(); 1910 } 1911 1912 @Override 1913 public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller, 1914 IsBalancerEnabledRequest request) throws ServiceException { 1915 IsBalancerEnabledResponse.Builder response = IsBalancerEnabledResponse.newBuilder(); 1916 response.setEnabled(server.isBalancerOn()); 1917 return response.build(); 1918 } 1919 1920 @Override 1921 public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller, 1922 SetSplitOrMergeEnabledRequest request) throws ServiceException { 1923 SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder(); 1924 try { 1925 server.checkInitialized(); 1926 boolean newValue = request.getEnabled(); 1927 for (MasterProtos.MasterSwitchType masterSwitchType : request.getSwitchTypesList()) { 1928 MasterSwitchType switchType = convert(masterSwitchType); 1929 boolean oldValue = server.isSplitOrMergeEnabled(switchType); 1930 response.addPrevValue(oldValue); 1931 if (server.cpHost != null) { 1932 server.cpHost.preSetSplitOrMergeEnabled(newValue, switchType); 1933 } 1934 server.getSplitOrMergeStateStore().setSplitOrMergeEnabled(newValue, switchType); 1935 if (server.cpHost != null) { 1936 server.cpHost.postSetSplitOrMergeEnabled(newValue, switchType); 1937 } 1938 } 1939 } catch (IOException e) { 1940 throw new ServiceException(e); 1941 } 1942 return response.build(); 1943 } 1944 1945 @Override 1946 public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller, 1947 IsSplitOrMergeEnabledRequest request) throws ServiceException { 1948 IsSplitOrMergeEnabledResponse.Builder response = IsSplitOrMergeEnabledResponse.newBuilder(); 1949 
response.setEnabled(server.isSplitOrMergeEnabled(convert(request.getSwitchType()))); 1950 return response.build(); 1951 } 1952 1953 @Override 1954 public NormalizeResponse normalize(RpcController controller, NormalizeRequest request) 1955 throws ServiceException { 1956 rpcPreCheck("normalize"); 1957 try { 1958 final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder() 1959 .tableNames(ProtobufUtil.toTableNameList(request.getTableNamesList())) 1960 .regex(request.hasRegex() ? request.getRegex() : null) 1961 .namespace(request.hasNamespace() ? request.getNamespace() : null).build(); 1962 return NormalizeResponse.newBuilder() 1963 // all API requests are considered priority requests. 1964 .setNormalizerRan(server.normalizeRegions(ntfp, true)).build(); 1965 } catch (IOException ex) { 1966 throw new ServiceException(ex); 1967 } 1968 } 1969 1970 @Override 1971 public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller, 1972 SetNormalizerRunningRequest request) throws ServiceException { 1973 rpcPreCheck("setNormalizerRunning"); 1974 1975 // Sets normalizer on/off flag in ZK. 1976 // TODO: this method is totally broken in terms of atomicity of actions and values read. 1977 // 1. The contract has this RPC returning the previous value. There isn't a ZKUtil method 1978 // that lets us retrieve the previous value as part of setting a new value, so we simply 1979 // perform a read before issuing the update. Thus we have a data race opportunity, between 1980 // when the `prevValue` is read and whatever is actually overwritten. 1981 // 2. Down in `setNormalizerOn`, the call to `createAndWatch` inside of the catch clause can 1982 // itself fail in the event that the znode already exists. Thus, another data race, between 1983 // when the initial `setData` call is notified of the absence of the target znode and the 1984 // subsequent `createAndWatch`, with another client creating said node. 1985 // That said, there's supposed to be only one active master and thus there's supposed to be 1986 // only one process with the authority to modify the value. 
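    // Given the caveats above, the read-then-write below is best effort: prevValue is whatever the
    // flag held at read time, not the result of an atomic swap.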
1987 final boolean prevValue = server.getRegionNormalizerManager().isNormalizerOn(); 1988 final boolean newValue = request.getOn(); 1989 try { 1990 server.getRegionNormalizerManager().setNormalizerOn(newValue); 1991 } catch (IOException e) { 1992 throw new ServiceException(e); 1993 } 1994 LOG.info("{} set normalizerSwitch={}", server.getClientIdAuditPrefix(), newValue); 1995 return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build(); 1996 } 1997 1998 @Override 1999 public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller, 2000 IsNormalizerEnabledRequest request) { 2001 IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder(); 2002 response.setEnabled(server.isNormalizerOn()); 2003 return response.build(); 2004 } 2005 2006 /** 2007 * Returns the security capabilities in effect on the cluster 2008 */ 2009 @Override 2010 public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller, 2011 SecurityCapabilitiesRequest request) throws ServiceException { 2012 SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder(); 2013 try { 2014 server.checkInitialized(); 2015 Set<SecurityCapabilitiesResponse.Capability> capabilities = new HashSet<>(); 2016 // Authentication 2017 if (User.isHBaseSecurityEnabled(server.getConfiguration())) { 2018 capabilities.add(SecurityCapabilitiesResponse.Capability.SECURE_AUTHENTICATION); 2019 } else { 2020 capabilities.add(SecurityCapabilitiesResponse.Capability.SIMPLE_AUTHENTICATION); 2021 } 2022 // A coprocessor that implements AccessControlService can provide AUTHORIZATION and 2023 // CELL_AUTHORIZATION 2024 if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) { 2025 if (AccessChecker.isAuthorizationSupported(server.getConfiguration())) { 2026 capabilities.add(SecurityCapabilitiesResponse.Capability.AUTHORIZATION); 2027 } 2028 if (AccessController.isCellAuthorizationSupported(server.getConfiguration())) { 2029 capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_AUTHORIZATION); 2030 } 2031 } 2032 // A coprocessor that implements VisibilityLabelsService can provide CELL_VISIBILITY. 2033 if (server.cpHost != null && hasVisibilityLabelsServiceCoprocessor(server.cpHost)) { 2034 if (VisibilityController.isCellAuthorizationSupported(server.getConfiguration())) { 2035 capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_VISIBILITY); 2036 } 2037 } 2038 response.addAllCapabilities(capabilities); 2039 } catch (IOException e) { 2040 throw new ServiceException(e); 2041 } 2042 return response.build(); 2043 } 2044 2045 /** 2046 * Determines if there is a MasterCoprocessor deployed which implements 2047 * {@link AccessControlService.Interface}. 2048 */ 2049 boolean hasAccessControlServiceCoprocessor(MasterCoprocessorHost cpHost) { 2050 return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class), 2051 AccessControlService.Interface.class); 2052 } 2053 2054 /** 2055 * Determines if there is a MasterCoprocessor deployed which implements 2056 * {@link VisibilityLabelsService.Interface}. 2057 */ 2058 boolean hasVisibilityLabelsServiceCoprocessor(MasterCoprocessorHost cpHost) { 2059 return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class), 2060 VisibilityLabelsService.Interface.class); 2061 } 2062 2063 /** 2064 * Determines if there is a coprocessor implementation in the provided argument which extends or 2065 * implements the provided {@code service}. 
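   * <p>
   * For example, {@code checkCoprocessorWithService(cps, AccessControlService.Interface.class)}
   * returns true only when some deployed master coprocessor class is assignable to that interface,
   * mirroring how {@link #hasAccessControlServiceCoprocessor(MasterCoprocessorHost)} uses it.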
2066 */ 2067 boolean checkCoprocessorWithService(List<MasterCoprocessor> coprocessorsToCheck, 2068 Class<?> service) { 2069 if (coprocessorsToCheck == null || coprocessorsToCheck.isEmpty()) { 2070 return false; 2071 } 2072 for (MasterCoprocessor cp : coprocessorsToCheck) { 2073 if (service.isAssignableFrom(cp.getClass())) { 2074 return true; 2075 } 2076 } 2077 return false; 2078 } 2079 2080 private MasterSwitchType convert(MasterProtos.MasterSwitchType switchType) { 2081 switch (switchType) { 2082 case SPLIT: 2083 return MasterSwitchType.SPLIT; 2084 case MERGE: 2085 return MasterSwitchType.MERGE; 2086 default: 2087 break; 2088 } 2089 return null; 2090 } 2091 2092 @Override 2093 public AddReplicationPeerResponse addReplicationPeer(RpcController controller, 2094 AddReplicationPeerRequest request) throws ServiceException { 2095 try { 2096 long procId = server.addReplicationPeer(request.getPeerId(), 2097 ReplicationPeerConfigUtil.convert(request.getPeerConfig()), 2098 request.getPeerState().getState().equals(ReplicationState.State.ENABLED)); 2099 return AddReplicationPeerResponse.newBuilder().setProcId(procId).build(); 2100 } catch (ReplicationException | IOException e) { 2101 throw new ServiceException(e); 2102 } 2103 } 2104 2105 @Override 2106 public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller, 2107 RemoveReplicationPeerRequest request) throws ServiceException { 2108 try { 2109 long procId = server.removeReplicationPeer(request.getPeerId()); 2110 return RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build(); 2111 } catch (ReplicationException | IOException e) { 2112 throw new ServiceException(e); 2113 } 2114 } 2115 2116 @Override 2117 public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller, 2118 EnableReplicationPeerRequest request) throws ServiceException { 2119 try { 2120 long procId = server.enableReplicationPeer(request.getPeerId()); 2121 return EnableReplicationPeerResponse.newBuilder().setProcId(procId).build(); 2122 } catch (ReplicationException | IOException e) { 2123 throw new ServiceException(e); 2124 } 2125 } 2126 2127 @Override 2128 public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller, 2129 DisableReplicationPeerRequest request) throws ServiceException { 2130 try { 2131 long procId = server.disableReplicationPeer(request.getPeerId()); 2132 return DisableReplicationPeerResponse.newBuilder().setProcId(procId).build(); 2133 } catch (ReplicationException | IOException e) { 2134 throw new ServiceException(e); 2135 } 2136 } 2137 2138 @Override 2139 public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller, 2140 GetReplicationPeerConfigRequest request) throws ServiceException { 2141 GetReplicationPeerConfigResponse.Builder response = 2142 GetReplicationPeerConfigResponse.newBuilder(); 2143 try { 2144 String peerId = request.getPeerId(); 2145 ReplicationPeerConfig peerConfig = server.getReplicationPeerConfig(peerId); 2146 response.setPeerId(peerId); 2147 response.setPeerConfig(ReplicationPeerConfigUtil.convert(peerConfig)); 2148 } catch (ReplicationException | IOException e) { 2149 throw new ServiceException(e); 2150 } 2151 return response.build(); 2152 } 2153 2154 @Override 2155 public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(RpcController controller, 2156 UpdateReplicationPeerConfigRequest request) throws ServiceException { 2157 try { 2158 long procId = server.updateReplicationPeerConfig(request.getPeerId(), 2159 
ReplicationPeerConfigUtil.convert(request.getPeerConfig())); 2160 return UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build(); 2161 } catch (ReplicationException | IOException e) { 2162 throw new ServiceException(e); 2163 } 2164 } 2165 2166 @Override 2167 public TransitReplicationPeerSyncReplicationStateResponse 2168 transitReplicationPeerSyncReplicationState(RpcController controller, 2169 TransitReplicationPeerSyncReplicationStateRequest request) throws ServiceException { 2170 try { 2171 long procId = server.transitReplicationPeerSyncReplicationState(request.getPeerId(), 2172 ReplicationPeerConfigUtil.toSyncReplicationState(request.getSyncReplicationState())); 2173 return TransitReplicationPeerSyncReplicationStateResponse.newBuilder().setProcId(procId) 2174 .build(); 2175 } catch (ReplicationException | IOException e) { 2176 throw new ServiceException(e); 2177 } 2178 } 2179 2180 @Override 2181 public ListReplicationPeersResponse listReplicationPeers(RpcController controller, 2182 ListReplicationPeersRequest request) throws ServiceException { 2183 ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder(); 2184 try { 2185 List<ReplicationPeerDescription> peers = 2186 server.listReplicationPeers(request.hasRegex() ? request.getRegex() : null); 2187 for (ReplicationPeerDescription peer : peers) { 2188 response.addPeerDesc(ReplicationPeerConfigUtil.toProtoReplicationPeerDescription(peer)); 2189 } 2190 } catch (ReplicationException | IOException e) { 2191 throw new ServiceException(e); 2192 } 2193 return response.build(); 2194 } 2195 2196 @Override 2197 public GetReplicationPeerStateResponse isReplicationPeerEnabled(RpcController controller, 2198 GetReplicationPeerStateRequest request) throws ServiceException { 2199 boolean isEnabled; 2200 try { 2201 isEnabled = server.getReplicationPeerManager().getPeerState(request.getPeerId()); 2202 } catch (ReplicationException ioe) { 2203 throw new ServiceException(ioe); 2204 } 2205 return GetReplicationPeerStateResponse.newBuilder().setIsEnabled(isEnabled).build(); 2206 } 2207 2208 @Override 2209 public ReplicationPeerModificationSwitchResponse replicationPeerModificationSwitch( 2210 RpcController controller, ReplicationPeerModificationSwitchRequest request) 2211 throws ServiceException { 2212 try { 2213 server.checkInitialized(); 2214 boolean prevValue = server.replicationPeerModificationSwitch(request.getOn()); 2215 return ReplicationPeerModificationSwitchResponse.newBuilder().setPreviousValue(prevValue) 2216 .build(); 2217 } catch (IOException ioe) { 2218 throw new ServiceException(ioe); 2219 } 2220 } 2221 2222 @Override 2223 public GetReplicationPeerModificationProceduresResponse getReplicationPeerModificationProcedures( 2224 RpcController controller, GetReplicationPeerModificationProceduresRequest request) 2225 throws ServiceException { 2226 try { 2227 server.checkInitialized(); 2228 GetReplicationPeerModificationProceduresResponse.Builder builder = 2229 GetReplicationPeerModificationProceduresResponse.newBuilder(); 2230 for (Procedure<?> proc : server.getProcedures()) { 2231 if (proc.isFinished()) { 2232 continue; 2233 } 2234 if (!(proc instanceof AbstractPeerNoLockProcedure)) { 2235 continue; 2236 } 2237 builder.addProcedure(ProcedureUtil.convertToProtoProcedure(proc)); 2238 } 2239 return builder.build(); 2240 } catch (IOException ioe) { 2241 throw new ServiceException(ioe); 2242 } 2243 } 2244 2245 @Override 2246 public IsReplicationPeerModificationEnabledResponse 
isReplicationPeerModificationEnabled( 2247 RpcController controller, IsReplicationPeerModificationEnabledRequest request) 2248 throws ServiceException { 2249 try { 2250 server.checkInitialized(); 2251 return IsReplicationPeerModificationEnabledResponse.newBuilder() 2252 .setEnabled(server.isReplicationPeerModificationEnabled()).build(); 2253 } catch (IOException ioe) { 2254 throw new ServiceException(ioe); 2255 } 2256 } 2257 2258 @Override 2259 public ListDecommissionedRegionServersResponse listDecommissionedRegionServers( 2260 RpcController controller, ListDecommissionedRegionServersRequest request) 2261 throws ServiceException { 2262 ListDecommissionedRegionServersResponse.Builder response = 2263 ListDecommissionedRegionServersResponse.newBuilder(); 2264 try { 2265 server.checkInitialized(); 2266 if (server.cpHost != null) { 2267 server.cpHost.preListDecommissionedRegionServers(); 2268 } 2269 List<ServerName> servers = server.listDecommissionedRegionServers(); 2270 response.addAllServerName((servers.stream().map(server -> ProtobufUtil.toServerName(server))) 2271 .collect(Collectors.toList())); 2272 if (server.cpHost != null) { 2273 server.cpHost.postListDecommissionedRegionServers(); 2274 } 2275 } catch (IOException io) { 2276 throw new ServiceException(io); 2277 } 2278 2279 return response.build(); 2280 } 2281 2282 @Override 2283 public DecommissionRegionServersResponse decommissionRegionServers(RpcController controller, 2284 DecommissionRegionServersRequest request) throws ServiceException { 2285 try { 2286 server.checkInitialized(); 2287 List<ServerName> servers = request.getServerNameList().stream() 2288 .map(pbServer -> ProtobufUtil.toServerName(pbServer)).collect(Collectors.toList()); 2289 boolean offload = request.getOffload(); 2290 if (server.cpHost != null) { 2291 server.cpHost.preDecommissionRegionServers(servers, offload); 2292 } 2293 server.decommissionRegionServers(servers, offload); 2294 if (server.cpHost != null) { 2295 server.cpHost.postDecommissionRegionServers(servers, offload); 2296 } 2297 } catch (IOException io) { 2298 throw new ServiceException(io); 2299 } 2300 2301 return DecommissionRegionServersResponse.newBuilder().build(); 2302 } 2303 2304 @Override 2305 public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller, 2306 RecommissionRegionServerRequest request) throws ServiceException { 2307 try { 2308 server.checkInitialized(); 2309 ServerName sn = ProtobufUtil.toServerName(request.getServerName()); 2310 List<byte[]> encodedRegionNames = request.getRegionList().stream() 2311 .map(regionSpecifier -> regionSpecifier.getValue().toByteArray()) 2312 .collect(Collectors.toList()); 2313 if (server.cpHost != null) { 2314 server.cpHost.preRecommissionRegionServer(sn, encodedRegionNames); 2315 } 2316 server.recommissionRegionServer(sn, encodedRegionNames); 2317 if (server.cpHost != null) { 2318 server.cpHost.postRecommissionRegionServer(sn, encodedRegionNames); 2319 } 2320 } catch (IOException io) { 2321 throw new ServiceException(io); 2322 } 2323 2324 return RecommissionRegionServerResponse.newBuilder().build(); 2325 } 2326 2327 @Override 2328 public LockResponse requestLock(RpcController controller, final LockRequest request) 2329 throws ServiceException { 2330 try { 2331 if (request.getDescription().isEmpty()) { 2332 throw new IllegalArgumentException("Empty description"); 2333 } 2334 NonceProcedureRunnable npr; 2335 LockType type = LockType.valueOf(request.getLockType().name()); 2336 if (request.getRegionInfoCount() > 0) { 2337 final 
RegionInfo[] regionInfos = new RegionInfo[request.getRegionInfoCount()]; 2338 for (int i = 0; i < request.getRegionInfoCount(); ++i) { 2339 regionInfos[i] = ProtobufUtil.toRegionInfo(request.getRegionInfo(i)); 2340 } 2341 npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) { 2342 @Override 2343 protected void run() throws IOException { 2344 setProcId(server.getLockManager().remoteLocks().requestRegionsLock(regionInfos, 2345 request.getDescription(), getNonceKey())); 2346 } 2347 2348 @Override 2349 protected String getDescription() { 2350 return "RequestLock"; 2351 } 2352 }; 2353 } else if (request.hasTableName()) { 2354 final TableName tableName = ProtobufUtil.toTableName(request.getTableName()); 2355 npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) { 2356 @Override 2357 protected void run() throws IOException { 2358 setProcId(server.getLockManager().remoteLocks().requestTableLock(tableName, type, 2359 request.getDescription(), getNonceKey())); 2360 } 2361 2362 @Override 2363 protected String getDescription() { 2364 return "RequestLock"; 2365 } 2366 }; 2367 } else if (request.hasNamespace()) { 2368 npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) { 2369 @Override 2370 protected void run() throws IOException { 2371 setProcId(server.getLockManager().remoteLocks().requestNamespaceLock( 2372 request.getNamespace(), type, request.getDescription(), getNonceKey())); 2373 } 2374 2375 @Override 2376 protected String getDescription() { 2377 return "RequestLock"; 2378 } 2379 }; 2380 } else { 2381 throw new IllegalArgumentException("one of table/namespace/region should be specified"); 2382 } 2383 long procId = MasterProcedureUtil.submitProcedure(npr); 2384 return LockResponse.newBuilder().setProcId(procId).build(); 2385 } catch (IllegalArgumentException e) { 2386 LOG.warn("Exception when queuing lock", e); 2387 throw new ServiceException(new DoNotRetryIOException(e)); 2388 } catch (IOException e) { 2389 LOG.warn("Exception when queuing lock", e); 2390 throw new ServiceException(e); 2391 } 2392 } 2393 2394 /** 2395 * @return LOCKED, if procedure is found and it has the lock; else UNLOCKED. 2396 * @throws ServiceException if given proc id is found but it is not a LockProcedure. 
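   * <p>
   * The returned {@code timeoutMs} drives the caller's heartbeat loop (assumption about client
   * behaviour): lock holders are expected to re-send the heartbeat with {@code keepAlive=true}
   * well within that window, otherwise the lock may be released.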
   */
  @Override
  public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request)
    throws ServiceException {
    try {
      if (
        server.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(),
          request.getKeepAlive())
      ) {
        return LockHeartbeatResponse.newBuilder()
          .setTimeoutMs(server.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF,
            LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS))
          .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).build();
      } else {
        return LockHeartbeatResponse.newBuilder()
          .setLockStatus(LockHeartbeatResponse.LockStatus.UNLOCKED).build();
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller,
    RegionSpaceUseReportRequest request) throws ServiceException {
    try {
      server.checkInitialized();
      if (!QuotaUtil.isQuotaEnabled(server.getConfiguration())) {
        return RegionSpaceUseReportResponse.newBuilder().build();
      }
      MasterQuotaManager quotaManager = this.server.getMasterQuotaManager();
      if (quotaManager != null) {
        final long now = EnvironmentEdgeManager.currentTime();
        for (RegionSpaceUse report : request.getSpaceUseList()) {
          quotaManager.addRegionSize(ProtobufUtil.toRegionInfo(report.getRegionInfo()),
            report.getRegionSize(), now);
        }
      } else {
        LOG.debug("Received region space usage report but HMaster is not ready to process it, "
          + "skipping");
      }
      return RegionSpaceUseReportResponse.newBuilder().build();
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(RpcController controller,
    GetSpaceQuotaRegionSizesRequest request) throws ServiceException {
    try {
      server.checkInitialized();
      MasterQuotaManager quotaManager = this.server.getMasterQuotaManager();
      GetSpaceQuotaRegionSizesResponse.Builder builder =
        GetSpaceQuotaRegionSizesResponse.newBuilder();
      if (quotaManager != null) {
        Map<RegionInfo, Long> regionSizes = quotaManager.snapshotRegionSizes();
        Map<TableName, Long> regionSizesByTable = new HashMap<>();
        // Roll the per-region sizes up into per-table sizes
        for (Entry<RegionInfo, Long> entry : regionSizes.entrySet()) {
          final TableName tableName = entry.getKey().getTable();
          Long prevSize = regionSizesByTable.get(tableName);
          if (prevSize == null) {
            prevSize = 0L;
          }
          regionSizesByTable.put(tableName, prevSize + entry.getValue());
        }
        // Serialize them into the protobuf
        for (Entry<TableName, Long> tableSize : regionSizesByTable.entrySet()) {
          builder.addSizes(
            RegionSizes.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableSize.getKey()))
              .setSize(tableSize.getValue()).build());
        }
        return builder.build();
      } else {
        LOG.debug("Received space quota region size report but HMaster is not ready to process "
          + "it, skipping");
      }
      return builder.build();
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public GetQuotaStatesResponse getQuotaStates(RpcController controller,
    GetQuotaStatesRequest request) throws ServiceException {
    try {
      server.checkInitialized();
      QuotaObserverChore quotaChore = this.server.getQuotaObserverChore();
      GetQuotaStatesResponse.Builder builder = GetQuotaStatesResponse.newBuilder();
      if (quotaChore != null) {
        // The "current" view of all tables with quotas
        Map<TableName, SpaceQuotaSnapshot> tableSnapshots = quotaChore.getTableQuotaSnapshots();
        for (Entry<TableName, SpaceQuotaSnapshot> entry : tableSnapshots.entrySet()) {
          builder.addTableSnapshots(TableQuotaSnapshot.newBuilder()
            .setTableName(ProtobufUtil.toProtoTableName(entry.getKey()))
            .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
        }
        // The "current" view of all namespaces with quotas
        Map<String, SpaceQuotaSnapshot> nsSnapshots = quotaChore.getNamespaceQuotaSnapshots();
        for (Entry<String, SpaceQuotaSnapshot> entry : nsSnapshots.entrySet()) {
          builder.addNsSnapshots(NamespaceQuotaSnapshot.newBuilder().setNamespace(entry.getKey())
            .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
        }
        return builder.build();
      }
      return builder.build();
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public ClearDeadServersResponse clearDeadServers(RpcController controller,
    ClearDeadServersRequest request) throws ServiceException {
    LOG.debug(server.getClientIdAuditPrefix() + " clear dead region servers.");
    ClearDeadServersResponse.Builder response = ClearDeadServersResponse.newBuilder();
    try {
      server.checkInitialized();
      if (server.cpHost != null) {
        server.cpHost.preClearDeadServers();
      }

      if (server.getServerManager().areDeadServersInProgress()) {
        LOG.debug("Some dead servers are still being processed, won't clear the dead server list");
        response.addAllServerName(request.getServerNameList());
      } else {
        DeadServer deadServer = server.getServerManager().getDeadServers();
        Set<Address> clearedServers = new HashSet<>();
        for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
          ServerName serverName = ProtobufUtil.toServerName(pbServer);
          final boolean deadInProcess =
            server.getProcedures().stream().anyMatch(p -> (p instanceof ServerCrashProcedure)
              && ((ServerCrashProcedure) p).getServerName().equals(serverName));
          if (deadInProcess) {
            throw new ServiceException(String.format(
              "Dead server '%s' is still being processed and cannot be cleared yet", serverName));
          }

          if (!deadServer.removeDeadServer(serverName)) {
            response.addServerName(pbServer);
          } else {
            clearedServers.add(serverName.getAddress());
          }
        }
        server.getRSGroupInfoManager().removeServers(clearedServers);
        LOG.info("Removed decommissioned servers {} from RSGroup", clearedServers);
      }

      if (server.cpHost != null) {
        server.cpHost.postClearDeadServers(
          ProtobufUtil.toServerNameList(request.getServerNameList()),
          ProtobufUtil.toServerNameList(response.getServerNameList()));
      }
    } catch (IOException io) {
      throw new ServiceException(io);
    }
    return response.build();
  }

  @Override
  public ReportProcedureDoneResponse reportProcedureDone(RpcController controller,
    ReportProcedureDoneRequest request) throws ServiceException {
    // Check that the Master is up and ready for duty before progressing. The remote side will
    // keep trying.
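    // Flow note (descriptive of the code below): every result in the request is first validated
    // with throwOnOldMaster(), which rejects reports whose initiatingMasterActiveTime is newer
    // than this master's active time; only after all results pass are they applied, marking each
    // procedure as remotely completed or failed.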
2562 try { 2563 this.server.checkServiceStarted(); 2564 for (RemoteProcedureResult result : request.getResultList()) { 2565 // -1 is less than any possible MasterActiveCode 2566 long initiatingMasterActiveTime = 2567 result.hasInitiatingMasterActiveTime() ? result.getInitiatingMasterActiveTime() : -1; 2568 throwOnOldMaster(result.getProcId(), initiatingMasterActiveTime); 2569 } 2570 } catch (IOException ioe) { 2571 throw new ServiceException(ioe); 2572 } 2573 request.getResultList().forEach(result -> { 2574 if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) { 2575 server.remoteProcedureCompleted(result.getProcId()); 2576 } else { 2577 server.remoteProcedureFailed(result.getProcId(), 2578 RemoteProcedureException.fromProto(result.getError())); 2579 } 2580 }); 2581 return ReportProcedureDoneResponse.getDefaultInstance(); 2582 } 2583 2584 private void throwOnOldMaster(long procId, long initiatingMasterActiveTime) 2585 throws MasterNotRunningException { 2586 if (initiatingMasterActiveTime > server.getMasterActiveTime()) { 2587 // procedure is initiated by new active master but report received on master with older active 2588 // time 2589 LOG.warn( 2590 "Report for procId: {} and initiatingMasterAT {} received on master with activeTime {}", 2591 procId, initiatingMasterActiveTime, server.getMasterActiveTime()); 2592 throw new MasterNotRunningException("Another master is active"); 2593 } 2594 } 2595 2596 @Override 2597 public FileArchiveNotificationResponse reportFileArchival(RpcController controller, 2598 FileArchiveNotificationRequest request) throws ServiceException { 2599 try { 2600 server.checkInitialized(); 2601 if (!QuotaUtil.isQuotaEnabled(server.getConfiguration())) { 2602 return FileArchiveNotificationResponse.newBuilder().build(); 2603 } 2604 server.getMasterQuotaManager().processFileArchivals(request, server.getConnection(), 2605 server.getConfiguration(), server.getFileSystem()); 2606 return FileArchiveNotificationResponse.newBuilder().build(); 2607 } catch (Exception e) { 2608 throw new ServiceException(e); 2609 } 2610 } 2611 2612 // HBCK Services 2613 2614 @Override 2615 public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest req) 2616 throws ServiceException { 2617 rpcPreCheck("runHbckChore"); 2618 LOG.info("{} request HBCK chore to run", server.getClientIdAuditPrefix()); 2619 HbckChore hbckChore = server.getHbckChore(); 2620 boolean ran = hbckChore.runChore(); 2621 return RunHbckChoreResponse.newBuilder().setRan(ran).build(); 2622 } 2623 2624 /** 2625 * Update state of the table in meta only. This is required by hbck in some situations to cleanup 2626 * stuck assign/ unassign regions procedures for the table. 
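   * <p>
   * Hedged example (names assumed, not defined here): an hbck-style caller would build the
   * request with the generated protobuf API, e.g.
   * {@code SetTableStateInMetaRequest.newBuilder().setTableName(pbTableName)
   * .setTableState(pbTableState).build()}, where {@code pbTableName} and {@code pbTableState} are
   * the protobuf forms of the table name and the desired state; the previous state is returned,
   * as noted below.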
2627 * @return previous state of the table 2628 */ 2629 @Override 2630 public GetTableStateResponse setTableStateInMeta(RpcController controller, 2631 SetTableStateInMetaRequest request) throws ServiceException { 2632 rpcPreCheck("setTableStateInMeta"); 2633 TableName tn = ProtobufUtil.toTableName(request.getTableName()); 2634 try { 2635 TableState prevState = this.server.getTableStateManager().getTableState(tn); 2636 TableState newState = TableState.convert(tn, request.getTableState()); 2637 LOG.info("{} set table={} state from {} to {}", server.getClientIdAuditPrefix(), tn, 2638 prevState.getState(), newState.getState()); 2639 this.server.getTableStateManager().setTableState(tn, newState.getState()); 2640 return GetTableStateResponse.newBuilder().setTableState(prevState.convert()).build(); 2641 } catch (Exception e) { 2642 throw new ServiceException(e); 2643 } 2644 } 2645 2646 /** 2647 * Update state of the region in meta only. This is required by hbck in some situations to cleanup 2648 * stuck assign/ unassign regions procedures for the table. 2649 * @return previous states of the regions 2650 */ 2651 @Override 2652 public SetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller, 2653 SetRegionStateInMetaRequest request) throws ServiceException { 2654 rpcPreCheck("setRegionStateInMeta"); 2655 SetRegionStateInMetaResponse.Builder builder = SetRegionStateInMetaResponse.newBuilder(); 2656 final AssignmentManager am = server.getAssignmentManager(); 2657 try { 2658 for (RegionSpecifierAndState s : request.getStatesList()) { 2659 final RegionSpecifier spec = s.getRegionSpecifier(); 2660 final RegionInfo targetRegionInfo = getRegionInfo(spec); 2661 final RegionState.State targetState = RegionState.State.convert(s.getState()); 2662 final RegionState.State currentState = Optional.ofNullable(targetRegionInfo) 2663 .map(info -> am.getRegionStates().getRegionState(info)).map(RegionState::getState) 2664 .orElseThrow( 2665 () -> new ServiceException("No existing state known for region '" + spec + "'.")); 2666 LOG.info("{} set region={} state from {} to {}", server.getClientIdAuditPrefix(), 2667 targetRegionInfo, currentState, targetState); 2668 if (currentState == targetState) { 2669 LOG.debug("Proposed state matches current state. {}, {}", targetRegionInfo, currentState); 2670 continue; 2671 } 2672 MetaTableAccessor.updateRegionState(server.getConnection(), targetRegionInfo, targetState); 2673 // Loads from meta again to refresh AM cache with the new region state 2674 am.populateRegionStatesFromMeta(targetRegionInfo); 2675 builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec) 2676 .setState(currentState.convert())); 2677 } 2678 } catch (IOException e) { 2679 throw new ServiceException(e); 2680 } 2681 return builder.build(); 2682 } 2683 2684 /** 2685 * Get {@link RegionInfo} from Master using content of {@link RegionSpecifier} as key. 2686 * @return {@link RegionInfo} found by decoding {@code rs} or {@code null} if {@code rs} is 2687 * unknown to the master. 2688 * @throws ServiceException If some error occurs while querying META or parsing results. 2689 */ 2690 private RegionInfo getRegionInfo(HBaseProtos.RegionSpecifier rs) throws ServiceException { 2691 // TODO: this doesn't handle MOB regions. Should it? See the public method #getRegionInfo 2692 final AssignmentManager am = server.getAssignmentManager(); 2693 final String encodedRegionName; 2694 final RegionInfo info; 2695 // first try resolving from the AM's caches. 
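    // Illustrative note (client-side construction assumed): a specifier is typically built with
    // the generated protobuf API, e.g. RegionSpecifier.newBuilder()
    //   .setType(RegionSpecifierType.ENCODED_REGION_NAME)
    //   .setValue(ByteString.copyFromUtf8(encodedName)).build();
    // both specifier types handled below resolve to the same cached RegionInfo when it is known.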
2696 switch (rs.getType()) { 2697 case REGION_NAME: 2698 final byte[] regionName = rs.getValue().toByteArray(); 2699 encodedRegionName = RegionInfo.encodeRegionName(regionName); 2700 info = am.getRegionInfo(regionName); 2701 break; 2702 case ENCODED_REGION_NAME: 2703 encodedRegionName = rs.getValue().toStringUtf8(); 2704 info = am.getRegionInfo(encodedRegionName); 2705 break; 2706 default: 2707 throw new IllegalArgumentException("Unrecognized RegionSpecifierType " + rs.getType()); 2708 } 2709 if (info != null) { 2710 return info; 2711 } 2712 // fall back to a meta scan and check the cache again. 2713 try { 2714 am.populateRegionStatesFromMeta(encodedRegionName); 2715 } catch (IOException e) { 2716 throw new ServiceException(e); 2717 } 2718 return am.getRegionInfo(encodedRegionName); 2719 } 2720 2721 /** 2722 * @throws ServiceException If no MasterProcedureExecutor 2723 */ 2724 private void checkMasterProcedureExecutor() throws ServiceException { 2725 if (this.server.getMasterProcedureExecutor() == null) { 2726 throw new ServiceException("Master's ProcedureExecutor not initialized; retry later"); 2727 } 2728 } 2729 2730 /** 2731 * A 'raw' version of assign that does bulk and can skirt Master state checks if override is set; 2732 * i.e. assigns can be forced during Master startup or if RegionState is unclean. Used by HBCK2. 2733 */ 2734 @Override 2735 public MasterProtos.AssignsResponse assigns(RpcController controller, 2736 MasterProtos.AssignsRequest request) throws ServiceException { 2737 checkMasterProcedureExecutor(); 2738 final ProcedureExecutor<MasterProcedureEnv> pe = server.getMasterProcedureExecutor(); 2739 final AssignmentManager am = server.getAssignmentManager(); 2740 MasterProtos.AssignsResponse.Builder responseBuilder = 2741 MasterProtos.AssignsResponse.newBuilder(); 2742 final boolean override = request.getOverride(); 2743 final boolean force = request.getForce(); 2744 LOG.info("{} assigns, override={}", server.getClientIdAuditPrefix(), override); 2745 for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) { 2746 final RegionInfo info = getRegionInfo(rs); 2747 if (info == null) { 2748 LOG.info("Unknown region {}", rs); 2749 continue; 2750 } 2751 responseBuilder.addPid(Optional.ofNullable(am.createOneAssignProcedure(info, override, force)) 2752 .map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID)); 2753 } 2754 return responseBuilder.build(); 2755 } 2756 2757 /** 2758 * A 'raw' version of unassign that does bulk and can skirt Master state checks if override is 2759 * set; i.e. unassigns can be forced during Master startup or if RegionState is unclean. Used by 2760 * HBCK2. 
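   * <p>
   * Hedged usage note: an HBCK2-style caller is expected to pass one RegionSpecifier per region;
   * regions that cannot be resolved are skipped, and {@code Procedure.NO_PROC_ID} is reported
   * when no procedure could be created. The hbck2 command-line wiring itself is assumed rather
   * than shown here.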
   */
  @Override
  public MasterProtos.UnassignsResponse unassigns(RpcController controller,
    MasterProtos.UnassignsRequest request) throws ServiceException {
    checkMasterProcedureExecutor();
    final ProcedureExecutor<MasterProcedureEnv> pe = server.getMasterProcedureExecutor();
    final AssignmentManager am = server.getAssignmentManager();
    MasterProtos.UnassignsResponse.Builder responseBuilder =
      MasterProtos.UnassignsResponse.newBuilder();
    final boolean override = request.getOverride();
    final boolean force = request.getForce();
    LOG.info("{} unassigns, override={}", server.getClientIdAuditPrefix(), override);
    for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
      final RegionInfo info = getRegionInfo(rs);
      if (info == null) {
        LOG.info("Unknown region {}", rs);
        continue;
      }
      responseBuilder
        .addPid(Optional.ofNullable(am.createOneUnassignProcedure(info, override, force))
          .map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID));
    }
    return responseBuilder.build();
  }

  /**
   * Bypass the specified procedures to completion. Each procedure is marked completed, but no
   * actual work is done from its current state/step onwards. Parents of the procedure are also
   * marked for bypass. NOTE: this is a dangerous operation that may be used to unstick buggy
   * procedures; it can leave the system in an incoherent state and may need to be followed by
   * manual cleanup steps by the operator.
   * @return BypassProcedureToCompletionResponse indicating success or failure
   */
  @Override
  public MasterProtos.BypassProcedureResponse bypassProcedure(RpcController controller,
    MasterProtos.BypassProcedureRequest request) throws ServiceException {
    try {
      LOG.info("{} bypass procedures={}, waitTime={}, override={}, recursive={}",
        server.getClientIdAuditPrefix(), request.getProcIdList(), request.getWaitTime(),
        request.getOverride(), request.getRecursive());
      List<Boolean> ret =
        server.getMasterProcedureExecutor().bypassProcedure(request.getProcIdList(),
          request.getWaitTime(), request.getOverride(), request.getRecursive());
      return MasterProtos.BypassProcedureResponse.newBuilder().addAllBypassed(ret).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProcedure(
    RpcController controller, MasterProtos.ScheduleServerCrashProcedureRequest request)
    throws ServiceException {
    List<Long> pids = new ArrayList<>();
    for (HBaseProtos.ServerName sn : request.getServerNameList()) {
      ServerName serverName = ProtobufUtil.toServerName(sn);
      LOG.info("{} schedule ServerCrashProcedure for {}", this.server.getClientIdAuditPrefix(),
        serverName);
      if (shouldSubmitSCP(serverName)) {
        pids.add(this.server.getServerManager().expireServer(serverName, true));
      } else {
        pids.add(Procedure.NO_PROC_ID);
      }
    }
    return MasterProtos.ScheduleServerCrashProcedureResponse.newBuilder().addAllPid(pids).build();
  }

  @Override
  public MasterProtos.ScheduleSCPsForUnknownServersResponse scheduleSCPsForUnknownServers(
    RpcController controller, MasterProtos.ScheduleSCPsForUnknownServersRequest request)
    throws ServiceException {
    List<Long> pids = new ArrayList<>();
    final Set<ServerName> serverNames = server.getAssignmentManager().getRegionStates()
2834 .getRegionStates().stream().map(RegionState::getServerName).collect(Collectors.toSet()); 2835 2836 final Set<ServerName> unknownServerNames = serverNames.stream() 2837 .filter(sn -> server.getServerManager().isServerUnknown(sn)).collect(Collectors.toSet()); 2838 2839 for (ServerName sn : unknownServerNames) { 2840 LOG.info("{} schedule ServerCrashProcedure for unknown {}", 2841 this.server.getClientIdAuditPrefix(), sn); 2842 if (shouldSubmitSCP(sn)) { 2843 pids.add(this.server.getServerManager().expireServer(sn, true)); 2844 } else { 2845 pids.add(Procedure.NO_PROC_ID); 2846 } 2847 } 2848 return MasterProtos.ScheduleSCPsForUnknownServersResponse.newBuilder().addAllPid(pids).build(); 2849 } 2850 2851 @Override 2852 public FixMetaResponse fixMeta(RpcController controller, FixMetaRequest request) 2853 throws ServiceException { 2854 rpcPreCheck("fixMeta"); 2855 try { 2856 MetaFixer mf = new MetaFixer(this.server); 2857 mf.fix(); 2858 return FixMetaResponse.newBuilder().build(); 2859 } catch (IOException ioe) { 2860 throw new ServiceException(ioe); 2861 } 2862 } 2863 2864 @Override 2865 public SwitchRpcThrottleResponse switchRpcThrottle(RpcController controller, 2866 SwitchRpcThrottleRequest request) throws ServiceException { 2867 try { 2868 server.checkInitialized(); 2869 return server.getMasterQuotaManager().switchRpcThrottle(request); 2870 } catch (Exception e) { 2871 throw new ServiceException(e); 2872 } 2873 } 2874 2875 @Override 2876 public MasterProtos.IsRpcThrottleEnabledResponse isRpcThrottleEnabled(RpcController controller, 2877 MasterProtos.IsRpcThrottleEnabledRequest request) throws ServiceException { 2878 try { 2879 server.checkInitialized(); 2880 return server.getMasterQuotaManager().isRpcThrottleEnabled(request); 2881 } catch (Exception e) { 2882 throw new ServiceException(e); 2883 } 2884 } 2885 2886 @Override 2887 public SwitchExceedThrottleQuotaResponse switchExceedThrottleQuota(RpcController controller, 2888 SwitchExceedThrottleQuotaRequest request) throws ServiceException { 2889 try { 2890 server.checkInitialized(); 2891 return server.getMasterQuotaManager().switchExceedThrottleQuota(request); 2892 } catch (Exception e) { 2893 throw new ServiceException(e); 2894 } 2895 } 2896 2897 @Override 2898 public GrantResponse grant(RpcController controller, GrantRequest request) 2899 throws ServiceException { 2900 try { 2901 server.checkInitialized(); 2902 if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) { 2903 final UserPermission perm = 2904 ShadedAccessControlUtil.toUserPermission(request.getUserPermission()); 2905 boolean mergeExistingPermissions = request.getMergeExistingPermissions(); 2906 server.cpHost.preGrant(perm, mergeExistingPermissions); 2907 try (Table table = server.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { 2908 PermissionStorage.addUserPermission(getConfiguration(), perm, table, 2909 mergeExistingPermissions); 2910 } 2911 server.cpHost.postGrant(perm, mergeExistingPermissions); 2912 return GrantResponse.getDefaultInstance(); 2913 } else { 2914 throw new DoNotRetryIOException( 2915 new UnsupportedOperationException(AccessController.class.getName() + " is not loaded")); 2916 } 2917 } catch (IOException ioe) { 2918 throw new ServiceException(ioe); 2919 } 2920 } 2921 2922 @Override 2923 public RevokeResponse revoke(RpcController controller, RevokeRequest request) 2924 throws ServiceException { 2925 try { 2926 server.checkInitialized(); 2927 if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) 
      {
        final UserPermission userPermission =
          ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
        server.cpHost.preRevoke(userPermission);
        try (Table table = server.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
          PermissionStorage.removeUserPermission(server.getConfiguration(), userPermission, table);
        }
        server.cpHost.postRevoke(userPermission);
        return RevokeResponse.getDefaultInstance();
      } else {
        throw new DoNotRetryIOException(
          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public GetUserPermissionsResponse getUserPermissions(RpcController controller,
    GetUserPermissionsRequest request) throws ServiceException {
    try {
      server.checkInitialized();
      if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) {
        final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null;
        String namespace =
          request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null;
        TableName table =
          request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
        byte[] cf = request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null;
        byte[] cq =
          request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null;
        Type permissionType = request.hasType() ? request.getType() : null;
        server.getMasterCoprocessorHost().preGetUserPermissions(userName, namespace, table, cf, cq);

        List<UserPermission> perms = null;
        if (permissionType == Type.Table) {
          boolean filter = cf != null || userName != null;
          perms = PermissionStorage.getUserTablePermissions(server.getConfiguration(), table, cf,
            cq, userName, filter);
        } else if (permissionType == Type.Namespace) {
          perms = PermissionStorage.getUserNamespacePermissions(server.getConfiguration(),
            namespace, userName, userName != null);
        } else {
          perms = PermissionStorage.getUserPermissions(server.getConfiguration(), null, null, null,
            userName, userName != null);
          // Skip super users when a filter user is specified
          if (userName == null) {
            // Add superusers explicitly to the result set, as PermissionStorage does not store
            // them. Also use acl as the table name to stay in line with the results for global
            // admins and to avoid leaking information about who the superusers are.
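            // (Descriptive note: the loop below gives each superuser a single global permission
            // carrying every Action, which is what Action.values() expands to.)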
2978 for (String user : Superusers.getSuperUsers()) { 2979 perms.add(new UserPermission(user, 2980 Permission.newBuilder().withActions(Action.values()).build())); 2981 } 2982 } 2983 } 2984 2985 server.getMasterCoprocessorHost().postGetUserPermissions(userName, namespace, table, cf, 2986 cq); 2987 AccessControlProtos.GetUserPermissionsResponse response = 2988 ShadedAccessControlUtil.buildGetUserPermissionsResponse(perms); 2989 return response; 2990 } else { 2991 throw new DoNotRetryIOException( 2992 new UnsupportedOperationException(AccessController.class.getName() + " is not loaded")); 2993 } 2994 } catch (IOException ioe) { 2995 throw new ServiceException(ioe); 2996 } 2997 } 2998 2999 @Override 3000 public HasUserPermissionsResponse hasUserPermissions(RpcController controller, 3001 HasUserPermissionsRequest request) throws ServiceException { 3002 try { 3003 server.checkInitialized(); 3004 if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) { 3005 User caller = RpcServer.getRequestUser().orElse(null); 3006 String userName = 3007 request.hasUserName() ? request.getUserName().toStringUtf8() : caller.getShortName(); 3008 List<Permission> permissions = new ArrayList<>(); 3009 for (int i = 0; i < request.getPermissionCount(); i++) { 3010 permissions.add(ShadedAccessControlUtil.toPermission(request.getPermission(i))); 3011 } 3012 server.getMasterCoprocessorHost().preHasUserPermissions(userName, permissions); 3013 if (!caller.getShortName().equals(userName)) { 3014 List<String> groups = AccessChecker.getUserGroups(userName); 3015 caller = new InputUser(userName, groups.toArray(new String[groups.size()])); 3016 } 3017 List<Boolean> hasUserPermissions = new ArrayList<>(); 3018 if (getAccessChecker() != null) { 3019 for (Permission permission : permissions) { 3020 boolean hasUserPermission = 3021 getAccessChecker().hasUserPermission(caller, "hasUserPermissions", permission); 3022 hasUserPermissions.add(hasUserPermission); 3023 } 3024 } else { 3025 for (int i = 0; i < permissions.size(); i++) { 3026 hasUserPermissions.add(true); 3027 } 3028 } 3029 server.getMasterCoprocessorHost().postHasUserPermissions(userName, permissions); 3030 HasUserPermissionsResponse.Builder builder = 3031 HasUserPermissionsResponse.newBuilder().addAllHasUserPermission(hasUserPermissions); 3032 return builder.build(); 3033 } else { 3034 throw new DoNotRetryIOException( 3035 new UnsupportedOperationException(AccessController.class.getName() + " is not loaded")); 3036 } 3037 } catch (IOException ioe) { 3038 throw new ServiceException(ioe); 3039 } 3040 } 3041 3042 private boolean shouldSubmitSCP(ServerName serverName) { 3043 // check if there is already a SCP of this server running 3044 List<Procedure<MasterProcedureEnv>> procedures = 3045 server.getMasterProcedureExecutor().getProcedures(); 3046 for (Procedure<MasterProcedureEnv> procedure : procedures) { 3047 if (procedure instanceof ServerCrashProcedure) { 3048 if ( 3049 serverName.compareTo(((ServerCrashProcedure) procedure).getServerName()) == 0 3050 && !procedure.isFinished() 3051 ) { 3052 LOG.info("there is already a SCP of this server {} running, pid {}", serverName, 3053 procedure.getProcId()); 3054 return false; 3055 } 3056 } 3057 } 3058 return true; 3059 } 3060 3061 @Override 3062 public GetRSGroupInfoResponse getRSGroupInfo(RpcController controller, 3063 GetRSGroupInfoRequest request) throws ServiceException { 3064 String groupName = request.getRSGroupName(); 3065 LOG.info( 3066 server.getClientIdAuditPrefix() + " initiates rsgroup info 
retrieval, group=" + groupName); 3067 try { 3068 if (server.getMasterCoprocessorHost() != null) { 3069 server.getMasterCoprocessorHost().preGetRSGroupInfo(groupName); 3070 } 3071 RSGroupInfo rsGroupInfo = server.getRSGroupInfoManager().getRSGroup(groupName); 3072 GetRSGroupInfoResponse resp; 3073 if (rsGroupInfo != null) { 3074 resp = GetRSGroupInfoResponse.newBuilder() 3075 .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build(); 3076 } else { 3077 resp = GetRSGroupInfoResponse.getDefaultInstance(); 3078 } 3079 if (server.getMasterCoprocessorHost() != null) { 3080 server.getMasterCoprocessorHost().postGetRSGroupInfo(groupName); 3081 } 3082 return resp; 3083 } catch (IOException e) { 3084 throw new ServiceException(e); 3085 } 3086 } 3087 3088 @Override 3089 public GetRSGroupInfoOfTableResponse getRSGroupInfoOfTable(RpcController controller, 3090 GetRSGroupInfoOfTableRequest request) throws ServiceException { 3091 TableName tableName = ProtobufUtil.toTableName(request.getTableName()); 3092 LOG.info( 3093 server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName); 3094 try { 3095 if (server.getMasterCoprocessorHost() != null) { 3096 server.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName); 3097 } 3098 GetRSGroupInfoOfTableResponse resp; 3099 TableDescriptor td = server.getTableDescriptors().get(tableName); 3100 if (td == null) { 3101 resp = GetRSGroupInfoOfTableResponse.getDefaultInstance(); 3102 } else { 3103 RSGroupInfo rsGroupInfo = 3104 RSGroupUtil.getRSGroupInfo(server, server.getRSGroupInfoManager(), tableName) 3105 .orElse(server.getRSGroupInfoManager().getRSGroup(RSGroupInfo.DEFAULT_GROUP)); 3106 resp = GetRSGroupInfoOfTableResponse.newBuilder() 3107 .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build(); 3108 } 3109 if (server.getMasterCoprocessorHost() != null) { 3110 server.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName); 3111 } 3112 return resp; 3113 } catch (IOException e) { 3114 throw new ServiceException(e); 3115 } 3116 } 3117 3118 @Override 3119 public GetRSGroupInfoOfServerResponse getRSGroupInfoOfServer(RpcController controller, 3120 GetRSGroupInfoOfServerRequest request) throws ServiceException { 3121 Address hp = 3122 Address.fromParts(request.getServer().getHostName(), request.getServer().getPort()); 3123 LOG.info(server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp); 3124 try { 3125 if (server.getMasterCoprocessorHost() != null) { 3126 server.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp); 3127 } 3128 RSGroupInfo rsGroupInfo = server.getRSGroupInfoManager().getRSGroupOfServer(hp); 3129 GetRSGroupInfoOfServerResponse resp; 3130 if (rsGroupInfo != null) { 3131 resp = GetRSGroupInfoOfServerResponse.newBuilder() 3132 .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build(); 3133 } else { 3134 resp = GetRSGroupInfoOfServerResponse.getDefaultInstance(); 3135 } 3136 if (server.getMasterCoprocessorHost() != null) { 3137 server.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp); 3138 } 3139 return resp; 3140 } catch (IOException e) { 3141 throw new ServiceException(e); 3142 } 3143 } 3144 3145 @Override 3146 public MoveServersResponse moveServers(RpcController controller, MoveServersRequest request) 3147 throws ServiceException { 3148 Set<Address> hostPorts = Sets.newHashSet(); 3149 MoveServersResponse.Builder builder = MoveServersResponse.newBuilder(); 3150 for (HBaseProtos.ServerName el : request.getServersList()) { 3151 
hostPorts.add(Address.fromParts(el.getHostName(), el.getPort())); 3152 } 3153 LOG.info(server.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup " 3154 + request.getTargetGroup()); 3155 try { 3156 if (server.getMasterCoprocessorHost() != null) { 3157 server.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup()); 3158 } 3159 server.getRSGroupInfoManager().moveServers(hostPorts, request.getTargetGroup()); 3160 if (server.getMasterCoprocessorHost() != null) { 3161 server.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup()); 3162 } 3163 } catch (IOException e) { 3164 throw new ServiceException(e); 3165 } 3166 return builder.build(); 3167 } 3168 3169 @Override 3170 public AddRSGroupResponse addRSGroup(RpcController controller, AddRSGroupRequest request) 3171 throws ServiceException { 3172 AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder(); 3173 LOG.info(server.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName()); 3174 try { 3175 if (server.getMasterCoprocessorHost() != null) { 3176 server.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName()); 3177 } 3178 server.getRSGroupInfoManager().addRSGroup(new RSGroupInfo(request.getRSGroupName())); 3179 if (server.getMasterCoprocessorHost() != null) { 3180 server.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName()); 3181 } 3182 } catch (IOException e) { 3183 throw new ServiceException(e); 3184 } 3185 return builder.build(); 3186 } 3187 3188 @Override 3189 public RemoveRSGroupResponse removeRSGroup(RpcController controller, RemoveRSGroupRequest request) 3190 throws ServiceException { 3191 RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder(); 3192 LOG.info(server.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName()); 3193 try { 3194 if (server.getMasterCoprocessorHost() != null) { 3195 server.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName()); 3196 } 3197 server.getRSGroupInfoManager().removeRSGroup(request.getRSGroupName()); 3198 if (server.getMasterCoprocessorHost() != null) { 3199 server.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName()); 3200 } 3201 } catch (IOException e) { 3202 throw new ServiceException(e); 3203 } 3204 return builder.build(); 3205 } 3206 3207 @Override 3208 public BalanceRSGroupResponse balanceRSGroup(RpcController controller, 3209 BalanceRSGroupRequest request) throws ServiceException { 3210 BalanceRequest balanceRequest = ProtobufUtil.toBalanceRequest(request); 3211 3212 BalanceRSGroupResponse.Builder builder = 3213 BalanceRSGroupResponse.newBuilder().setBalanceRan(false); 3214 3215 LOG.info( 3216 server.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName()); 3217 try { 3218 if (server.getMasterCoprocessorHost() != null) { 3219 server.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName(), 3220 balanceRequest); 3221 } 3222 BalanceResponse response = 3223 server.getRSGroupInfoManager().balanceRSGroup(request.getRSGroupName(), balanceRequest); 3224 ProtobufUtil.populateBalanceRSGroupResponse(builder, response); 3225 if (server.getMasterCoprocessorHost() != null) { 3226 server.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(), 3227 balanceRequest, response); 3228 } 3229 } catch (IOException e) { 3230 throw new ServiceException(e); 3231 } 3232 return builder.build(); 3233 } 3234 3235 @Override 3236 public ListRSGroupInfosResponse listRSGroupInfos(RpcController 
controller, 3237 ListRSGroupInfosRequest request) throws ServiceException { 3238 ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder(); 3239 LOG.info(server.getClientIdAuditPrefix() + " list rsgroup"); 3240 try { 3241 if (server.getMasterCoprocessorHost() != null) { 3242 server.getMasterCoprocessorHost().preListRSGroups(); 3243 } 3244 List<RSGroupInfo> rsGroupInfos = server.getRSGroupInfoManager().listRSGroups().stream() 3245 .map(RSGroupInfo::new).collect(Collectors.toList()); 3246 Map<String, RSGroupInfo> name2Info = new HashMap<>(); 3247 List<TableDescriptor> needToFill = 3248 new ArrayList<>(server.getTableDescriptors().getAll().values()); 3249 for (RSGroupInfo rsGroupInfo : rsGroupInfos) { 3250 name2Info.put(rsGroupInfo.getName(), rsGroupInfo); 3251 for (TableDescriptor td : server.getTableDescriptors().getAll().values()) { 3252 if (rsGroupInfo.containsTable(td.getTableName())) { 3253 needToFill.remove(td); 3254 } 3255 } 3256 } 3257 for (TableDescriptor td : needToFill) { 3258 String groupName = td.getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP); 3259 RSGroupInfo rsGroupInfo = name2Info.get(groupName); 3260 if (rsGroupInfo != null) { 3261 rsGroupInfo.addTable(td.getTableName()); 3262 } 3263 } 3264 for (RSGroupInfo rsGroupInfo : rsGroupInfos) { 3265 // TODO: this can be done at once outside this loop, do not need to scan all every time. 3266 builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)); 3267 } 3268 if (server.getMasterCoprocessorHost() != null) { 3269 server.getMasterCoprocessorHost().postListRSGroups(); 3270 } 3271 } catch (IOException e) { 3272 throw new ServiceException(e); 3273 } 3274 return builder.build(); 3275 } 3276 3277 @Override 3278 public RemoveServersResponse removeServers(RpcController controller, RemoveServersRequest request) 3279 throws ServiceException { 3280 RemoveServersResponse.Builder builder = RemoveServersResponse.newBuilder(); 3281 Set<Address> servers = Sets.newHashSet(); 3282 for (HBaseProtos.ServerName el : request.getServersList()) { 3283 servers.add(Address.fromParts(el.getHostName(), el.getPort())); 3284 } 3285 LOG.info( 3286 server.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " + servers); 3287 try { 3288 if (server.getMasterCoprocessorHost() != null) { 3289 server.getMasterCoprocessorHost().preRemoveServers(servers); 3290 } 3291 server.getRSGroupInfoManager().removeServers(servers); 3292 if (server.getMasterCoprocessorHost() != null) { 3293 server.getMasterCoprocessorHost().postRemoveServers(servers); 3294 } 3295 } catch (IOException e) { 3296 throw new ServiceException(e); 3297 } 3298 return builder.build(); 3299 } 3300 3301 @Override 3302 public ListTablesInRSGroupResponse listTablesInRSGroup(RpcController controller, 3303 ListTablesInRSGroupRequest request) throws ServiceException { 3304 ListTablesInRSGroupResponse.Builder builder = ListTablesInRSGroupResponse.newBuilder(); 3305 String groupName = request.getGroupName(); 3306 LOG.info(server.getClientIdAuditPrefix() + " list tables in rsgroup " + groupName); 3307 try { 3308 if (server.getMasterCoprocessorHost() != null) { 3309 server.getMasterCoprocessorHost().preListTablesInRSGroup(groupName); 3310 } 3311 RSGroupUtil.listTablesInRSGroup(server, groupName).stream() 3312 .map(ProtobufUtil::toProtoTableName).forEach(builder::addTableName); 3313 if (server.getMasterCoprocessorHost() != null) { 3314 server.getMasterCoprocessorHost().postListTablesInRSGroup(groupName); 3315 } 3316 } catch (IOException e) { 3317 throw new 
ServiceException(e); 3318 } 3319 return builder.build(); 3320 } 3321 3322 @Override 3323 public GetConfiguredNamespacesAndTablesInRSGroupResponse 3324 getConfiguredNamespacesAndTablesInRSGroup(RpcController controller, 3325 GetConfiguredNamespacesAndTablesInRSGroupRequest request) throws ServiceException { 3326 GetConfiguredNamespacesAndTablesInRSGroupResponse.Builder builder = 3327 GetConfiguredNamespacesAndTablesInRSGroupResponse.newBuilder(); 3328 String groupName = request.getGroupName(); 3329 LOG.info(server.getClientIdAuditPrefix() + " get configured namespaces and tables in rsgroup " 3330 + groupName); 3331 try { 3332 if (server.getMasterCoprocessorHost() != null) { 3333 server.getMasterCoprocessorHost().preGetConfiguredNamespacesAndTablesInRSGroup(groupName); 3334 } 3335 for (NamespaceDescriptor nd : server.getClusterSchema().getNamespaces()) { 3336 if (groupName.equals(nd.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP))) { 3337 builder.addNamespace(nd.getName()); 3338 } 3339 } 3340 for (TableDescriptor td : server.getTableDescriptors().getAll().values()) { 3341 if (td.getRegionServerGroup().map(g -> g.equals(groupName)).orElse(false)) { 3342 builder.addTableName(ProtobufUtil.toProtoTableName(td.getTableName())); 3343 } 3344 } 3345 if (server.getMasterCoprocessorHost() != null) { 3346 server.getMasterCoprocessorHost().postGetConfiguredNamespacesAndTablesInRSGroup(groupName); 3347 } 3348 } catch (IOException e) { 3349 throw new ServiceException(e); 3350 } 3351 return builder.build(); 3352 } 3353 3354 @Override 3355 public RenameRSGroupResponse renameRSGroup(RpcController controller, RenameRSGroupRequest request) 3356 throws ServiceException { 3357 RenameRSGroupResponse.Builder builder = RenameRSGroupResponse.newBuilder(); 3358 String oldRSGroup = request.getOldRsgroupName(); 3359 String newRSGroup = request.getNewRsgroupName(); 3360 LOG.info("{} rename rsgroup from {} to {} ", server.getClientIdAuditPrefix(), oldRSGroup, 3361 newRSGroup); 3362 try { 3363 if (server.getMasterCoprocessorHost() != null) { 3364 server.getMasterCoprocessorHost().preRenameRSGroup(oldRSGroup, newRSGroup); 3365 } 3366 server.getRSGroupInfoManager().renameRSGroup(oldRSGroup, newRSGroup); 3367 if (server.getMasterCoprocessorHost() != null) { 3368 server.getMasterCoprocessorHost().postRenameRSGroup(oldRSGroup, newRSGroup); 3369 } 3370 } catch (IOException e) { 3371 throw new ServiceException(e); 3372 } 3373 return builder.build(); 3374 } 3375 3376 @Override 3377 public UpdateRSGroupConfigResponse updateRSGroupConfig(RpcController controller, 3378 UpdateRSGroupConfigRequest request) throws ServiceException { 3379 UpdateRSGroupConfigResponse.Builder builder = UpdateRSGroupConfigResponse.newBuilder(); 3380 String groupName = request.getGroupName(); 3381 Map<String, String> configuration = new HashMap<>(); 3382 request.getConfigurationList().forEach(p -> configuration.put(p.getName(), p.getValue())); 3383 LOG.info("{} update rsgroup {} configuration {}", server.getClientIdAuditPrefix(), groupName, 3384 configuration); 3385 try { 3386 if (server.getMasterCoprocessorHost() != null) { 3387 server.getMasterCoprocessorHost().preUpdateRSGroupConfig(groupName, configuration); 3388 } 3389 server.getRSGroupInfoManager().updateRSGroupConfig(groupName, configuration); 3390 if (server.getMasterCoprocessorHost() != null) { 3391 server.getMasterCoprocessorHost().postUpdateRSGroupConfig(groupName, configuration); 3392 } 3393 } catch (IOException e) { 3394 throw new ServiceException(e); 3395 } 3396 return 
builder.build(); 3397 } 3398 3399 @Override 3400 public HBaseProtos.LogEntry getLogEntries(RpcController controller, 3401 HBaseProtos.LogRequest request) throws ServiceException { 3402 try { 3403 final String logClassName = request.getLogClassName(); 3404 Class<?> logClass = Class.forName(logClassName).asSubclass(Message.class); 3405 Method method = logClass.getMethod("parseFrom", ByteString.class); 3406 if (logClassName.contains("BalancerDecisionsRequest")) { 3407 MasterProtos.BalancerDecisionsRequest balancerDecisionsRequest = 3408 (MasterProtos.BalancerDecisionsRequest) method.invoke(null, request.getLogMessage()); 3409 MasterProtos.BalancerDecisionsResponse balancerDecisionsResponse = 3410 getBalancerDecisions(balancerDecisionsRequest); 3411 return HBaseProtos.LogEntry.newBuilder() 3412 .setLogClassName(balancerDecisionsResponse.getClass().getName()) 3413 .setLogMessage(balancerDecisionsResponse.toByteString()).build(); 3414 } else if (logClassName.contains("BalancerRejectionsRequest")) { 3415 MasterProtos.BalancerRejectionsRequest balancerRejectionsRequest = 3416 (MasterProtos.BalancerRejectionsRequest) method.invoke(null, request.getLogMessage()); 3417 MasterProtos.BalancerRejectionsResponse balancerRejectionsResponse = 3418 getBalancerRejections(balancerRejectionsRequest); 3419 return HBaseProtos.LogEntry.newBuilder() 3420 .setLogClassName(balancerRejectionsResponse.getClass().getName()) 3421 .setLogMessage(balancerRejectionsResponse.toByteString()).build(); 3422 } 3423 } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException 3424 | InvocationTargetException e) { 3425 LOG.error("Error while retrieving log entries.", e); 3426 throw new ServiceException(e); 3427 } 3428 throw new ServiceException("Invalid request params"); 3429 } 3430 3431 private MasterProtos.BalancerDecisionsResponse 3432 getBalancerDecisions(MasterProtos.BalancerDecisionsRequest request) { 3433 final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder(); 3434 if (namedQueueRecorder == null) { 3435 return MasterProtos.BalancerDecisionsResponse.newBuilder() 3436 .addAllBalancerDecision(Collections.emptyList()).build(); 3437 } 3438 final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest(); 3439 namedQueueGetRequest.setNamedQueueEvent(BalancerDecisionDetails.BALANCER_DECISION_EVENT); 3440 namedQueueGetRequest.setBalancerDecisionsRequest(request); 3441 NamedQueueGetResponse namedQueueGetResponse = 3442 namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest); 3443 List<RecentLogs.BalancerDecision> balancerDecisions = namedQueueGetResponse != null 3444 ? 
namedQueueGetResponse.getBalancerDecisions() 3445 : Collections.emptyList(); 3446 return MasterProtos.BalancerDecisionsResponse.newBuilder() 3447 .addAllBalancerDecision(balancerDecisions).build(); 3448 } 3449 3450 private MasterProtos.BalancerRejectionsResponse 3451 getBalancerRejections(MasterProtos.BalancerRejectionsRequest request) { 3452 final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder(); 3453 if (namedQueueRecorder == null) { 3454 return MasterProtos.BalancerRejectionsResponse.newBuilder() 3455 .addAllBalancerRejection(Collections.emptyList()).build(); 3456 } 3457 final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest(); 3458 namedQueueGetRequest.setNamedQueueEvent(BalancerRejectionDetails.BALANCER_REJECTION_EVENT); 3459 namedQueueGetRequest.setBalancerRejectionsRequest(request); 3460 NamedQueueGetResponse namedQueueGetResponse = 3461 namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest); 3462 List<RecentLogs.BalancerRejection> balancerRejections = namedQueueGetResponse != null 3463 ? namedQueueGetResponse.getBalancerRejections() 3464 : Collections.emptyList(); 3465 return MasterProtos.BalancerRejectionsResponse.newBuilder() 3466 .addAllBalancerRejection(balancerRejections).build(); 3467 } 3468 3469 @Override 3470 @QosPriority(priority = HConstants.ADMIN_QOS) 3471 public GetRegionInfoResponse getRegionInfo(final RpcController controller, 3472 final GetRegionInfoRequest request) throws ServiceException { 3473 final GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); 3474 final RegionInfo info = getRegionInfo(request.getRegion()); 3475 if (info != null) { 3476 builder.setRegionInfo(ProtobufUtil.toRegionInfo(info)); 3477 } else { 3478 // Is it a MOB name? These work differently. 3479 byte[] regionName = request.getRegion().getValue().toByteArray(); 3480 TableName tableName = RegionInfo.getTable(regionName); 3481 if (MobUtils.isMobRegionName(tableName, regionName)) { 3482 // a dummy region info contains the compaction state. 3483 RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName); 3484 builder.setRegionInfo(ProtobufUtil.toRegionInfo(mobRegionInfo)); 3485 if (request.hasCompactionState() && request.getCompactionState()) { 3486 builder.setCompactionState(server.getMobCompactionState(tableName)); 3487 } 3488 } else { 3489 // If unknown RegionInfo and not a MOB region, it is unknown. 
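        // Descriptive note: the MOB branch above exists because MOB regions are synthetic, derived
        // from the table name via MobUtils.getMobRegionInfo(), and are not resolved by the private
        // getRegionInfo(RegionSpecifier) helper (see its TODO about MOB regions).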
3490 throw new ServiceException(new UnknownRegionException(Bytes.toString(regionName))); 3491 } 3492 } 3493 return builder.build(); 3494 } 3495 3496 @Override 3497 public GetStoreFileResponse getStoreFile(RpcController controller, GetStoreFileRequest request) 3498 throws ServiceException { 3499 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3500 } 3501 3502 @Override 3503 public GetOnlineRegionResponse getOnlineRegion(RpcController controller, 3504 GetOnlineRegionRequest request) throws ServiceException { 3505 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3506 } 3507 3508 @Override 3509 public OpenRegionResponse openRegion(RpcController controller, OpenRegionRequest request) 3510 throws ServiceException { 3511 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3512 } 3513 3514 @Override 3515 public WarmupRegionResponse warmupRegion(RpcController controller, WarmupRegionRequest request) 3516 throws ServiceException { 3517 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3518 } 3519 3520 @Override 3521 public CloseRegionResponse closeRegion(RpcController controller, CloseRegionRequest request) 3522 throws ServiceException { 3523 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3524 } 3525 3526 @Override 3527 public FlushRegionResponse flushRegion(RpcController controller, FlushRegionRequest request) 3528 throws ServiceException { 3529 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3530 } 3531 3532 @Override 3533 public CompactionSwitchResponse compactionSwitch(RpcController controller, 3534 CompactionSwitchRequest request) throws ServiceException { 3535 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3536 } 3537 3538 @Override 3539 public CompactRegionResponse compactRegion(RpcController controller, CompactRegionRequest request) 3540 throws ServiceException { 3541 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3542 } 3543 3544 @Override 3545 public ReplicateWALEntryResponse replicateWALEntry(RpcController controller, 3546 ReplicateWALEntryRequest request) throws ServiceException { 3547 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3548 } 3549 3550 @Override 3551 public ReplicateWALEntryResponse replay(RpcController controller, 3552 ReplicateWALEntryRequest request) throws ServiceException { 3553 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3554 } 3555 3556 @Override 3557 public RollWALWriterResponse rollWALWriter(RpcController controller, RollWALWriterRequest request) 3558 throws ServiceException { 3559 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3560 } 3561 3562 @Override 3563 public GetServerInfoResponse getServerInfo(RpcController controller, GetServerInfoRequest request) 3564 throws ServiceException { 3565 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3566 } 3567 3568 @Override 3569 public StopServerResponse stopServer(RpcController controller, StopServerRequest request) 3570 throws ServiceException { 3571 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3572 } 3573 3574 @Override 3575 public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller, 3576 
UpdateFavoredNodesRequest request) throws ServiceException { 3577 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3578 } 3579 3580 @Override 3581 public GetRegionLoadResponse getRegionLoad(RpcController controller, GetRegionLoadRequest request) 3582 throws ServiceException { 3583 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3584 } 3585 3586 @Override 3587 public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller, 3588 ClearCompactionQueuesRequest request) throws ServiceException { 3589 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3590 } 3591 3592 @Override 3593 public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller, 3594 ClearRegionBlockCacheRequest request) throws ServiceException { 3595 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3596 } 3597 3598 @Override 3599 public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(RpcController controller, 3600 GetSpaceQuotaSnapshotsRequest request) throws ServiceException { 3601 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3602 } 3603 3604 @Override 3605 public ExecuteProceduresResponse executeProcedures(RpcController controller, 3606 ExecuteProceduresRequest request) throws ServiceException { 3607 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3608 } 3609 3610 @Override 3611 public GetCachedFilesListResponse getCachedFilesList(RpcController controller, 3612 GetCachedFilesListRequest request) throws ServiceException { 3613 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3614 } 3615 3616 @Override 3617 public GetLiveRegionServersResponse getLiveRegionServers(RpcController controller, 3618 GetLiveRegionServersRequest request) throws ServiceException { 3619 List<ServerName> regionServers = new ArrayList<>(server.getLiveRegionServers()); 3620 Collections.shuffle(regionServers, ThreadLocalRandom.current()); 3621 GetLiveRegionServersResponse.Builder builder = 3622 GetLiveRegionServersResponse.newBuilder().setTotal(regionServers.size()); 3623 regionServers.stream().limit(request.getCount()).map(ProtobufUtil::toServerName) 3624 .forEach(builder::addServer); 3625 return builder.build(); 3626 } 3627 3628 @Override 3629 public ReplicateWALEntryResponse replicateToReplica(RpcController controller, 3630 ReplicateWALEntryRequest request) throws ServiceException { 3631 throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); 3632 } 3633 3634 @Override 3635 public FlushMasterStoreResponse flushMasterStore(RpcController controller, 3636 FlushMasterStoreRequest request) throws ServiceException { 3637 rpcPreCheck("flushMasterStore"); 3638 try { 3639 server.flushMasterStore(); 3640 } catch (IOException ioe) { 3641 throw new ServiceException(ioe); 3642 } 3643 return FlushMasterStoreResponse.newBuilder().build(); 3644 } 3645 3646 @Override 3647 public FlushTableResponse flushTable(RpcController controller, FlushTableRequest req) 3648 throws ServiceException { 3649 TableName tableName = ProtobufUtil.toTableName(req.getTableName()); 3650 List<byte[]> columnFamilies = req.getColumnFamilyCount() > 0 3651 ? 
req.getColumnFamilyList().stream().filter(cf -> !cf.isEmpty()).map(ByteString::toByteArray) 3652 .collect(Collectors.toList()) 3653 : null; 3654 try { 3655 long procId = 3656 server.flushTable(tableName, columnFamilies, req.getNonceGroup(), req.getNonce()); 3657 return FlushTableResponse.newBuilder().setProcId(procId).build(); 3658 } catch (IOException ioe) { 3659 throw new ServiceException(ioe); 3660 } 3661 } 3662}