001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.master; 019 020import static org.apache.hadoop.hbase.master.MasterWalManager.META_FILTER; 021 022import java.io.FileNotFoundException; 023import java.io.IOException; 024import java.lang.reflect.InvocationTargetException; 025import java.lang.reflect.Method; 026import java.net.BindException; 027import java.net.InetAddress; 028import java.net.InetSocketAddress; 029import java.util.ArrayList; 030import java.util.Collections; 031import java.util.HashMap; 032import java.util.HashSet; 033import java.util.List; 034import java.util.Map; 035import java.util.Map.Entry; 036import java.util.Optional; 037import java.util.Set; 038import java.util.stream.Collectors; 039import org.apache.hadoop.conf.Configuration; 040import org.apache.hadoop.fs.Path; 041import org.apache.hadoop.hbase.ClusterMetricsBuilder; 042import org.apache.hadoop.hbase.DoNotRetryIOException; 043import org.apache.hadoop.hbase.HConstants; 044import org.apache.hadoop.hbase.HRegionLocation; 045import org.apache.hadoop.hbase.MetaTableAccessor; 046import org.apache.hadoop.hbase.NamespaceDescriptor; 047import org.apache.hadoop.hbase.Server; 048import 
org.apache.hadoop.hbase.ServerMetrics; 049import org.apache.hadoop.hbase.ServerMetricsBuilder; 050import org.apache.hadoop.hbase.ServerName; 051import org.apache.hadoop.hbase.TableName; 052import org.apache.hadoop.hbase.UnknownRegionException; 053import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; 054import org.apache.hadoop.hbase.client.MasterSwitchType; 055import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; 056import org.apache.hadoop.hbase.client.Put; 057import org.apache.hadoop.hbase.client.RegionInfo; 058import org.apache.hadoop.hbase.client.RegionInfoBuilder; 059import org.apache.hadoop.hbase.client.Table; 060import org.apache.hadoop.hbase.client.TableDescriptor; 061import org.apache.hadoop.hbase.client.TableState; 062import org.apache.hadoop.hbase.client.VersionInfoUtil; 063import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; 064import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; 065import org.apache.hadoop.hbase.errorhandling.ForeignException; 066import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; 067import org.apache.hadoop.hbase.io.ByteBuffAllocator; 068import org.apache.hadoop.hbase.io.hfile.HFile; 069import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; 070import org.apache.hadoop.hbase.ipc.PriorityFunction; 071import org.apache.hadoop.hbase.ipc.QosPriority; 072import org.apache.hadoop.hbase.ipc.RpcServer; 073import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; 074import org.apache.hadoop.hbase.ipc.RpcServerFactory; 075import org.apache.hadoop.hbase.ipc.RpcServerInterface; 076import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; 077import org.apache.hadoop.hbase.ipc.ServerRpcController; 078import org.apache.hadoop.hbase.master.assignment.RegionStates; 079import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; 080import org.apache.hadoop.hbase.master.janitor.MetaFixer; 081import 
org.apache.hadoop.hbase.master.locking.LockProcedure; 082import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; 083import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; 084import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable; 085import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; 086import org.apache.hadoop.hbase.mob.MobUtils; 087import org.apache.hadoop.hbase.namequeues.BalancerDecisionDetails; 088import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; 089import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest; 090import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse; 091import org.apache.hadoop.hbase.procedure.MasterProcedureManager; 092import org.apache.hadoop.hbase.procedure2.LockType; 093import org.apache.hadoop.hbase.procedure2.LockedResource; 094import org.apache.hadoop.hbase.procedure2.Procedure; 095import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; 096import org.apache.hadoop.hbase.procedure2.ProcedureUtil; 097import org.apache.hadoop.hbase.procedure2.RemoteProcedureException; 098import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; 099import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService; 100import org.apache.hadoop.hbase.quotas.MasterQuotaManager; 101import org.apache.hadoop.hbase.quotas.QuotaObserverChore; 102import org.apache.hadoop.hbase.quotas.QuotaUtil; 103import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot; 104import org.apache.hadoop.hbase.regionserver.RSRpcServices; 105import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory; 106import org.apache.hadoop.hbase.replication.ReplicationException; 107import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; 108import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; 109import org.apache.hadoop.hbase.security.Superusers; 110import 
org.apache.hadoop.hbase.security.User; 111import org.apache.hadoop.hbase.security.access.AccessChecker; 112import org.apache.hadoop.hbase.security.access.AccessChecker.InputUser; 113import org.apache.hadoop.hbase.security.access.AccessController; 114import org.apache.hadoop.hbase.security.access.Permission; 115import org.apache.hadoop.hbase.security.access.Permission.Action; 116import org.apache.hadoop.hbase.security.access.PermissionStorage; 117import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil; 118import org.apache.hadoop.hbase.security.access.UserPermission; 119import org.apache.hadoop.hbase.security.visibility.VisibilityController; 120import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; 121import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; 122import org.apache.hadoop.hbase.util.Bytes; 123import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; 124import org.apache.hadoop.hbase.util.ForeignExceptionUtil; 125import org.apache.hadoop.hbase.util.Pair; 126import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; 127import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; 128import org.apache.yetus.audience.InterfaceAudience; 129import org.apache.zookeeper.KeeperException; 130import org.slf4j.Logger; 131import org.slf4j.LoggerFactory; 132 133import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; 134import org.apache.hbase.thirdparty.com.google.protobuf.Message; 135import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; 136import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; 137import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; 138 139import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; 140import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; 141import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos; 142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsRequest; 143import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse; 144import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest; 145import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantResponse; 146import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest; 147import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsResponse; 148import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type; 149import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest; 150import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeResponse; 151import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; 152import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; 153import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; 154import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; 155import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; 156import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; 157import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; 158import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; 159import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair; 160import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; 161import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; 162import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; 163import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; 164import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest; 165import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse; 166import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest; 167import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse; 168import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService; 169import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; 170import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; 171import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; 172import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; 173import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; 174import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; 175import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; 176import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; 177import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; 178import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest; 179import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse; 180import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService; 181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; 182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; 183import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; 184import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; 185import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest; 186import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse; 187import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; 188import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; 189import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; 190import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; 191import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; 192import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse; 193import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; 194import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; 195import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; 196import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; 197import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest; 198import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse; 199import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; 200import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; 201import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; 202import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; 203import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaRequest; 204import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaResponse; 205import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetActiveMasterRequest; 206import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetActiveMasterResponse; 207import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterIdRequest; 208import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterIdResponse; 209import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest; 210import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse; 211import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; 212import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; 213import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest; 214import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse; 215import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMastersRequest; 216import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMastersResponse; 217import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMastersResponseEntry; 218import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest; 219import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse; 220import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; 221import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; 222import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; 223import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; 224import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest; 225import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse; 226import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; 227import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; 228import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; 229import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; 230import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; 231import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse; 232import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest; 233import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse; 234import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService; 235import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; 236import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; 237import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest; 238import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse; 239import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest; 240import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse; 241import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest; 242import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse; 243import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest; 244import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse; 245import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest; 246import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse; 247import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest; 248import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; 249import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos 250 .IsSnapshotCleanupEnabledRequest; 251import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos 252 .IsSnapshotCleanupEnabledResponse; 253import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; 254import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; 255import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest; 256import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse; 257import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest; 258import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse; 259import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; 260import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse; 261import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesRequest; 262import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesResponse; 263import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest; 264import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse; 265import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest; 266import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse; 267import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest; 268import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest; 269import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse; 270import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; 271import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; 272import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse; 273import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest; 274import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse; 275import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest; 276import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse; 277import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest; 278import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse; 279import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest; 280import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse; 281import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest; 282import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse; 283import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest; 284import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse; 285import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest; 286import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse; 287import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState; 288import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; 289import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; 290import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest; 291import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse; 292import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest; 293import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse; 294import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreRequest; 295import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunHbckChoreResponse; 296import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest; 297import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse; 298import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest; 299import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse; 300import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest; 301import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse; 302import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; 303import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse; 304import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; 305import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse; 306import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest; 307import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaResponse; 308import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest; 309import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse; 310import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest; 311import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse; 312import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest; 313import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest; 314import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse; 315import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest; 316import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse; 317import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest; 318import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse; 319import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest; 320import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse; 321import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaRequest; 322import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaResponse; 323import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleRequest; 324import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleResponse; 325import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; 326import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; 327import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; 328import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse; 329import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest; 330import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse; 331import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.NamespaceQuotaSnapshot; 332import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.TableQuotaSnapshot; 333import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest; 334import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse; 335import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes; 336import org.apache.hadoop.hbase.shaded.protobuf.generated.RecentLogs; 337import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest; 338import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse; 339import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; 340import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; 341import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; 342import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse; 343import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; 344import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; 345import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; 346import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse; 347import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest; 348import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse; 349import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult; 350import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest; 351import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse; 352import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest; 353import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse; 354import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; 355import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; 356import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest; 357import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse; 358import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest; 359import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse; 360import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest; 
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;

/**
 * Implements the master RPC services.
 */
@InterfaceAudience.Private
@SuppressWarnings("deprecation")
public class MasterRpcServices extends RSRpcServices implements
    MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
    LockService.BlockingInterface, HbckService.BlockingInterface,
    ClientMetaService.BlockingInterface {
  private static final Logger LOG = LoggerFactory.getLogger(MasterRpcServices.class.getName());
  // Dedicated logger for security-relevant events; routed under the "SecurityLogger." prefix
  // so deployments can direct audit output to a separate appender.
  private static final Logger AUDITLOG =
      LoggerFactory.getLogger("SecurityLogger."+MasterRpcServices.class.getName());

  // The master this RPC service fronts; set once in the constructor.
  private final HMaster master;

  /**
   * @return Subset of configuration to pass initializing regionservers: e.g.
   * the filesystem to use and root directory to use.
   */
  private RegionServerStartupResponse.Builder createConfigurationSubset() {
    RegionServerStartupResponse.Builder resp = addConfig(
        RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR);
    resp = addConfig(resp, "fs.defaultFS");
    return addConfig(resp, "hbase.master.info.port");
  }

  /**
   * Adds the master's current value for {@code key} to the startup response as a
   * name/value map entry.
   * @param resp builder being populated for the starting regionserver
   * @param key configuration key to copy from the master's configuration
   * @return the same builder, for chaining
   */
  private RegionServerStartupResponse.Builder addConfig(
      final RegionServerStartupResponse.Builder resp, final String key) {
    NameStringPair.Builder entry = NameStringPair.newBuilder()
        .setName(key)
        .setValue(master.getConfiguration().get(key));
    resp.addMapEntries(entry.build());
    return resp;
  }

  /**
   * @param m the master this RPC service serves
   * @throws IOException if the superclass (regionserver RPC services) fails to initialize
   */
  public MasterRpcServices(HMaster m) throws IOException {
    super(m);
    master = m;
  }

  @Override
  protected Class<?> getRpcSchedulerFactoryClass() {
    // Prefer a master-specific scheduler factory when configured; otherwise fall back
    // to the regionserver default from the superclass.
    Configuration conf = getConfiguration();
    if (conf != null) {
      return conf.getClass(MASTER_RPC_SCHEDULER_FACTORY_CLASS, super.getRpcSchedulerFactoryClass());
    } else {
      return super.getRpcSchedulerFactoryClass();
    }
  }

  @Override
  protected RpcServerInterface createRpcServer(final Server server,
      final RpcSchedulerFactory rpcSchedulerFactory, final InetSocketAddress bindAddress,
      final String name) throws IOException {
    final Configuration conf = regionServer.getConfiguration();
    // RpcServer at HM by default enable ByteBufferPool iff HM having user table region in it
    boolean reservoirEnabled = conf.getBoolean(ByteBuffAllocator.ALLOCATOR_POOL_ENABLED_KEY,
        LoadBalancer.isMasterCanHostUserRegions(conf));
    try {
      return RpcServerFactory.createRpcServer(server, name, getServices(),
          bindAddress, // use final bindAddress for this server.
          conf, rpcSchedulerFactory.create(conf, this, server), reservoirEnabled);
    } catch (BindException be) {
      // Re-wrap with a hint about the relevant config key; keep the original cause if present.
      throw new IOException(be.getMessage() + ". To switch ports use the '"
          + HConstants.MASTER_PORT + "' configuration property.",
          be.getCause() != null ? be.getCause() : be);
    }
  }

  /** @return the master-specific annotation-reading priority function for RPC QoS. */
  @Override
  protected PriorityFunction createPriority() {
    return new MasterAnnotationReadingPriorityFunction(this);
  }

  /**
   * Checks for the following pre-checks in order:
   * <ol>
   * <li>Master is initialized</li>
   * <li>Rpc caller has admin permissions</li>
   * </ol>
   * @param requestName name of rpc request. Used in reporting failures to provide context.
   * @throws ServiceException If any of the above listed pre-check fails.
   */
  private void rpcPreCheck(String requestName) throws ServiceException {
    try {
      master.checkInitialized();
      requirePermission(requestName, Permission.Action.ADMIN);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Whether a balancer-switch flip waits on the balancer lock (SYNC) or not (ASYNC). */
  enum BalanceSwitchMode {
    SYNC,
    ASYNC
  }

  /**
   * Assigns balancer switch according to BalanceSwitchMode
   * @param b new balancer switch
   * @param mode BalanceSwitchMode
   * @return old balancer switch
   */
  boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException {
    boolean oldValue = master.loadBalancerTracker.isBalancerOn();
    boolean newValue = b;
    // NOTE(review): any IOException below (including the wrapped KeeperException) is caught
    // and only logged, so failures are swallowed and the old value is still returned; the
    // declared `throws IOException` is never exercised by this body — confirm intent.
    try {
      if (master.cpHost != null) {
        master.cpHost.preBalanceSwitch(newValue);
      }
      try {
        if (mode == BalanceSwitchMode.SYNC) {
          // Serialize the flip against the balancer itself when a synchronous switch is asked for.
          synchronized (master.getLoadBalancer()) {
            master.loadBalancerTracker.setBalancerOn(newValue);
          }
        } else {
          master.loadBalancerTracker.setBalancerOn(newValue);
        }
      } catch (KeeperException ke) {
        throw new IOException(ke);
      }
      LOG.info(master.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue);
      if (master.cpHost != null) {
        master.cpHost.postBalanceSwitch(oldValue, newValue);
      }
      master.getLoadBalancer().updateBalancerStatus(newValue);
    } catch (IOException ioe) {
      LOG.warn("Error flipping balance switch", ioe);
    }
    return oldValue;
  }

  /**
   * Flips the balancer switch synchronously (under the balancer lock).
   * @param b new balancer switch value
   * @return the previous balancer switch value
   */
  boolean synchronousBalanceSwitch(final boolean b) throws IOException {
    return switchBalancer(b, BalanceSwitchMode.SYNC);
  }

  /**
   * @return list of blocking services and their security info classes that this server supports
   */
  @Override
  protected List<BlockingServiceAndInterface> getServices() {
    List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
    bssi.add(new BlockingServiceAndInterface(
        MasterService.newReflectiveBlockingService(this),
        MasterService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(
        RegionServerStatusService.newReflectiveBlockingService(this),
        RegionServerStatusService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this),
        LockService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(HbckService.newReflectiveBlockingService(this),
        HbckService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this),
        ClientMetaService.BlockingInterface.class));
    // Master also serves the regionserver-level services it inherits.
    bssi.addAll(super.getServices());
    return bssi;
  }

  /**
   * Returns the last flushed sequence ids recorded for the region named in the request.
   * Requires only that services have started (not full master initialization).
   */
  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller,
      GetLastFlushedSequenceIdRequest request) throws ServiceException {
    try {
      master.checkServiceStarted();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    // presumably request.getRegionName() carries the encoded region name bytes — the
    // ServerManager lookup below is keyed on it; verify against callers.
    byte[] encodedRegionName = request.getRegionName().toByteArray();
    RegionStoreSequenceIds ids = master.getServerManager()
        .getLastFlushedSequenceId(encodedRegionName);
    return ResponseConverter.buildGetLastFlushedSequenceIdResponse(ids);
  }

  @Override
  public RegionServerReportResponse regionServerReport(RpcController controller,
      RegionServerReportRequest request) throws ServiceException {
    try {
      master.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      ClusterStatusProtos.ServerLoad sl = request.getLoad();
      ServerName serverName = ProtobufUtil.toServerName(request.getServer());
      ServerMetrics oldLoad = master.getServerManager().getLoad(serverName);
      ServerMetrics newLoad =
        ServerMetricsBuilder.toServerMetrics(serverName, versionNumber, version, sl);
      master.getServerManager().regionServerReport(serverName, newLoad);
      master.getAssignmentManager().reportOnlineRegions(serverName,
        newLoad.getRegionMetrics().keySet());
      if (sl != null && master.metricsMaster != null) {
        // Up our metrics. Count only the requests since the previous report (delta
        // against the previously stored load, if any).
        master.metricsMaster.incrementRequests(
          sl.getTotalNumberOfRequests() - (oldLoad != null ? oldLoad.getRequestCount() : 0));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return RegionServerReportResponse.newBuilder().build();
  }

  /**
   * Handles the initial handshake from a starting regionserver: registers it with the
   * ServerManager and returns the subset of master configuration the regionserver needs,
   * plus the hostname the master saw the regionserver connect from.
   */
  @Override
  public RegionServerStartupResponse regionServerStartup(RpcController controller,
      RegionServerStartupRequest request) throws ServiceException {
    // Register with server manager
    try {
      master.checkServiceStarted();
      int versionNumber = 0;
      String version = "0.0.0";
      VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
      if (versionInfo != null) {
        version = versionInfo.getVersion();
        versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
      }
      InetAddress ia = master.getRemoteInetAddress(request.getPort(), request.getServerStartCode());
      // if regionserver passed hostname to use,
      // then use it instead of doing a reverse DNS lookup
      ServerName rs =
        master.getServerManager().regionServerStartup(request, versionNumber, version, ia);

      // Send back some config info
      RegionServerStartupResponse.Builder resp = createConfigurationSubset();
      NameStringPair.Builder entry = NameStringPair.newBuilder()
        .setName(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER).setValue(rs.getHostname());
      resp.addMapEntries(entry.build());

      return resp.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Records a fatal error reported by a regionserver: logs it and appends it to the
   * master's {@code rsFatals} buffer.
   */
  @Override
  public ReportRSFatalErrorResponse reportRSFatalError(
      RpcController controller, ReportRSFatalErrorRequest request) throws ServiceException {
    String errorText = request.getErrorMessage();
    ServerName sn = ProtobufUtil.toServerName(request.getServer());
    String msg = sn + " reported a fatal error:\n" + errorText;
    LOG.warn(msg);
    master.rsFatals.add(msg);
    return ReportRSFatalErrorResponse.newBuilder().build();
  }

  /**
   * Adds a column family to an existing table via a master procedure. A procId of -1
   * from the master means no procedure was scheduled, so none is set in the response.
   */
  @Override
  public AddColumnResponse addColumn(RpcController controller,
      AddColumnRequest req) throws ServiceException {
    try {
      long procId = master.addColumn(
        ProtobufUtil.toTableName(req.getTableName()),
        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
        req.getNonceGroup(),
        req.getNonce());
      if (procId == -1) {
        // This mean operation was not performed in server, so do not set any procId
        return AddColumnResponse.newBuilder().build();
      } else {
        return AddColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Force-assigns the region named in the request, invoking assignment coprocessor
   * hooks around the assignment.
   */
  @Override
  public AssignRegionResponse assignRegion(RpcController controller,
      AssignRegionRequest req) throws ServiceException {
    try {
      master.checkInitialized();

      final RegionSpecifierType type = req.getRegion().getType();
      if (type != RegionSpecifierType.REGION_NAME) {
        // Tolerated, but logged: the specifier is decoded as a full region name below.
        LOG.warn("assignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
          + " actual: " + type);
      }

      final byte[] regionName = req.getRegion().getValue().toByteArray();
      final RegionInfo regionInfo = master.getAssignmentManager().getRegionInfo(regionName);
      if (regionInfo == null) {
        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
      }

      final AssignRegionResponse arr = AssignRegionResponse.newBuilder().build();
      if (master.cpHost != null) {
        master.cpHost.preAssign(regionInfo);
      }
      LOG.info(master.getClientIdAuditPrefix() + " assign " + regionInfo.getRegionNameAsString());
      master.getAssignmentManager().assign(regionInfo);
      if (master.cpHost != null) {
        master.cpHost.postAssign(regionInfo);
      }
      return arr;
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }


  /**
   * Runs the load balancer; {@code force} defaults to false when absent from the request.
   */
  @Override
  public BalanceResponse balance(RpcController controller,
      BalanceRequest request) throws ServiceException {
    try {
      return BalanceResponse.newBuilder().setBalancerRan(master.balance(
        request.hasForce()? request.getForce(): false)).build();
    } catch (IOException ex) {
      throw new ServiceException(ex);
    }
  }

  /** Schedules a create-namespace procedure and returns its procId. */
  @Override
  public CreateNamespaceResponse createNamespace(RpcController controller,
      CreateNamespaceRequest request) throws ServiceException {
    try {
      long procId = master.createNamespace(
        ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
        request.getNonceGroup(),
        request.getNonce());
      return CreateNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Schedules a create-table procedure (with optional split keys) and returns its procId. */
  @Override
  public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
      throws ServiceException {
    TableDescriptor tableDescriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema());
    byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
    try {
      long procId =
        master.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
      LOG.info(master.getClientIdAuditPrefix() + " procedure request for creating table: " +
        req.getTableSchema().getTableName() + " procId is: " + procId);
      return CreateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Deletes a column family from a table via a master procedure. A procId of -1 from the
   * master means no procedure was scheduled, so none is set in the response.
   */
  @Override
  public DeleteColumnResponse deleteColumn(RpcController controller,
      DeleteColumnRequest req) throws ServiceException {
    try {
      long procId = master.deleteColumn(
        ProtobufUtil.toTableName(req.getTableName()),
        req.getColumnName().toByteArray(),
        req.getNonceGroup(),
        req.getNonce());
      if (procId == -1) {
        // This mean operation was not performed in server, so do not set any procId
        return DeleteColumnResponse.newBuilder().build();
      } else {
        return DeleteColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Schedules a delete-namespace procedure and returns its procId. */
  @Override
  public DeleteNamespaceResponse deleteNamespace(RpcController controller,
      DeleteNamespaceRequest request) throws ServiceException {
    try {
      long procId = master.deleteNamespace(
        request.getNamespaceName(),
        request.getNonceGroup(),
        request.getNonce());
      return DeleteNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Execute Delete Snapshot operation.
   * @return DeleteSnapshotResponse (a protobuf wrapped void) if the snapshot existed and was
   * deleted properly.
   * @throws ServiceException wrapping SnapshotDoesNotExistException if specified snapshot did not
   * exist.
   */
  @Override
  public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
      DeleteSnapshotRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      master.snapshotManager.checkSnapshotSupport();

      LOG.info(master.getClientIdAuditPrefix() + " delete " + request.getSnapshot());
      master.snapshotManager.deleteSnapshot(request.getSnapshot());
      return DeleteSnapshotResponse.newBuilder().build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Schedules a delete-table procedure and returns its procId. */
  @Override
  public DeleteTableResponse deleteTable(RpcController controller,
      DeleteTableRequest request) throws ServiceException {
    try {
      long procId = master.deleteTable(ProtobufUtil.toTableName(
        request.getTableName()), request.getNonceGroup(), request.getNonce());
      return DeleteTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Schedules a truncate-table procedure (optionally preserving splits) and returns its procId. */
  @Override
  public TruncateTableResponse truncateTable(RpcController controller, TruncateTableRequest request)
      throws ServiceException {
    try {
      long procId = master.truncateTable(
        ProtobufUtil.toTableName(request.getTableName()),
        request.getPreserveSplits(),
        request.getNonceGroup(),
        request.getNonce());
      return TruncateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Schedules a disable-table procedure and returns its procId. */
  @Override
  public DisableTableResponse disableTable(RpcController controller,
      DisableTableRequest request) throws ServiceException {
    try {
      long procId = master.disableTable(
        ProtobufUtil.toTableName(request.getTableName()),
        request.getNonceGroup(),
        request.getNonce());
      return DisableTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Enables/disables the catalog janitor chore. Requires admin permission; returns the
   * previous enabled state.
   */
  @Override
  public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
      EnableCatalogJanitorRequest req) throws ServiceException {
    rpcPreCheck("enableCatalogJanitor");
    return EnableCatalogJanitorResponse.newBuilder().setPrevValue(
      master.catalogJanitorChore.setEnabled(req.getEnable())).build();
  }

  /**
   * Enables/disables both the log cleaner and HFile cleaner chores. Requires admin
   * permission; the previous value is true only if both chores were enabled.
   */
  @Override
  public SetCleanerChoreRunningResponse setCleanerChoreRunning(
      RpcController c, SetCleanerChoreRunningRequest req) throws ServiceException {
    rpcPreCheck("setCleanerChoreRunning");

    boolean prevValue =
      master.getLogCleaner().getEnabled() && master.getHFileCleaner().getEnabled();
    master.getLogCleaner().setEnabled(req.getOn());
    master.getHFileCleaner().setEnabled(req.getOn());
    return SetCleanerChoreRunningResponse.newBuilder().setPrevValue(prevValue).build();
  }

  /** Schedules an enable-table procedure and returns its procId. */
  @Override
  public EnableTableResponse enableTable(RpcController controller,
      EnableTableRequest request) throws ServiceException {
    try {
      long procId = master.enableTable(
        ProtobufUtil.toTableName(request.getTableName()),
        request.getNonceGroup(),
        request.getNonce());
      return EnableTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Resolves the encoded region names in the request to RegionInfos and schedules a
   * merge-regions procedure, returning its procId.
   */
  @Override
  public MergeTableRegionsResponse mergeTableRegions(
      RpcController c, MergeTableRegionsRequest request) throws ServiceException {
    try {
      master.checkInitialized();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }

    RegionStates regionStates = master.getAssignmentManager().getRegionStates();

    RegionInfo[] regionsToMerge = new RegionInfo[request.getRegionCount()];
    for (int i = 0; i < request.getRegionCount(); i++) {
      final byte[] encodedNameOfRegion = request.getRegion(i).getValue().toByteArray();
      if (request.getRegion(i).getType() != RegionSpecifierType.ENCODED_REGION_NAME) {
        // Tolerated, but logged: the value is decoded as an encoded region name below.
        LOG.warn("MergeRegions specifier type: expected: "
          + RegionSpecifierType.ENCODED_REGION_NAME + " actual: region " + i + " ="
          + request.getRegion(i).getType());
      }
      RegionState regionState = regionStates.getRegionState(Bytes.toString(encodedNameOfRegion));
      if (regionState == null) {
        throw new ServiceException(
          new UnknownRegionException(Bytes.toStringBinary(encodedNameOfRegion)));
      }
      regionsToMerge[i] = regionState.getRegion();
    }

    try {
      long procId = master.mergeRegions(
        regionsToMerge,
        request.getForcible(),
        request.getNonceGroup(),
        request.getNonce());
      return MergeTableRegionsResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Schedules a split-region procedure (at the given split row, or a master-chosen point
   * when absent) and returns its procId.
   */
  @Override
  public SplitTableRegionResponse splitRegion(final RpcController controller,
      final SplitTableRegionRequest request) throws ServiceException {
    try {
      long procId = master.splitRegion(
        ProtobufUtil.toRegionInfo(request.getRegionInfo()),
        request.hasSplitRow() ? request.getSplitRow().toByteArray() : null,
        request.getNonceGroup(),
        request.getNonce());
      return SplitTableRegionResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }

  /**
   * Dispatches a coprocessor endpoint call to the matching registered master coprocessor
   * service and returns its serialized response. Requires admin permission.
   */
  @Override
  public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller,
      final ClientProtos.CoprocessorServiceRequest request) throws ServiceException {
    rpcPreCheck("execMasterService");
    try {
      ServerRpcController execController = new ServerRpcController();
      ClientProtos.CoprocessorServiceCall call = request.getCall();
      String serviceName = call.getServiceName();
      String methodName = call.getMethodName();
      if (!master.coprocessorServiceHandlers.containsKey(serviceName)) {
        throw new UnknownProtocolException(null,
          "No registered Master Coprocessor Endpoint found for " + serviceName +
          ". Has it been enabled?");
      }

      com.google.protobuf.Service service = master.coprocessorServiceHandlers.get(serviceName);
      com.google.protobuf.Descriptors.ServiceDescriptor serviceDesc = service.getDescriptorForType();
      com.google.protobuf.Descriptors.MethodDescriptor methodDesc =
        CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc);

      com.google.protobuf.Message execRequest =
        CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest());
      final com.google.protobuf.Message.Builder responseBuilder =
        service.getResponsePrototype(methodDesc).newBuilderForType();
      // The callback runs synchronously for blocking services; collect the response here.
      service.callMethod(methodDesc, execController, execRequest,
        (message) -> {
          if (message != null) {
            responseBuilder.mergeFrom(message);
          }
        });
      com.google.protobuf.Message execResult = responseBuilder.build();
      if (execController.getFailedOn() != null) {
        throw execController.getFailedOn();
      }
      return CoprocessorRpcUtils.getResponse(execResult, HConstants.EMPTY_BYTE_ARRAY);
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }

  /**
   * Triggers an asynchronous attempt to run a distributed procedure.
   * {@inheritDoc}
   */
  @Override
  public ExecProcedureResponse execProcedure(RpcController controller,
      ExecProcedureRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      ProcedureDescription desc = request.getProcedure();
      MasterProcedureManager mpm = master.getMasterProcedureManagerHost().getProcedureManager(
        desc.getSignature());
      if (mpm == null) {
        throw new ServiceException(new DoNotRetryIOException("The procedure is not registered: "
          + desc.getSignature()));
      }
      LOG.info(master.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
      mpm.checkPermissions(desc, getAccessChecker(), RpcServer.getRequestUser().orElse(null));
      mpm.execProcedure(desc);
      // send back the max amount of time the client should wait for the procedure
      // to complete
      long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME;
      return ExecProcedureResponse.newBuilder().setExpectedTimeout(
        waitTime).build();
    } catch (ForeignException e) {
      throw new ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Triggers a synchronous attempt to run a distributed procedure and sets
   * return data in response.
   * {@inheritDoc}
   */
  @Override
  public ExecProcedureResponse execProcedureWithRet(RpcController controller,
      ExecProcedureRequest request) throws ServiceException {
    rpcPreCheck("execProcedureWithRet");
    try {
      ProcedureDescription desc = request.getProcedure();
      MasterProcedureManager mpm =
        master.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
      if (mpm == null) {
        throw new ServiceException("The procedure is not registered: " + desc.getSignature());
      }
      LOG.info(master.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
      byte[] data = mpm.execProcedureWithRet(desc);
      ExecProcedureResponse.Builder builder = ExecProcedureResponse.newBuilder();
      // set return data if available
      if (data != null) {
        builder.setReturnData(UnsafeByteOperations.unsafeWrap(data));
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Returns cluster metrics filtered by the options in the request. */
  @Override
  public GetClusterStatusResponse getClusterStatus(RpcController controller,
      GetClusterStatusRequest req) throws ServiceException {
    GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
    try {
      // We used to check if Master was up at this point but let this call proceed even if
      // Master is initializing... else we shut out stuff like hbck2 tool from making progress
      // since it queries this method to figure cluster version. hbck2 wants to be able to work
      // against Master even if it is 'initializing' so it can do fixup.
      response.setClusterStatus(ClusterMetricsBuilder.toClusterStatus(
        master.getClusterMetrics(ClusterMetricsBuilder.toOptions(req.getOptionsList()))));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  /**
   * List the currently available/stored snapshots. Any in-progress snapshots are ignored
   */
  @Override
  public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller,
      GetCompletedSnapshotsRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder();
      List<SnapshotDescription> snapshots = master.snapshotManager.getCompletedSnapshots();

      // convert to protobuf
      for (SnapshotDescription snapshot : snapshots) {
        builder.addSnapshots(snapshot);
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Returns the names of all namespaces. */
  @Override
  public ListNamespacesResponse listNamespaces(
      RpcController controller, ListNamespacesRequest request)
      throws ServiceException {
    try {
      return ListNamespacesResponse.newBuilder()
        .addAllNamespaceName(master.listNamespaces())
        .build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Returns the descriptor of the named namespace. */
  @Override
  public GetNamespaceDescriptorResponse getNamespaceDescriptor(
      RpcController controller, GetNamespaceDescriptorRequest request)
      throws ServiceException {
    try {
      return GetNamespaceDescriptorResponse.newBuilder()
        .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(
          master.getNamespace(request.getNamespaceName())))
        .build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Get the number of regions of the table that have been updated by the alter.
   *
   * @return Pair indicating the number of regions updated Pair.getFirst is the
   *         regions that are yet to be updated Pair.getSecond is the total number
   *         of regions of the table
   * @throws ServiceException
   */
  @Override
  public GetSchemaAlterStatusResponse getSchemaAlterStatus(
      RpcController controller, GetSchemaAlterStatusRequest req) throws ServiceException {
    // TODO: currently, we query using the table name on the client side. this
    // may overlap with other table operations or the table operation may
    // have completed before querying this API. We need to refactor to a
    // transaction system in the future to avoid these ambiguities.
    TableName tableName = ProtobufUtil.toTableName(req.getTableName());

    try {
      master.checkInitialized();
      Pair<Integer,Integer> pair = master.getAssignmentManager().getReopenStatus(tableName);
      GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder();
      ret.setYetToUpdateRegions(pair.getFirst());
      ret.setTotalRegions(pair.getSecond());
      return ret.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Get list of TableDescriptors for requested tables.
   * @param c Unused (set to null).
   * @param req GetTableDescriptorsRequest that contains:
   * - tableNames: requested tables, or if empty, all are requested.
   * @return GetTableDescriptorsResponse
   * @throws ServiceException
   */
  @Override
  public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
      GetTableDescriptorsRequest req) throws ServiceException {
    try {
      master.checkInitialized();

      final String regex = req.hasRegex() ? req.getRegex() : null;
      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
      List<TableName> tableNameList = null;
      if (req.getTableNamesCount() > 0) {
        tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
        for (HBaseProtos.TableName tableNamePB: req.getTableNamesList()) {
          tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
        }
      }

      List<TableDescriptor> descriptors = master.listTableDescriptors(namespace, regex,
        tableNameList, req.getIncludeSysTables());

      GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
      if (descriptors != null && descriptors.size() > 0) {
        // Add the table descriptors to the response
        for (TableDescriptor htd: descriptors) {
          builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
        }
      }
      return builder.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Get list of userspace table names
   * @param controller Unused (set to null).
   * @param req GetTableNamesRequest
   * @return GetTableNamesResponse
   * @throws ServiceException
   */
  @Override
  public GetTableNamesResponse getTableNames(RpcController controller,
      GetTableNamesRequest req) throws ServiceException {
    try {
      master.checkServiceStarted();

      final String regex = req.hasRegex() ? req.getRegex() : null;
      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
      List<TableName> tableNames = master.listTableNames(namespace, regex,
        req.getIncludeSysTables());

      GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
      if (tableNames != null && tableNames.size() > 0) {
        // Add the table names to the response
        for (TableName table: tableNames) {
          builder.addTableNames(ProtobufUtil.toProtoTableName(table));
        }
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Returns the current state (enabled/disabled/...) of the named table. */
  @Override
  public GetTableStateResponse getTableState(RpcController controller,
      GetTableStateRequest request) throws ServiceException {
    try {
      master.checkServiceStarted();
      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
      TableState ts = master.getTableStateManager().getTableState(tableName);
      GetTableStateResponse.Builder builder = GetTableStateResponse.newBuilder();
      builder.setTableState(ts.convert());
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Reports whether the catalog janitor chore is currently enabled. */
  @Override
  public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
      IsCatalogJanitorEnabledRequest req) throws ServiceException {
    return IsCatalogJanitorEnabledResponse.newBuilder().setValue(
      master.isCatalogJanitorEnabled()).build();
  }

  /** Reports whether the cleaner chores are currently enabled. */
  @Override
  public IsCleanerChoreEnabledResponse isCleanerChoreEnabled(RpcController c,
      IsCleanerChoreEnabledRequest req)
      throws ServiceException {
    return IsCleanerChoreEnabledResponse.newBuilder().setValue(master.isCleanerChoreEnabled())
      .build();
  }

  /** Reports whether this master's services have started and it is not stopped. */
  @Override
  public IsMasterRunningResponse isMasterRunning(RpcController c,
      IsMasterRunningRequest req) throws ServiceException {
    try {
      master.checkServiceStarted();
      return IsMasterRunningResponse.newBuilder().setIsMasterRunning(
!master.isStopped()).build(); 1201 } catch (IOException e) { 1202 throw new ServiceException(e); 1203 } 1204 } 1205 1206 /** 1207 * Checks if the specified procedure is done. 1208 * @return true if the procedure is done, false if the procedure is in the process of completing 1209 * @throws ServiceException if invalid procedure or failed procedure with progress failure reason. 1210 */ 1211 @Override 1212 public IsProcedureDoneResponse isProcedureDone(RpcController controller, 1213 IsProcedureDoneRequest request) throws ServiceException { 1214 try { 1215 master.checkInitialized(); 1216 ProcedureDescription desc = request.getProcedure(); 1217 MasterProcedureManager mpm = master.getMasterProcedureManagerHost().getProcedureManager( 1218 desc.getSignature()); 1219 if (mpm == null) { 1220 throw new ServiceException("The procedure is not registered: " 1221 + desc.getSignature()); 1222 } 1223 LOG.debug("Checking to see if procedure from request:" 1224 + desc.getSignature() + " is done"); 1225 1226 IsProcedureDoneResponse.Builder builder = 1227 IsProcedureDoneResponse.newBuilder(); 1228 boolean done = mpm.isProcedureDone(desc); 1229 builder.setDone(done); 1230 return builder.build(); 1231 } catch (ForeignException e) { 1232 throw new ServiceException(e.getCause()); 1233 } catch (IOException e) { 1234 throw new ServiceException(e); 1235 } 1236 } 1237 1238 /** 1239 * Checks if the specified snapshot is done. 1240 * @return true if the snapshot is in file system ready to use, 1241 * false if the snapshot is in the process of completing 1242 * @throws ServiceException wrapping UnknownSnapshotException if invalid snapshot, or 1243 * a wrapped HBaseSnapshotException with progress failure reason. 
1244 */ 1245 @Override 1246 public IsSnapshotDoneResponse isSnapshotDone(RpcController controller, 1247 IsSnapshotDoneRequest request) throws ServiceException { 1248 LOG.debug("Checking to see if snapshot from request:" + 1249 ClientSnapshotDescriptionUtils.toString(request.getSnapshot()) + " is done"); 1250 try { 1251 master.checkInitialized(); 1252 IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder(); 1253 boolean done = master.snapshotManager.isSnapshotDone(request.getSnapshot()); 1254 builder.setDone(done); 1255 return builder.build(); 1256 } catch (ForeignException e) { 1257 throw new ServiceException(e.getCause()); 1258 } catch (IOException e) { 1259 throw new ServiceException(e); 1260 } 1261 } 1262 1263 @Override 1264 public GetProcedureResultResponse getProcedureResult(RpcController controller, 1265 GetProcedureResultRequest request) throws ServiceException { 1266 LOG.debug("Checking to see if procedure is done pid=" + request.getProcId()); 1267 try { 1268 master.checkInitialized(); 1269 GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder(); 1270 long procId = request.getProcId(); 1271 ProcedureExecutor<?> executor = master.getMasterProcedureExecutor(); 1272 Procedure<?> result = executor.getResultOrProcedure(procId); 1273 if (result != null) { 1274 builder.setSubmittedTime(result.getSubmittedTime()); 1275 builder.setLastUpdate(result.getLastUpdate()); 1276 if (executor.isFinished(procId)) { 1277 builder.setState(GetProcedureResultResponse.State.FINISHED); 1278 if (result.isFailed()) { 1279 IOException exception = 1280 MasterProcedureUtil.unwrapRemoteIOException(result); 1281 builder.setException(ForeignExceptionUtil.toProtoForeignException(exception)); 1282 } 1283 byte[] resultData = result.getResult(); 1284 if (resultData != null) { 1285 builder.setResult(UnsafeByteOperations.unsafeWrap(resultData)); 1286 } 1287 master.getMasterProcedureExecutor().removeResult(request.getProcId()); 1288 } else { 1289 
          builder.setState(GetProcedureResultResponse.State.RUNNING);
        }
      } else {
        builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Attempts to abort the procedure identified in the request; the response
   * carries whether the abort request was accepted.
   */
  @Override
  public AbortProcedureResponse abortProcedure(
      RpcController rpcController, AbortProcedureRequest request) throws ServiceException {
    try {
      AbortProcedureResponse.Builder response = AbortProcedureResponse.newBuilder();
      boolean abortResult =
          master.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning());
      response.setIsProcedureAborted(abortResult);
      return response.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Lists all namespaces known to the master, converted to protobuf form. */
  @Override
  public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c,
      ListNamespaceDescriptorsRequest request) throws ServiceException {
    try {
      ListNamespaceDescriptorsResponse.Builder response =
          ListNamespaceDescriptorsResponse.newBuilder();
      for(NamespaceDescriptor ns: master.getNamespaces()) {
        response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns));
      }
      return response.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Lists the procedures currently known to the master's procedure executor. */
  @Override
  public GetProceduresResponse getProcedures(
      RpcController rpcController,
      GetProceduresRequest request) throws ServiceException {
    try {
      final GetProceduresResponse.Builder response = GetProceduresResponse.newBuilder();
      for (Procedure<?> p: master.getProcedures()) {
        response.addProcedure(ProcedureUtil.convertToProtoProcedure(p));
      }
      return response.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Lists the locks currently held or waited on by procedures. */
  @Override
  public GetLocksResponse getLocks(
      RpcController controller,
      GetLocksRequest request) throws
      ServiceException {
    try {
      final GetLocksResponse.Builder builder = GetLocksResponse.newBuilder();

      for (LockedResource lockedResource: master.getLocks()) {
        builder.addLock(ProcedureUtil.convertToProtoLockedResource(lockedResource));
      }

      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Lists the table descriptors that belong to the requested namespace. */
  @Override
  public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
      ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
    try {
      ListTableDescriptorsByNamespaceResponse.Builder b =
          ListTableDescriptorsByNamespaceResponse.newBuilder();
      for (TableDescriptor htd : master
          .listTableDescriptorsByNamespace(request.getNamespaceName())) {
        b.addTableSchema(ProtobufUtil.toTableSchema(htd));
      }
      return b.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Lists the table names that belong to the requested namespace. */
  @Override
  public ListTableNamesByNamespaceResponse listTableNamesByNamespace(RpcController c,
      ListTableNamesByNamespaceRequest request) throws ServiceException {
    try {
      ListTableNamesByNamespaceResponse.Builder b =
          ListTableNamesByNamespaceResponse.newBuilder();
      for (TableName tableName: master.listTableNamesByNamespace(request.getNamespaceName())) {
        b.addTableName(ProtobufUtil.toProtoTableName(tableName));
      }
      return b.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Modifies a column family of a table; the response carries the procedure id, if any. */
  @Override
  public ModifyColumnResponse modifyColumn(RpcController controller,
      ModifyColumnRequest req) throws ServiceException {
    try {
      long procId = master.modifyColumn(
        ProtobufUtil.toTableName(req.getTableName()),
        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
        req.getNonceGroup(),
        req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed by the server, so do not set any procId
        return ModifyColumnResponse.newBuilder().build();
      } else {
        return ModifyColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Modifies an existing namespace; the response carries the procedure id. */
  @Override
  public ModifyNamespaceResponse modifyNamespace(RpcController controller,
      ModifyNamespaceRequest request) throws ServiceException {
    try {
      long procId = master.modifyNamespace(
        ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
        request.getNonceGroup(),
        request.getNonce());
      return ModifyNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Modifies an existing table's schema; the response carries the procedure id. */
  @Override
  public ModifyTableResponse modifyTable(RpcController controller,
      ModifyTableRequest req) throws ServiceException {
    try {
      long procId = master.modifyTable(
        ProtobufUtil.toTableName(req.getTableName()),
        ProtobufUtil.toTableDescriptor(req.getTableSchema()),
        req.getNonceGroup(),
        req.getNonce());
      return ModifyTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Moves a region. The destination server may be absent in the request; in
   * that case a null destination is passed through to {@code master.move}.
   */
  @Override
  public MoveRegionResponse moveRegion(RpcController controller,
      MoveRegionRequest req) throws ServiceException {
    final byte [] encodedRegionName = req.getRegion().getValue().toByteArray();
    RegionSpecifierType type = req.getRegion().getType();
    final byte [] destServerName = (req.hasDestServerName())?
1447 Bytes.toBytes(ProtobufUtil.toServerName(req.getDestServerName()).getServerName()):null; 1448 MoveRegionResponse mrr = MoveRegionResponse.newBuilder().build(); 1449 1450 if (type != RegionSpecifierType.ENCODED_REGION_NAME) { 1451 LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME 1452 + " actual: " + type); 1453 } 1454 1455 try { 1456 master.checkInitialized(); 1457 master.move(encodedRegionName, destServerName); 1458 } catch (IOException ioe) { 1459 throw new ServiceException(ioe); 1460 } 1461 return mrr; 1462 } 1463 1464 /** 1465 * Offline specified region from master's in-memory state. It will not attempt to 1466 * reassign the region as in unassign. 1467 * 1468 * This is a special method that should be used by experts or hbck. 1469 * 1470 */ 1471 @Override 1472 public OfflineRegionResponse offlineRegion(RpcController controller, 1473 OfflineRegionRequest request) throws ServiceException { 1474 try { 1475 master.checkInitialized(); 1476 1477 final RegionSpecifierType type = request.getRegion().getType(); 1478 if (type != RegionSpecifierType.REGION_NAME) { 1479 LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME 1480 + " actual: " + type); 1481 } 1482 1483 final byte[] regionName = request.getRegion().getValue().toByteArray(); 1484 final RegionInfo hri = master.getAssignmentManager().getRegionInfo(regionName); 1485 if (hri == null) { 1486 throw new UnknownRegionException(Bytes.toStringBinary(regionName)); 1487 } 1488 1489 if (master.cpHost != null) { 1490 master.cpHost.preRegionOffline(hri); 1491 } 1492 LOG.info(master.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString()); 1493 master.getAssignmentManager().offlineRegion(hri); 1494 if (master.cpHost != null) { 1495 master.cpHost.postRegionOffline(hri); 1496 } 1497 } catch (IOException ioe) { 1498 throw new ServiceException(ioe); 1499 } 1500 return OfflineRegionResponse.newBuilder().build(); 1501 } 1502 1503 /** 1504 * 
   * Execute Restore/Clone snapshot operation.
   *
   * <p>If the specified table exists a "Restore" is executed, replacing the table
   * schema and directory data with the content of the snapshot.
   * The table must be disabled, or a UnsupportedOperationException will be thrown.
   *
   * <p>If the table doesn't exist a "Clone" is executed, a new table is created
   * using the schema at the time of the snapshot, and the content of the snapshot.
   *
   * <p>The restore/clone operation does not require copying HFiles. Since HFiles
   * are immutable the table can point to and use the same files as the original one.
   */
  @Override
  public RestoreSnapshotResponse restoreSnapshot(RpcController controller,
      RestoreSnapshotRequest request) throws ServiceException {
    try {
      long procId = master.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(),
        request.getNonce(), request.getRestoreACL());
      return RestoreSnapshotResponse.newBuilder().setProcId(procId).build();
    } catch (ForeignException e) {
      // Surface the remote cause, not the ForeignException wrapper.
      throw new ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Enables/disables snapshot auto-cleanup; the response carries the previous setting. */
  @Override
  public SetSnapshotCleanupResponse switchSnapshotCleanup(
      RpcController controller, SetSnapshotCleanupRequest request)
      throws ServiceException {
    try {
      master.checkInitialized();
      final boolean enabled = request.getEnabled();
      // Synchronous only when explicitly requested in the proto message.
      final boolean isSynchronous = request.hasSynchronous() && request.getSynchronous();
      final boolean prevSnapshotCleanupRunning = this.switchSnapshotCleanup(enabled, isSynchronous);
      return SetSnapshotCleanupResponse.newBuilder()
          .setPrevSnapshotCleanup(prevSnapshotCleanupRunning).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Reports whether snapshot auto-cleanup is currently enabled. */
  @Override
  public IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(
      RpcController controller,
      IsSnapshotCleanupEnabledRequest request)
      throws ServiceException {
    try {
      master.checkInitialized();
      final boolean isSnapshotCleanupEnabled = master.snapshotCleanupTracker
          .isSnapshotCleanupEnabled();
      return IsSnapshotCleanupEnabledResponse.newBuilder()
          .setEnabled(isSnapshotCleanupEnabled).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Turn on/off snapshot auto-cleanup based on TTL
   *
   * @param enabledNewVal Set to <code>true</code> to enable, <code>false</code> to disable
   * @param synchronous If <code>true</code>, it waits until current snapshot cleanup is completed,
   *   if outstanding
   * @return previous snapshot auto-cleanup mode
   */
  private synchronized boolean switchSnapshotCleanup(final boolean enabledNewVal,
      final boolean synchronous) {
    // Read the old value before flipping the switch so it can be reported back.
    final boolean oldValue = master.snapshotCleanupTracker.isSnapshotCleanupEnabled();
    master.switchSnapshotCleanup(enabledNewVal, synchronous);
    LOG.info("{} Successfully set snapshot cleanup to {}", master.getClientIdAuditPrefix(),
        enabledNewVal);
    return oldValue;
  }


  /** Triggers a catalog janitor scan and reports its outcome. */
  @Override
  public RunCatalogScanResponse runCatalogScan(RpcController c,
      RunCatalogScanRequest req) throws ServiceException {
    rpcPreCheck("runCatalogScan");
    try {
      return ResponseConverter.buildRunCatalogScanResponse(
          this.master.catalogJanitorChore.scan());
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Runs the HFile and log cleaner chores on demand. */
  @Override
  public RunCleanerChoreResponse runCleanerChore(RpcController c, RunCleanerChoreRequest req)
      throws ServiceException {
    rpcPreCheck("runCleanerChore");
    // NOTE(review): '&&' short-circuits, so the log cleaner is skipped when the
    // HFile cleaner reports failure -- confirm this ordering is intentional.
    boolean result = master.getHFileCleaner().runCleaner() && master.getLogCleaner().runCleaner();
    return ResponseConverter.buildRunCleanerChoreResponse(result);
  }

  /** Turns the balancer on or off; the response carries the previous setting. */
  @Override
  public SetBalancerRunningResponse
      setBalancerRunning(RpcController c,
      SetBalancerRunningRequest req) throws ServiceException {
    try {
      master.checkInitialized();
      // Synchronous mode waits on the balance switch before returning.
      boolean prevValue = (req.getSynchronous())?
        synchronousBalanceSwitch(req.getOn()) : master.balanceSwitch(req.getOn());
      return SetBalancerRunningResponse.newBuilder().setPrevBalanceValue(prevValue).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Initiates a cluster shutdown. */
  @Override
  public ShutdownResponse shutdown(RpcController controller,
      ShutdownRequest request) throws ServiceException {
    LOG.info(master.getClientIdAuditPrefix() + " shutdown");
    try {
      master.shutdown();
    } catch (IOException e) {
      LOG.error("Exception occurred in HMaster.shutdown()", e);
      throw new ServiceException(e);
    }
    return ShutdownResponse.newBuilder().build();
  }

  /**
   * Triggers an asynchronous attempt to take a snapshot.
   * {@inheritDoc}
   */
  @Override
  public SnapshotResponse snapshot(RpcController controller,
      SnapshotRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      master.snapshotManager.checkSnapshotSupport();

      LOG.info(master.getClientIdAuditPrefix() + " snapshot request for:" +
          ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
      // get the snapshot information
      SnapshotDescription snapshot = SnapshotDescriptionUtils.validate(
        request.getSnapshot(), master.getConfiguration());
      master.snapshotManager.takeSnapshot(snapshot);

      // send back the max amount of time the client should wait for the snapshot to complete
      long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(master.getConfiguration(),
        snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
      return SnapshotResponse.newBuilder().setExpectedTimeout(waitTime).build();
    } catch (ForeignException e) {
      throw new
          ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Stops the master process (only the master, not the whole cluster). */
  @Override
  public StopMasterResponse stopMaster(RpcController controller,
      StopMasterRequest request) throws ServiceException {
    LOG.info(master.getClientIdAuditPrefix() + " stop");
    try {
      master.stopMaster();
    } catch (IOException e) {
      LOG.error("Exception occurred while stopping master", e);
      throw new ServiceException(e);
    }
    return StopMasterResponse.newBuilder().build();
  }

  /** Reports whether the master is running in maintenance mode. */
  @Override
  public IsInMaintenanceModeResponse isMasterInMaintenanceMode(
      final RpcController controller,
      final IsInMaintenanceModeRequest request) throws ServiceException {
    IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder();
    response.setInMaintenanceMode(master.isInMaintenanceMode());
    return response.build();
  }

  /** Unassigns a region from its current location. */
  @Override
  public UnassignRegionResponse unassignRegion(RpcController controller,
      UnassignRegionRequest req) throws ServiceException {
    try {
      final byte [] regionName = req.getRegion().getValue().toByteArray();
      RegionSpecifierType type = req.getRegion().getType();
      UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build();

      master.checkInitialized();
      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
            + " actual: " + type);
      }
      Pair<RegionInfo, ServerName> pair =
          MetaTableAccessor.getRegion(master.getConnection(), regionName);
      // The meta region is special-cased: its location comes from ZooKeeper, not meta itself.
      if (Bytes.equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName(), regionName)) {
        pair = new Pair<>(RegionInfoBuilder.FIRST_META_REGIONINFO,
            MetaTableLocator.getMetaRegionLocation(master.getZooKeeper()));
      }
      if (pair == null) {
        throw new UnknownRegionException(Bytes.toString(regionName));
      }

      RegionInfo hri = pair.getFirst();
      // Coprocessor pre/post hooks bracket the unassign call.
      if (master.cpHost != null) {
        master.cpHost.preUnassign(hri);
      }
      LOG.debug(master.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString()
          + " in current location if it is online");
      master.getAssignmentManager().unassign(hri);
      if (master.cpHost != null) {
        master.cpHost.postUnassign(hri);
      }

      return urr;
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Forwards a region-state transition report to the assignment manager. */
  @Override
  public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c,
      ReportRegionStateTransitionRequest req) throws ServiceException {
    try {
      master.checkServiceStarted();
      return master.getAssignmentManager().reportRegionStateTransition(req);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /** Applies a quota setting via the master quota manager. */
  @Override
  public SetQuotaResponse setQuota(RpcController c, SetQuotaRequest req)
      throws ServiceException {
    try {
      master.checkInitialized();
      return master.getMasterQuotaManager().setQuota(req);
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  /** Returns the last major compaction timestamp for the given table. */
  @Override
  public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
      MajorCompactionTimestampRequest request) throws ServiceException {
    MajorCompactionTimestampResponse.Builder response =
        MajorCompactionTimestampResponse.newBuilder();
    try {
      master.checkInitialized();
      response.setCompactionTimestamp(master.getLastMajorCompactionTimestamp(ProtobufUtil
          .toTableName(request.getTableName())));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  /** Returns the last major compaction timestamp for the given region. */
  @Override
  public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
      RpcController controller, MajorCompactionTimestampForRegionRequest request)
      throws ServiceException {
    MajorCompactionTimestampResponse.Builder response =
        MajorCompactionTimestampResponse.newBuilder();
    try {
      master.checkInitialized();
      response.setCompactionTimestamp(master.getLastMajorCompactionTimestampForRegion(request
          .getRegion().getValue().toByteArray()));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  /**
   * Compact a region on the master.
   *
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException
   */
  @Override
  @QosPriority(priority=HConstants.ADMIN_QOS)
  public CompactRegionResponse compactRegion(final RpcController controller,
      final CompactRegionRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      byte[] regionName = request.getRegion().getValue().toByteArray();
      TableName tableName = RegionInfo.getTable(regionName);
      // if the region is a mob region, do the mob file compaction.
      if (MobUtils.isMobRegionName(tableName, regionName)) {
        checkHFileFormatVersionForMob();
        return compactMob(request, tableName);
      } else {
        // Regular regions are handled by the inherited implementation.
        return super.compactRegion(controller, request);
      }
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }

  /**
   * check configured hfile format version before to do compaction
   * @throws IOException throw IOException
   */
  private void checkHFileFormatVersionForMob() throws IOException {
    if (HFile.getFormatVersion(master.getConfiguration()) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
      LOG.error("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
          + " is required for MOB compaction. Compaction will not run.");
      throw new IOException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
          + " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY
          + " accordingly.");
    }
  }

  /**
   * This method implements Admin getRegionInfo. On RegionServer, it is
   * able to return RegionInfo and detail. On Master, it just returns
   * RegionInfo. On Master it has been hijacked to return Mob detail.
   * Master implementation is good for querying full region name if
   * you only have the encoded name (useful around region replicas
   * for example which do not have a row in hbase:meta).
   */
  @Override
  @QosPriority(priority=HConstants.ADMIN_QOS)
  public GetRegionInfoResponse getRegionInfo(final RpcController controller,
      final GetRegionInfoRequest request) throws ServiceException {
    RegionInfo ri = null;
    try {
      ri = getRegionInfo(request.getRegion());
    } catch(UnknownRegionException ure) {
      throw new ServiceException(ure);
    }
    GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
    if (ri != null) {
      builder.setRegionInfo(ProtobufUtil.toRegionInfo(ri));
    } else {
      // Is it a MOB name? These work differently.
      byte [] regionName = request.getRegion().getValue().toByteArray();
      TableName tableName = RegionInfo.getTable(regionName);
      if (MobUtils.isMobRegionName(tableName, regionName)) {
        // a dummy region info contains the compaction state.
        RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName);
        builder.setRegionInfo(ProtobufUtil.toRegionInfo(mobRegionInfo));
        if (request.hasCompactionState() && request.getCompactionState()) {
          builder.setCompactionState(master.getMobCompactionState(tableName));
        }
      } else {
        // If unknown RegionInfo and not a MOB region, it is unknown.
        throw new ServiceException(new UnknownRegionException(Bytes.toString(regionName)));
      }
    }
    return builder.build();
  }

  /**
   * Compacts the mob files in the current table.
   * @param request the request.
   * @param tableName the current table name.
   * @return The response of the mob file compaction.
   * @throws IOException
   */
  private CompactRegionResponse compactMob(final CompactRegionRequest request,
      TableName tableName) throws IOException {
    if (!master.getTableStateManager().isTableState(tableName, TableState.State.ENABLED)) {
      throw new DoNotRetryIOException("Table " + tableName + " is not enabled");
    }
    boolean allFiles = false;
    List<ColumnFamilyDescriptor> compactedColumns = new ArrayList<>();
    ColumnFamilyDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
    byte[] family = null;
    if (request.hasFamily()) {
      // A specific family was requested; it must be mob-enabled to qualify.
      family = request.getFamily().toByteArray();
      for (ColumnFamilyDescriptor hcd : hcds) {
        if (Bytes.equals(family, hcd.getName())) {
          if (!hcd.isMobEnabled()) {
            LOG.error("Column family " + hcd.getNameAsString() + " is not a mob column family");
            throw new DoNotRetryIOException("Column family " + hcd.getNameAsString()
                + " is not a mob column family");
          }
          compactedColumns.add(hcd);
        }
      }
    } else {
      // No family given: compact every mob-enabled family of the table.
      for (ColumnFamilyDescriptor hcd : hcds) {
        if (hcd.isMobEnabled()) {
          compactedColumns.add(hcd);
        }
      }
    }
    if (compactedColumns.isEmpty()) {
      LOG.error("No mob column families are assigned in the mob compaction");
      throw new DoNotRetryIOException(
          "No mob column families are assigned in the mob compaction");
    }
    // A "major" request compacts all files, not just a selection.
    if (request.hasMajor() && request.getMajor()) {
      allFiles = true;
    }
    String familyLogMsg = (family != null) ?
        Bytes.toString(family) : "";
    if (LOG.isTraceEnabled()) {
      LOG.trace("User-triggered mob compaction requested for table: "
          + tableName.getNameAsString() + " for column family: " + familyLogMsg);
    }
    master.requestMobCompaction(tableName, compactedColumns, allFiles);
    return CompactRegionResponse.newBuilder().build();
  }

  /** Reports whether the balancer is currently enabled. */
  @Override
  public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller,
      IsBalancerEnabledRequest request) throws ServiceException {
    IsBalancerEnabledResponse.Builder response = IsBalancerEnabledResponse.newBuilder();
    response.setEnabled(master.isBalancerOn());
    return response.build();
  }

  /**
   * Enables/disables the requested split and/or merge switches; the response
   * carries the previous value of each switch, in request order.
   */
  @Override
  public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller,
      SetSplitOrMergeEnabledRequest request) throws ServiceException {
    SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder();
    try {
      master.checkInitialized();
      boolean newValue = request.getEnabled();
      for (MasterProtos.MasterSwitchType masterSwitchType: request.getSwitchTypesList()) {
        MasterSwitchType switchType = convert(masterSwitchType);
        boolean oldValue = master.isSplitOrMergeEnabled(switchType);
        response.addPrevValue(oldValue);
        // Coprocessor pre/post hooks bracket the tracker update.
        if (master.cpHost != null) {
          master.cpHost.preSetSplitOrMergeEnabled(newValue, switchType);
        }
        master.getSplitOrMergeTracker().setSplitOrMergeEnabled(newValue, switchType);
        if (master.cpHost != null) {
          master.cpHost.postSetSplitOrMergeEnabled(newValue, switchType);
        }
      }
    } catch (IOException | KeeperException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  /** Reports whether the requested split/merge switch is enabled. */
  @Override
  public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller,
      IsSplitOrMergeEnabledRequest request) throws ServiceException {
    IsSplitOrMergeEnabledResponse.Builder response =
        IsSplitOrMergeEnabledResponse.newBuilder();
    response.setEnabled(master.isSplitOrMergeEnabled(convert(request.getSwitchType())));
    return response.build();
  }

  /** Runs the region normalizer against the tables selected by the request filters. */
  @Override
  public NormalizeResponse normalize(RpcController controller,
      NormalizeRequest request) throws ServiceException {
    rpcPreCheck("normalize");
    try {
      final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
          .tableNames(ProtobufUtil.toTableNameList(request.getTableNamesList()))
          .regex(request.hasRegex() ? request.getRegex() : null)
          .namespace(request.hasNamespace() ? request.getNamespace() : null)
          .build();
      return NormalizeResponse.newBuilder()
          // all API requests are considered priority requests.
          .setNormalizerRan(master.normalizeRegions(ntfp, true))
          .build();
    } catch (IOException ex) {
      throw new ServiceException(ex);
    }
  }

  /** Turns the region normalizer on or off; the response carries the previous setting. */
  @Override
  public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller,
      SetNormalizerRunningRequest request) throws ServiceException {
    rpcPreCheck("setNormalizerRunning");

    // Sets normalizer on/off flag in ZK.
    // TODO: this method is totally broken in terms of atomicity of actions and values read.
    //  1. The contract has this RPC returning the previous value. There isn't a ZKUtil method
    //     that lets us retrieve the previous value as part of setting a new value, so we simply
    //     perform a read before issuing the update. Thus we have a data race opportunity, between
    //     when the `prevValue` is read and whatever is actually overwritten.
    //  2. Down in `setNormalizerOn`, the call to `createAndWatch` inside of the catch clause can
    //     itself fail in the event that the znode already exists. Thus, another data race, between
    //     when the initial `setData` call is notified of the absence of the target znode and the
    //     subsequent `createAndWatch`, with another client creating said node.
    // That said, there's supposed to be only one active master and thus there's supposed to be
    // only one process with the authority to modify the value.
    final boolean prevValue = master.getRegionNormalizerManager().isNormalizerOn();
    final boolean newValue = request.getOn();
    master.getRegionNormalizerManager().setNormalizerOn(newValue);
    LOG.info("{} set normalizerSwitch={}", master.getClientIdAuditPrefix(), newValue);
    return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build();
  }

  /** Reports whether the region normalizer is currently enabled. */
  @Override
  public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller,
      IsNormalizerEnabledRequest request) {
    IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder();
    response.setEnabled(master.isNormalizerOn());
    return response.build();
  }

  /**
   * Returns the security capabilities in effect on the cluster
   */
  @Override
  public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller,
      SecurityCapabilitiesRequest request) throws ServiceException {
    SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder();
    try {
      master.checkInitialized();
      Set<SecurityCapabilitiesResponse.Capability> capabilities = new HashSet<>();
      // Authentication
      if (User.isHBaseSecurityEnabled(master.getConfiguration())) {
        capabilities.add(SecurityCapabilitiesResponse.Capability.SECURE_AUTHENTICATION);
      } else {
        capabilities.add(SecurityCapabilitiesResponse.Capability.SIMPLE_AUTHENTICATION);
      }
      // A coprocessor that implements AccessControlService can provide AUTHORIZATION and
      // CELL_AUTHORIZATION
      if (master.cpHost !=
          null && hasAccessControlServiceCoprocessor(master.cpHost)) {
        if (AccessChecker.isAuthorizationSupported(master.getConfiguration())) {
          capabilities.add(SecurityCapabilitiesResponse.Capability.AUTHORIZATION);
        }
        if (AccessController.isCellAuthorizationSupported(master.getConfiguration())) {
          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_AUTHORIZATION);
        }
      }
      // A coprocessor that implements VisibilityLabelsService can provide CELL_VISIBILITY.
      if (master.cpHost != null && hasVisibilityLabelsServiceCoprocessor(master.cpHost)) {
        if (VisibilityController.isCellAuthorizationSupported(master.getConfiguration())) {
          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_VISIBILITY);
        }
      }
      response.addAllCapabilities(capabilities);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  /**
   * Determines if there is a MasterCoprocessor deployed which implements
   * {@link org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService.Interface}.
   */
  boolean hasAccessControlServiceCoprocessor(MasterCoprocessorHost cpHost) {
    return checkCoprocessorWithService(
        cpHost.findCoprocessors(MasterCoprocessor.class), AccessControlService.Interface.class);
  }

  /**
   * Determines if there is a MasterCoprocessor deployed which implements
   * {@link org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService.Interface}.
   */
  boolean hasVisibilityLabelsServiceCoprocessor(MasterCoprocessorHost cpHost) {
    return checkCoprocessorWithService(
        cpHost.findCoprocessors(MasterCoprocessor.class),
        VisibilityLabelsService.Interface.class);
  }

  /**
   * Determines if there is a coprocessor implementation in the provided argument which extends
   * or implements the provided {@code service}.
2055 */ 2056 boolean checkCoprocessorWithService( 2057 List<MasterCoprocessor> coprocessorsToCheck, Class<?> service) { 2058 if (coprocessorsToCheck == null || coprocessorsToCheck.isEmpty()) { 2059 return false; 2060 } 2061 for (MasterCoprocessor cp : coprocessorsToCheck) { 2062 if (service.isAssignableFrom(cp.getClass())) { 2063 return true; 2064 } 2065 } 2066 return false; 2067 } 2068 2069 private MasterSwitchType convert(MasterProtos.MasterSwitchType switchType) { 2070 switch (switchType) { 2071 case SPLIT: 2072 return MasterSwitchType.SPLIT; 2073 case MERGE: 2074 return MasterSwitchType.MERGE; 2075 default: 2076 break; 2077 } 2078 return null; 2079 } 2080 2081 @Override 2082 public AddReplicationPeerResponse addReplicationPeer(RpcController controller, 2083 AddReplicationPeerRequest request) throws ServiceException { 2084 try { 2085 long procId = master.addReplicationPeer(request.getPeerId(), 2086 ReplicationPeerConfigUtil.convert(request.getPeerConfig()), 2087 request.getPeerState().getState().equals(ReplicationState.State.ENABLED)); 2088 return AddReplicationPeerResponse.newBuilder().setProcId(procId).build(); 2089 } catch (ReplicationException | IOException e) { 2090 throw new ServiceException(e); 2091 } 2092 } 2093 2094 @Override 2095 public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller, 2096 RemoveReplicationPeerRequest request) throws ServiceException { 2097 try { 2098 long procId = master.removeReplicationPeer(request.getPeerId()); 2099 return RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build(); 2100 } catch (ReplicationException | IOException e) { 2101 throw new ServiceException(e); 2102 } 2103 } 2104 2105 @Override 2106 public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller, 2107 EnableReplicationPeerRequest request) throws ServiceException { 2108 try { 2109 long procId = master.enableReplicationPeer(request.getPeerId()); 2110 return 
EnableReplicationPeerResponse.newBuilder().setProcId(procId).build();
    } catch (ReplicationException | IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Disables the replication peer named in the request; returns the disabling procedure's id. */
  @Override
  public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller,
      DisableReplicationPeerRequest request) throws ServiceException {
    try {
      long procId = master.disableReplicationPeer(request.getPeerId());
      return DisableReplicationPeerResponse.newBuilder().setProcId(procId).build();
    } catch (ReplicationException | IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Returns the stored configuration for the replication peer named in the request. */
  @Override
  public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller,
      GetReplicationPeerConfigRequest request) throws ServiceException {
    GetReplicationPeerConfigResponse.Builder response = GetReplicationPeerConfigResponse
        .newBuilder();
    try {
      String peerId = request.getPeerId();
      ReplicationPeerConfig peerConfig = master.getReplicationPeerConfig(peerId);
      response.setPeerId(peerId);
      // Convert the POJO config back to its protobuf form for the wire.
      response.setPeerConfig(ReplicationPeerConfigUtil.convert(peerConfig));
    } catch (ReplicationException | IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  /** Replaces the named peer's configuration; returns the id of the updating procedure. */
  @Override
  public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(RpcController controller,
      UpdateReplicationPeerConfigRequest request) throws ServiceException {
    try {
      long procId = master.updateReplicationPeerConfig(request.getPeerId(),
        ReplicationPeerConfigUtil.convert(request.getPeerConfig()));
      return UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build();
    } catch (ReplicationException | IOException e) {
      throw new ServiceException(e);
    }
  }

  /** Lists replication peers, optionally filtered by the regex carried in the request. */
  @Override
  public ListReplicationPeersResponse listReplicationPeers(RpcController controller,
      ListReplicationPeersRequest request) throws
ServiceException {
    ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder();
    try {
      // A missing regex means "list all peers".
      List<ReplicationPeerDescription> peers = master
          .listReplicationPeers(request.hasRegex() ? request.getRegex() : null);
      for (ReplicationPeerDescription peer : peers) {
        response.addPeerDesc(ReplicationPeerConfigUtil.toProtoReplicationPeerDescription(peer));
      }
    } catch (ReplicationException | IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  /**
   * Lists region servers marked as decommissioned (draining). Pre/post coprocessor hooks are
   * invoked around the listing when a coprocessor host is present.
   */
  @Override
  public ListDecommissionedRegionServersResponse listDecommissionedRegionServers(
      RpcController controller, ListDecommissionedRegionServersRequest request)
      throws ServiceException {
    ListDecommissionedRegionServersResponse.Builder response =
        ListDecommissionedRegionServersResponse.newBuilder();
    try {
      master.checkInitialized();
      if (master.cpHost != null) {
        master.cpHost.preListDecommissionedRegionServers();
      }
      List<ServerName> servers = master.listDecommissionedRegionServers();
      response.addAllServerName((servers.stream().map(server -> ProtobufUtil.toServerName(server)))
          .collect(Collectors.toList()));
      if (master.cpHost != null) {
        master.cpHost.postListDecommissionedRegionServers();
      }
    } catch (IOException io) {
      throw new ServiceException(io);
    }

    return response.build();
  }

  /**
   * Marks the requested region servers as decommissioned; with {@code offload} set their
   * regions are also moved off. Coprocessor hooks wrap the operation.
   */
  @Override
  public DecommissionRegionServersResponse decommissionRegionServers(RpcController controller,
      DecommissionRegionServersRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      List<ServerName> servers = request.getServerNameList().stream()
          .map(pbServer -> ProtobufUtil.toServerName(pbServer)).collect(Collectors.toList());
      boolean offload = request.getOffload();
      if (master.cpHost != null) {
        master.cpHost.preDecommissionRegionServers(servers, offload);
      }
master.decommissionRegionServers(servers, offload);
      if (master.cpHost != null) {
        master.cpHost.postDecommissionRegionServers(servers, offload);
      }
    } catch (IOException io) {
      throw new ServiceException(io);
    }

    return DecommissionRegionServersResponse.newBuilder().build();
  }

  /**
   * Removes the decommission marker from a region server, optionally loading the given encoded
   * regions back onto it. Coprocessor hooks wrap the operation.
   */
  @Override
  public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller,
      RecommissionRegionServerRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      ServerName server = ProtobufUtil.toServerName(request.getServerName());
      List<byte[]> encodedRegionNames = request.getRegionList().stream()
          .map(regionSpecifier -> regionSpecifier.getValue().toByteArray())
          .collect(Collectors.toList());
      if (master.cpHost != null) {
        master.cpHost.preRecommissionRegionServer(server, encodedRegionNames);
      }
      master.recommissionRegionServer(server, encodedRegionNames);
      if (master.cpHost != null) {
        master.cpHost.postRecommissionRegionServer(server, encodedRegionNames);
      }
    } catch (IOException io) {
      throw new ServiceException(io);
    }

    return RecommissionRegionServerResponse.newBuilder().build();
  }

  /**
   * Queues a procedure-based lock on regions, a table, or a namespace (exactly one must be
   * supplied). Returns the procedure id of the lock. Bad arguments surface as
   * DoNotRetryIOException so clients do not retry them.
   */
  @Override
  public LockResponse requestLock(RpcController controller, final LockRequest request)
      throws ServiceException {
    try {
      if (request.getDescription().isEmpty()) {
        throw new IllegalArgumentException("Empty description");
      }
      NonceProcedureRunnable npr;
      LockType type = LockType.valueOf(request.getLockType().name());
      if (request.getRegionInfoCount() > 0) {
        // Region-level lock: lock the full set of regions named in the request.
        final RegionInfo[] regionInfos = new RegionInfo[request.getRegionInfoCount()];
        for (int i = 0; i < request.getRegionInfoCount(); ++i) {
          regionInfos[i] = ProtobufUtil.toRegionInfo(request.getRegionInfo(i));
        }
        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
@Override
          protected void run() throws IOException {
            setProcId(master.getLockManager().remoteLocks().requestRegionsLock(regionInfos,
              request.getDescription(), getNonceKey()));
          }

          @Override
          protected String getDescription() {
            return "RequestLock";
          }
        };
      } else if (request.hasTableName()) {
        // Table-level lock.
        final TableName tableName = ProtobufUtil.toTableName(request.getTableName());
        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
          @Override
          protected void run() throws IOException {
            setProcId(master.getLockManager().remoteLocks().requestTableLock(tableName, type,
              request.getDescription(), getNonceKey()));
          }

          @Override
          protected String getDescription() {
            return "RequestLock";
          }
        };
      } else if (request.hasNamespace()) {
        // Namespace-level lock.
        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
          @Override
          protected void run() throws IOException {
            setProcId(master.getLockManager().remoteLocks().requestNamespaceLock(
              request.getNamespace(), type, request.getDescription(), getNonceKey()));
          }

          @Override
          protected String getDescription() {
            return "RequestLock";
          }
        };
      } else {
        throw new IllegalArgumentException("one of table/namespace/region should be specified");
      }
      long procId = MasterProcedureUtil.submitProcedure(npr);
      return LockResponse.newBuilder().setProcId(procId).build();
    } catch (IllegalArgumentException e) {
      // Invalid request shape: tell the client not to retry.
      LOG.warn("Exception when queuing lock", e);
      throw new ServiceException(new DoNotRetryIOException(e));
    } catch (IOException e) {
      LOG.warn("Exception when queuing lock", e);
      throw new ServiceException(e);
    }
  }

  /**
   * @return LOCKED, if procedure is found and it has the lock; else UNLOCKED.
* @throws ServiceException if given proc id is found but it is not a LockProcedure.
   */
  @Override
  public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request)
      throws ServiceException {
    try {
      if (master.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(),
        request.getKeepAlive())) {
        // Lock still held: tell the client how long it has before the lock times out.
        return LockHeartbeatResponse.newBuilder().setTimeoutMs(
          master.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF,
            LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS))
            .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).build();
      } else {
        return LockHeartbeatResponse.newBuilder()
            .setLockStatus(LockHeartbeatResponse.LockStatus.UNLOCKED).build();
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Accepts region-size reports from region servers and feeds them into the master quota
   * manager. A no-op (empty response) when quotas are disabled or the quota manager is not
   * yet available.
   */
  @Override
  public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller,
      RegionSpaceUseReportRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) {
        return RegionSpaceUseReportResponse.newBuilder().build();
      }
      MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
      if (quotaManager != null) {
        // Stamp all reports in this batch with the same receive time.
        final long now = EnvironmentEdgeManager.currentTime();
        for (RegionSpaceUse report : request.getSpaceUseList()) {
          quotaManager.addRegionSize(ProtobufUtil.toRegionInfo(report.getRegionInfo()),
            report.getRegionSize(), now);
        }
      } else {
        LOG.debug("Received region space usage report but HMaster is not ready to process it, "
            + "skipping");
      }
      return RegionSpaceUseReportResponse.newBuilder().build();
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(
      RpcController controller, GetSpaceQuotaRegionSizesRequest
request) throws ServiceException { 2358 try { 2359 master.checkInitialized(); 2360 MasterQuotaManager quotaManager = this.master.getMasterQuotaManager(); 2361 GetSpaceQuotaRegionSizesResponse.Builder builder = 2362 GetSpaceQuotaRegionSizesResponse.newBuilder(); 2363 if (quotaManager != null) { 2364 Map<RegionInfo,Long> regionSizes = quotaManager.snapshotRegionSizes(); 2365 Map<TableName,Long> regionSizesByTable = new HashMap<>(); 2366 // Translate hregioninfo+long -> tablename+long 2367 for (Entry<RegionInfo,Long> entry : regionSizes.entrySet()) { 2368 final TableName tableName = entry.getKey().getTable(); 2369 Long prevSize = regionSizesByTable.get(tableName); 2370 if (prevSize == null) { 2371 prevSize = 0L; 2372 } 2373 regionSizesByTable.put(tableName, prevSize + entry.getValue()); 2374 } 2375 // Serialize them into the protobuf 2376 for (Entry<TableName,Long> tableSize : regionSizesByTable.entrySet()) { 2377 builder.addSizes(RegionSizes.newBuilder() 2378 .setTableName(ProtobufUtil.toProtoTableName(tableSize.getKey())) 2379 .setSize(tableSize.getValue()).build()); 2380 } 2381 return builder.build(); 2382 } else { 2383 LOG.debug("Received space quota region size report but HMaster is not ready to process it," 2384 + "skipping"); 2385 } 2386 return builder.build(); 2387 } catch (Exception e) { 2388 throw new ServiceException(e); 2389 } 2390 } 2391 2392 @Override 2393 public GetQuotaStatesResponse getQuotaStates( 2394 RpcController controller, GetQuotaStatesRequest request) throws ServiceException { 2395 try { 2396 master.checkInitialized(); 2397 QuotaObserverChore quotaChore = this.master.getQuotaObserverChore(); 2398 GetQuotaStatesResponse.Builder builder = GetQuotaStatesResponse.newBuilder(); 2399 if (quotaChore != null) { 2400 // The "current" view of all tables with quotas 2401 Map<TableName, SpaceQuotaSnapshot> tableSnapshots = quotaChore.getTableQuotaSnapshots(); 2402 for (Entry<TableName, SpaceQuotaSnapshot> entry : tableSnapshots.entrySet()) { 2403 
builder.addTableSnapshots(
              TableQuotaSnapshot.newBuilder()
                  .setTableName(ProtobufUtil.toProtoTableName(entry.getKey()))
                  .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
        }
        // The "current" view of all namespaces with quotas
        Map<String, SpaceQuotaSnapshot> nsSnapshots = quotaChore.getNamespaceQuotaSnapshots();
        for (Entry<String, SpaceQuotaSnapshot> entry : nsSnapshots.entrySet()) {
          builder.addNsSnapshots(
              NamespaceQuotaSnapshot.newBuilder()
                  .setNamespace(entry.getKey())
                  .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
        }
        return builder.build();
      }
      // No quota chore: empty response.
      return builder.build();
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Clears servers from the dead-servers list. Servers that could not be removed (or all of
   * them, when dead-server processing is still in flight) are echoed back in the response.
   */
  @Override
  public ClearDeadServersResponse clearDeadServers(RpcController controller,
      ClearDeadServersRequest request) throws ServiceException {
    LOG.debug(master.getClientIdAuditPrefix() + " clear dead region servers.");
    ClearDeadServersResponse.Builder response = ClearDeadServersResponse.newBuilder();
    try {
      master.checkInitialized();
      if (master.cpHost != null) {
        master.cpHost.preClearDeadServers();
      }

      if (master.getServerManager().areDeadServersInProgress()) {
        LOG.debug("Some dead server is still under processing, won't clear the dead server list");
        // Return everything the caller asked for: nothing was cleared.
        response.addAllServerName(request.getServerNameList());
      } else {
        for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
          if (!master.getServerManager().getDeadServers()
              .removeDeadServer(ProtobufUtil.toServerName(pbServer))) {
            // Removal failed for this server; report it back.
            response.addServerName(pbServer);
          }
        }
      }

      if (master.cpHost != null) {
        master.cpHost.postClearDeadServers(
          ProtobufUtil.toServerNameList(request.getServerNameList()),
          ProtobufUtil.toServerNameList(response.getServerNameList()));
      }
    } catch (IOException io) {
      throw
new ServiceException(io);
    }
    return response.build();
  }

  /**
   * Receives success/failure results for remote procedures executed on region servers and
   * routes each one to the master's completion or failure handler.
   */
  @Override
  public ReportProcedureDoneResponse reportProcedureDone(RpcController controller,
      ReportProcedureDoneRequest request) throws ServiceException {
    // Check Masters is up and ready for duty before progressing. Remote side will keep trying.
    try {
      this.master.checkServiceStarted();
    } catch (ServerNotRunningYetException snrye) {
      throw new ServiceException(snrye);
    }
    request.getResultList().forEach(result -> {
      if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) {
        master.remoteProcedureCompleted(result.getProcId());
      } else {
        master.remoteProcedureFailed(result.getProcId(),
          RemoteProcedureException.fromProto(result.getError()));
      }
    });
    return ReportProcedureDoneResponse.getDefaultInstance();
  }

  // HBCK Services

  /**
   * Triggers an immediate run of the HBCK chore. The response's {@code ran} flag comes from
   * {@code HbckChore.runChore()} (presumably false when a run is already in progress —
   * confirm against HbckChore).
   */
  @Override
  public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest req)
      throws ServiceException {
    rpcPreCheck("runHbckChore");
    LOG.info("{} request HBCK chore to run", master.getClientIdAuditPrefix());
    HbckChore hbckChore = master.getHbckChore();
    boolean ran = hbckChore.runChore();
    return RunHbckChoreResponse.newBuilder().setRan(ran).build();
  }

  /**
   * Update state of the table in meta only. This is required by hbck in some situations to cleanup
   * stuck assign/ unassign regions procedures for the table.
2493 * 2494 * @return previous state of the table 2495 */ 2496 @Override 2497 public GetTableStateResponse setTableStateInMeta(RpcController controller, 2498 SetTableStateInMetaRequest request) throws ServiceException { 2499 TableName tn = ProtobufUtil.toTableName(request.getTableName()); 2500 try { 2501 TableState prevState = this.master.getTableStateManager().getTableState(tn); 2502 TableState newState = TableState.convert(tn, request.getTableState()); 2503 LOG.info("{} set table={} state from {} to {}", master.getClientIdAuditPrefix(), 2504 tn, prevState.getState(), newState.getState()); 2505 this.master.getTableStateManager().setTableState(tn, newState.getState()); 2506 return GetTableStateResponse.newBuilder().setTableState(prevState.convert()).build(); 2507 } catch (Exception e) { 2508 throw new ServiceException(e); 2509 } 2510 } 2511 2512 /** 2513 * Update state of the region in meta only. This is required by hbck in some situations to cleanup 2514 * stuck assign/ unassign regions procedures for the table. 2515 * 2516 * @return previous states of the regions 2517 */ 2518 @Override 2519 public SetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller, 2520 SetRegionStateInMetaRequest request) throws ServiceException { 2521 SetRegionStateInMetaResponse.Builder builder = SetRegionStateInMetaResponse.newBuilder(); 2522 try { 2523 for (RegionSpecifierAndState s : request.getStatesList()) { 2524 RegionSpecifier spec = s.getRegionSpecifier(); 2525 String encodedName; 2526 if (spec.getType() == RegionSpecifierType.ENCODED_REGION_NAME) { 2527 encodedName = spec.getValue().toStringUtf8(); 2528 } else { 2529 // TODO: actually, a full region name can save a lot on meta scan, improve later. 
2530 encodedName = RegionInfo.encodeRegionName(spec.getValue().toByteArray()); 2531 } 2532 RegionInfo info = this.master.getAssignmentManager().loadRegionFromMeta(encodedName); 2533 LOG.trace("region info loaded from meta table: {}", info); 2534 RegionState prevState = 2535 this.master.getAssignmentManager().getRegionStates().getRegionState(info); 2536 RegionState.State newState = RegionState.State.convert(s.getState()); 2537 LOG.info("{} set region={} state from {} to {}", master.getClientIdAuditPrefix(), info, 2538 prevState.getState(), newState); 2539 Put metaPut = MetaTableAccessor.makePutFromRegionInfo(info, System.currentTimeMillis()); 2540 metaPut.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER, 2541 Bytes.toBytes(newState.name())); 2542 List<Put> putList = new ArrayList<>(); 2543 putList.add(metaPut); 2544 MetaTableAccessor.putsToMetaTable(this.master.getConnection(), putList); 2545 // Loads from meta again to refresh AM cache with the new region state 2546 this.master.getAssignmentManager().loadRegionFromMeta(encodedName); 2547 builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec) 2548 .setState(prevState.getState().convert())); 2549 } 2550 } catch (Exception e) { 2551 throw new ServiceException(e); 2552 } 2553 return builder.build(); 2554 } 2555 2556 /** 2557 * Get RegionInfo from Master using content of RegionSpecifier as key. 
* @return RegionInfo found by decoding <code>rs</code> or null if none found
   */
  private RegionInfo getRegionInfo(HBaseProtos.RegionSpecifier rs) throws UnknownRegionException {
    RegionInfo ri = null;
    switch(rs.getType()) {
      case REGION_NAME:
        final byte[] regionName = rs.getValue().toByteArray();
        ri = this.master.getAssignmentManager().getRegionInfo(regionName);
        break;
      case ENCODED_REGION_NAME:
        String encodedRegionName = Bytes.toString(rs.getValue().toByteArray());
        RegionState regionState = this.master.getAssignmentManager().getRegionStates().
            getRegionState(encodedRegionName);
        // Fall back to a meta-table lookup when the in-memory state cache has no entry.
        ri = regionState == null ?
          this.master.getAssignmentManager().loadRegionFromMeta(encodedRegionName) :
            regionState.getRegion();
        break;
      default:
        // Unrecognized specifier type: return null rather than fail.
        break;
    }
    return ri;
  }

  /**
   * @throws ServiceException If no MasterProcedureExecutor
   */
  private void checkMasterProcedureExecutor() throws ServiceException {
    if (this.master.getMasterProcedureExecutor() == null) {
      throw new ServiceException("Master's ProcedureExecutor not initialized; retry later");
    }
  }

  /**
   * A 'raw' version of assign that does bulk and can skirt Master state checks if override
   * is set; i.e. assigns can be forced during Master startup or if RegionState is unclean.
   * Used by HBCK2.
*/
  @Override
  public MasterProtos.AssignsResponse assigns(RpcController controller,
      MasterProtos.AssignsRequest request) throws ServiceException {
    checkMasterProcedureExecutor();
    MasterProtos.AssignsResponse.Builder responseBuilder =
        MasterProtos.AssignsResponse.newBuilder();
    try {
      boolean override = request.getOverride();
      LOG.info("{} assigns, override={}", master.getClientIdAuditPrefix(), override);
      for (HBaseProtos.RegionSpecifier rs: request.getRegionList()) {
        // NO_PROC_ID is reported for regions we could not resolve or did not need to assign.
        long pid = Procedure.NO_PROC_ID;
        RegionInfo ri = getRegionInfo(rs);
        if (ri == null) {
          LOG.info("Unknown={}", rs);
        } else {
          Procedure p = this.master.getAssignmentManager().createOneAssignProcedure(ri, override);
          if (p != null) {
            pid = this.master.getMasterProcedureExecutor().submitProcedure(p);
          }
        }
        // One pid per requested region, in request order.
        responseBuilder.addPid(pid);
      }
      return responseBuilder.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * A 'raw' version of unassign that does bulk and can skirt Master state checks if override
   * is set; i.e. unassigns can be forced during Master startup or if RegionState is unclean.
   * Used by HBCK2.
*/
  @Override
  public MasterProtos.UnassignsResponse unassigns(RpcController controller,
      MasterProtos.UnassignsRequest request) throws ServiceException {
    checkMasterProcedureExecutor();
    MasterProtos.UnassignsResponse.Builder responseBuilder =
        MasterProtos.UnassignsResponse.newBuilder();
    try {
      boolean override = request.getOverride();
      LOG.info("{} unassigns, override={}", master.getClientIdAuditPrefix(), override);
      for (HBaseProtos.RegionSpecifier rs: request.getRegionList()) {
        // NO_PROC_ID is reported for regions we could not resolve or did not need to unassign.
        long pid = Procedure.NO_PROC_ID;
        RegionInfo ri = getRegionInfo(rs);
        if (ri == null) {
          LOG.info("Unknown={}", rs);
        } else {
          Procedure p = this.master.getAssignmentManager().createOneUnassignProcedure(ri, override);
          if (p != null) {
            pid = this.master.getMasterProcedureExecutor().submitProcedure(p);
          }
        }
        // One pid per requested region, in request order.
        responseBuilder.addPid(pid);
      }
      return responseBuilder.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Bypass specified procedure to completion. Procedure is marked completed but no actual work
   * is done from the current state/ step onwards. Parents of the procedure are also marked for
   * bypass.
   *
   * NOTE: this is a dangerous operation and may be used to unstuck buggy procedures. This may
   * leave system in incoherent state. This may need to be followed by some cleanup steps/
   * actions by operator.
 *
   * @return BypassProcedureToCompletionResponse indicating success or failure
   */
  @Override
  public MasterProtos.BypassProcedureResponse bypassProcedure(RpcController controller,
      MasterProtos.BypassProcedureRequest request) throws ServiceException {
    try {
      LOG.info("{} bypass procedures={}, waitTime={}, override={}, recursive={}",
        master.getClientIdAuditPrefix(), request.getProcIdList(), request.getWaitTime(),
        request.getOverride(), request.getRecursive());
      // One boolean per requested proc id: whether that procedure was bypassed.
      List<Boolean> ret =
          master.getMasterProcedureExecutor().bypassProcedure(request.getProcIdList(),
            request.getWaitTime(), request.getOverride(), request.getRecursive());
      return MasterProtos.BypassProcedureResponse.newBuilder().addAllBypassed(ret).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Schedules a ServerCrashProcedure for each requested server unless one is already running
   * for it; NO_PROC_ID is reported for servers that were skipped.
   */
  @Override
  public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProcedure(
      RpcController controller, MasterProtos.ScheduleServerCrashProcedureRequest request)
      throws ServiceException {
    List<Long> pids = new ArrayList<>();
    for (HBaseProtos.ServerName sn: request.getServerNameList()) {
      ServerName serverName = ProtobufUtil.toServerName(sn);
      LOG.info("{} schedule ServerCrashProcedure for {}",
        this.master.getClientIdAuditPrefix(), serverName);
      if (shouldSubmitSCP(serverName)) {
        pids.add(this.master.getServerManager().expireServer(serverName, true));
      } else {
        pids.add(Procedure.NO_PROC_ID);
      }
    }
    return MasterProtos.ScheduleServerCrashProcedureResponse.newBuilder().addAllPid(pids).build();
  }

  /** Runs the MetaFixer to repair problems in the hbase:meta table. */
  @Override
  public FixMetaResponse fixMeta(RpcController controller, FixMetaRequest request)
      throws ServiceException {
    try {
      MetaFixer mf = new MetaFixer(this.master);
      mf.fix();
      return FixMetaResponse.newBuilder().build();
    } catch (IOException ioe) {
      throw new
ServiceException(ioe);
    }
  }

  /** Enables/disables RPC throttling per the request, delegating to the master quota manager. */
  @Override
  public SwitchRpcThrottleResponse switchRpcThrottle(RpcController controller,
      SwitchRpcThrottleRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      return master.getMasterQuotaManager().switchRpcThrottle(request);
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  /** Reports whether RPC throttling is currently enabled. */
  @Override
  public MasterProtos.IsRpcThrottleEnabledResponse isRpcThrottleEnabled(RpcController controller,
      MasterProtos.IsRpcThrottleEnabledRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      return master.getMasterQuotaManager().isRpcThrottleEnabled(request);
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  /** Toggles the exceed-throttle-quota switch, delegating to the master quota manager. */
  @Override
  public SwitchExceedThrottleQuotaResponse switchExceedThrottleQuota(RpcController controller,
      SwitchExceedThrottleQuotaRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      return master.getMasterQuotaManager().switchExceedThrottleQuota(request);
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Accepts file-archival notifications for space-quota accounting. A no-op (empty response)
   * when quotas are disabled.
   */
  @Override
  public FileArchiveNotificationResponse reportFileArchival(RpcController controller,
      FileArchiveNotificationRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) {
        return FileArchiveNotificationResponse.newBuilder().build();
      }
      master.getMasterQuotaManager().processFileArchivals(request, master.getConnection(),
        master.getConfiguration(), master.getFileSystem());
      return FileArchiveNotificationResponse.newBuilder().build();
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Grants a user permission by writing it to the ACL table, with pre/post coprocessor hooks
   * and audit logging. Fails with DoNotRetryIOException when no AccessController coprocessor
   * is installed.
   */
  @Override
  public GrantResponse grant(RpcController controller, GrantRequest request)
      throws ServiceException {
    try {
master.checkInitialized();
      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
        final UserPermission perm =
            ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
        boolean mergeExistingPermissions = request.getMergeExistingPermissions();
        master.cpHost.preGrant(perm, mergeExistingPermissions);
        // Persist the grant in the ACL table; the Table is closed by try-with-resources.
        try (Table table = master.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
          PermissionStorage.addUserPermission(getConfiguration(), perm, table,
            mergeExistingPermissions);
        }
        master.cpHost.postGrant(perm, mergeExistingPermissions);
        User caller = RpcServer.getRequestUser().orElse(null);
        if (AUDITLOG.isTraceEnabled()) {
          // audit log should store permission changes in addition to auth results
          String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
          AUDITLOG.trace("User {} (remote address: {}) granted permission {}", caller,
            remoteAddress, perm);
        }
        return GrantResponse.getDefaultInstance();
      } else {
        throw new DoNotRetryIOException(
            new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Revokes a user permission by removing it from the ACL table, with pre/post coprocessor
   * hooks and audit logging. Fails with DoNotRetryIOException when no AccessController
   * coprocessor is installed.
   */
  @Override
  public RevokeResponse revoke(RpcController controller, RevokeRequest request)
      throws ServiceException {
    try {
      master.checkInitialized();
      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
        final UserPermission userPermission =
            ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
        master.cpHost.preRevoke(userPermission);
        // Remove the permission from the ACL table; Table closed by try-with-resources.
        try (Table table = master.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
          PermissionStorage.removeUserPermission(master.getConfiguration(), userPermission, table);
        }
        master.cpHost.postRevoke(userPermission);
        User caller =
RpcServer.getRequestUser().orElse(null);
        if (AUDITLOG.isTraceEnabled()) {
          // audit log should record all permission changes
          String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
          AUDITLOG.trace("User {} (remote address: {}) revoked permission {}", caller,
            remoteAddress, userPermission);
        }
        return RevokeResponse.getDefaultInstance();
      } else {
        throw new DoNotRetryIOException(
            new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Returns user permissions at global, table, or namespace scope depending on the request's
   * type field; optional request fields act as filters. Requires the AccessController
   * coprocessor to be installed.
   */
  @Override
  public GetUserPermissionsResponse getUserPermissions(RpcController controller,
      GetUserPermissionsRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
        // Absent optional fields become nulls, meaning "no filter" for that dimension.
        final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null;
        String namespace =
            request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null;
        TableName table =
            request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
        byte[] cf = request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null;
        byte[] cq =
            request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null;
        Type permissionType = request.hasType() ? request.getType() : null;
        master.getMasterCoprocessorHost().preGetUserPermissions(userName, namespace, table, cf, cq);

        List<UserPermission> perms = null;
        if (permissionType == Type.Table) {
          boolean filter = (cf != null || userName != null) ?
true : false;
          perms = PermissionStorage.getUserTablePermissions(master.getConfiguration(), table, cf,
            cq, userName, filter);
        } else if (permissionType == Type.Namespace) {
          perms = PermissionStorage.getUserNamespacePermissions(master.getConfiguration(),
            namespace, userName, userName != null ? true : false);
        } else {
          // Global scope (no explicit type, or Type.Global).
          perms = PermissionStorage.getUserPermissions(master.getConfiguration(), null, null, null,
            userName, userName != null ? true : false);
          // Skip super users when filter user is specified
          if (userName == null) {
            // Adding superusers explicitly to the result set as PermissionStorage do not store
            // them. Also using acl as table name to be inline with the results of global admin and
            // will help in avoiding any leakage of information about being superusers.
            for (String user : Superusers.getSuperUsers()) {
              perms.add(new UserPermission(user,
                  Permission.newBuilder().withActions(Action.values()).build()));
            }
          }
        }

        master.getMasterCoprocessorHost().postGetUserPermissions(userName, namespace, table, cf,
          cq);
        AccessControlProtos.GetUserPermissionsResponse response =
            ShadedAccessControlUtil.buildGetUserPermissionsResponse(perms);
        return response;
      } else {
        throw new DoNotRetryIOException(
            new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Checks whether a user (the caller by default, or the user named in the request) holds
   * each of the requested permissions. Requires the AccessController coprocessor.
   */
  @Override
  public HasUserPermissionsResponse hasUserPermissions(RpcController controller,
      HasUserPermissionsRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
        User caller = RpcServer.getRequestUser().orElse(null);
        String userName =
            request.hasUserName() ?
request.getUserName().toStringUtf8() : caller.getShortName(); 2887 List<Permission> permissions = new ArrayList<>(); 2888 for (int i = 0; i < request.getPermissionCount(); i++) { 2889 permissions.add(ShadedAccessControlUtil.toPermission(request.getPermission(i))); 2890 } 2891 master.getMasterCoprocessorHost().preHasUserPermissions(userName, permissions); 2892 if (!caller.getShortName().equals(userName)) { 2893 List<String> groups = AccessChecker.getUserGroups(userName); 2894 caller = new InputUser(userName, groups.toArray(new String[groups.size()])); 2895 } 2896 List<Boolean> hasUserPermissions = new ArrayList<>(); 2897 if (getAccessChecker() != null) { 2898 for (Permission permission : permissions) { 2899 boolean hasUserPermission = 2900 getAccessChecker().hasUserPermission(caller, "hasUserPermissions", permission); 2901 hasUserPermissions.add(hasUserPermission); 2902 } 2903 } else { 2904 for (int i = 0; i < permissions.size(); i++) { 2905 hasUserPermissions.add(true); 2906 } 2907 } 2908 master.getMasterCoprocessorHost().postHasUserPermissions(userName, permissions); 2909 HasUserPermissionsResponse.Builder builder = 2910 HasUserPermissionsResponse.newBuilder().addAllHasUserPermission(hasUserPermissions); 2911 return builder.build(); 2912 } else { 2913 throw new DoNotRetryIOException( 2914 new UnsupportedOperationException(AccessController.class.getName() + " is not loaded")); 2915 } 2916 } catch (IOException ioe) { 2917 throw new ServiceException(ioe); 2918 } 2919 } 2920 2921 private boolean containMetaWals(ServerName serverName) throws IOException { 2922 Path logDir = new Path(master.getWALRootDir(), 2923 AbstractFSWALProvider.getWALDirectoryName(serverName.toString())); 2924 Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT); 2925 Path checkDir = master.getFileSystem().exists(splitDir) ? 
splitDir : logDir; 2926 try { 2927 return master.getFileSystem().listStatus(checkDir, META_FILTER).length > 0; 2928 } catch (FileNotFoundException fnfe) { 2929 // If no files, then we don't contain metas; was failing schedule of 2930 // SCP because this was FNFE'ing when no server dirs ('Unknown Server'). 2931 LOG.warn("No dir for WALs for {}; continuing", serverName.toString()); 2932 return false; 2933 } 2934 } 2935 2936 private boolean shouldSubmitSCP(ServerName serverName) { 2937 // check if there is already a SCP of this server running 2938 List<Procedure<MasterProcedureEnv>> procedures = 2939 master.getMasterProcedureExecutor().getProcedures(); 2940 for (Procedure<MasterProcedureEnv> procedure : procedures) { 2941 if (procedure instanceof ServerCrashProcedure) { 2942 if (serverName.compareTo(((ServerCrashProcedure) procedure).getServerName()) == 0 2943 && !procedure.isFinished()) { 2944 LOG.info("there is already a SCP of this server {} running, pid {}", serverName, 2945 procedure.getProcId()); 2946 return false; 2947 } 2948 } 2949 } 2950 return true; 2951 } 2952 2953 @Override 2954 public GetClusterIdResponse getClusterId(RpcController rpcController, GetClusterIdRequest request) 2955 throws ServiceException { 2956 GetClusterIdResponse.Builder resp = GetClusterIdResponse.newBuilder(); 2957 String clusterId = master.getClusterId(); 2958 if (clusterId != null) { 2959 resp.setClusterId(clusterId); 2960 } 2961 return resp.build(); 2962 } 2963 2964 @Override 2965 public GetActiveMasterResponse getActiveMaster(RpcController rpcController, 2966 GetActiveMasterRequest request) throws ServiceException { 2967 GetActiveMasterResponse.Builder resp = GetActiveMasterResponse.newBuilder(); 2968 Optional<ServerName> serverName = master.getActiveMaster(); 2969 serverName.ifPresent(name -> resp.setServerName(ProtobufUtil.toServerName(name))); 2970 return resp.build(); 2971 } 2972 2973 @Override 2974 public GetMastersResponse getMasters(RpcController rpcController, 
GetMastersRequest request) 2975 throws ServiceException { 2976 GetMastersResponse.Builder resp = GetMastersResponse.newBuilder(); 2977 // Active master 2978 Optional<ServerName> serverName = master.getActiveMaster(); 2979 serverName.ifPresent(name -> resp.addMasterServers(GetMastersResponseEntry.newBuilder() 2980 .setServerName(ProtobufUtil.toServerName(name)).setIsActive(true).build())); 2981 // Backup masters 2982 for (ServerName backupMaster: master.getBackupMasters()) { 2983 resp.addMasterServers(GetMastersResponseEntry.newBuilder().setServerName( 2984 ProtobufUtil.toServerName(backupMaster)).setIsActive(false).build()); 2985 } 2986 return resp.build(); 2987 } 2988 2989 @Override 2990 public GetMetaRegionLocationsResponse getMetaRegionLocations(RpcController rpcController, 2991 GetMetaRegionLocationsRequest request) throws ServiceException { 2992 GetMetaRegionLocationsResponse.Builder response = GetMetaRegionLocationsResponse.newBuilder(); 2993 Optional<List<HRegionLocation>> metaLocations = 2994 master.getMetaRegionLocationCache().getMetaRegionLocations(); 2995 metaLocations.ifPresent(hRegionLocations -> hRegionLocations.forEach( 2996 location -> response.addMetaLocations(ProtobufUtil.toRegionLocation(location)))); 2997 return response.build(); 2998 } 2999 @Override 3000 public HBaseProtos.LogEntry getLogEntries(RpcController controller, 3001 HBaseProtos.LogRequest request) throws ServiceException { 3002 try { 3003 final String logClassName = request.getLogClassName(); 3004 Class<?> logClass = Class.forName(logClassName) 3005 .asSubclass(Message.class); 3006 Method method = logClass.getMethod("parseFrom", ByteString.class); 3007 if (logClassName.contains("BalancerDecisionsRequest")) { 3008 MasterProtos.BalancerDecisionsRequest balancerDecisionsRequest = 3009 (MasterProtos.BalancerDecisionsRequest) method 3010 .invoke(null, request.getLogMessage()); 3011 MasterProtos.BalancerDecisionsResponse balancerDecisionsResponse = 3012 
getBalancerDecisions(balancerDecisionsRequest); 3013 return HBaseProtos.LogEntry.newBuilder() 3014 .setLogClassName(balancerDecisionsResponse.getClass().getName()) 3015 .setLogMessage(balancerDecisionsResponse.toByteString()) 3016 .build(); 3017 } 3018 } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException 3019 | InvocationTargetException e) { 3020 LOG.error("Error while retrieving log entries.", e); 3021 throw new ServiceException(e); 3022 } 3023 throw new ServiceException("Invalid request params"); 3024 } 3025 3026 private MasterProtos.BalancerDecisionsResponse getBalancerDecisions( 3027 MasterProtos.BalancerDecisionsRequest request) { 3028 final NamedQueueRecorder namedQueueRecorder = this.regionServer.getNamedQueueRecorder(); 3029 if (namedQueueRecorder == null) { 3030 return MasterProtos.BalancerDecisionsResponse.newBuilder() 3031 .addAllBalancerDecision(Collections.emptyList()).build(); 3032 } 3033 final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest(); 3034 namedQueueGetRequest.setNamedQueueEvent(BalancerDecisionDetails.BALANCER_DECISION_EVENT); 3035 namedQueueGetRequest.setBalancerDecisionsRequest(request); 3036 NamedQueueGetResponse namedQueueGetResponse = 3037 namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest); 3038 List<RecentLogs.BalancerDecision> balancerDecisions = 3039 namedQueueGetResponse.getBalancerDecisions(); 3040 return MasterProtos.BalancerDecisionsResponse.newBuilder() 3041 .addAllBalancerDecision(balancerDecisions).build(); 3042 } 3043 3044}