001/** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.client; 019 020import com.google.protobuf.Descriptors; 021import com.google.protobuf.Message; 022import com.google.protobuf.RpcController; 023import edu.umd.cs.findbugs.annotations.Nullable; 024import java.io.Closeable; 025import java.io.IOException; 026import java.io.InterruptedIOException; 027import java.util.ArrayList; 028import java.util.Arrays; 029import java.util.Collections; 030import java.util.EnumSet; 031import java.util.HashMap; 032import java.util.Iterator; 033import java.util.LinkedList; 034import java.util.List; 035import java.util.Map; 036import java.util.Set; 037import java.util.concurrent.Callable; 038import java.util.concurrent.ExecutionException; 039import java.util.concurrent.Future; 040import java.util.concurrent.TimeUnit; 041import java.util.concurrent.TimeoutException; 042import java.util.concurrent.atomic.AtomicInteger; 043import java.util.concurrent.atomic.AtomicReference; 044import java.util.function.Supplier; 045import java.util.regex.Pattern; 046import java.util.stream.Collectors; 047import java.util.stream.Stream; 048import org.apache.hadoop.conf.Configuration; 049import org.apache.hadoop.hbase.Abortable; 050import org.apache.hadoop.hbase.CacheEvictionStats; 051import org.apache.hadoop.hbase.CacheEvictionStatsBuilder; 052import org.apache.hadoop.hbase.ClusterMetrics; 053import org.apache.hadoop.hbase.ClusterMetrics.Option; 054import org.apache.hadoop.hbase.ClusterMetricsBuilder; 055import org.apache.hadoop.hbase.DoNotRetryIOException; 056import org.apache.hadoop.hbase.HBaseConfiguration; 057import org.apache.hadoop.hbase.HConstants; 058import org.apache.hadoop.hbase.HRegionInfo; 059import org.apache.hadoop.hbase.HRegionLocation; 060import org.apache.hadoop.hbase.HTableDescriptor; 061import org.apache.hadoop.hbase.MasterNotRunningException; 062import org.apache.hadoop.hbase.MetaTableAccessor; 063import org.apache.hadoop.hbase.NamespaceDescriptor; 064import org.apache.hadoop.hbase.NamespaceNotFoundException; 065import org.apache.hadoop.hbase.NotServingRegionException; 066import org.apache.hadoop.hbase.RegionLocations; 067import org.apache.hadoop.hbase.RegionMetrics; 068import org.apache.hadoop.hbase.RegionMetricsBuilder; 069import org.apache.hadoop.hbase.ServerName; 070import org.apache.hadoop.hbase.TableExistsException; 071import org.apache.hadoop.hbase.TableName; 072import org.apache.hadoop.hbase.TableNotDisabledException; 073import org.apache.hadoop.hbase.TableNotFoundException; 074import org.apache.hadoop.hbase.UnknownRegionException; 075import org.apache.hadoop.hbase.ZooKeeperConnectionException; 076import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; 077import 
org.apache.hadoop.hbase.client.replication.TableCFs; 078import org.apache.hadoop.hbase.client.security.SecurityCapability; 079import org.apache.hadoop.hbase.exceptions.TimeoutIOException; 080import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; 081import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; 082import org.apache.hadoop.hbase.ipc.HBaseRpcController; 083import org.apache.hadoop.hbase.ipc.RpcControllerFactory; 084import org.apache.hadoop.hbase.quotas.QuotaFilter; 085import org.apache.hadoop.hbase.quotas.QuotaRetriever; 086import org.apache.hadoop.hbase.quotas.QuotaSettings; 087import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot; 088import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; 089import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; 090import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; 091import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest; 092import org.apache.hadoop.hbase.security.access.Permission; 093import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil; 094import org.apache.hadoop.hbase.security.access.UserPermission; 095import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; 096import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; 097import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; 098import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; 099import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException; 100import org.apache.hadoop.hbase.util.Addressing; 101import org.apache.hadoop.hbase.util.Bytes; 102import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; 103import org.apache.hadoop.hbase.util.ForeignExceptionUtil; 104import org.apache.hadoop.hbase.util.Pair; 105import org.apache.hadoop.ipc.RemoteException; 106import org.apache.hadoop.util.StringUtils; 107import org.apache.yetus.audience.InterfaceAudience; 108import org.apache.yetus.audience.InterfaceStability; 109import org.slf4j.Logger; 110import org.slf4j.LoggerFactory; 111 112import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; 113import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; 114import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; 115import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; 116 117import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; 118import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; 119import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos; 120import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest; 121import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest; 122import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest; 123import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; 124import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; 125import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest; 126import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest; 127import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse; 128import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; 129import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest; 130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse; 131import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; 132import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; 133import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; 134import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest; 135import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse; 136import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest; 137import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; 138import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; 139import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest; 140import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse; 141import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; 142import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; 143import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; 144import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; 145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; 146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; 147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; 148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; 149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; 150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; 151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest; 152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; 153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; 154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; 155import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; 156import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; 157import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; 158import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; 159import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; 160import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; 161import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; 162import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; 163import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; 164import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; 165import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; 166import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; 167import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; 
168import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; 169import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest; 170import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; 171import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest; 172import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse; 173import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; 174import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; 175import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; 176import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest; 177import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse; 178import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; 179import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; 180import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; 181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; 182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; 183import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest; 184import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse; 185import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest; 186import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; 187import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsRpcThrottleEnabledRequest; 188import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos 189 .IsSnapshotCleanupEnabledRequest; 190import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; 191import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; 192import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest; 193import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; 194import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesRequest; 195import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest; 196import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest; 197import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest; 198import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest; 199import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; 200import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse; 201import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest; 202import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse; 203import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest; 204import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse; 205import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest; 206import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse; 207import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest; 208import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; 209import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; 210import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest; 211import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest; 212import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; 213import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest; 214import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest; 215import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest; 216import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse; 217import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest; 218import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse; 219import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest; 220import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; 221import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; 222import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; 223import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse; 224import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse; 225import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes; 226import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse; 227import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot; 228import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; 229import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse; 230import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse; 231import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse; 232import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; 233import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse; 234import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse; 235import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; 236 237/** 238 * HBaseAdmin is no longer a client API. 
It is marked InterfaceAudience.Private indicating that 239 * this is an HBase-internal class as defined in 240 * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html 241 * There are no guarantees for backwards source / binary compatibility and methods or class can 242 * change or go away without deprecation. 243 * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing 244 * an HBaseAdmin directly. 245 * 246 * <p>Connection should be an <i>unmanaged</i> connection obtained via 247 * {@link ConnectionFactory#createConnection(Configuration)} 248 * 249 * @see ConnectionFactory 250 * @see Connection 251 * @see Admin 252 */ 253@InterfaceAudience.Private 254public class HBaseAdmin implements Admin { 255 private static final Logger LOG = LoggerFactory.getLogger(HBaseAdmin.class); 256 257 private ClusterConnection connection; 258 259 private final Configuration conf; 260 private final long pause; 261 private final int numRetries; 262 private final int syncWaitTimeout; 263 private boolean aborted; 264 private int operationTimeout; 265 private int rpcTimeout; 266 private int getProcedureTimeout; 267 268 private RpcRetryingCallerFactory rpcCallerFactory; 269 private RpcControllerFactory rpcControllerFactory; 270 271 private NonceGenerator ng; 272 273 @Override 274 public int getOperationTimeout() { 275 return operationTimeout; 276 } 277 278 @Override 279 public int getSyncWaitTimeout() { 280 return syncWaitTimeout; 281 } 282 283 HBaseAdmin(ClusterConnection connection) throws IOException { 284 this.conf = connection.getConfiguration(); 285 this.connection = connection; 286 287 // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time. 288 this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE, 289 HConstants.DEFAULT_HBASE_CLIENT_PAUSE); 290 this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 291 HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); 292 this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 293 HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); 294 this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 295 HConstants.DEFAULT_HBASE_RPC_TIMEOUT); 296 this.syncWaitTimeout = this.conf.getInt( 297 "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min 298 this.getProcedureTimeout = 299 this.conf.getInt("hbase.client.procedure.future.get.timeout.msec", 10 * 60000); // 10min 300 301 this.rpcCallerFactory = connection.getRpcRetryingCallerFactory(); 302 this.rpcControllerFactory = connection.getRpcControllerFactory(); 303 304 this.ng = this.connection.getNonceGenerator(); 305 } 306 307 @Override 308 public void abort(String why, Throwable e) { 309 // Currently does nothing but throw the passed message and exception 310 this.aborted = true; 311 throw new RuntimeException(why, e); 312 } 313 314 @Override 315 public boolean isAborted() { 316 return this.aborted; 317 } 318 319 @Override 320 public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning) 321 throws IOException { 322 return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout, 323 TimeUnit.MILLISECONDS); 324 } 325 326 @Override 327 public Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning) 328 throws IOException { 329 Boolean abortProcResponse = 330 executeCallable(new MasterCallable<AbortProcedureResponse>(getConnection(), 331 
getRpcControllerFactory()) { 332 @Override 333 protected AbortProcedureResponse rpcCall() throws Exception { 334 AbortProcedureRequest abortProcRequest = 335 AbortProcedureRequest.newBuilder().setProcId(procId).build(); 336 return master.abortProcedure(getRpcController(), abortProcRequest); 337 } 338 }).getIsProcedureAborted(); 339 return new AbortProcedureFuture(this, procId, abortProcResponse); 340 } 341 342 @Override 343 public List<TableDescriptor> listTableDescriptors() throws IOException { 344 return listTableDescriptors((Pattern)null, false); 345 } 346 347 @Override 348 public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables) 349 throws IOException { 350 return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(), 351 getRpcControllerFactory()) { 352 @Override 353 protected List<TableDescriptor> rpcCall() throws Exception { 354 GetTableDescriptorsRequest req = 355 RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables); 356 return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), 357 req)); 358 } 359 }); 360 } 361 362 @Override 363 public TableDescriptor getDescriptor(TableName tableName) 364 throws TableNotFoundException, IOException { 365 return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory, 366 operationTimeout, rpcTimeout); 367 } 368 369 @Override 370 public Future<Void> modifyTableAsync(TableDescriptor td) throws IOException { 371 ModifyTableResponse response = executeCallable( 372 new MasterCallable<ModifyTableResponse>(getConnection(), getRpcControllerFactory()) { 373 Long nonceGroup = ng.getNonceGroup(); 374 Long nonce = ng.newNonce(); 375 @Override 376 protected ModifyTableResponse rpcCall() throws Exception { 377 setPriority(td.getTableName()); 378 ModifyTableRequest request = RequestConverter.buildModifyTableRequest( 379 td.getTableName(), td, nonceGroup, nonce); 380 return master.modifyTable(getRpcController(), request); 381 } 382 }); 383 return new ModifyTableFuture(this, td.getTableName(), response); 384 } 385 386 @Override 387 public List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException { 388 return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(), 389 getRpcControllerFactory()) { 390 @Override 391 protected List<TableDescriptor> rpcCall() throws Exception { 392 return master.listTableDescriptorsByNamespace(getRpcController(), 393 ListTableDescriptorsByNamespaceRequest.newBuilder() 394 .setNamespaceName(Bytes.toString(name)).build()) 395 .getTableSchemaList() 396 .stream() 397 .map(ProtobufUtil::toTableDescriptor) 398 .collect(Collectors.toList()); 399 } 400 }); 401 } 402 403 @Override 404 public List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException { 405 return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(), 406 getRpcControllerFactory()) { 407 @Override 408 protected List<TableDescriptor> rpcCall() throws Exception { 409 GetTableDescriptorsRequest req = 410 RequestConverter.buildGetTableDescriptorsRequest(tableNames); 411 return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), 412 req)); 413 } 414 }); 415 } 416 417 @Override 418 public List<RegionInfo> getRegions(final ServerName sn) throws IOException { 419 AdminService.BlockingInterface admin = this.connection.getAdmin(sn); 420 // TODO: There is no timeout on this controller. Set one! 
421 HBaseRpcController controller = rpcControllerFactory.newController(); 422 return ProtobufUtil.getOnlineRegions(controller, admin); 423 } 424 425 @Override 426 public List<RegionInfo> getRegions(TableName tableName) throws IOException { 427 if (TableName.isMetaTableName(tableName)) { 428 return Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO); 429 } else { 430 return MetaTableAccessor.getTableRegions(connection, tableName, true); 431 } 432 } 433 434 private static class AbortProcedureFuture extends ProcedureFuture<Boolean> { 435 private boolean isAbortInProgress; 436 437 public AbortProcedureFuture( 438 final HBaseAdmin admin, 439 final Long procId, 440 final Boolean abortProcResponse) { 441 super(admin, procId); 442 this.isAbortInProgress = abortProcResponse; 443 } 444 445 @Override 446 public Boolean get(long timeout, TimeUnit unit) 447 throws InterruptedException, ExecutionException, TimeoutException { 448 if (!this.isAbortInProgress) { 449 return false; 450 } 451 super.get(timeout, unit); 452 return true; 453 } 454 } 455 456 /** @return Connection used by this object. */ 457 @Override 458 public Connection getConnection() { 459 return connection; 460 } 461 462 @Override 463 public boolean tableExists(final TableName tableName) throws IOException { 464 return executeCallable(new RpcRetryingCallable<Boolean>() { 465 @Override 466 protected Boolean rpcCall(int callTimeout) throws Exception { 467 return MetaTableAccessor.tableExists(connection, tableName); 468 } 469 }); 470 } 471 472 @Override 473 public HTableDescriptor[] listTables() throws IOException { 474 return listTables((Pattern)null, false); 475 } 476 477 @Override 478 public HTableDescriptor[] listTables(Pattern pattern) throws IOException { 479 return listTables(pattern, false); 480 } 481 482 @Override 483 public HTableDescriptor[] listTables(String regex) throws IOException { 484 return listTables(Pattern.compile(regex), false); 485 } 486 487 @Override 488 public HTableDescriptor[] listTables(final Pattern pattern, final boolean includeSysTables) 489 throws IOException { 490 return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection(), 491 getRpcControllerFactory()) { 492 @Override 493 protected HTableDescriptor[] rpcCall() throws Exception { 494 GetTableDescriptorsRequest req = 495 RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables); 496 return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), 497 req)).stream().map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new); 498 } 499 }); 500 } 501 502 @Override 503 public HTableDescriptor[] listTables(String regex, boolean includeSysTables) 504 throws IOException { 505 return listTables(Pattern.compile(regex), includeSysTables); 506 } 507 508 @Override 509 public TableName[] listTableNames() throws IOException { 510 return listTableNames((Pattern)null, false); 511 } 512 513 @Override 514 public TableName[] listTableNames(String regex) throws IOException { 515 return listTableNames(Pattern.compile(regex), false); 516 } 517 518 @Override 519 public TableName[] listTableNames(final Pattern pattern, final boolean includeSysTables) 520 throws IOException { 521 return executeCallable(new MasterCallable<TableName[]>(getConnection(), 522 getRpcControllerFactory()) { 523 @Override 524 protected TableName[] rpcCall() throws Exception { 525 GetTableNamesRequest req = 526 RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables); 527 return 
ProtobufUtil.getTableNameArray(master.getTableNames(getRpcController(), req) 528 .getTableNamesList()); 529 } 530 }); 531 } 532 533 @Override 534 public TableName[] listTableNames(final String regex, final boolean includeSysTables) 535 throws IOException { 536 return listTableNames(Pattern.compile(regex), includeSysTables); 537 } 538 539 @Override 540 public HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException { 541 return getHTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory, 542 operationTimeout, rpcTimeout); 543 } 544 545 static TableDescriptor getTableDescriptor(final TableName tableName, Connection connection, 546 RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory, 547 int operationTimeout, int rpcTimeout) throws IOException { 548 if (tableName == null) return null; 549 TableDescriptor td = 550 executeCallable(new MasterCallable<TableDescriptor>(connection, rpcControllerFactory) { 551 @Override 552 protected TableDescriptor rpcCall() throws Exception { 553 GetTableDescriptorsRequest req = 554 RequestConverter.buildGetTableDescriptorsRequest(tableName); 555 GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); 556 if (!htds.getTableSchemaList().isEmpty()) { 557 return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)); 558 } 559 return null; 560 } 561 }, rpcCallerFactory, operationTimeout, rpcTimeout); 562 if (td != null) { 563 return td; 564 } 565 throw new TableNotFoundException(tableName.getNameAsString()); 566 } 567 568 /** 569 * @deprecated since 2.0 version and will be removed in 3.0 version. 570 * use {@link #getTableDescriptor(TableName, 571 * Connection, RpcRetryingCallerFactory,RpcControllerFactory,int,int)} 572 */ 573 @Deprecated 574 static HTableDescriptor getHTableDescriptor(final TableName tableName, Connection connection, 575 RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory, 576 int operationTimeout, int rpcTimeout) throws IOException { 577 if (tableName == null) { 578 return null; 579 } 580 HTableDescriptor htd = 581 executeCallable(new MasterCallable<HTableDescriptor>(connection, rpcControllerFactory) { 582 @Override 583 protected HTableDescriptor rpcCall() throws Exception { 584 GetTableDescriptorsRequest req = 585 RequestConverter.buildGetTableDescriptorsRequest(tableName); 586 GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); 587 if (!htds.getTableSchemaList().isEmpty()) { 588 return new ImmutableHTableDescriptor( 589 ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0))); 590 } 591 return null; 592 } 593 }, rpcCallerFactory, operationTimeout, rpcTimeout); 594 if (htd != null) { 595 return new ImmutableHTableDescriptor(htd); 596 } 597 throw new TableNotFoundException(tableName.getNameAsString()); 598 } 599 600 private long getPauseTime(int tries) { 601 int triesCount = tries; 602 if (triesCount >= HConstants.RETRY_BACKOFF.length) { 603 triesCount = HConstants.RETRY_BACKOFF.length - 1; 604 } 605 return this.pause * HConstants.RETRY_BACKOFF[triesCount]; 606 } 607 608 @Override 609 public void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) 610 throws IOException { 611 if (numRegions < 3) { 612 throw new IllegalArgumentException("Must create at least three regions"); 613 } else if (Bytes.compareTo(startKey, endKey) >= 0) { 614 throw new IllegalArgumentException("Start key must be smaller than end 
key"); 615 } 616 if (numRegions == 3) { 617 createTable(desc, new byte[][] { startKey, endKey }); 618 return; 619 } 620 byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3); 621 if (splitKeys == null || splitKeys.length != numRegions - 1) { 622 throw new IllegalArgumentException("Unable to split key range into enough regions"); 623 } 624 createTable(desc, splitKeys); 625 } 626 627 @Override 628 public Future<Void> createTableAsync(final TableDescriptor desc, final byte[][] splitKeys) 629 throws IOException { 630 if (desc.getTableName() == null) { 631 throw new IllegalArgumentException("TableName cannot be null"); 632 } 633 if (splitKeys != null && splitKeys.length > 0) { 634 Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR); 635 // Verify there are no duplicate split keys 636 byte[] lastKey = null; 637 for (byte[] splitKey : splitKeys) { 638 if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) { 639 throw new IllegalArgumentException( 640 "Empty split key must not be passed in the split keys."); 641 } 642 if (lastKey != null && Bytes.equals(splitKey, lastKey)) { 643 throw new IllegalArgumentException("All split keys must be unique, " + 644 "found duplicate: " + Bytes.toStringBinary(splitKey) + 645 ", " + Bytes.toStringBinary(lastKey)); 646 } 647 lastKey = splitKey; 648 } 649 } 650 651 CreateTableResponse response = executeCallable( 652 new MasterCallable<CreateTableResponse>(getConnection(), getRpcControllerFactory()) { 653 Long nonceGroup = ng.getNonceGroup(); 654 Long nonce = ng.newNonce(); 655 @Override 656 protected CreateTableResponse rpcCall() throws Exception { 657 setPriority(desc.getTableName()); 658 CreateTableRequest request = RequestConverter.buildCreateTableRequest( 659 desc, splitKeys, nonceGroup, nonce); 660 return master.createTable(getRpcController(), request); 661 } 662 }); 663 return new CreateTableFuture(this, desc, splitKeys, response); 664 } 665 666 private static class CreateTableFuture extends TableFuture<Void> { 667 private final TableDescriptor desc; 668 private final byte[][] splitKeys; 669 670 public CreateTableFuture(final HBaseAdmin admin, final TableDescriptor desc, 671 final byte[][] splitKeys, final CreateTableResponse response) { 672 super(admin, desc.getTableName(), 673 (response != null && response.hasProcId()) ? 
response.getProcId() : null); 674 this.splitKeys = splitKeys; 675 this.desc = desc; 676 } 677 678 @Override 679 protected TableDescriptor getTableDescriptor() { 680 return desc; 681 } 682 683 @Override 684 public String getOperationType() { 685 return "CREATE"; 686 } 687 688 @Override 689 protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException { 690 waitForTableEnabled(deadlineTs); 691 waitForAllRegionsOnline(deadlineTs, splitKeys); 692 return null; 693 } 694 } 695 696 @Override 697 public Future<Void> deleteTableAsync(final TableName tableName) throws IOException { 698 DeleteTableResponse response = executeCallable( 699 new MasterCallable<DeleteTableResponse>(getConnection(), getRpcControllerFactory()) { 700 Long nonceGroup = ng.getNonceGroup(); 701 Long nonce = ng.newNonce(); 702 @Override 703 protected DeleteTableResponse rpcCall() throws Exception { 704 setPriority(tableName); 705 DeleteTableRequest req = 706 RequestConverter.buildDeleteTableRequest(tableName, nonceGroup,nonce); 707 return master.deleteTable(getRpcController(), req); 708 } 709 }); 710 return new DeleteTableFuture(this, tableName, response); 711 } 712 713 private static class DeleteTableFuture extends TableFuture<Void> { 714 public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName, 715 final DeleteTableResponse response) { 716 super(admin, tableName, 717 (response != null && response.hasProcId()) ? response.getProcId() : null); 718 } 719 720 @Override 721 public String getOperationType() { 722 return "DELETE"; 723 } 724 725 @Override 726 protected Void waitOperationResult(final long deadlineTs) 727 throws IOException, TimeoutException { 728 waitTableNotFound(deadlineTs); 729 return null; 730 } 731 732 @Override 733 protected Void postOperationResult(final Void result, final long deadlineTs) 734 throws IOException, TimeoutException { 735 // Delete cached information to prevent clients from using old locations 736 ((ClusterConnection) getAdmin().getConnection()).clearRegionCache(getTableName()); 737 return super.postOperationResult(result, deadlineTs); 738 } 739 } 740 741 @Override 742 public HTableDescriptor[] deleteTables(String regex) throws IOException { 743 return deleteTables(Pattern.compile(regex)); 744 } 745 746 /** 747 * Delete tables matching the passed in pattern and wait on completion. 748 * 749 * Warning: Use this method carefully, there is no prompting and the effect is 750 * immediate. 
Consider using {@link #listTables(java.util.regex.Pattern) } and 751 * {@link #deleteTable(TableName)} 752 * 753 * @param pattern The pattern to match table names against 754 * @return Table descriptors for tables that couldn't be deleted 755 * @throws IOException if a remote or network exception occurs 756 */ 757 @Override 758 public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException { 759 List<HTableDescriptor> failed = new LinkedList<>(); 760 for (HTableDescriptor table : listTables(pattern)) { 761 try { 762 deleteTable(table.getTableName()); 763 } catch (IOException ex) { 764 LOG.info("Failed to delete table " + table.getTableName(), ex); 765 failed.add(table); 766 } 767 } 768 return failed.toArray(new HTableDescriptor[failed.size()]); 769 } 770 771 @Override 772 public Future<Void> truncateTableAsync(final TableName tableName, final boolean preserveSplits) 773 throws IOException { 774 TruncateTableResponse response = 775 executeCallable(new MasterCallable<TruncateTableResponse>(getConnection(), 776 getRpcControllerFactory()) { 777 Long nonceGroup = ng.getNonceGroup(); 778 Long nonce = ng.newNonce(); 779 @Override 780 protected TruncateTableResponse rpcCall() throws Exception { 781 setPriority(tableName); 782 LOG.info("Started truncating " + tableName); 783 TruncateTableRequest req = RequestConverter.buildTruncateTableRequest( 784 tableName, preserveSplits, nonceGroup, nonce); 785 return master.truncateTable(getRpcController(), req); 786 } 787 }); 788 return new TruncateTableFuture(this, tableName, preserveSplits, response); 789 } 790 791 private static class TruncateTableFuture extends TableFuture<Void> { 792 private final boolean preserveSplits; 793 794 public TruncateTableFuture(final HBaseAdmin admin, final TableName tableName, 795 final boolean preserveSplits, final TruncateTableResponse response) { 796 super(admin, tableName, 797 (response != null && response.hasProcId()) ? response.getProcId() : null); 798 this.preserveSplits = preserveSplits; 799 } 800 801 @Override 802 public String getOperationType() { 803 return "TRUNCATE"; 804 } 805 806 @Override 807 protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException { 808 waitForTableEnabled(deadlineTs); 809 // once the table is enabled, we know the operation is done. so we can fetch the splitKeys 810 byte[][] splitKeys = preserveSplits ? 
getAdmin().getTableSplits(getTableName()) : null; 811 waitForAllRegionsOnline(deadlineTs, splitKeys); 812 return null; 813 } 814 } 815 816 private byte[][] getTableSplits(final TableName tableName) throws IOException { 817 byte[][] splits = null; 818 try (RegionLocator locator = getConnection().getRegionLocator(tableName)) { 819 byte[][] startKeys = locator.getStartKeys(); 820 if (startKeys.length == 1) { 821 return splits; 822 } 823 splits = new byte[startKeys.length - 1][]; 824 for (int i = 1; i < startKeys.length; i++) { 825 splits[i - 1] = startKeys[i]; 826 } 827 } 828 return splits; 829 } 830 831 @Override 832 public Future<Void> enableTableAsync(final TableName tableName) throws IOException { 833 TableName.isLegalFullyQualifiedTableName(tableName.getName()); 834 EnableTableResponse response = executeCallable( 835 new MasterCallable<EnableTableResponse>(getConnection(), getRpcControllerFactory()) { 836 Long nonceGroup = ng.getNonceGroup(); 837 Long nonce = ng.newNonce(); 838 @Override 839 protected EnableTableResponse rpcCall() throws Exception { 840 setPriority(tableName); 841 LOG.info("Started enable of " + tableName); 842 EnableTableRequest req = 843 RequestConverter.buildEnableTableRequest(tableName, nonceGroup, nonce); 844 return master.enableTable(getRpcController(),req); 845 } 846 }); 847 return new EnableTableFuture(this, tableName, response); 848 } 849 850 private static class EnableTableFuture extends TableFuture<Void> { 851 public EnableTableFuture(final HBaseAdmin admin, final TableName tableName, 852 final EnableTableResponse response) { 853 super(admin, tableName, 854 (response != null && response.hasProcId()) ? response.getProcId() : null); 855 } 856 857 @Override 858 public String getOperationType() { 859 return "ENABLE"; 860 } 861 862 @Override 863 protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException { 864 waitForTableEnabled(deadlineTs); 865 return null; 866 } 867 } 868 869 @Override 870 public HTableDescriptor[] enableTables(String regex) throws IOException { 871 return enableTables(Pattern.compile(regex)); 872 } 873 874 @Override 875 public HTableDescriptor[] enableTables(Pattern pattern) throws IOException { 876 List<HTableDescriptor> failed = new LinkedList<>(); 877 for (HTableDescriptor table : listTables(pattern)) { 878 if (isTableDisabled(table.getTableName())) { 879 try { 880 enableTable(table.getTableName()); 881 } catch (IOException ex) { 882 LOG.info("Failed to enable table " + table.getTableName(), ex); 883 failed.add(table); 884 } 885 } 886 } 887 return failed.toArray(new HTableDescriptor[failed.size()]); 888 } 889 890 @Override 891 public Future<Void> disableTableAsync(final TableName tableName) throws IOException { 892 TableName.isLegalFullyQualifiedTableName(tableName.getName()); 893 DisableTableResponse response = executeCallable( 894 new MasterCallable<DisableTableResponse>(getConnection(), getRpcControllerFactory()) { 895 Long nonceGroup = ng.getNonceGroup(); 896 Long nonce = ng.newNonce(); 897 @Override 898 protected DisableTableResponse rpcCall() throws Exception { 899 setPriority(tableName); 900 LOG.info("Started disable of " + tableName); 901 DisableTableRequest req = 902 RequestConverter.buildDisableTableRequest( 903 tableName, nonceGroup, nonce); 904 return master.disableTable(getRpcController(), req); 905 } 906 }); 907 return new DisableTableFuture(this, tableName, response); 908 } 909 910 private static class DisableTableFuture extends TableFuture<Void> { 911 public DisableTableFuture(final 
HBaseAdmin admin, final TableName tableName, 912 final DisableTableResponse response) { 913 super(admin, tableName, 914 (response != null && response.hasProcId()) ? response.getProcId() : null); 915 } 916 917 @Override 918 public String getOperationType() { 919 return "DISABLE"; 920 } 921 922 @Override 923 protected Void waitOperationResult(long deadlineTs) throws IOException, TimeoutException { 924 waitForTableDisabled(deadlineTs); 925 return null; 926 } 927 } 928 929 @Override 930 public HTableDescriptor[] disableTables(String regex) throws IOException { 931 return disableTables(Pattern.compile(regex)); 932 } 933 934 @Override 935 public HTableDescriptor[] disableTables(Pattern pattern) throws IOException { 936 List<HTableDescriptor> failed = new LinkedList<>(); 937 for (HTableDescriptor table : listTables(pattern)) { 938 if (isTableEnabled(table.getTableName())) { 939 try { 940 disableTable(table.getTableName()); 941 } catch (IOException ex) { 942 LOG.info("Failed to disable table " + table.getTableName(), ex); 943 failed.add(table); 944 } 945 } 946 } 947 return failed.toArray(new HTableDescriptor[failed.size()]); 948 } 949 950 @Override 951 public boolean isTableEnabled(final TableName tableName) throws IOException { 952 checkTableExists(tableName); 953 return executeCallable(new RpcRetryingCallable<Boolean>() { 954 @Override 955 protected Boolean rpcCall(int callTimeout) throws Exception { 956 TableState tableState = MetaTableAccessor.getTableState(getConnection(), tableName); 957 if (tableState == null) { 958 throw new TableNotFoundException(tableName); 959 } 960 return tableState.inStates(TableState.State.ENABLED); 961 } 962 }); 963 } 964 965 @Override 966 public boolean isTableDisabled(TableName tableName) throws IOException { 967 checkTableExists(tableName); 968 return connection.isTableDisabled(tableName); 969 } 970 971 @Override 972 public boolean isTableAvailable(TableName tableName) throws IOException { 973 return connection.isTableAvailable(tableName, null); 974 } 975 976 @Override 977 public boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException { 978 return connection.isTableAvailable(tableName, splitKeys); 979 } 980 981 @Override 982 public Pair<Integer, Integer> getAlterStatus(final TableName tableName) throws IOException { 983 return executeCallable(new MasterCallable<Pair<Integer, Integer>>(getConnection(), 984 getRpcControllerFactory()) { 985 @Override 986 protected Pair<Integer, Integer> rpcCall() throws Exception { 987 setPriority(tableName); 988 GetSchemaAlterStatusRequest req = RequestConverter 989 .buildGetSchemaAlterStatusRequest(tableName); 990 GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(getRpcController(), req); 991 Pair<Integer, Integer> pair = new Pair<>(ret.getYetToUpdateRegions(), 992 ret.getTotalRegions()); 993 return pair; 994 } 995 }); 996 } 997 998 @Override 999 public Pair<Integer, Integer> getAlterStatus(final byte[] tableName) throws IOException { 1000 return getAlterStatus(TableName.valueOf(tableName)); 1001 } 1002 1003 @Override 1004 public Future<Void> addColumnFamilyAsync(final TableName tableName, 1005 final ColumnFamilyDescriptor columnFamily) throws IOException { 1006 AddColumnResponse response = 1007 executeCallable(new MasterCallable<AddColumnResponse>(getConnection(), 1008 getRpcControllerFactory()) { 1009 Long nonceGroup = ng.getNonceGroup(); 1010 Long nonce = ng.newNonce(); 1011 @Override 1012 protected AddColumnResponse rpcCall() throws Exception { 1013 setPriority(tableName); 1014 
AddColumnRequest req = 1015 RequestConverter.buildAddColumnRequest(tableName, columnFamily, nonceGroup, nonce); 1016 return master.addColumn(getRpcController(), req); 1017 } 1018 }); 1019 return new AddColumnFamilyFuture(this, tableName, response); 1020 } 1021 1022 private static class AddColumnFamilyFuture extends ModifyTableFuture { 1023 public AddColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName, 1024 final AddColumnResponse response) { 1025 super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId() 1026 : null); 1027 } 1028 1029 @Override 1030 public String getOperationType() { 1031 return "ADD_COLUMN_FAMILY"; 1032 } 1033 } 1034 1035 /** 1036 * {@inheritDoc} 1037 * @deprecated Since 2.0. Will be removed in 3.0. Use 1038 * {@link #deleteColumnFamily(TableName, byte[])} instead. 1039 */ 1040 @Override 1041 @Deprecated 1042 public void deleteColumn(final TableName tableName, final byte[] columnFamily) 1043 throws IOException { 1044 deleteColumnFamily(tableName, columnFamily); 1045 } 1046 1047 @Override 1048 public Future<Void> deleteColumnFamilyAsync(final TableName tableName, final byte[] columnFamily) 1049 throws IOException { 1050 DeleteColumnResponse response = 1051 executeCallable(new MasterCallable<DeleteColumnResponse>(getConnection(), 1052 getRpcControllerFactory()) { 1053 Long nonceGroup = ng.getNonceGroup(); 1054 Long nonce = ng.newNonce(); 1055 @Override 1056 protected DeleteColumnResponse rpcCall() throws Exception { 1057 setPriority(tableName); 1058 DeleteColumnRequest req = 1059 RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, 1060 nonceGroup, nonce); 1061 return master.deleteColumn(getRpcController(), req); 1062 } 1063 }); 1064 return new DeleteColumnFamilyFuture(this, tableName, response); 1065 } 1066 1067 private static class DeleteColumnFamilyFuture extends ModifyTableFuture { 1068 public DeleteColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName, 1069 final DeleteColumnResponse response) { 1070 super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId() 1071 : null); 1072 } 1073 1074 @Override 1075 public String getOperationType() { 1076 return "DELETE_COLUMN_FAMILY"; 1077 } 1078 } 1079 1080 @Override 1081 public Future<Void> modifyColumnFamilyAsync(final TableName tableName, 1082 final ColumnFamilyDescriptor columnFamily) throws IOException { 1083 ModifyColumnResponse response = 1084 executeCallable(new MasterCallable<ModifyColumnResponse>(getConnection(), 1085 getRpcControllerFactory()) { 1086 Long nonceGroup = ng.getNonceGroup(); 1087 Long nonce = ng.newNonce(); 1088 @Override 1089 protected ModifyColumnResponse rpcCall() throws Exception { 1090 setPriority(tableName); 1091 ModifyColumnRequest req = 1092 RequestConverter.buildModifyColumnRequest(tableName, columnFamily, 1093 nonceGroup, nonce); 1094 return master.modifyColumn(getRpcController(), req); 1095 } 1096 }); 1097 return new ModifyColumnFamilyFuture(this, tableName, response); 1098 } 1099 1100 private static class ModifyColumnFamilyFuture extends ModifyTableFuture { 1101 public ModifyColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName, 1102 final ModifyColumnResponse response) { 1103 super(admin, tableName, (response != null && response.hasProcId()) ? 
response.getProcId() 1104 : null); 1105 } 1106 1107 @Override 1108 public String getOperationType() { 1109 return "MODIFY_COLUMN_FAMILY"; 1110 } 1111 } 1112 1113 @Deprecated 1114 @Override 1115 public void closeRegion(final String regionName, final String unused) throws IOException { 1116 unassign(Bytes.toBytes(regionName), true); 1117 } 1118 1119 @Deprecated 1120 @Override 1121 public void closeRegion(final byte [] regionName, final String unused) throws IOException { 1122 unassign(regionName, true); 1123 } 1124 1125 @Deprecated 1126 @Override 1127 public boolean closeRegionWithEncodedRegionName(final String encodedRegionName, 1128 final String unused) throws IOException { 1129 unassign(Bytes.toBytes(encodedRegionName), true); 1130 return true; 1131 } 1132 1133 @Deprecated 1134 @Override 1135 public void closeRegion(final ServerName unused, final HRegionInfo hri) throws IOException { 1136 unassign(hri.getRegionName(), true); 1137 } 1138 1139 /** 1140 * @param sn 1141 * @return List of {@link HRegionInfo}. 1142 * @throws IOException if a remote or network exception occurs 1143 * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 1144 * Use {@link #getRegions(ServerName)}. 1145 */ 1146 @Deprecated 1147 @Override 1148 public List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException { 1149 return getRegions(sn).stream().map(ImmutableHRegionInfo::new).collect(Collectors.toList()); 1150 } 1151 1152 @Override 1153 public void flush(final TableName tableName) throws IOException { 1154 checkTableExists(tableName); 1155 if (isTableDisabled(tableName)) { 1156 LOG.info("Table is disabled: " + tableName.getNameAsString()); 1157 return; 1158 } 1159 execProcedure("flush-table-proc", tableName.getNameAsString(), new HashMap<>()); 1160 } 1161 1162 @Override 1163 public void flushRegion(final byte[] regionName) throws IOException { 1164 Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName); 1165 if (regionServerPair == null) { 1166 throw new IllegalArgumentException("Unknown regionname: " + Bytes.toStringBinary(regionName)); 1167 } 1168 if (regionServerPair.getSecond() == null) { 1169 throw new NoServerForRegionException(Bytes.toStringBinary(regionName)); 1170 } 1171 final RegionInfo regionInfo = regionServerPair.getFirst(); 1172 ServerName serverName = regionServerPair.getSecond(); 1173 flush(this.connection.getAdmin(serverName), regionInfo); 1174 } 1175 1176 private void flush(AdminService.BlockingInterface admin, final RegionInfo info) 1177 throws IOException { 1178 ProtobufUtil.call(() -> { 1179 // TODO: There is no timeout on this controller. Set one! 
1180 HBaseRpcController controller = rpcControllerFactory.newController(); 1181 FlushRegionRequest request = 1182 RequestConverter.buildFlushRegionRequest(info.getRegionName()); 1183 admin.flushRegion(controller, request); 1184 return null; 1185 }); 1186 } 1187 1188 @Override 1189 public void flushRegionServer(ServerName serverName) throws IOException { 1190 for (RegionInfo region : getRegions(serverName)) { 1191 flush(this.connection.getAdmin(serverName), region); 1192 } 1193 } 1194 1195 /** 1196 * {@inheritDoc} 1197 */ 1198 @Override 1199 public void compact(final TableName tableName) 1200 throws IOException { 1201 compact(tableName, null, false, CompactType.NORMAL); 1202 } 1203 1204 @Override 1205 public void compactRegion(final byte[] regionName) 1206 throws IOException { 1207 compactRegion(regionName, null, false); 1208 } 1209 1210 /** 1211 * {@inheritDoc} 1212 */ 1213 @Override 1214 public void compact(final TableName tableName, final byte[] columnFamily) 1215 throws IOException { 1216 compact(tableName, columnFamily, false, CompactType.NORMAL); 1217 } 1218 1219 /** 1220 * {@inheritDoc} 1221 */ 1222 @Override 1223 public void compactRegion(final byte[] regionName, final byte[] columnFamily) 1224 throws IOException { 1225 compactRegion(regionName, columnFamily, false); 1226 } 1227 1228 @Override 1229 public Map<ServerName, Boolean> compactionSwitch(boolean switchState, List<String> 1230 serverNamesList) throws IOException { 1231 List<ServerName> serverList = new ArrayList<>(); 1232 if (serverNamesList.isEmpty()) { 1233 ClusterMetrics status = getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)); 1234 serverList.addAll(status.getLiveServerMetrics().keySet()); 1235 } else { 1236 for (String regionServerName: serverNamesList) { 1237 ServerName serverName = null; 1238 try { 1239 serverName = ServerName.valueOf(regionServerName); 1240 } catch (Exception e) { 1241 throw new IllegalArgumentException(String.format("Invalid ServerName format: %s", 1242 regionServerName)); 1243 } 1244 if (serverName == null) { 1245 throw new IllegalArgumentException(String.format("Null ServerName: %s", 1246 regionServerName)); 1247 } 1248 serverList.add(serverName); 1249 } 1250 } 1251 Map<ServerName, Boolean> res = new HashMap<>(serverList.size()); 1252 for (ServerName serverName: serverList) { 1253 boolean prev_state = switchCompact(this.connection.getAdmin(serverName), switchState); 1254 res.put(serverName, prev_state); 1255 } 1256 return res; 1257 } 1258 1259 private Boolean switchCompact(AdminService.BlockingInterface admin, boolean onOrOff) 1260 throws IOException { 1261 return executeCallable(new RpcRetryingCallable<Boolean>() { 1262 @Override protected Boolean rpcCall(int callTimeout) throws Exception { 1263 HBaseRpcController controller = rpcControllerFactory.newController(); 1264 CompactionSwitchRequest request = 1265 CompactionSwitchRequest.newBuilder().setEnabled(onOrOff).build(); 1266 CompactionSwitchResponse compactionSwitchResponse = 1267 admin.compactionSwitch(controller, request); 1268 return compactionSwitchResponse.getPrevState(); 1269 } 1270 }); 1271 } 1272 1273 @Override 1274 public void compactRegionServer(final ServerName serverName) throws IOException { 1275 for (RegionInfo region : getRegions(serverName)) { 1276 compact(this.connection.getAdmin(serverName), region, false, null); 1277 } 1278 } 1279 1280 @Override 1281 public void majorCompactRegionServer(final ServerName serverName) throws IOException { 1282 for (RegionInfo region : getRegions(serverName)) { 1283 
compact(this.connection.getAdmin(serverName), region, true, null); 1284 } 1285 } 1286 1287 @Override 1288 public void majorCompact(final TableName tableName) 1289 throws IOException { 1290 compact(tableName, null, true, CompactType.NORMAL); 1291 } 1292 1293 @Override 1294 public void majorCompactRegion(final byte[] regionName) 1295 throws IOException { 1296 compactRegion(regionName, null, true); 1297 } 1298 1299 /** 1300 * {@inheritDoc} 1301 */ 1302 @Override 1303 public void majorCompact(final TableName tableName, final byte[] columnFamily) 1304 throws IOException { 1305 compact(tableName, columnFamily, true, CompactType.NORMAL); 1306 } 1307 1308 @Override 1309 public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily) 1310 throws IOException { 1311 compactRegion(regionName, columnFamily, true); 1312 } 1313 1314 /** 1315 * Compact a table. 1316 * Asynchronous operation. 1317 * 1318 * @param tableName table or region to compact 1319 * @param columnFamily column family within a table or region 1320 * @param major True if we are to do a major compaction. 1321 * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} 1322 * @throws IOException if a remote or network exception occurs 1323 */ 1324 private void compact(final TableName tableName, final byte[] columnFamily,final boolean major, 1325 CompactType compactType) throws IOException { 1326 switch (compactType) { 1327 case MOB: 1328 compact(this.connection.getAdminForMaster(), RegionInfo.createMobRegionInfo(tableName), 1329 major, columnFamily); 1330 break; 1331 case NORMAL: 1332 checkTableExists(tableName); 1333 for (HRegionLocation loc :connection.locateRegions(tableName, false, false)) { 1334 ServerName sn = loc.getServerName(); 1335 if (sn == null) { 1336 continue; 1337 } 1338 try { 1339 compact(this.connection.getAdmin(sn), loc.getRegion(), major, columnFamily); 1340 } catch (NotServingRegionException e) { 1341 if (LOG.isDebugEnabled()) { 1342 LOG.debug("Trying to" + (major ? " major" : "") + " compact " + loc.getRegion() + 1343 ": " + StringUtils.stringifyException(e)); 1344 } 1345 } 1346 } 1347 break; 1348 default: 1349 throw new IllegalArgumentException("Unknown compactType: " + compactType); 1350 } 1351 } 1352 1353 /** 1354 * Compact an individual region. 1355 * Asynchronous operation. 1356 * 1357 * @param regionName region to compact 1358 * @param columnFamily column family within a table or region 1359 * @param major True if we are to do a major compaction. 1360 * @throws IOException if a remote or network exception occurs 1361 * @throws InterruptedException 1362 */ 1363 private void compactRegion(final byte[] regionName, final byte[] columnFamily, 1364 final boolean major) throws IOException { 1365 Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName); 1366 if (regionServerPair == null) { 1367 throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName)); 1368 } 1369 if (regionServerPair.getSecond() == null) { 1370 throw new NoServerForRegionException(Bytes.toStringBinary(regionName)); 1371 } 1372 compact(this.connection.getAdmin(regionServerPair.getSecond()), regionServerPair.getFirst(), 1373 major, columnFamily); 1374 } 1375 1376 private void compact(AdminService.BlockingInterface admin, RegionInfo hri, boolean major, 1377 byte[] family) throws IOException { 1378 Callable<Void> callable = new Callable<Void>() { 1379 @Override 1380 public Void call() throws Exception { 1381 // TODO: There is no timeout on this controller. Set one! 
1382 HBaseRpcController controller = rpcControllerFactory.newController(); 1383 CompactRegionRequest request = 1384 RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family); 1385 admin.compactRegion(controller, request); 1386 return null; 1387 } 1388 }; 1389 ProtobufUtil.call(callable); 1390 } 1391 1392 @Override 1393 public void move(byte[] encodedRegionName) throws IOException { 1394 move(encodedRegionName, (ServerName) null); 1395 } 1396 1397 public void move(final byte[] encodedRegionName, ServerName destServerName) throws IOException { 1398 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 1399 @Override 1400 protected Void rpcCall() throws Exception { 1401 setPriority(encodedRegionName); 1402 MoveRegionRequest request = 1403 RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName); 1404 master.moveRegion(getRpcController(), request); 1405 return null; 1406 } 1407 }); 1408 } 1409 1410 @Override 1411 public void assign(final byte [] regionName) throws MasterNotRunningException, 1412 ZooKeeperConnectionException, IOException { 1413 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 1414 @Override 1415 protected Void rpcCall() throws Exception { 1416 setPriority(regionName); 1417 AssignRegionRequest request = 1418 RequestConverter.buildAssignRegionRequest(getRegionName(regionName)); 1419 master.assignRegion(getRpcController(), request); 1420 return null; 1421 } 1422 }); 1423 } 1424 1425 @Override 1426 public void unassign(final byte [] regionName, final boolean force) throws IOException { 1427 final byte[] toBeUnassigned = getRegionName(regionName); 1428 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 1429 @Override 1430 protected Void rpcCall() throws Exception { 1431 setPriority(regionName); 1432 UnassignRegionRequest request = 1433 RequestConverter.buildUnassignRegionRequest(toBeUnassigned, force); 1434 master.unassignRegion(getRpcController(), request); 1435 return null; 1436 } 1437 }); 1438 } 1439 1440 @Override 1441 public void offline(final byte [] regionName) 1442 throws IOException { 1443 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 1444 @Override 1445 protected Void rpcCall() throws Exception { 1446 setPriority(regionName); 1447 master.offlineRegion(getRpcController(), 1448 RequestConverter.buildOfflineRegionRequest(regionName)); 1449 return null; 1450 } 1451 }); 1452 } 1453 1454 @Override 1455 public boolean balancerSwitch(final boolean on, final boolean synchronous) 1456 throws IOException { 1457 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1458 @Override 1459 protected Boolean rpcCall() throws Exception { 1460 SetBalancerRunningRequest req = 1461 RequestConverter.buildSetBalancerRunningRequest(on, synchronous); 1462 return master.setBalancerRunning(getRpcController(), req).getPrevBalanceValue(); 1463 } 1464 }); 1465 } 1466 1467 @Override 1468 public boolean balance() throws IOException { 1469 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1470 @Override 1471 protected Boolean rpcCall() throws Exception { 1472 return master.balance(getRpcController(), 1473 RequestConverter.buildBalanceRequest(false)).getBalancerRan(); 1474 } 1475 }); 1476 } 1477 1478 @Override 1479 public boolean balance(final boolean force) throws IOException { 1480 return executeCallable(new 
MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1481 @Override 1482 protected Boolean rpcCall() throws Exception { 1483 return master.balance(getRpcController(), 1484 RequestConverter.buildBalanceRequest(force)).getBalancerRan(); 1485 } 1486 }); 1487 } 1488 1489 @Override 1490 public boolean isBalancerEnabled() throws IOException { 1491 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1492 @Override 1493 protected Boolean rpcCall() throws Exception { 1494 return master.isBalancerEnabled(getRpcController(), 1495 RequestConverter.buildIsBalancerEnabledRequest()).getEnabled(); 1496 } 1497 }); 1498 } 1499 1500 /** 1501 * {@inheritDoc} 1502 */ 1503 @Override 1504 public CacheEvictionStats clearBlockCache(final TableName tableName) throws IOException { 1505 checkTableExists(tableName); 1506 CacheEvictionStatsBuilder cacheEvictionStats = CacheEvictionStats.builder(); 1507 List<Pair<RegionInfo, ServerName>> pairs = 1508 MetaTableAccessor.getTableRegionsAndLocations(connection, tableName); 1509 Map<ServerName, List<RegionInfo>> regionInfoByServerName = 1510 pairs.stream() 1511 .filter(pair -> !(pair.getFirst().isOffline())) 1512 .filter(pair -> pair.getSecond() != null) 1513 .collect(Collectors.groupingBy(pair -> pair.getSecond(), 1514 Collectors.mapping(pair -> pair.getFirst(), Collectors.toList()))); 1515 1516 for (Map.Entry<ServerName, List<RegionInfo>> entry : regionInfoByServerName.entrySet()) { 1517 CacheEvictionStats stats = clearBlockCache(entry.getKey(), entry.getValue()); 1518 cacheEvictionStats = cacheEvictionStats.append(stats); 1519 if (stats.getExceptionCount() > 0) { 1520 for (Map.Entry<byte[], Throwable> exception : stats.getExceptions().entrySet()) { 1521 LOG.debug("Failed to clear block cache for " 1522 + Bytes.toStringBinary(exception.getKey()) 1523 + " on " + entry.getKey() + ": ", exception.getValue()); 1524 } 1525 } 1526 } 1527 return cacheEvictionStats.build(); 1528 } 1529 1530 private CacheEvictionStats clearBlockCache(final ServerName sn, final List<RegionInfo> hris) 1531 throws IOException { 1532 HBaseRpcController controller = rpcControllerFactory.newController(); 1533 AdminService.BlockingInterface admin = this.connection.getAdmin(sn); 1534 ClearRegionBlockCacheRequest request = 1535 RequestConverter.buildClearRegionBlockCacheRequest(hris); 1536 ClearRegionBlockCacheResponse response; 1537 try { 1538 response = admin.clearRegionBlockCache(controller, request); 1539 return ProtobufUtil.toCacheEvictionStats(response.getStats()); 1540 } catch (ServiceException se) { 1541 throw ProtobufUtil.getRemoteException(se); 1542 } 1543 } 1544 1545 /** 1546 * Invoke region normalizer. Can NOT run for various reasons. Check logs. 1547 * 1548 * @return True if region normalizer ran, false otherwise. 
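 * <p>Illustrative usage (a sketch, not code from this file; {@code admin} stands for any
 * {@link Admin} handle):
 * <pre>{@code
 * boolean ran = admin.normalize(); // asks the active master to run one normalizer pass
 * }</pre>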
1549 */ 1550 @Override 1551 public boolean normalize() throws IOException { 1552 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1553 @Override 1554 protected Boolean rpcCall() throws Exception { 1555 return master.normalize(getRpcController(), 1556 RequestConverter.buildNormalizeRequest()).getNormalizerRan(); 1557 } 1558 }); 1559 } 1560 1561 @Override 1562 public boolean isNormalizerEnabled() throws IOException { 1563 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1564 @Override 1565 protected Boolean rpcCall() throws Exception { 1566 return master.isNormalizerEnabled(getRpcController(), 1567 RequestConverter.buildIsNormalizerEnabledRequest()).getEnabled(); 1568 } 1569 }); 1570 } 1571 1572 @Override 1573 public boolean normalizerSwitch(final boolean on) throws IOException { 1574 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1575 @Override 1576 protected Boolean rpcCall() throws Exception { 1577 SetNormalizerRunningRequest req = 1578 RequestConverter.buildSetNormalizerRunningRequest(on); 1579 return master.setNormalizerRunning(getRpcController(), req).getPrevNormalizerValue(); 1580 } 1581 }); 1582 } 1583 1584 @Override 1585 public boolean catalogJanitorSwitch(final boolean enable) throws IOException { 1586 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1587 @Override 1588 protected Boolean rpcCall() throws Exception { 1589 return master.enableCatalogJanitor(getRpcController(), 1590 RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue(); 1591 } 1592 }); 1593 } 1594 1595 @Override 1596 public int runCatalogJanitor() throws IOException { 1597 return executeCallable(new MasterCallable<Integer>(getConnection(), getRpcControllerFactory()) { 1598 @Override 1599 protected Integer rpcCall() throws Exception { 1600 return master.runCatalogScan(getRpcController(), 1601 RequestConverter.buildCatalogScanRequest()).getScanResult(); 1602 } 1603 }); 1604 } 1605 1606 @Override 1607 public boolean isCatalogJanitorEnabled() throws IOException { 1608 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1609 @Override 1610 protected Boolean rpcCall() throws Exception { 1611 return master.isCatalogJanitorEnabled(getRpcController(), 1612 RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue(); 1613 } 1614 }); 1615 } 1616 1617 @Override 1618 public boolean cleanerChoreSwitch(final boolean on) throws IOException { 1619 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1620 @Override public Boolean rpcCall() throws Exception { 1621 return master.setCleanerChoreRunning(getRpcController(), 1622 RequestConverter.buildSetCleanerChoreRunningRequest(on)).getPrevValue(); 1623 } 1624 }); 1625 } 1626 1627 @Override 1628 public boolean runCleanerChore() throws IOException { 1629 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1630 @Override public Boolean rpcCall() throws Exception { 1631 return master.runCleanerChore(getRpcController(), 1632 RequestConverter.buildRunCleanerChoreRequest()).getCleanerChoreRan(); 1633 } 1634 }); 1635 } 1636 1637 @Override 1638 public boolean isCleanerChoreEnabled() throws IOException { 1639 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 1640 @Override public Boolean rpcCall() 
throws Exception {
1641 return master.isCleanerChoreEnabled(getRpcController(),
1642 RequestConverter.buildIsCleanerChoreEnabledRequest()).getValue();
1643 }
1644 });
1645 }
1646
1647 /**
1648 * Merge two regions. Synchronous operation.
1649 * Note: it is not possible to predict how long a merge will take, so
1650 * this blocking variant is for internal testing only.
1651 * @param nameOfRegionA encoded or full name of region a
1652 * @param nameOfRegionB encoded or full name of region b
1653 * @param forcible true to force the merge even if the regions are not adjacent;
1654 * otherwise only two adjacent regions will be merged
1655 * @throws IOException if a remote or network exception occurs
1656 */
1657 @VisibleForTesting
1658 public void mergeRegionsSync(
1659 final byte[] nameOfRegionA,
1660 final byte[] nameOfRegionB,
1661 final boolean forcible) throws IOException {
1662 get(
1663 mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible),
1664 syncWaitTimeout,
1665 TimeUnit.MILLISECONDS);
1666 }
1667
1668 /**
1669 * Merge two regions. Asynchronous operation.
1670 * @param nameOfRegionA encoded or full name of region a
1671 * @param nameOfRegionB encoded or full name of region b
1672 * @param forcible true to force the merge even if the regions are not adjacent;
1673 * otherwise only two adjacent regions will be merged
1674 * @throws IOException if a remote or network exception occurs
1675 * @deprecated Since 2.0. Will be removed in 3.0. Use
1676 * {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead.
1677 */
1678 @Deprecated
1679 @Override
1680 public void mergeRegions(final byte[] nameOfRegionA,
1681 final byte[] nameOfRegionB, final boolean forcible)
1682 throws IOException {
1683 mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible);
1684 }
1685
1686 /**
1687 * Merge two regions. Asynchronous operation.
1688 * @param nameofRegionsToMerge encoded or full names of the regions to merge
1689 * @param forcible true to force the merge even if the regions are not adjacent;
1690 * otherwise only adjacent regions will be merged
1691 */
1692 @Override
1693 public Future<Void> mergeRegionsAsync(final byte[][] nameofRegionsToMerge, final boolean forcible)
1694 throws IOException {
1695 Preconditions.checkArgument(nameofRegionsToMerge.length >= 2, "Cannot merge only %s region; at least two regions are required",
1696 nameofRegionsToMerge.length);
1697 byte[][] encodedNameofRegionsToMerge = new byte[nameofRegionsToMerge.length][];
1698 for (int i = 0; i < nameofRegionsToMerge.length; i++) {
1699 encodedNameofRegionsToMerge[i] =
1700 RegionInfo.isEncodedRegionName(nameofRegionsToMerge[i]) ?
nameofRegionsToMerge[i] 1701 : Bytes.toBytes(RegionInfo.encodeRegionName(nameofRegionsToMerge[i])); 1702 } 1703 1704 TableName tableName = null; 1705 Pair<RegionInfo, ServerName> pair; 1706 1707 for(int i = 0; i < nameofRegionsToMerge.length; i++) { 1708 pair = getRegion(nameofRegionsToMerge[i]); 1709 1710 if (pair != null) { 1711 if (pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { 1712 throw new IllegalArgumentException ("Can't invoke merge on non-default regions directly"); 1713 } 1714 if (tableName == null) { 1715 tableName = pair.getFirst().getTable(); 1716 } else if (!tableName.equals(pair.getFirst().getTable())) { 1717 throw new IllegalArgumentException ("Cannot merge regions from two different tables " + 1718 tableName + " and " + pair.getFirst().getTable()); 1719 } 1720 } else { 1721 throw new UnknownRegionException ( 1722 "Can't invoke merge on unknown region " 1723 + Bytes.toStringBinary(encodedNameofRegionsToMerge[i])); 1724 } 1725 } 1726 1727 MergeTableRegionsResponse response = 1728 executeCallable(new MasterCallable<MergeTableRegionsResponse>(getConnection(), 1729 getRpcControllerFactory()) { 1730 Long nonceGroup = ng.getNonceGroup(); 1731 Long nonce = ng.newNonce(); 1732 @Override 1733 protected MergeTableRegionsResponse rpcCall() throws Exception { 1734 MergeTableRegionsRequest request = RequestConverter 1735 .buildMergeTableRegionsRequest( 1736 encodedNameofRegionsToMerge, 1737 forcible, 1738 nonceGroup, 1739 nonce); 1740 return master.mergeTableRegions(getRpcController(), request); 1741 } 1742 }); 1743 return new MergeTableRegionsFuture(this, tableName, response); 1744 } 1745 1746 private static class MergeTableRegionsFuture extends TableFuture<Void> { 1747 public MergeTableRegionsFuture( 1748 final HBaseAdmin admin, 1749 final TableName tableName, 1750 final MergeTableRegionsResponse response) { 1751 super(admin, tableName, 1752 (response != null && response.hasProcId()) ? response.getProcId() : null); 1753 } 1754 1755 public MergeTableRegionsFuture( 1756 final HBaseAdmin admin, 1757 final TableName tableName, 1758 final Long procId) { 1759 super(admin, tableName, procId); 1760 } 1761 1762 @Override 1763 public String getOperationType() { 1764 return "MERGE_REGIONS"; 1765 } 1766 } 1767 /** 1768 * Split one region. Synchronous operation. 1769 * Note: It is not feasible to predict the length of split. 1770 * Therefore, this is for internal testing only. 1771 * @param regionName encoded or full name of region 1772 * @param splitPoint key where region splits 1773 * @throws IOException if a remote or network exception occurs 1774 */ 1775 @VisibleForTesting 1776 public void splitRegionSync(byte[] regionName, byte[] splitPoint) throws IOException { 1777 splitRegionSync(regionName, splitPoint, syncWaitTimeout, TimeUnit.MILLISECONDS); 1778 } 1779 1780 1781 /** 1782 * Split one region. Synchronous operation. 1783 * @param regionName region to be split 1784 * @param splitPoint split point 1785 * @param timeout how long to wait on split 1786 * @param units time units 1787 * @throws IOException if a remote or network exception occurs 1788 */ 1789 public void splitRegionSync(byte[] regionName, byte[] splitPoint, final long timeout, 1790 final TimeUnit units) throws IOException { 1791 get(splitRegionAsync(regionName, splitPoint), timeout, units); 1792 } 1793 1794 @Override 1795 public Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint) 1796 throws IOException { 1797 byte[] encodedNameofRegionToSplit = HRegionInfo.isEncodedRegionName(regionName) ? 
1798 regionName : Bytes.toBytes(HRegionInfo.encodeRegionName(regionName));
1799 Pair<RegionInfo, ServerName> pair = getRegion(regionName);
1800 if (pair != null) {
1801 if (pair.getFirst() != null &&
1802 pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
1803 throw new IllegalArgumentException("Can't invoke split on non-default regions directly");
1804 }
1805 } else {
1806 throw new UnknownRegionException(
1807 "Can't invoke split on unknown region "
1808 + Bytes.toStringBinary(encodedNameofRegionToSplit));
1809 }
1810
1811 return splitRegionAsync(pair.getFirst(), splitPoint);
1812 }
1813
1814 Future<Void> splitRegionAsync(RegionInfo hri, byte[] splitPoint) throws IOException {
1815 TableName tableName = hri.getTable();
1816 if (hri.getStartKey() != null && splitPoint != null &&
1817 Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
1818 throw new IOException("The split point must not be equal to the region's start key");
1819 }
1820
1821 SplitTableRegionResponse response = executeCallable(
1822 new MasterCallable<SplitTableRegionResponse>(getConnection(), getRpcControllerFactory()) {
1823 Long nonceGroup = ng.getNonceGroup();
1824 Long nonce = ng.newNonce();
1825 @Override
1826 protected SplitTableRegionResponse rpcCall() throws Exception {
1827 setPriority(tableName);
1828 SplitTableRegionRequest request = RequestConverter
1829 .buildSplitTableRegionRequest(hri, splitPoint, nonceGroup, nonce);
1830 return master.splitRegion(getRpcController(), request);
1831 }
1832 });
1833 return new SplitTableRegionFuture(this, tableName, response);
1834 }
1835
1836 private static class SplitTableRegionFuture extends TableFuture<Void> {
1837 public SplitTableRegionFuture(final HBaseAdmin admin,
1838 final TableName tableName,
1839 final SplitTableRegionResponse response) {
1840 super(admin, tableName,
1841 (response != null && response.hasProcId()) ?
response.getProcId() : null); 1842 } 1843 1844 public SplitTableRegionFuture( 1845 final HBaseAdmin admin, 1846 final TableName tableName, 1847 final Long procId) { 1848 super(admin, tableName, procId); 1849 } 1850 1851 @Override 1852 public String getOperationType() { 1853 return "SPLIT_REGION"; 1854 } 1855 } 1856 1857 @Override 1858 public void split(final TableName tableName) throws IOException { 1859 split(tableName, null); 1860 } 1861 1862 @Override 1863 public void splitRegion(final byte[] regionName) throws IOException { 1864 splitRegion(regionName, null); 1865 } 1866 1867 @Override 1868 public void split(final TableName tableName, final byte[] splitPoint) throws IOException { 1869 checkTableExists(tableName); 1870 for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) { 1871 ServerName sn = loc.getServerName(); 1872 if (sn == null) { 1873 continue; 1874 } 1875 RegionInfo r = loc.getRegion(); 1876 // check for parents 1877 if (r.isSplitParent()) { 1878 continue; 1879 } 1880 // if a split point given, only split that particular region 1881 if (r.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID || 1882 (splitPoint != null && !r.containsRow(splitPoint))) { 1883 continue; 1884 } 1885 // call out to master to do split now 1886 splitRegionAsync(r, splitPoint); 1887 } 1888 } 1889 1890 @Override 1891 public void splitRegion(final byte[] regionName, final byte [] splitPoint) throws IOException { 1892 Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName); 1893 if (regionServerPair == null) { 1894 throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName)); 1895 } 1896 if (regionServerPair.getFirst() != null && 1897 regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { 1898 throw new IllegalArgumentException("Can't split replicas directly. " 1899 + "Replicas are auto-split when their primary is split."); 1900 } 1901 if (regionServerPair.getSecond() == null) { 1902 throw new NoServerForRegionException(Bytes.toStringBinary(regionName)); 1903 } 1904 splitRegionAsync(regionServerPair.getFirst(), splitPoint); 1905 } 1906 1907 private static class ModifyTableFuture extends TableFuture<Void> { 1908 public ModifyTableFuture(final HBaseAdmin admin, final TableName tableName, 1909 final ModifyTableResponse response) { 1910 super(admin, tableName, 1911 (response != null && response.hasProcId()) ? response.getProcId() : null); 1912 } 1913 1914 public ModifyTableFuture(final HBaseAdmin admin, final TableName tableName, final Long procId) { 1915 super(admin, tableName, procId); 1916 } 1917 1918 @Override 1919 public String getOperationType() { 1920 return "MODIFY"; 1921 } 1922 1923 @Override 1924 protected Void postOperationResult(final Void result, final long deadlineTs) 1925 throws IOException, TimeoutException { 1926 // The modify operation on the table is asynchronous on the server side irrespective 1927 // of whether Procedure V2 is supported or not. So, we wait in the client till 1928 // all regions get updated. 1929 waitForSchemaUpdate(deadlineTs); 1930 return result; 1931 } 1932 } 1933 1934 /** 1935 * @param regionName Name of a region. 1936 * @return a pair of HRegionInfo and ServerName if <code>regionName</code> is 1937 * a verified region name (we call {@link 1938 * MetaTableAccessor#getRegionLocation(Connection, byte[])} 1939 * else null. 1940 * Throw IllegalArgumentException if <code>regionName</code> is null. 
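 * <p>A minimal sketch of how callers in this class consume the result (mirroring the checks
 * done in {@code compactRegion} and {@code splitRegion}; variable names are illustrative):
 * <pre>{@code
 * Pair<RegionInfo, ServerName> pair = getRegion(regionName);
 * if (pair == null) {
 *   // regionName is neither a known region name nor a known encoded region name
 * } else if (pair.getSecond() == null) {
 *   // the region is known but currently has no hosting server
 * }
 * }</pre>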
1941 * @throws IOException if a remote or network exception occurs 1942 */ 1943 Pair<RegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException { 1944 if (regionName == null) { 1945 throw new IllegalArgumentException("Pass a table name or region name"); 1946 } 1947 Pair<RegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionName); 1948 if (pair == null) { 1949 final AtomicReference<Pair<RegionInfo, ServerName>> result = new AtomicReference<>(null); 1950 final String encodedName = Bytes.toString(regionName); 1951 MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { 1952 @Override 1953 public boolean visit(Result data) throws IOException { 1954 RegionInfo info = MetaTableAccessor.getRegionInfo(data); 1955 if (info == null) { 1956 LOG.warn("No serialized HRegionInfo in " + data); 1957 return true; 1958 } 1959 RegionLocations rl = MetaTableAccessor.getRegionLocations(data); 1960 boolean matched = false; 1961 ServerName sn = null; 1962 if (rl != null) { 1963 for (HRegionLocation h : rl.getRegionLocations()) { 1964 if (h != null && encodedName.equals(h.getRegionInfo().getEncodedName())) { 1965 sn = h.getServerName(); 1966 info = h.getRegionInfo(); 1967 matched = true; 1968 } 1969 } 1970 } 1971 if (!matched) return true; 1972 result.set(new Pair<>(info, sn)); 1973 return false; // found the region, stop 1974 } 1975 }; 1976 1977 MetaTableAccessor.fullScanRegions(connection, visitor); 1978 pair = result.get(); 1979 } 1980 return pair; 1981 } 1982 1983 /** 1984 * If the input is a region name, it is returned as is. If it's an 1985 * encoded region name, the corresponding region is found from meta 1986 * and its region name is returned. If we can't find any region in 1987 * meta matching the input as either region name or encoded region 1988 * name, the input is returned as is. We don't throw unknown 1989 * region exception. 1990 */ 1991 private byte[] getRegionName( 1992 final byte[] regionNameOrEncodedRegionName) throws IOException { 1993 if (Bytes.equals(regionNameOrEncodedRegionName, 1994 HRegionInfo.FIRST_META_REGIONINFO.getRegionName()) 1995 || Bytes.equals(regionNameOrEncodedRegionName, 1996 HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) { 1997 return HRegionInfo.FIRST_META_REGIONINFO.getRegionName(); 1998 } 1999 byte[] tmp = regionNameOrEncodedRegionName; 2000 Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName); 2001 if (regionServerPair != null && regionServerPair.getFirst() != null) { 2002 tmp = regionServerPair.getFirst().getRegionName(); 2003 } 2004 return tmp; 2005 } 2006 2007 /** 2008 * Check if table exists or not 2009 * @param tableName Name of a table. 2010 * @return tableName instance 2011 * @throws IOException if a remote or network exception occurs. 2012 * @throws TableNotFoundException if table does not exist. 
2013 */ 2014 private TableName checkTableExists(final TableName tableName) 2015 throws IOException { 2016 return executeCallable(new RpcRetryingCallable<TableName>() { 2017 @Override 2018 protected TableName rpcCall(int callTimeout) throws Exception { 2019 if (!MetaTableAccessor.tableExists(connection, tableName)) { 2020 throw new TableNotFoundException(tableName); 2021 } 2022 return tableName; 2023 } 2024 }); 2025 } 2026 2027 @Override 2028 public synchronized void shutdown() throws IOException { 2029 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 2030 @Override 2031 protected Void rpcCall() throws Exception { 2032 setPriority(HConstants.HIGH_QOS); 2033 master.shutdown(getRpcController(), ShutdownRequest.newBuilder().build()); 2034 return null; 2035 } 2036 }); 2037 } 2038 2039 @Override 2040 public synchronized void stopMaster() throws IOException { 2041 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 2042 @Override 2043 protected Void rpcCall() throws Exception { 2044 setPriority(HConstants.HIGH_QOS); 2045 master.stopMaster(getRpcController(), StopMasterRequest.newBuilder().build()); 2046 return null; 2047 } 2048 }); 2049 } 2050 2051 @Override 2052 public synchronized void stopRegionServer(final String hostnamePort) 2053 throws IOException { 2054 String hostname = Addressing.parseHostname(hostnamePort); 2055 int port = Addressing.parsePort(hostnamePort); 2056 final AdminService.BlockingInterface admin = 2057 this.connection.getAdmin(ServerName.valueOf(hostname, port, 0)); 2058 // TODO: There is no timeout on this controller. Set one! 2059 HBaseRpcController controller = rpcControllerFactory.newController(); 2060 controller.setPriority(HConstants.HIGH_QOS); 2061 StopServerRequest request = RequestConverter.buildStopServerRequest( 2062 "Called by admin client " + this.connection.toString()); 2063 try { 2064 admin.stopServer(controller, request); 2065 } catch (Exception e) { 2066 throw ProtobufUtil.handleRemoteException(e); 2067 } 2068 } 2069 2070 @Override 2071 public boolean isMasterInMaintenanceMode() throws IOException { 2072 return executeCallable(new MasterCallable<IsInMaintenanceModeResponse>(getConnection(), 2073 this.rpcControllerFactory) { 2074 @Override 2075 protected IsInMaintenanceModeResponse rpcCall() throws Exception { 2076 return master.isMasterInMaintenanceMode(getRpcController(), 2077 IsInMaintenanceModeRequest.newBuilder().build()); 2078 } 2079 }).getInMaintenanceMode(); 2080 } 2081 2082 @Override 2083 public ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException { 2084 return executeCallable(new MasterCallable<ClusterMetrics>(getConnection(), 2085 this.rpcControllerFactory) { 2086 @Override 2087 protected ClusterMetrics rpcCall() throws Exception { 2088 GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest(options); 2089 return ClusterMetricsBuilder.toClusterMetrics( 2090 master.getClusterStatus(getRpcController(), req).getClusterStatus()); 2091 } 2092 }); 2093 } 2094 2095 @Override 2096 public List<RegionMetrics> getRegionMetrics(ServerName serverName, TableName tableName) 2097 throws IOException { 2098 AdminService.BlockingInterface admin = this.connection.getAdmin(serverName); 2099 HBaseRpcController controller = rpcControllerFactory.newController(); 2100 AdminProtos.GetRegionLoadRequest request = 2101 RequestConverter.buildGetRegionLoadRequest(tableName); 2102 try { 2103 return admin.getRegionLoad(controller, 
request).getRegionLoadsList().stream() 2104 .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList()); 2105 } catch (ServiceException se) { 2106 throw ProtobufUtil.getRemoteException(se); 2107 } 2108 } 2109 2110 @Override 2111 public Configuration getConfiguration() { 2112 return this.conf; 2113 } 2114 2115 /** 2116 * Do a get with a timeout against the passed in <code>future</code>. 2117 */ 2118 private static <T> T get(final Future<T> future, final long timeout, final TimeUnit units) 2119 throws IOException { 2120 try { 2121 // TODO: how long should we wait? Spin forever? 2122 return future.get(timeout, units); 2123 } catch (InterruptedException e) { 2124 IOException ioe = new InterruptedIOException("Interrupt while waiting on " + future); 2125 ioe.initCause(e); 2126 throw ioe; 2127 } catch (TimeoutException e) { 2128 throw new TimeoutIOException(e); 2129 } catch (ExecutionException e) { 2130 if (e.getCause() instanceof IOException) { 2131 throw (IOException)e.getCause(); 2132 } else { 2133 throw new IOException(e.getCause()); 2134 } 2135 } 2136 } 2137 2138 @Override 2139 public Future<Void> createNamespaceAsync(final NamespaceDescriptor descriptor) 2140 throws IOException { 2141 CreateNamespaceResponse response = 2142 executeCallable(new MasterCallable<CreateNamespaceResponse>(getConnection(), 2143 getRpcControllerFactory()) { 2144 @Override 2145 protected CreateNamespaceResponse rpcCall() throws Exception { 2146 return master.createNamespace(getRpcController(), 2147 CreateNamespaceRequest.newBuilder().setNamespaceDescriptor(ProtobufUtil. 2148 toProtoNamespaceDescriptor(descriptor)).build()); 2149 } 2150 }); 2151 return new NamespaceFuture(this, descriptor.getName(), response.getProcId()) { 2152 @Override 2153 public String getOperationType() { 2154 return "CREATE_NAMESPACE"; 2155 } 2156 }; 2157 } 2158 2159 @Override 2160 public Future<Void> modifyNamespaceAsync(final NamespaceDescriptor descriptor) 2161 throws IOException { 2162 ModifyNamespaceResponse response = 2163 executeCallable(new MasterCallable<ModifyNamespaceResponse>(getConnection(), 2164 getRpcControllerFactory()) { 2165 @Override 2166 protected ModifyNamespaceResponse rpcCall() throws Exception { 2167 // TODO: set priority based on NS? 2168 return master.modifyNamespace(getRpcController(), ModifyNamespaceRequest.newBuilder(). 2169 setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build()); 2170 } 2171 }); 2172 return new NamespaceFuture(this, descriptor.getName(), response.getProcId()) { 2173 @Override 2174 public String getOperationType() { 2175 return "MODIFY_NAMESPACE"; 2176 } 2177 }; 2178 } 2179 2180 @Override 2181 public Future<Void> deleteNamespaceAsync(final String name) 2182 throws IOException { 2183 DeleteNamespaceResponse response = 2184 executeCallable(new MasterCallable<DeleteNamespaceResponse>(getConnection(), 2185 getRpcControllerFactory()) { 2186 @Override 2187 protected DeleteNamespaceResponse rpcCall() throws Exception { 2188 // TODO: set priority based on NS? 2189 return master.deleteNamespace(getRpcController(), DeleteNamespaceRequest.newBuilder(). 
2190 setNamespaceName(name).build()); 2191 } 2192 }); 2193 return new NamespaceFuture(this, name, response.getProcId()) { 2194 @Override 2195 public String getOperationType() { 2196 return "DELETE_NAMESPACE"; 2197 } 2198 }; 2199 } 2200 2201 @Override 2202 public NamespaceDescriptor getNamespaceDescriptor(final String name) 2203 throws NamespaceNotFoundException, IOException { 2204 return executeCallable(new MasterCallable<NamespaceDescriptor>(getConnection(), 2205 getRpcControllerFactory()) { 2206 @Override 2207 protected NamespaceDescriptor rpcCall() throws Exception { 2208 return ProtobufUtil.toNamespaceDescriptor( 2209 master.getNamespaceDescriptor(getRpcController(), 2210 GetNamespaceDescriptorRequest.newBuilder(). 2211 setNamespaceName(name).build()).getNamespaceDescriptor()); 2212 } 2213 }); 2214 } 2215 2216 /** 2217 * List available namespaces 2218 * @return List of namespace names 2219 * @throws IOException if a remote or network exception occurs 2220 */ 2221 @Override 2222 public String[] listNamespaces() throws IOException { 2223 return executeCallable(new MasterCallable<String[]>(getConnection(), 2224 getRpcControllerFactory()) { 2225 @Override 2226 protected String[] rpcCall() throws Exception { 2227 List<String> list = master.listNamespaces(getRpcController(), 2228 ListNamespacesRequest.newBuilder().build()).getNamespaceNameList(); 2229 return list.toArray(new String[list.size()]); 2230 } 2231 }); 2232 } 2233 2234 /** 2235 * List available namespace descriptors 2236 * @return List of descriptors 2237 * @throws IOException if a remote or network exception occurs 2238 */ 2239 @Override 2240 public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException { 2241 return executeCallable(new MasterCallable<NamespaceDescriptor[]>(getConnection(), 2242 getRpcControllerFactory()) { 2243 @Override 2244 protected NamespaceDescriptor[] rpcCall() throws Exception { 2245 List<HBaseProtos.NamespaceDescriptor> list = 2246 master.listNamespaceDescriptors(getRpcController(), 2247 ListNamespaceDescriptorsRequest.newBuilder().build()).getNamespaceDescriptorList(); 2248 NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()]; 2249 for(int i = 0; i < list.size(); i++) { 2250 res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i)); 2251 } 2252 return res; 2253 } 2254 }); 2255 } 2256 2257 @Override 2258 public String getProcedures() throws IOException { 2259 return executeCallable(new MasterCallable<String>(getConnection(), 2260 getRpcControllerFactory()) { 2261 @Override 2262 protected String rpcCall() throws Exception { 2263 GetProceduresRequest request = GetProceduresRequest.newBuilder().build(); 2264 GetProceduresResponse response = master.getProcedures(getRpcController(), request); 2265 return ProtobufUtil.toProcedureJson(response.getProcedureList()); 2266 } 2267 }); 2268 } 2269 2270 @Override 2271 public String getLocks() throws IOException { 2272 return executeCallable(new MasterCallable<String>(getConnection(), 2273 getRpcControllerFactory()) { 2274 @Override 2275 protected String rpcCall() throws Exception { 2276 GetLocksRequest request = GetLocksRequest.newBuilder().build(); 2277 GetLocksResponse response = master.getLocks(getRpcController(), request); 2278 return ProtobufUtil.toLockJson(response.getLockList()); 2279 } 2280 }); 2281 } 2282 2283 @Override 2284 public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException { 2285 return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection(), 2286 getRpcControllerFactory()) { 
2287 @Override 2288 protected HTableDescriptor[] rpcCall() throws Exception { 2289 List<TableSchema> list = 2290 master.listTableDescriptorsByNamespace(getRpcController(), 2291 ListTableDescriptorsByNamespaceRequest.newBuilder().setNamespaceName(name) 2292 .build()).getTableSchemaList(); 2293 HTableDescriptor[] res = new HTableDescriptor[list.size()]; 2294 for(int i=0; i < list.size(); i++) { 2295 res[i] = new ImmutableHTableDescriptor(ProtobufUtil.toTableDescriptor(list.get(i))); 2296 } 2297 return res; 2298 } 2299 }); 2300 } 2301 2302 @Override 2303 public TableName[] listTableNamesByNamespace(final String name) throws IOException { 2304 return executeCallable(new MasterCallable<TableName[]>(getConnection(), 2305 getRpcControllerFactory()) { 2306 @Override 2307 protected TableName[] rpcCall() throws Exception { 2308 List<HBaseProtos.TableName> tableNames = 2309 master.listTableNamesByNamespace(getRpcController(), ListTableNamesByNamespaceRequest. 2310 newBuilder().setNamespaceName(name).build()) 2311 .getTableNameList(); 2312 TableName[] result = new TableName[tableNames.size()]; 2313 for (int i = 0; i < tableNames.size(); i++) { 2314 result[i] = ProtobufUtil.toTableName(tableNames.get(i)); 2315 } 2316 return result; 2317 } 2318 }); 2319 } 2320 2321 /** 2322 * Is HBase available? Throw an exception if not. 2323 * @param conf system configuration 2324 * @throws MasterNotRunningException if the master is not running. 2325 * @throws ZooKeeperConnectionException if unable to connect to zookeeper. // TODO do not expose 2326 * ZKConnectionException. 2327 */ 2328 public static void available(final Configuration conf) 2329 throws MasterNotRunningException, ZooKeeperConnectionException, IOException { 2330 Configuration copyOfConf = HBaseConfiguration.create(conf); 2331 // We set it to make it fail as soon as possible if HBase is not available 2332 copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); 2333 copyOfConf.setInt("zookeeper.recovery.retry", 0); 2334 2335 // Check ZK first. 2336 // If the connection exists, we may have a connection to ZK that does not work anymore 2337 try (ClusterConnection connection = 2338 (ClusterConnection) ConnectionFactory.createConnection(copyOfConf)) { 2339 // can throw MasterNotRunningException 2340 connection.isMasterRunning(); 2341 } 2342 } 2343 2344 /** 2345 * 2346 * @param tableName 2347 * @return List of {@link HRegionInfo}. 2348 * @throws IOException if a remote or network exception occurs 2349 * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 2350 * Use {@link #getRegions(TableName)}. 
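 * <p>Migration sketch (illustrative only; {@code admin} stands for any {@link Admin} handle):
 * <pre>{@code
 * List<RegionInfo> regions = admin.getRegions(tableName); // preferred replacement call
 * }</pre>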
2351 */ 2352 @Deprecated 2353 @Override 2354 public List<HRegionInfo> getTableRegions(final TableName tableName) 2355 throws IOException { 2356 return getRegions(tableName).stream() 2357 .map(ImmutableHRegionInfo::new) 2358 .collect(Collectors.toList()); 2359 } 2360 2361 @Override 2362 public synchronized void close() throws IOException { 2363 } 2364 2365 @Override 2366 public HTableDescriptor[] getTableDescriptorsByTableName(final List<TableName> tableNames) 2367 throws IOException { 2368 return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection(), 2369 getRpcControllerFactory()) { 2370 @Override 2371 protected HTableDescriptor[] rpcCall() throws Exception { 2372 GetTableDescriptorsRequest req = 2373 RequestConverter.buildGetTableDescriptorsRequest(tableNames); 2374 return ProtobufUtil 2375 .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req)).stream() 2376 .map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new); 2377 } 2378 }); 2379 } 2380 2381 @Override 2382 public HTableDescriptor[] getTableDescriptors(List<String> names) 2383 throws IOException { 2384 List<TableName> tableNames = new ArrayList<>(names.size()); 2385 for(String name : names) { 2386 tableNames.add(TableName.valueOf(name)); 2387 } 2388 return getTableDescriptorsByTableName(tableNames); 2389 } 2390 2391 private RollWALWriterResponse rollWALWriterImpl(final ServerName sn) throws IOException, 2392 FailedLogCloseException { 2393 final AdminService.BlockingInterface admin = this.connection.getAdmin(sn); 2394 RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest(); 2395 // TODO: There is no timeout on this controller. Set one! 2396 HBaseRpcController controller = rpcControllerFactory.newController(); 2397 try { 2398 return admin.rollWALWriter(controller, request); 2399 } catch (ServiceException e) { 2400 throw ProtobufUtil.handleRemoteException(e); 2401 } 2402 } 2403 2404 @Override 2405 public synchronized void rollWALWriter(ServerName serverName) 2406 throws IOException, FailedLogCloseException { 2407 rollWALWriterImpl(serverName); 2408 } 2409 2410 @Override 2411 public CompactionState getCompactionState(final TableName tableName) 2412 throws IOException { 2413 return getCompactionState(tableName, CompactType.NORMAL); 2414 } 2415 2416 @Override 2417 public CompactionState getCompactionStateForRegion(final byte[] regionName) 2418 throws IOException { 2419 final Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName); 2420 if (regionServerPair == null) { 2421 throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName)); 2422 } 2423 if (regionServerPair.getSecond() == null) { 2424 throw new NoServerForRegionException(Bytes.toStringBinary(regionName)); 2425 } 2426 ServerName sn = regionServerPair.getSecond(); 2427 final AdminService.BlockingInterface admin = this.connection.getAdmin(sn); 2428 // TODO: There is no timeout on this controller. Set one! 
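    // (One possible way to address the TODO above, not applied here: HBaseRpcController
    // exposes setCallTimeout(int), e.g. controller.setCallTimeout(rpcTimeout), which would
    // bound this admin RPC instead of leaving it without a timeout.)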
2429 HBaseRpcController controller = rpcControllerFactory.newController(); 2430 GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest( 2431 regionServerPair.getFirst().getRegionName(), true); 2432 GetRegionInfoResponse response; 2433 try { 2434 response = admin.getRegionInfo(controller, request); 2435 } catch (ServiceException e) { 2436 throw ProtobufUtil.handleRemoteException(e); 2437 } 2438 if (response.getCompactionState() != null) { 2439 return ProtobufUtil.createCompactionState(response.getCompactionState()); 2440 } 2441 return null; 2442 } 2443 2444 @Override 2445 public void snapshot(SnapshotDescription snapshotDesc) 2446 throws IOException, SnapshotCreationException, IllegalArgumentException { 2447 // actually take the snapshot 2448 SnapshotProtos.SnapshotDescription snapshot = 2449 ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc); 2450 SnapshotResponse response = asyncSnapshot(snapshot); 2451 final IsSnapshotDoneRequest request = 2452 IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build(); 2453 IsSnapshotDoneResponse done = null; 2454 long start = EnvironmentEdgeManager.currentTime(); 2455 long max = response.getExpectedTimeout(); 2456 long maxPauseTime = max / this.numRetries; 2457 int tries = 0; 2458 LOG.debug("Waiting a max of " + max + " ms for snapshot '" + 2459 ClientSnapshotDescriptionUtils.toString(snapshot) + "'' to complete. (max " + 2460 maxPauseTime + " ms per retry)"); 2461 while (tries == 0 2462 || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) { 2463 try { 2464 // sleep a backoff <= pauseTime amount 2465 long sleep = getPauseTime(tries++); 2466 sleep = sleep > maxPauseTime ? maxPauseTime : sleep; 2467 LOG.debug("(#" + tries + ") Sleeping: " + sleep + 2468 "ms while waiting for snapshot completion."); 2469 Thread.sleep(sleep); 2470 } catch (InterruptedException e) { 2471 throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e); 2472 } 2473 LOG.debug("Getting current status of snapshot from master..."); 2474 done = executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection(), 2475 getRpcControllerFactory()) { 2476 @Override 2477 protected IsSnapshotDoneResponse rpcCall() throws Exception { 2478 return master.isSnapshotDone(getRpcController(), request); 2479 } 2480 }); 2481 } 2482 if (!done.getDone()) { 2483 throw new SnapshotCreationException("Snapshot '" + snapshot.getName() 2484 + "' wasn't completed in expectedTime:" + max + " ms", snapshotDesc); 2485 } 2486 } 2487 2488 @Override 2489 public Future<Void> snapshotAsync(SnapshotDescription snapshotDesc) 2490 throws IOException, SnapshotCreationException { 2491 asyncSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc)); 2492 return new ProcedureFuture<Void>(this, null) { 2493 2494 @Override 2495 protected Void waitOperationResult(long deadlineTs) throws IOException, TimeoutException { 2496 waitForState(deadlineTs, new WaitForStateCallable() { 2497 2498 @Override 2499 public void throwInterruptedException() throws InterruptedIOException { 2500 throw new InterruptedIOException( 2501 "Interrupted while waiting for taking snapshot" + snapshotDesc); 2502 } 2503 2504 @Override 2505 public void throwTimeoutException(long elapsedTime) throws TimeoutException { 2506 throw new TimeoutException("Snapshot '" + snapshotDesc.getName() + 2507 "' wasn't completed in expectedTime:" + elapsedTime + " ms"); 2508 } 2509 2510 @Override 2511 public boolean checkState(int tries) throws IOException { 2512 return 
isSnapshotFinished(snapshotDesc);
2513 }
2514 });
2515 return null;
2516 }
2517 };
2518 }
2519
2520 private SnapshotResponse asyncSnapshot(SnapshotProtos.SnapshotDescription snapshot)
2521 throws IOException {
2522 ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
2523 final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot)
2524 .build();
2525 // run the snapshot on the master
2526 return executeCallable(new MasterCallable<SnapshotResponse>(getConnection(),
2527 getRpcControllerFactory()) {
2528 @Override
2529 protected SnapshotResponse rpcCall() throws Exception {
2530 return master.snapshot(getRpcController(), request);
2531 }
2532 });
2533 }
2534
2535 @Override
2536 public boolean isSnapshotFinished(final SnapshotDescription snapshotDesc)
2537 throws IOException, HBaseSnapshotException, UnknownSnapshotException {
2538 final SnapshotProtos.SnapshotDescription snapshot =
2539 ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc);
2540 return executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection(),
2541 getRpcControllerFactory()) {
2542 @Override
2543 protected IsSnapshotDoneResponse rpcCall() throws Exception {
2544 return master.isSnapshotDone(getRpcController(),
2545 IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build());
2546 }
2547 }).getDone();
2548 }
2549
2550 @Override
2551 public void restoreSnapshot(final byte[] snapshotName)
2552 throws IOException, RestoreSnapshotException {
2553 restoreSnapshot(Bytes.toString(snapshotName));
2554 }
2555
2556 @Override
2557 public void restoreSnapshot(final String snapshotName)
2558 throws IOException, RestoreSnapshotException {
2559 boolean takeFailSafeSnapshot =
2560 conf.getBoolean(HConstants.SNAPSHOT_RESTORE_TAKE_FAILSAFE_SNAPSHOT,
2561 HConstants.DEFAULT_SNAPSHOT_RESTORE_TAKE_FAILSAFE_SNAPSHOT);
2562 restoreSnapshot(snapshotName, takeFailSafeSnapshot);
2563 }
2564
2565 @Override
2566 public void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot)
2567 throws IOException, RestoreSnapshotException {
2568 restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
2569 }
2570
2571 /**
2572 * Check that the named snapshot exists and resolve the name of the table it was taken of.
2573 *
2574 * @param snapshotName name of the snapshot to restore
2575 * @throws IOException if a remote or network exception occurs
2576 * @throws RestoreSnapshotException if no valid snapshot is found
2577 */
2578 private TableName getTableNameBeforeRestoreSnapshot(final String snapshotName)
2579 throws IOException, RestoreSnapshotException {
2580 TableName tableName = null;
2581 for (SnapshotDescription snapshotInfo: listSnapshots()) {
2582 if (snapshotInfo.getName().equals(snapshotName)) {
2583 tableName = snapshotInfo.getTableName();
2584 break;
2585 }
2586 }
2587
2588 if (tableName == null) {
2589 throw new RestoreSnapshotException(
2590 "Unable to find the table name for snapshot=" + snapshotName);
2591 }
2592 return tableName;
2593 }
2594
2595 @Override
2596 public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot)
2597 throws IOException, RestoreSnapshotException {
2598 restoreSnapshot(snapshotName, takeFailSafeSnapshot, false);
2599 }
2600
2601 @Override
2602 public void restoreSnapshot(final String snapshotName, final boolean takeFailSafeSnapshot,
2603 final boolean restoreAcl) throws IOException, RestoreSnapshotException {
2604 TableName tableName = getTableNameBeforeRestoreSnapshot(snapshotName);
2605
2606 // The table does not exist, switch to
clone.
2607 if (!tableExists(tableName)) {
2608 cloneSnapshot(snapshotName, tableName, restoreAcl);
2609 return;
2610 }
2611
2612 // Check if the table is disabled
2613 if (!isTableDisabled(tableName)) {
2614 throw new TableNotDisabledException(tableName);
2615 }
2616
2617 // Take a fail-safe snapshot of the current state
2618 String failSafeSnapshotSnapshotName = null;
2619 if (takeFailSafeSnapshot) {
2620 failSafeSnapshotSnapshotName = conf.get("hbase.snapshot.restore.failsafe.name",
2621 "hbase-failsafe-{snapshot.name}-{restore.timestamp}");
2622 failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotName
2623 .replace("{snapshot.name}", snapshotName)
2624 .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
2625 .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime()));
2626 LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
2627 snapshot(failSafeSnapshotSnapshotName, tableName);
2628 }
2629
2630 try {
2631 // Restore snapshot
2632 get(
2633 internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl),
2634 syncWaitTimeout,
2635 TimeUnit.MILLISECONDS);
2636 } catch (IOException e) {
2637 // Something went wrong during the restore...
2638 // If the pre-restore snapshot is available, try to roll back
2639 if (takeFailSafeSnapshot) {
2640 try {
2641 get(
2642 internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName, restoreAcl),
2643 syncWaitTimeout,
2644 TimeUnit.MILLISECONDS);
2645 String msg = "Restore snapshot=" + snapshotName +
2646 " failed. Rollback to snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
2647 LOG.error(msg, e);
2648 throw new RestoreSnapshotException(msg, e);
2649 } catch (IOException ex) {
2650 String msg = "Failed to restore and rollback to snapshot=" + failSafeSnapshotSnapshotName;
2651 LOG.error(msg, ex);
2652 throw new RestoreSnapshotException(msg, e);
2653 }
2654 } else {
2655 throw new RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e);
2656 }
2657 }
2658
2659 // If the restore succeeded, delete the pre-restore snapshot
2660 if (takeFailSafeSnapshot) {
2661 try {
2662 LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
2663 deleteSnapshot(failSafeSnapshotSnapshotName);
2664 } catch (IOException e) {
2665 LOG.error("Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName, e);
2666 }
2667 }
2668 }
2669
2670 @Override
2671 public Future<Void> restoreSnapshotAsync(final String snapshotName)
2672 throws IOException, RestoreSnapshotException {
2673 TableName tableName = getTableNameBeforeRestoreSnapshot(snapshotName);
2674
2675 // The table does not exist, switch to clone.
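    // (Same decision as in the blocking restoreSnapshot above: an existing table must be
    // disabled before it can be restored in place; a missing table means we clone instead.)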
2676 if (!tableExists(tableName)) { 2677 return cloneSnapshotAsync(snapshotName, tableName); 2678 } 2679 2680 // Check if the table is disabled 2681 if (!isTableDisabled(tableName)) { 2682 throw new TableNotDisabledException(tableName); 2683 } 2684 2685 return internalRestoreSnapshotAsync(snapshotName, tableName, false); 2686 } 2687 2688 @Override 2689 public Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName, 2690 boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException { 2691 if (tableExists(tableName)) { 2692 throw new TableExistsException(tableName); 2693 } 2694 return internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl); 2695 } 2696 2697 @Override 2698 public byte[] execProcedureWithReturn(String signature, String instance, Map<String, 2699 String> props) throws IOException { 2700 ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props); 2701 final ExecProcedureRequest request = 2702 ExecProcedureRequest.newBuilder().setProcedure(desc).build(); 2703 // run the procedure on the master 2704 ExecProcedureResponse response = executeCallable( 2705 new MasterCallable<ExecProcedureResponse>(getConnection(), getRpcControllerFactory()) { 2706 @Override 2707 protected ExecProcedureResponse rpcCall() throws Exception { 2708 return master.execProcedureWithRet(getRpcController(), request); 2709 } 2710 }); 2711 2712 return response.hasReturnData() ? response.getReturnData().toByteArray() : null; 2713 } 2714 2715 @Override 2716 public void execProcedure(String signature, String instance, Map<String, String> props) 2717 throws IOException { 2718 ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props); 2719 final ExecProcedureRequest request = 2720 ExecProcedureRequest.newBuilder().setProcedure(desc).build(); 2721 // run the procedure on the master 2722 ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>( 2723 getConnection(), getRpcControllerFactory()) { 2724 @Override 2725 protected ExecProcedureResponse rpcCall() throws Exception { 2726 return master.execProcedure(getRpcController(), request); 2727 } 2728 }); 2729 2730 long start = EnvironmentEdgeManager.currentTime(); 2731 long max = response.getExpectedTimeout(); 2732 long maxPauseTime = max / this.numRetries; 2733 int tries = 0; 2734 LOG.debug("Waiting a max of " + max + " ms for procedure '" + 2735 signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms per retry)"); 2736 boolean done = false; 2737 while (tries == 0 2738 || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) { 2739 try { 2740 // sleep a backoff <= pauseTime amount 2741 long sleep = getPauseTime(tries++); 2742 sleep = sleep > maxPauseTime ? 
maxPauseTime : sleep; 2743 LOG.debug("(#" + tries + ") Sleeping: " + sleep + 2744 "ms while waiting for procedure completion."); 2745 Thread.sleep(sleep); 2746 } catch (InterruptedException e) { 2747 throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e); 2748 } 2749 LOG.debug("Getting current status of procedure from master..."); 2750 done = isProcedureFinished(signature, instance, props); 2751 } 2752 if (!done) { 2753 throw new IOException("Procedure '" + signature + " : " + instance 2754 + "' wasn't completed in expectedTime:" + max + " ms"); 2755 } 2756 } 2757 2758 @Override 2759 public boolean isProcedureFinished(String signature, String instance, Map<String, String> props) 2760 throws IOException { 2761 ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props); 2762 return executeCallable( 2763 new MasterCallable<IsProcedureDoneResponse>(getConnection(), getRpcControllerFactory()) { 2764 @Override 2765 protected IsProcedureDoneResponse rpcCall() throws Exception { 2766 return master.isProcedureDone(getRpcController(), 2767 IsProcedureDoneRequest.newBuilder().setProcedure(desc).build()); 2768 } 2769 }).getDone(); 2770 } 2771 2772 /** 2773 * Execute Restore/Clone snapshot and wait for the server to complete (blocking). 2774 * To check if the cloned table exists, use {@link #isTableAvailable} -- it is not safe to 2775 * create an HTable instance to this table before it is available. 2776 * @param snapshotName snapshot to restore 2777 * @param tableName table name to restore the snapshot on 2778 * @throws IOException if a remote or network exception occurs 2779 * @throws RestoreSnapshotException if snapshot failed to be restored 2780 * @throws IllegalArgumentException if the restore request is formatted incorrectly 2781 */ 2782 private Future<Void> internalRestoreSnapshotAsync(final String snapshotName, 2783 final TableName tableName, final boolean restoreAcl) 2784 throws IOException, RestoreSnapshotException { 2785 final SnapshotProtos.SnapshotDescription snapshot = 2786 SnapshotProtos.SnapshotDescription.newBuilder() 2787 .setName(snapshotName).setTable(tableName.getNameAsString()).build(); 2788 2789 // actually restore the snapshot 2790 ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot); 2791 2792 RestoreSnapshotResponse response = executeCallable( 2793 new MasterCallable<RestoreSnapshotResponse>(getConnection(), getRpcControllerFactory()) { 2794 Long nonceGroup = ng.getNonceGroup(); 2795 Long nonce = ng.newNonce(); 2796 @Override 2797 protected RestoreSnapshotResponse rpcCall() throws Exception { 2798 final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder() 2799 .setSnapshot(snapshot) 2800 .setNonceGroup(nonceGroup) 2801 .setNonce(nonce) 2802 .setRestoreACL(restoreAcl) 2803 .build(); 2804 return master.restoreSnapshot(getRpcController(), request); 2805 } 2806 }); 2807 2808 return new RestoreSnapshotFuture(this, snapshot, tableName, response); 2809 } 2810 2811 private static class RestoreSnapshotFuture extends TableFuture<Void> { 2812 public RestoreSnapshotFuture( 2813 final HBaseAdmin admin, 2814 final SnapshotProtos.SnapshotDescription snapshot, 2815 final TableName tableName, 2816 final RestoreSnapshotResponse response) { 2817 super(admin, tableName, 2818 (response != null && response.hasProcId()) ? 
response.getProcId() : null); 2819 2820 if (response != null && !response.hasProcId()) { 2821 throw new UnsupportedOperationException("Client could not call old version of Server"); 2822 } 2823 } 2824 2825 public RestoreSnapshotFuture( 2826 final HBaseAdmin admin, 2827 final TableName tableName, 2828 final Long procId) { 2829 super(admin, tableName, procId); 2830 } 2831 2832 @Override 2833 public String getOperationType() { 2834 return "MODIFY"; 2835 } 2836 } 2837 2838 @Override 2839 public List<SnapshotDescription> listSnapshots() throws IOException { 2840 return executeCallable(new MasterCallable<List<SnapshotDescription>>(getConnection(), 2841 getRpcControllerFactory()) { 2842 @Override 2843 protected List<SnapshotDescription> rpcCall() throws Exception { 2844 List<SnapshotProtos.SnapshotDescription> snapshotsList = master 2845 .getCompletedSnapshots(getRpcController(), 2846 GetCompletedSnapshotsRequest.newBuilder().build()) 2847 .getSnapshotsList(); 2848 List<SnapshotDescription> result = new ArrayList<>(snapshotsList.size()); 2849 for (SnapshotProtos.SnapshotDescription snapshot : snapshotsList) { 2850 result.add(ProtobufUtil.createSnapshotDesc(snapshot)); 2851 } 2852 return result; 2853 } 2854 }); 2855 } 2856 2857 @Override 2858 public List<SnapshotDescription> listSnapshots(String regex) throws IOException { 2859 return listSnapshots(Pattern.compile(regex)); 2860 } 2861 2862 @Override 2863 public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException { 2864 List<SnapshotDescription> matched = new LinkedList<>(); 2865 List<SnapshotDescription> snapshots = listSnapshots(); 2866 for (SnapshotDescription snapshot : snapshots) { 2867 if (pattern.matcher(snapshot.getName()).matches()) { 2868 matched.add(snapshot); 2869 } 2870 } 2871 return matched; 2872 } 2873 2874 @Override 2875 public List<SnapshotDescription> listTableSnapshots(String tableNameRegex, 2876 String snapshotNameRegex) throws IOException { 2877 return listTableSnapshots(Pattern.compile(tableNameRegex), Pattern.compile(snapshotNameRegex)); 2878 } 2879 2880 @Override 2881 public List<SnapshotDescription> listTableSnapshots(Pattern tableNamePattern, 2882 Pattern snapshotNamePattern) throws IOException { 2883 TableName[] tableNames = listTableNames(tableNamePattern); 2884 2885 List<SnapshotDescription> tableSnapshots = new LinkedList<>(); 2886 List<SnapshotDescription> snapshots = listSnapshots(snapshotNamePattern); 2887 2888 List<TableName> listOfTableNames = Arrays.asList(tableNames); 2889 for (SnapshotDescription snapshot : snapshots) { 2890 if (listOfTableNames.contains(snapshot.getTableName())) { 2891 tableSnapshots.add(snapshot); 2892 } 2893 } 2894 return tableSnapshots; 2895 } 2896 2897 @Override 2898 public void deleteSnapshot(final byte[] snapshotName) throws IOException { 2899 deleteSnapshot(Bytes.toString(snapshotName)); 2900 } 2901 2902 @Override 2903 public void deleteSnapshot(final String snapshotName) throws IOException { 2904 // make sure the snapshot is possibly valid 2905 TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName)); 2906 // do the delete 2907 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 2908 @Override 2909 protected Void rpcCall() throws Exception { 2910 master.deleteSnapshot(getRpcController(), 2911 DeleteSnapshotRequest.newBuilder().setSnapshot( 2912 SnapshotProtos.SnapshotDescription.newBuilder().setName(snapshotName).build()) 2913 .build() 2914 ); 2915 return null; 2916 } 2917 }); 2918 } 2919 2920 @Override 2921 public 
void deleteSnapshots(final String regex) throws IOException { 2922 deleteSnapshots(Pattern.compile(regex)); 2923 } 2924 2925 @Override 2926 public void deleteSnapshots(final Pattern pattern) throws IOException { 2927 List<SnapshotDescription> snapshots = listSnapshots(pattern); 2928 for (final SnapshotDescription snapshot : snapshots) { 2929 try { 2930 internalDeleteSnapshot(snapshot); 2931 } catch (IOException ex) { 2932 LOG.info("Failed to delete snapshot " + snapshot.getName() + " for table " 2933 + snapshot.getTableNameAsString(), ex); 2934 } 2935 } 2936 } 2937 2938 private void internalDeleteSnapshot(final SnapshotDescription snapshot) throws IOException { 2939 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 2940 @Override 2941 protected Void rpcCall() throws Exception { 2942 this.master.deleteSnapshot(getRpcController(), DeleteSnapshotRequest.newBuilder() 2943 .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build()); 2944 return null; 2945 } 2946 }); 2947 } 2948 2949 @Override 2950 public void deleteTableSnapshots(String tableNameRegex, String snapshotNameRegex) 2951 throws IOException { 2952 deleteTableSnapshots(Pattern.compile(tableNameRegex), Pattern.compile(snapshotNameRegex)); 2953 } 2954 2955 @Override 2956 public void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) 2957 throws IOException { 2958 List<SnapshotDescription> snapshots = listTableSnapshots(tableNamePattern, snapshotNamePattern); 2959 for (SnapshotDescription snapshot : snapshots) { 2960 try { 2961 internalDeleteSnapshot(snapshot); 2962 LOG.debug("Successfully deleted snapshot: " + snapshot.getName()); 2963 } catch (IOException e) { 2964 LOG.error("Failed to delete snapshot: " + snapshot.getName(), e); 2965 } 2966 } 2967 } 2968 2969 @Override 2970 public void setQuota(final QuotaSettings quota) throws IOException { 2971 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 2972 @Override 2973 protected Void rpcCall() throws Exception { 2974 this.master.setQuota(getRpcController(), QuotaSettings.buildSetQuotaRequestProto(quota)); 2975 return null; 2976 } 2977 }); 2978 } 2979 2980 @Override 2981 public QuotaRetriever getQuotaRetriever(final QuotaFilter filter) throws IOException { 2982 return QuotaRetriever.open(conf, filter); 2983 } 2984 2985 @Override 2986 public List<QuotaSettings> getQuota(QuotaFilter filter) throws IOException { 2987 List<QuotaSettings> quotas = new LinkedList<>(); 2988 try (QuotaRetriever retriever = QuotaRetriever.open(conf, filter)) { 2989 Iterator<QuotaSettings> iterator = retriever.iterator(); 2990 while (iterator.hasNext()) { 2991 quotas.add(iterator.next()); 2992 } 2993 } 2994 return quotas; 2995 } 2996 2997 private <C extends RetryingCallable<V> & Closeable, V> V executeCallable(C callable) 2998 throws IOException { 2999 return executeCallable(callable, rpcCallerFactory, operationTimeout, rpcTimeout); 3000 } 3001 3002 static private <C extends RetryingCallable<V> & Closeable, V> V executeCallable(C callable, 3003 RpcRetryingCallerFactory rpcCallerFactory, int operationTimeout, int rpcTimeout) 3004 throws IOException { 3005 RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller(rpcTimeout); 3006 try { 3007 return caller.callWithRetries(callable, operationTimeout); 3008 } finally { 3009 callable.close(); 3010 } 3011 } 3012 3013 @Override 3014 // Coprocessor Endpoint against the Master. 
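  // Illustrative usage sketch (hypothetical caller code, not part of this class): the returned
  // channel is normally wrapped in a generated coprocessor stub, e.g. for a service named
  // (hypothetically) MyMasterService:
  //   CoprocessorRpcChannel channel = admin.coprocessorService();
  //   MyMasterService.BlockingInterface stub = MyMasterService.newBlockingStub(channel);
  // Each call made through the stub is then routed to the active master via execMasterService.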
3015 public CoprocessorRpcChannel coprocessorService() { 3016 return new SyncCoprocessorRpcChannel() { 3017 @Override 3018 protected Message callExecService(final RpcController controller, 3019 final Descriptors.MethodDescriptor method, final Message request, 3020 final Message responsePrototype) 3021 throws IOException { 3022 if (LOG.isTraceEnabled()) { 3023 LOG.trace("Call: " + method.getName() + ", " + request.toString()); 3024 } 3025 // Try-with-resources so close gets called when we are done. 3026 try (MasterCallable<CoprocessorServiceResponse> callable = 3027 new MasterCallable<CoprocessorServiceResponse>(connection, 3028 connection.getRpcControllerFactory()) { 3029 @Override 3030 protected CoprocessorServiceResponse rpcCall() throws Exception { 3031 CoprocessorServiceRequest csr = 3032 CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); 3033 return this.master.execMasterService(getRpcController(), csr); 3034 } 3035 }) { 3036 // TODO: Are we retrying here? Does not seem so. We should use RetryingRpcCaller 3037 callable.prepare(false); 3038 int operationTimeout = connection.getConnectionConfiguration().getOperationTimeout(); 3039 CoprocessorServiceResponse result = callable.call(operationTimeout); 3040 return CoprocessorRpcUtils.getResponse(result, responsePrototype); 3041 } 3042 } 3043 }; 3044 } 3045 3046 /** 3047 * Simple {@link Abortable}, throwing RuntimeException on abort. 3048 */ 3049 private static class ThrowableAbortable implements Abortable { 3050 @Override 3051 public void abort(String why, Throwable e) { 3052 throw new RuntimeException(why, e); 3053 } 3054 3055 @Override 3056 public boolean isAborted() { 3057 return true; 3058 } 3059 } 3060 3061 @Override 3062 public CoprocessorRpcChannel coprocessorService(final ServerName serverName) { 3063 return new SyncCoprocessorRpcChannel() { 3064 @Override 3065 protected Message callExecService(RpcController controller, 3066 Descriptors.MethodDescriptor method, Message request, Message responsePrototype) 3067 throws IOException { 3068 if (LOG.isTraceEnabled()) { 3069 LOG.trace("Call: " + method.getName() + ", " + request.toString()); 3070 } 3071 CoprocessorServiceRequest csr = 3072 CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); 3073 // TODO: Are we retrying here? Does not seem so. We should use RetryingRpcCaller 3074 // TODO: Make this same as RegionCoprocessorRpcChannel and MasterCoprocessorRpcChannel. They 3075 // are all different though should do same thing; e.g. RpcChannel setup. 3076 ClientProtos.ClientService.BlockingInterface stub = connection.getClient(serverName); 3077 CoprocessorServiceResponse result; 3078 try { 3079 result = stub. 
3080 execRegionServerService(connection.getRpcControllerFactory().newController(), csr); 3081 return CoprocessorRpcUtils.getResponse(result, responsePrototype); 3082 } catch (ServiceException e) { 3083 throw ProtobufUtil.handleRemoteException(e); 3084 } 3085 } 3086 }; 3087 } 3088 3089 @Override 3090 public void updateConfiguration(final ServerName server) throws IOException { 3091 final AdminService.BlockingInterface admin = this.connection.getAdmin(server); 3092 Callable<Void> callable = new Callable<Void>() { 3093 @Override 3094 public Void call() throws Exception { 3095 admin.updateConfiguration(null, UpdateConfigurationRequest.getDefaultInstance()); 3096 return null; 3097 } 3098 }; 3099 ProtobufUtil.call(callable); 3100 } 3101 3102 @Override 3103 public void updateConfiguration() throws IOException { 3104 ClusterMetrics status = getClusterMetrics( 3105 EnumSet.of(Option.LIVE_SERVERS, Option.MASTER, Option.BACKUP_MASTERS)); 3106 for (ServerName server : status.getLiveServerMetrics().keySet()) { 3107 updateConfiguration(server); 3108 } 3109 3110 updateConfiguration(status.getMasterName()); 3111 3112 for (ServerName server : status.getBackupMasterNames()) { 3113 updateConfiguration(server); 3114 } 3115 } 3116 3117 @Override 3118 public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException { 3119 return executeCallable(new MasterCallable<Long>(getConnection(), getRpcControllerFactory()) { 3120 @Override 3121 protected Long rpcCall() throws Exception { 3122 MajorCompactionTimestampRequest req = 3123 MajorCompactionTimestampRequest.newBuilder() 3124 .setTableName(ProtobufUtil.toProtoTableName(tableName)).build(); 3125 return master.getLastMajorCompactionTimestamp(getRpcController(), req). 3126 getCompactionTimestamp(); 3127 } 3128 }); 3129 } 3130 3131 @Override 3132 public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException { 3133 return executeCallable(new MasterCallable<Long>(getConnection(), getRpcControllerFactory()) { 3134 @Override 3135 protected Long rpcCall() throws Exception { 3136 MajorCompactionTimestampForRegionRequest req = 3137 MajorCompactionTimestampForRegionRequest.newBuilder().setRegion(RequestConverter 3138 .buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)).build(); 3139 return master.getLastMajorCompactionTimestampForRegion(getRpcController(), req) 3140 .getCompactionTimestamp(); 3141 } 3142 }); 3143 } 3144 3145 /** 3146 * {@inheritDoc} 3147 */ 3148 @Override 3149 public void compact(final TableName tableName, final byte[] columnFamily, CompactType compactType) 3150 throws IOException, InterruptedException { 3151 compact(tableName, columnFamily, false, compactType); 3152 } 3153 3154 /** 3155 * {@inheritDoc} 3156 */ 3157 @Override 3158 public void compact(final TableName tableName, CompactType compactType) 3159 throws IOException, InterruptedException { 3160 compact(tableName, null, false, compactType); 3161 } 3162 3163 /** 3164 * {@inheritDoc} 3165 */ 3166 @Override 3167 public void majorCompact(final TableName tableName, final byte[] columnFamily, 3168 CompactType compactType) throws IOException, InterruptedException { 3169 compact(tableName, columnFamily, true, compactType); 3170 } 3171 3172 /** 3173 * {@inheritDoc} 3174 */ 3175 @Override 3176 public void majorCompact(final TableName tableName, CompactType compactType) 3177 throws IOException, InterruptedException { 3178 compact(tableName, null, true, compactType); 3179 } 3180 3181 /** 3182 * {@inheritDoc} 3183 */ 3184 @Override 3185 
public CompactionState getCompactionState(final TableName tableName, CompactType compactType) 3186 throws IOException { 3187 checkTableExists(tableName); 3188 if (!isTableEnabled(tableName)) { 3189 // If the table is disabled, the compaction state of the table should always be NONE 3190 return ProtobufUtil.createCompactionState( 3191 AdminProtos.GetRegionInfoResponse.CompactionState.NONE); 3192 } 3193 3194 AdminProtos.GetRegionInfoResponse.CompactionState state = 3195 AdminProtos.GetRegionInfoResponse.CompactionState.NONE; 3196 3197 // TODO: There is no timeout on this controller. Set one! 3198 HBaseRpcController rpcController = rpcControllerFactory.newController(); 3199 switch (compactType) { 3200 case MOB: 3201 final AdminProtos.AdminService.BlockingInterface masterAdmin = 3202 this.connection.getAdminForMaster(); 3203 Callable<AdminProtos.GetRegionInfoResponse.CompactionState> callable = 3204 new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() { 3205 @Override 3206 public AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception { 3207 RegionInfo info = RegionInfo.createMobRegionInfo(tableName); 3208 GetRegionInfoRequest request = 3209 RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true); 3210 GetRegionInfoResponse response = masterAdmin.getRegionInfo(rpcController, request); 3211 return response.getCompactionState(); 3212 } 3213 }; 3214 state = ProtobufUtil.call(callable); 3215 break; 3216 case NORMAL: 3217 for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) { 3218 ServerName sn = loc.getServerName(); 3219 if (sn == null) { 3220 continue; 3221 } 3222 byte[] regionName = loc.getRegion().getRegionName(); 3223 AdminService.BlockingInterface snAdmin = this.connection.getAdmin(sn); 3224 try { 3225 Callable<GetRegionInfoResponse> regionInfoCallable = 3226 new Callable<GetRegionInfoResponse>() { 3227 @Override 3228 public GetRegionInfoResponse call() throws Exception { 3229 GetRegionInfoRequest request = 3230 RequestConverter.buildGetRegionInfoRequest(regionName, true); 3231 return snAdmin.getRegionInfo(rpcController, request); 3232 } 3233 }; 3234 GetRegionInfoResponse response = ProtobufUtil.call(regionInfoCallable); 3235 switch (response.getCompactionState()) { 3236 case MAJOR_AND_MINOR: 3237 return CompactionState.MAJOR_AND_MINOR; 3238 case MAJOR: 3239 if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MINOR) { 3240 return CompactionState.MAJOR_AND_MINOR; 3241 } 3242 state = AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR; 3243 break; 3244 case MINOR: 3245 if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR) { 3246 return CompactionState.MAJOR_AND_MINOR; 3247 } 3248 state = AdminProtos.GetRegionInfoResponse.CompactionState.MINOR; 3249 break; 3250 case NONE: 3251 default: // nothing, continue 3252 } 3253 } catch (NotServingRegionException e) { 3254 if (LOG.isDebugEnabled()) { 3255 LOG.debug("Trying to get compaction state of " + loc.getRegion() + ": " + 3256 StringUtils.stringifyException(e)); 3257 } 3258 } catch (RemoteException e) { 3259 if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) { 3260 if (LOG.isDebugEnabled()) { 3261 LOG.debug("Trying to get compaction state of " + loc.getRegion() + ": " + 3262 StringUtils.stringifyException(e)); 3263 } 3264 } else { 3265 throw e; 3266 } 3267 } 3268 } 3269 break; 3270 default: 3271 throw new IllegalArgumentException("Unknown compactType: " + compactType); 3272 } 3273 if (state != null) { 3274 return 
          ProtobufUtil.createCompactionState(state);
    }
    return null;
  }

  /**
   * Future that waits on a procedure result.
   * Returned by the async version of the Admin calls,
   * and used internally by the sync calls to wait on the result of the procedure.
   */
  @InterfaceAudience.Private
  @InterfaceStability.Evolving
  protected static class ProcedureFuture<V> implements Future<V> {
    private ExecutionException exception = null;
    private boolean procResultFound = false;
    private boolean done = false;
    private boolean cancelled = false;
    private V result = null;

    private final HBaseAdmin admin;
    protected final Long procId;

    public ProcedureFuture(final HBaseAdmin admin, final Long procId) {
      this.admin = admin;
      this.procId = procId;
    }

    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
      AbortProcedureRequest abortProcRequest = AbortProcedureRequest.newBuilder()
        .setProcId(procId).setMayInterruptIfRunning(mayInterruptIfRunning).build();
      try {
        cancelled = abortProcedureResult(abortProcRequest).getIsProcedureAborted();
        if (cancelled) {
          done = true;
        }
      } catch (IOException e) {
        // The abort request threw an exception, so we do not know whether the cancel succeeded
        // or failed. Assume it failed, but log a warning for debugging purposes.
        LOG.warn(
          "Cancelling the procedure with procId=" + procId + " threw an exception: "
            + e.getMessage(), e);
        cancelled = false;
      }
      return cancelled;
    }

    @Override
    public boolean isCancelled() {
      return cancelled;
    }

    protected AbortProcedureResponse abortProcedureResult(
        final AbortProcedureRequest request) throws IOException {
      return admin.executeCallable(new MasterCallable<AbortProcedureResponse>(
          admin.getConnection(), admin.getRpcControllerFactory()) {
        @Override
        protected AbortProcedureResponse rpcCall() throws Exception {
          return master.abortProcedure(getRpcController(), request);
        }
      });
    }

    @Override
    public V get() throws InterruptedException, ExecutionException {
      // TODO: should we ever spin forever?
      // fix HBASE-21715. TODO: If calling get() without a timeout limit is not allowed,
      // is it possible to compose instead of inheriting from the class Future for this class?
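      // Note: this no-argument get() delegates to the timed get() below using the admin's
      // configured procedure timeout; on timeout the exception is logged and null is returned
      // rather than propagated to the caller.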
      try {
        return get(admin.getProcedureTimeout, TimeUnit.MILLISECONDS);
      } catch (TimeoutException e) {
        LOG.warn("Failed to get the procedure with procId=" + procId + ": " + e.getMessage(), e);
        return null;
      }
    }

    @Override
    public V get(long timeout, TimeUnit unit)
        throws InterruptedException, ExecutionException, TimeoutException {
      if (!done) {
        long deadlineTs = EnvironmentEdgeManager.currentTime() + unit.toMillis(timeout);
        try {
          try {
            // if the master supports procedures, try to wait for the result
            if (procId != null) {
              result = waitProcedureResult(procId, deadlineTs);
            }
            // if we don't have a proc result, try the compatibility wait
            if (!procResultFound) {
              result = waitOperationResult(deadlineTs);
            }
            result = postOperationResult(result, deadlineTs);
            done = true;
          } catch (IOException e) {
            result = postOperationFailure(e, deadlineTs);
            done = true;
          }
        } catch (IOException e) {
          exception = new ExecutionException(e);
          done = true;
        }
      }
      if (exception != null) {
        throw exception;
      }
      return result;
    }

    @Override
    public boolean isDone() {
      return done;
    }

    protected HBaseAdmin getAdmin() {
      return admin;
    }

    private V waitProcedureResult(long procId, long deadlineTs)
        throws IOException, TimeoutException, InterruptedException {
      GetProcedureResultRequest request = GetProcedureResultRequest.newBuilder()
        .setProcId(procId)
        .build();

      int tries = 0;
      IOException serviceEx = null;
      while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
        GetProcedureResultResponse response = null;
        try {
          // Try to fetch the result
          response = getProcedureResult(request);
        } catch (IOException e) {
          serviceEx = unwrapException(e);

          // The master may be down.
          LOG.warn("Failed to get the procedure result, procId=" + procId, serviceEx);

          // Not much to do if we have a DoNotRetryIOException.
          if (serviceEx instanceof DoNotRetryIOException) {
            // TODO: looks like there is no way to unwrap this exception and get the proper
            // UnsupportedOperationException aside from looking at the message.
            // Anyway, if we fail here we just fail over to the compatibility side,
            // and that is always a valid solution.
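            // Returning null here with procResultFound left false makes get() fall back to the
            // compatibility path, i.e. waitOperationResult(deadlineTs).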
3418 LOG.warn("Proc-v2 is unsupported on this master: " + serviceEx.getMessage(), serviceEx); 3419 procResultFound = false; 3420 return null; 3421 } 3422 } 3423 3424 // If the procedure is no longer running, we should have a result 3425 if (response != null && response.getState() != GetProcedureResultResponse.State.RUNNING) { 3426 procResultFound = response.getState() != GetProcedureResultResponse.State.NOT_FOUND; 3427 return convertResult(response); 3428 } 3429 3430 try { 3431 Thread.sleep(getAdmin().getPauseTime(tries++)); 3432 } catch (InterruptedException e) { 3433 throw new InterruptedException( 3434 "Interrupted while waiting for the result of proc " + procId); 3435 } 3436 } 3437 if (serviceEx != null) { 3438 throw serviceEx; 3439 } else { 3440 throw new TimeoutException("The procedure " + procId + " is still running"); 3441 } 3442 } 3443 3444 private static IOException unwrapException(IOException e) { 3445 if (e instanceof RemoteException) { 3446 return ((RemoteException)e).unwrapRemoteException(); 3447 } 3448 return e; 3449 } 3450 3451 protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request) 3452 throws IOException { 3453 return admin.executeCallable(new MasterCallable<GetProcedureResultResponse>( 3454 admin.getConnection(), admin.getRpcControllerFactory()) { 3455 @Override 3456 protected GetProcedureResultResponse rpcCall() throws Exception { 3457 return master.getProcedureResult(getRpcController(), request); 3458 } 3459 }); 3460 } 3461 3462 /** 3463 * Convert the procedure result response to a specified type. 3464 * @param response the procedure result object to parse 3465 * @return the result data of the procedure. 3466 */ 3467 protected V convertResult(final GetProcedureResultResponse response) throws IOException { 3468 if (response.hasException()) { 3469 throw ForeignExceptionUtil.toIOException(response.getException()); 3470 } 3471 return null; 3472 } 3473 3474 /** 3475 * Fallback implementation in case the procedure is not supported by the server. 3476 * It should try to wait until the operation is completed. 3477 * @param deadlineTs the timestamp after which this method should throw a TimeoutException 3478 * @return the result data of the operation 3479 */ 3480 protected V waitOperationResult(final long deadlineTs) 3481 throws IOException, TimeoutException { 3482 return null; 3483 } 3484 3485 /** 3486 * Called after the operation is completed and the result fetched. this allows to perform extra 3487 * steps after the procedure is completed. it allows to apply transformations to the result that 3488 * will be returned by get(). 3489 * @param result the result of the procedure 3490 * @param deadlineTs the timestamp after which this method should throw a TimeoutException 3491 * @return the result of the procedure, which may be the same as the passed one 3492 */ 3493 protected V postOperationResult(final V result, final long deadlineTs) 3494 throws IOException, TimeoutException { 3495 return result; 3496 } 3497 3498 /** 3499 * Called after the operation is terminated with a failure. 3500 * this allows to perform extra steps after the procedure is terminated. 3501 * it allows to apply transformations to the result that will be returned by get(). 
3502 * The default implementation will rethrow the exception 3503 * @param exception the exception got from fetching the result 3504 * @param deadlineTs the timestamp after which this method should throw a TimeoutException 3505 * @return the result of the procedure, which may be the same as the passed one 3506 */ 3507 protected V postOperationFailure(final IOException exception, final long deadlineTs) 3508 throws IOException, TimeoutException { 3509 throw exception; 3510 } 3511 3512 protected interface WaitForStateCallable { 3513 boolean checkState(int tries) throws IOException; 3514 void throwInterruptedException() throws InterruptedIOException; 3515 void throwTimeoutException(long elapsed) throws TimeoutException; 3516 } 3517 3518 protected void waitForState(final long deadlineTs, final WaitForStateCallable callable) 3519 throws IOException, TimeoutException { 3520 int tries = 0; 3521 IOException serverEx = null; 3522 long startTime = EnvironmentEdgeManager.currentTime(); 3523 while (EnvironmentEdgeManager.currentTime() < deadlineTs) { 3524 serverEx = null; 3525 try { 3526 if (callable.checkState(tries)) { 3527 return; 3528 } 3529 } catch (IOException e) { 3530 serverEx = e; 3531 } 3532 try { 3533 Thread.sleep(getAdmin().getPauseTime(tries++)); 3534 } catch (InterruptedException e) { 3535 callable.throwInterruptedException(); 3536 } 3537 } 3538 if (serverEx != null) { 3539 throw unwrapException(serverEx); 3540 } else { 3541 callable.throwTimeoutException(EnvironmentEdgeManager.currentTime() - startTime); 3542 } 3543 } 3544 } 3545 3546 @InterfaceAudience.Private 3547 @InterfaceStability.Evolving 3548 protected static abstract class TableFuture<V> extends ProcedureFuture<V> { 3549 private final TableName tableName; 3550 3551 public TableFuture(final HBaseAdmin admin, final TableName tableName, final Long procId) { 3552 super(admin, procId); 3553 this.tableName = tableName; 3554 } 3555 3556 @Override 3557 public String toString() { 3558 return getDescription(); 3559 } 3560 3561 /** 3562 * @return the table name 3563 */ 3564 protected TableName getTableName() { 3565 return tableName; 3566 } 3567 3568 /** 3569 * @return the table descriptor 3570 */ 3571 protected TableDescriptor getTableDescriptor() throws IOException { 3572 return getAdmin().getDescriptor(getTableName()); 3573 } 3574 3575 /** 3576 * @return the operation type like CREATE, DELETE, DISABLE etc. 
3577 */ 3578 public abstract String getOperationType(); 3579 3580 /** 3581 * @return a description of the operation 3582 */ 3583 protected String getDescription() { 3584 return "Operation: " + getOperationType() + ", " + "Table Name: " + 3585 tableName.getNameWithNamespaceInclAsString() + ", procId: " + procId; 3586 } 3587 3588 protected abstract class TableWaitForStateCallable implements WaitForStateCallable { 3589 @Override 3590 public void throwInterruptedException() throws InterruptedIOException { 3591 throw new InterruptedIOException("Interrupted while waiting for " + getDescription()); 3592 } 3593 3594 @Override 3595 public void throwTimeoutException(long elapsedTime) throws TimeoutException { 3596 throw new TimeoutException( 3597 getDescription() + " has not completed after " + elapsedTime + "ms"); 3598 } 3599 } 3600 3601 @Override 3602 protected V postOperationResult(final V result, final long deadlineTs) 3603 throws IOException, TimeoutException { 3604 LOG.info(getDescription() + " completed"); 3605 return super.postOperationResult(result, deadlineTs); 3606 } 3607 3608 @Override 3609 protected V postOperationFailure(final IOException exception, final long deadlineTs) 3610 throws IOException, TimeoutException { 3611 LOG.info(getDescription() + " failed with " + exception.getMessage()); 3612 return super.postOperationFailure(exception, deadlineTs); 3613 } 3614 3615 protected void waitForTableEnabled(final long deadlineTs) 3616 throws IOException, TimeoutException { 3617 waitForState(deadlineTs, new TableWaitForStateCallable() { 3618 @Override 3619 public boolean checkState(int tries) throws IOException { 3620 try { 3621 if (getAdmin().isTableAvailable(tableName)) { 3622 return true; 3623 } 3624 } catch (TableNotFoundException tnfe) { 3625 LOG.debug("Table " + tableName.getNameWithNamespaceInclAsString() 3626 + " was not enabled, sleeping. 
tries=" + tries); 3627 } 3628 return false; 3629 } 3630 }); 3631 } 3632 3633 protected void waitForTableDisabled(final long deadlineTs) 3634 throws IOException, TimeoutException { 3635 waitForState(deadlineTs, new TableWaitForStateCallable() { 3636 @Override 3637 public boolean checkState(int tries) throws IOException { 3638 return getAdmin().isTableDisabled(tableName); 3639 } 3640 }); 3641 } 3642 3643 protected void waitTableNotFound(final long deadlineTs) 3644 throws IOException, TimeoutException { 3645 waitForState(deadlineTs, new TableWaitForStateCallable() { 3646 @Override 3647 public boolean checkState(int tries) throws IOException { 3648 return !getAdmin().tableExists(tableName); 3649 } 3650 }); 3651 } 3652 3653 protected void waitForSchemaUpdate(final long deadlineTs) 3654 throws IOException, TimeoutException { 3655 waitForState(deadlineTs, new TableWaitForStateCallable() { 3656 @Override 3657 public boolean checkState(int tries) throws IOException { 3658 return getAdmin().getAlterStatus(tableName).getFirst() == 0; 3659 } 3660 }); 3661 } 3662 3663 protected void waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys) 3664 throws IOException, TimeoutException { 3665 final TableDescriptor desc = getTableDescriptor(); 3666 final AtomicInteger actualRegCount = new AtomicInteger(0); 3667 final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { 3668 @Override 3669 public boolean visit(Result rowResult) throws IOException { 3670 RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult); 3671 if (list == null) { 3672 LOG.warn("No serialized HRegionInfo in " + rowResult); 3673 return true; 3674 } 3675 HRegionLocation l = list.getRegionLocation(); 3676 if (l == null) { 3677 return true; 3678 } 3679 if (!l.getRegionInfo().getTable().equals(desc.getTableName())) { 3680 return false; 3681 } 3682 if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true; 3683 HRegionLocation[] locations = list.getRegionLocations(); 3684 for (HRegionLocation location : locations) { 3685 if (location == null) continue; 3686 ServerName serverName = location.getServerName(); 3687 // Make sure that regions are assigned to server 3688 if (serverName != null && serverName.getHostAndPort() != null) { 3689 actualRegCount.incrementAndGet(); 3690 } 3691 } 3692 return true; 3693 } 3694 }; 3695 3696 int tries = 0; 3697 int numRegs = (splitKeys == null ? 
1 : splitKeys.length + 1) * desc.getRegionReplication(); 3698 while (EnvironmentEdgeManager.currentTime() < deadlineTs) { 3699 actualRegCount.set(0); 3700 MetaTableAccessor.scanMetaForTableRegions(getAdmin().getConnection(), visitor, 3701 desc.getTableName()); 3702 if (actualRegCount.get() == numRegs) { 3703 // all the regions are online 3704 return; 3705 } 3706 3707 try { 3708 Thread.sleep(getAdmin().getPauseTime(tries++)); 3709 } catch (InterruptedException e) { 3710 throw new InterruptedIOException("Interrupted when opening" + " regions; " 3711 + actualRegCount.get() + " of " + numRegs + " regions processed so far"); 3712 } 3713 } 3714 throw new TimeoutException("Only " + actualRegCount.get() + " of " + numRegs 3715 + " regions are online; retries exhausted."); 3716 } 3717 } 3718 3719 @InterfaceAudience.Private 3720 @InterfaceStability.Evolving 3721 protected static abstract class NamespaceFuture extends ProcedureFuture<Void> { 3722 private final String namespaceName; 3723 3724 public NamespaceFuture(final HBaseAdmin admin, final String namespaceName, final Long procId) { 3725 super(admin, procId); 3726 this.namespaceName = namespaceName; 3727 } 3728 3729 /** 3730 * @return the namespace name 3731 */ 3732 protected String getNamespaceName() { 3733 return namespaceName; 3734 } 3735 3736 /** 3737 * @return the operation type like CREATE_NAMESPACE, DELETE_NAMESPACE, etc. 3738 */ 3739 public abstract String getOperationType(); 3740 3741 @Override 3742 public String toString() { 3743 return "Operation: " + getOperationType() + ", Namespace: " + getNamespaceName(); 3744 } 3745 } 3746 3747 @InterfaceAudience.Private 3748 @InterfaceStability.Evolving 3749 private static class ReplicationFuture extends ProcedureFuture<Void> { 3750 private final String peerId; 3751 private final Supplier<String> getOperation; 3752 3753 public ReplicationFuture(HBaseAdmin admin, String peerId, Long procId, 3754 Supplier<String> getOperation) { 3755 super(admin, procId); 3756 this.peerId = peerId; 3757 this.getOperation = getOperation; 3758 } 3759 3760 @Override 3761 public String toString() { 3762 return "Operation: " + getOperation.get() + ", peerId: " + peerId; 3763 } 3764 } 3765 3766 @Override 3767 public List<SecurityCapability> getSecurityCapabilities() throws IOException { 3768 try { 3769 return executeCallable(new MasterCallable<List<SecurityCapability>>(getConnection(), 3770 getRpcControllerFactory()) { 3771 @Override 3772 protected List<SecurityCapability> rpcCall() throws Exception { 3773 SecurityCapabilitiesRequest req = SecurityCapabilitiesRequest.newBuilder().build(); 3774 return ProtobufUtil.toSecurityCapabilityList( 3775 master.getSecurityCapabilities(getRpcController(), req).getCapabilitiesList()); 3776 } 3777 }); 3778 } catch (IOException e) { 3779 if (e instanceof RemoteException) { 3780 e = ((RemoteException)e).unwrapRemoteException(); 3781 } 3782 throw e; 3783 } 3784 } 3785 3786 @Override 3787 public boolean splitSwitch(boolean enabled, boolean synchronous) throws IOException { 3788 return splitOrMergeSwitch(enabled, synchronous, MasterSwitchType.SPLIT); 3789 } 3790 3791 @Override 3792 public boolean mergeSwitch(boolean enabled, boolean synchronous) throws IOException { 3793 return splitOrMergeSwitch(enabled, synchronous, MasterSwitchType.MERGE); 3794 } 3795 3796 private boolean splitOrMergeSwitch(boolean enabled, boolean synchronous, 3797 MasterSwitchType switchType) throws IOException { 3798 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 3799 
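      // The master reports the previous switch value(s) in its response; the first entry is
      // surfaced as the prior state of the requested split/merge switch.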
@Override 3800 protected Boolean rpcCall() throws Exception { 3801 MasterProtos.SetSplitOrMergeEnabledResponse response = master.setSplitOrMergeEnabled( 3802 getRpcController(), 3803 RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchType)); 3804 return response.getPrevValueList().get(0); 3805 } 3806 }); 3807 } 3808 3809 @Override 3810 public boolean isSplitEnabled() throws IOException { 3811 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 3812 @Override 3813 protected Boolean rpcCall() throws Exception { 3814 return master.isSplitOrMergeEnabled(getRpcController(), 3815 RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.SPLIT)).getEnabled(); 3816 } 3817 }); 3818 } 3819 3820 @Override 3821 public boolean isMergeEnabled() throws IOException { 3822 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 3823 @Override 3824 protected Boolean rpcCall() throws Exception { 3825 return master.isSplitOrMergeEnabled(getRpcController(), 3826 RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.MERGE)).getEnabled(); 3827 } 3828 }); 3829 } 3830 3831 private RpcControllerFactory getRpcControllerFactory() { 3832 return this.rpcControllerFactory; 3833 } 3834 3835 @Override 3836 public Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig, 3837 boolean enabled) throws IOException { 3838 AddReplicationPeerResponse response = executeCallable( 3839 new MasterCallable<AddReplicationPeerResponse>(getConnection(), getRpcControllerFactory()) { 3840 @Override 3841 protected AddReplicationPeerResponse rpcCall() throws Exception { 3842 return master.addReplicationPeer(getRpcController(), 3843 RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled)); 3844 } 3845 }); 3846 return new ReplicationFuture(this, peerId, response.getProcId(), () -> "ADD_REPLICATION_PEER"); 3847 } 3848 3849 @Override 3850 public Future<Void> removeReplicationPeerAsync(String peerId) throws IOException { 3851 RemoveReplicationPeerResponse response = 3852 executeCallable(new MasterCallable<RemoveReplicationPeerResponse>(getConnection(), 3853 getRpcControllerFactory()) { 3854 @Override 3855 protected RemoveReplicationPeerResponse rpcCall() throws Exception { 3856 return master.removeReplicationPeer(getRpcController(), 3857 RequestConverter.buildRemoveReplicationPeerRequest(peerId)); 3858 } 3859 }); 3860 return new ReplicationFuture(this, peerId, response.getProcId(), 3861 () -> "REMOVE_REPLICATION_PEER"); 3862 } 3863 3864 @Override 3865 public Future<Void> enableReplicationPeerAsync(final String peerId) throws IOException { 3866 EnableReplicationPeerResponse response = 3867 executeCallable(new MasterCallable<EnableReplicationPeerResponse>(getConnection(), 3868 getRpcControllerFactory()) { 3869 @Override 3870 protected EnableReplicationPeerResponse rpcCall() throws Exception { 3871 return master.enableReplicationPeer(getRpcController(), 3872 RequestConverter.buildEnableReplicationPeerRequest(peerId)); 3873 } 3874 }); 3875 return new ReplicationFuture(this, peerId, response.getProcId(), 3876 () -> "ENABLE_REPLICATION_PEER"); 3877 } 3878 3879 @Override 3880 public Future<Void> disableReplicationPeerAsync(final String peerId) throws IOException { 3881 DisableReplicationPeerResponse response = 3882 executeCallable(new MasterCallable<DisableReplicationPeerResponse>(getConnection(), 3883 getRpcControllerFactory()) { 3884 @Override 3885 protected 
DisableReplicationPeerResponse rpcCall() throws Exception { 3886 return master.disableReplicationPeer(getRpcController(), 3887 RequestConverter.buildDisableReplicationPeerRequest(peerId)); 3888 } 3889 }); 3890 return new ReplicationFuture(this, peerId, response.getProcId(), 3891 () -> "DISABLE_REPLICATION_PEER"); 3892 } 3893 3894 @Override 3895 public ReplicationPeerConfig getReplicationPeerConfig(final String peerId) throws IOException { 3896 return executeCallable(new MasterCallable<ReplicationPeerConfig>(getConnection(), 3897 getRpcControllerFactory()) { 3898 @Override 3899 protected ReplicationPeerConfig rpcCall() throws Exception { 3900 GetReplicationPeerConfigResponse response = master.getReplicationPeerConfig( 3901 getRpcController(), RequestConverter.buildGetReplicationPeerConfigRequest(peerId)); 3902 return ReplicationPeerConfigUtil.convert(response.getPeerConfig()); 3903 } 3904 }); 3905 } 3906 3907 @Override 3908 public Future<Void> updateReplicationPeerConfigAsync(final String peerId, 3909 final ReplicationPeerConfig peerConfig) throws IOException { 3910 UpdateReplicationPeerConfigResponse response = 3911 executeCallable(new MasterCallable<UpdateReplicationPeerConfigResponse>(getConnection(), 3912 getRpcControllerFactory()) { 3913 @Override 3914 protected UpdateReplicationPeerConfigResponse rpcCall() throws Exception { 3915 return master.updateReplicationPeerConfig(getRpcController(), 3916 RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig)); 3917 } 3918 }); 3919 return new ReplicationFuture(this, peerId, response.getProcId(), 3920 () -> "UPDATE_REPLICATION_PEER_CONFIG"); 3921 } 3922 3923 @Override 3924 public List<ReplicationPeerDescription> listReplicationPeers() throws IOException { 3925 return listReplicationPeers((Pattern)null); 3926 } 3927 3928 @Override 3929 public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) 3930 throws IOException { 3931 return executeCallable(new MasterCallable<List<ReplicationPeerDescription>>(getConnection(), 3932 getRpcControllerFactory()) { 3933 @Override 3934 protected List<ReplicationPeerDescription> rpcCall() throws Exception { 3935 List<ReplicationProtos.ReplicationPeerDescription> peersList = master.listReplicationPeers( 3936 getRpcController(), RequestConverter.buildListReplicationPeersRequest(pattern)) 3937 .getPeerDescList(); 3938 List<ReplicationPeerDescription> result = new ArrayList<>(peersList.size()); 3939 for (ReplicationProtos.ReplicationPeerDescription peer : peersList) { 3940 result.add(ReplicationPeerConfigUtil.toReplicationPeerDescription(peer)); 3941 } 3942 return result; 3943 } 3944 }); 3945 } 3946 3947 @Override 3948 public void decommissionRegionServers(List<ServerName> servers, boolean offload) 3949 throws IOException { 3950 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 3951 @Override 3952 public Void rpcCall() throws ServiceException { 3953 master.decommissionRegionServers(getRpcController(), 3954 RequestConverter.buildDecommissionRegionServersRequest(servers, offload)); 3955 return null; 3956 } 3957 }); 3958 } 3959 3960 @Override 3961 public List<ServerName> listDecommissionedRegionServers() throws IOException { 3962 return executeCallable(new MasterCallable<List<ServerName>>(getConnection(), 3963 getRpcControllerFactory()) { 3964 @Override 3965 public List<ServerName> rpcCall() throws ServiceException { 3966 ListDecommissionedRegionServersRequest req = 3967 ListDecommissionedRegionServersRequest.newBuilder().build(); 3968 
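        // Translate each protobuf ServerName in the response into the client-side ServerName type.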
        List<ServerName> servers = new ArrayList<>();
        for (HBaseProtos.ServerName server : master
            .listDecommissionedRegionServers(getRpcController(), req).getServerNameList()) {
          servers.add(ProtobufUtil.toServerName(server));
        }
        return servers;
      }
    });
  }

  @Override
  public void recommissionRegionServer(ServerName server, List<byte[]> encodedRegionNames)
      throws IOException {
    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
      @Override
      public Void rpcCall() throws ServiceException {
        master.recommissionRegionServer(getRpcController(),
          RequestConverter.buildRecommissionRegionServerRequest(server, encodedRegionNames));
        return null;
      }
    });
  }

  @Override
  public List<TableCFs> listReplicatedTableCFs() throws IOException {
    List<TableCFs> replicatedTableCFs = new ArrayList<>();
    List<TableDescriptor> tables = listTableDescriptors();
    tables.forEach(table -> {
      Map<String, Integer> cfs = new HashMap<>();
      Stream.of(table.getColumnFamilies())
        .filter(column -> column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL)
        .forEach(column -> {
          cfs.put(column.getNameAsString(), column.getScope());
        });
      if (!cfs.isEmpty()) {
        replicatedTableCFs.add(new TableCFs(table.getTableName(), cfs));
      }
    });
    return replicatedTableCFs;
  }

  @Override
  public void enableTableReplication(final TableName tableName) throws IOException {
    if (tableName == null) {
      throw new IllegalArgumentException("Table name cannot be null");
    }
    if (!tableExists(tableName)) {
      throw new TableNotFoundException("Table '" + tableName.getNameAsString()
          + "' does not exist.");
    }
    byte[][] splits = getTableSplits(tableName);
    checkAndSyncTableDescToPeers(tableName, splits);
    setTableRep(tableName, true);
  }

  @Override
  public void disableTableReplication(final TableName tableName) throws IOException {
    if (tableName == null) {
      throw new IllegalArgumentException("Table name cannot be null");
    }
    if (!tableExists(tableName)) {
      throw new TableNotFoundException("Table '" + tableName.getNameAsString()
          + "' does not exist.");
    }
    setTableRep(tableName, false);
  }

  /**
   * Connect to the peer cluster(s) and check the table descriptor on the peer:
   * <ol>
   * <li>Create the same table on the peer if it does not exist there.</li>
   * <li>Throw an exception if the table already has replication enabled on any of the column
   * families.</li>
   * <li>Throw an exception if the table exists on the peer cluster but the descriptors are not
   * the same.</li>
   * </ol>
   * @param tableName name of the table to sync to the peer
   * @param splits table split keys
   * @throws IOException if a remote or network exception occurs
   */
  private void checkAndSyncTableDescToPeers(final TableName tableName, final byte[][] splits)
      throws IOException {
    List<ReplicationPeerDescription> peers = listReplicationPeers();
    if (peers == null || peers.size() <= 0) {
      throw new IllegalArgumentException("Found no peer cluster for replication.");
    }

    for (ReplicationPeerDescription peerDesc : peers) {
      if (peerDesc.getPeerConfig().needToReplicate(tableName)) {
        Configuration peerConf =
            ReplicationPeerConfigUtil.getPeerClusterConfiguration(this.conf, peerDesc);
        try (Connection conn = ConnectionFactory.createConnection(peerConf);
            Admin repHBaseAdmin = conn.getAdmin()) {
          TableDescriptor tableDesc = getDescriptor(tableName);
          TableDescriptor peerTableDesc = null;
          if (!repHBaseAdmin.tableExists(tableName)) {
            repHBaseAdmin.createTable(tableDesc, splits);
          } else {
            peerTableDesc = repHBaseAdmin.getDescriptor(tableName);
            if (peerTableDesc == null) {
              throw new IllegalArgumentException("Failed to get table descriptor for table "
                  + tableName.getNameAsString() + " from peer cluster " + peerDesc.getPeerId());
            }
            if (TableDescriptor.COMPARATOR_IGNORE_REPLICATION.compare(peerTableDesc,
                tableDesc) != 0) {
              throw new IllegalArgumentException("Table " + tableName.getNameAsString()
                  + " exists in peer cluster " + peerDesc.getPeerId()
                  + ", but the table descriptors do not match those in the source cluster,"
                  + " so the table's replication switch cannot be enabled.");
            }
          }
        }
      }
    }
  }

  /**
   * Set the table's replication scope if it does not already match the requested state.
   * @param tableName name of the table
   * @param enableRep whether replication should be enabled or disabled for the table
   * @throws IOException if a remote or network exception occurs
   */
  private void setTableRep(final TableName tableName, boolean enableRep) throws IOException {
    TableDescriptor tableDesc = getDescriptor(tableName);
    if (!tableDesc.matchReplicationScope(enableRep)) {
      int scope =
          enableRep ? HConstants.REPLICATION_SCOPE_GLOBAL : HConstants.REPLICATION_SCOPE_LOCAL;
      modifyTable(TableDescriptorBuilder.newBuilder(tableDesc).setReplicationScope(scope).build());
    }
  }

  @Override
  public void clearCompactionQueues(final ServerName sn, final Set<String> queues)
      throws IOException, InterruptedException {
    if (queues == null || queues.size() == 0) {
      throw new IllegalArgumentException("queues cannot be null or empty");
    }
    final AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
    Callable<Void> callable = new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        // TODO: There is no timeout on this controller. Set one!
        HBaseRpcController controller = rpcControllerFactory.newController();
        ClearCompactionQueuesRequest request =
            RequestConverter.buildClearCompactionQueuesRequest(queues);
        admin.clearCompactionQueues(controller, request);
        return null;
      }
    };
    ProtobufUtil.call(callable);
  }

  @Override
  public List<ServerName> clearDeadServers(List<ServerName> servers) throws IOException {
    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
        getRpcControllerFactory()) {
      @Override
      protected List<ServerName> rpcCall() throws Exception {
        ClearDeadServersRequest req = RequestConverter.
            buildClearDeadServersRequest(servers == null ?
Collections.EMPTY_LIST: servers); 4127 return ProtobufUtil.toServerNameList( 4128 master.clearDeadServers(getRpcController(), req).getServerNameList()); 4129 } 4130 }); 4131 } 4132 4133 @Override 4134 public void cloneTableSchema(final TableName tableName, final TableName newTableName, 4135 final boolean preserveSplits) throws IOException { 4136 checkTableExists(tableName); 4137 if (tableExists(newTableName)) { 4138 throw new TableExistsException(newTableName); 4139 } 4140 TableDescriptor htd = TableDescriptorBuilder.copy(newTableName, getTableDescriptor(tableName)); 4141 if (preserveSplits) { 4142 createTable(htd, getTableSplits(tableName)); 4143 } else { 4144 createTable(htd); 4145 } 4146 } 4147 4148 @Override 4149 public boolean switchRpcThrottle(final boolean enable) throws IOException { 4150 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 4151 @Override 4152 protected Boolean rpcCall() throws Exception { 4153 return this.master 4154 .switchRpcThrottle(getRpcController(), MasterProtos.SwitchRpcThrottleRequest 4155 .newBuilder().setRpcThrottleEnabled(enable).build()) 4156 .getPreviousRpcThrottleEnabled(); 4157 } 4158 }); 4159 } 4160 4161 @Override 4162 public boolean isRpcThrottleEnabled() throws IOException { 4163 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 4164 @Override 4165 protected Boolean rpcCall() throws Exception { 4166 return this.master.isRpcThrottleEnabled(getRpcController(), 4167 IsRpcThrottleEnabledRequest.newBuilder().build()).getRpcThrottleEnabled(); 4168 } 4169 }); 4170 } 4171 4172 @Override 4173 public boolean exceedThrottleQuotaSwitch(final boolean enable) throws IOException { 4174 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) { 4175 @Override 4176 protected Boolean rpcCall() throws Exception { 4177 return this.master 4178 .switchExceedThrottleQuota(getRpcController(), 4179 MasterProtos.SwitchExceedThrottleQuotaRequest.newBuilder() 4180 .setExceedThrottleQuotaEnabled(enable).build()) 4181 .getPreviousExceedThrottleQuotaEnabled(); 4182 } 4183 }); 4184 } 4185 4186 @Override 4187 public Map<TableName, Long> getSpaceQuotaTableSizes() throws IOException { 4188 return executeCallable( 4189 new MasterCallable<Map<TableName, Long>>(getConnection(), getRpcControllerFactory()) { 4190 @Override 4191 protected Map<TableName, Long> rpcCall() throws Exception { 4192 GetSpaceQuotaRegionSizesResponse resp = master.getSpaceQuotaRegionSizes( 4193 getRpcController(), RequestConverter.buildGetSpaceQuotaRegionSizesRequest()); 4194 Map<TableName, Long> tableSizes = new HashMap<>(); 4195 for (RegionSizes sizes : resp.getSizesList()) { 4196 TableName tn = ProtobufUtil.toTableName(sizes.getTableName()); 4197 tableSizes.put(tn, sizes.getSize()); 4198 } 4199 return tableSizes; 4200 } 4201 }); 4202 } 4203 4204 @Override 4205 public Map<TableName, SpaceQuotaSnapshot> getRegionServerSpaceQuotaSnapshots( 4206 ServerName serverName) throws IOException { 4207 final AdminService.BlockingInterface admin = this.connection.getAdmin(serverName); 4208 Callable<GetSpaceQuotaSnapshotsResponse> callable = 4209 new Callable<GetSpaceQuotaSnapshotsResponse>() { 4210 @Override 4211 public GetSpaceQuotaSnapshotsResponse call() throws Exception { 4212 return admin.getSpaceQuotaSnapshots(rpcControllerFactory.newController(), 4213 RequestConverter.buildGetSpaceQuotaSnapshotsRequest()); 4214 } 4215 }; 4216 GetSpaceQuotaSnapshotsResponse resp = 
ProtobufUtil.call(callable); 4217 Map<TableName, SpaceQuotaSnapshot> snapshots = new HashMap<>(); 4218 for (TableQuotaSnapshot snapshot : resp.getSnapshotsList()) { 4219 snapshots.put(ProtobufUtil.toTableName(snapshot.getTableName()), 4220 SpaceQuotaSnapshot.toSpaceQuotaSnapshot(snapshot.getSnapshot())); 4221 } 4222 return snapshots; 4223 } 4224 4225 @Override 4226 public SpaceQuotaSnapshot getCurrentSpaceQuotaSnapshot(String namespace) throws IOException { 4227 return executeCallable( 4228 new MasterCallable<SpaceQuotaSnapshot>(getConnection(), getRpcControllerFactory()) { 4229 @Override 4230 protected SpaceQuotaSnapshot rpcCall() throws Exception { 4231 GetQuotaStatesResponse resp = master.getQuotaStates(getRpcController(), 4232 RequestConverter.buildGetQuotaStatesRequest()); 4233 for (GetQuotaStatesResponse.NamespaceQuotaSnapshot nsSnapshot : resp 4234 .getNsSnapshotsList()) { 4235 if (namespace.equals(nsSnapshot.getNamespace())) { 4236 return SpaceQuotaSnapshot.toSpaceQuotaSnapshot(nsSnapshot.getSnapshot()); 4237 } 4238 } 4239 return null; 4240 } 4241 }); 4242 } 4243 4244 @Override 4245 public SpaceQuotaSnapshot getCurrentSpaceQuotaSnapshot(TableName tableName) throws IOException { 4246 return executeCallable( 4247 new MasterCallable<SpaceQuotaSnapshot>(getConnection(), getRpcControllerFactory()) { 4248 @Override 4249 protected SpaceQuotaSnapshot rpcCall() throws Exception { 4250 GetQuotaStatesResponse resp = master.getQuotaStates(getRpcController(), 4251 RequestConverter.buildGetQuotaStatesRequest()); 4252 HBaseProtos.TableName protoTableName = ProtobufUtil.toProtoTableName(tableName); 4253 for (GetQuotaStatesResponse.TableQuotaSnapshot tableSnapshot : resp 4254 .getTableSnapshotsList()) { 4255 if (protoTableName.equals(tableSnapshot.getTableName())) { 4256 return SpaceQuotaSnapshot.toSpaceQuotaSnapshot(tableSnapshot.getSnapshot()); 4257 } 4258 } 4259 return null; 4260 } 4261 }); 4262 } 4263 4264 @Override 4265 public void grant(UserPermission userPermission, boolean mergeExistingPermissions) 4266 throws IOException { 4267 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 4268 @Override 4269 protected Void rpcCall() throws Exception { 4270 GrantRequest req = 4271 ShadedAccessControlUtil.buildGrantRequest(userPermission, mergeExistingPermissions); 4272 this.master.grant(getRpcController(), req); 4273 return null; 4274 } 4275 }); 4276 } 4277 4278 @Override 4279 public void revoke(UserPermission userPermission) throws IOException { 4280 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) { 4281 @Override 4282 protected Void rpcCall() throws Exception { 4283 RevokeRequest req = ShadedAccessControlUtil.buildRevokeRequest(userPermission); 4284 this.master.revoke(getRpcController(), req); 4285 return null; 4286 } 4287 }); 4288 } 4289 4290 @Override 4291 public List<UserPermission> 4292 getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) throws IOException { 4293 return executeCallable( 4294 new MasterCallable<List<UserPermission>>(getConnection(), getRpcControllerFactory()) { 4295 @Override 4296 protected List<UserPermission> rpcCall() throws Exception { 4297 AccessControlProtos.GetUserPermissionsRequest req = 4298 ShadedAccessControlUtil.buildGetUserPermissionsRequest(getUserPermissionsRequest); 4299 AccessControlProtos.GetUserPermissionsResponse response = 4300 this.master.getUserPermissions(getRpcController(), req); 4301 return response.getUserPermissionList().stream() 4302 .map(userPermission -> 
ShadedAccessControlUtil.toUserPermission(userPermission)) 4303 .collect(Collectors.toList()); 4304 } 4305 }); 4306 } 4307 4308 @Override 4309 public Future<Void> splitRegionAsync(byte[] regionName) throws IOException { 4310 return splitRegionAsync(regionName, null); 4311 } 4312 4313 @Override 4314 public Future<Void> createTableAsync(TableDescriptor desc) throws IOException { 4315 return createTableAsync(desc, null); 4316 } 4317 4318 @Override 4319 public List<Boolean> hasUserPermissions(String userName, List<Permission> permissions) 4320 throws IOException { 4321 return executeCallable( 4322 new MasterCallable<List<Boolean>>(getConnection(), getRpcControllerFactory()) { 4323 @Override 4324 protected List<Boolean> rpcCall() throws Exception { 4325 HasUserPermissionsRequest request = 4326 ShadedAccessControlUtil.buildHasUserPermissionsRequest(userName, permissions); 4327 return this.master.hasUserPermissions(getRpcController(), request) 4328 .getHasUserPermissionList(); 4329 } 4330 }); 4331 } 4332 4333 @Override 4334 public boolean snapshotCleanupSwitch(boolean on, boolean synchronous) throws IOException { 4335 return executeCallable(new MasterCallable<Boolean>(getConnection(), 4336 getRpcControllerFactory()) { 4337 4338 @Override 4339 protected Boolean rpcCall() throws Exception { 4340 SetSnapshotCleanupRequest req = 4341 RequestConverter.buildSetSnapshotCleanupRequest(on, synchronous); 4342 return master.switchSnapshotCleanup(getRpcController(), req).getPrevSnapshotCleanup(); 4343 } 4344 }); 4345 4346 } 4347 4348 @Override 4349 public boolean isSnapshotCleanupEnabled() throws IOException { 4350 return executeCallable(new MasterCallable<Boolean>(getConnection(), 4351 getRpcControllerFactory()) { 4352 4353 @Override 4354 protected Boolean rpcCall() throws Exception { 4355 IsSnapshotCleanupEnabledRequest req = 4356 RequestConverter.buildIsSnapshotCleanupEnabledRequest(); 4357 return master.isSnapshotCleanupEnabled(getRpcController(), req).getEnabled(); 4358 } 4359 }); 4360 4361 } 4362 4363 @Override 4364 public List<OnlineLogRecord> getSlowLogResponses(@Nullable final Set<ServerName> serverNames, 4365 final LogQueryFilter logQueryFilter) throws IOException { 4366 if (CollectionUtils.isEmpty(serverNames)) { 4367 return Collections.emptyList(); 4368 } 4369 return serverNames.stream().map(serverName -> { 4370 try { 4371 return getSlowLogResponseFromServer(serverName, logQueryFilter); 4372 } catch (IOException e) { 4373 throw new RuntimeException(e); 4374 } 4375 } 4376 ).flatMap(List::stream).collect(Collectors.toList()); 4377 } 4378 4379 private List<OnlineLogRecord> getSlowLogResponseFromServer(final ServerName serverName, 4380 final LogQueryFilter logQueryFilter) throws IOException { 4381 return getSlowLogResponsesFromServer(this.connection.getAdmin(serverName), logQueryFilter); 4382 } 4383 4384 private List<OnlineLogRecord> getSlowLogResponsesFromServer(AdminService.BlockingInterface admin, 4385 LogQueryFilter logQueryFilter) throws IOException { 4386 return executeCallable(new RpcRetryingCallable<List<OnlineLogRecord>>() { 4387 @Override 4388 protected List<OnlineLogRecord> rpcCall(int callTimeout) throws Exception { 4389 HBaseRpcController controller = rpcControllerFactory.newController(); 4390 if (logQueryFilter.getType() == null 4391 || logQueryFilter.getType() == LogQueryFilter.Type.SLOW_LOG) { 4392 AdminProtos.SlowLogResponses slowLogResponses = 4393 admin.getSlowLogResponses(controller, 4394 RequestConverter.buildSlowLogResponseRequest(logQueryFilter)); 4395 return 
ProtobufUtil.toSlowLogPayloads(slowLogResponses); 4396 } else { 4397 AdminProtos.SlowLogResponses slowLogResponses = 4398 admin.getLargeLogResponses(controller, 4399 RequestConverter.buildSlowLogResponseRequest(logQueryFilter)); 4400 return ProtobufUtil.toSlowLogPayloads(slowLogResponses); 4401 } 4402 } 4403 }); 4404 } 4405 4406 @Override 4407 public List<Boolean> clearSlowLogResponses(@Nullable final Set<ServerName> serverNames) 4408 throws IOException { 4409 if (CollectionUtils.isEmpty(serverNames)) { 4410 return Collections.emptyList(); 4411 } 4412 return serverNames.stream().map(serverName -> { 4413 try { 4414 return clearSlowLogsResponses(serverName); 4415 } catch (IOException e) { 4416 throw new RuntimeException(e); 4417 } 4418 }).collect(Collectors.toList()); 4419 } 4420 4421 private Boolean clearSlowLogsResponses(final ServerName serverName) throws IOException { 4422 AdminService.BlockingInterface admin = this.connection.getAdmin(serverName); 4423 return executeCallable(new RpcRetryingCallable<Boolean>() { 4424 @Override 4425 protected Boolean rpcCall(int callTimeout) throws Exception { 4426 HBaseRpcController controller = rpcControllerFactory.newController(); 4427 AdminProtos.ClearSlowLogResponses clearSlowLogResponses = 4428 admin.clearSlowLogsResponses(controller, 4429 RequestConverter.buildClearSlowLogResponseRequest()); 4430 return ProtobufUtil.toClearSlowLogPayload(clearSlowLogResponses); 4431 } 4432 }); 4433 } 4434 4435}