001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.client;
019
020import com.google.protobuf.Descriptors;
021import com.google.protobuf.Message;
022import com.google.protobuf.RpcController;
023import edu.umd.cs.findbugs.annotations.Nullable;
024import java.io.Closeable;
025import java.io.IOException;
026import java.io.InterruptedIOException;
027import java.util.ArrayList;
028import java.util.Arrays;
029import java.util.Collections;
030import java.util.EnumSet;
031import java.util.HashMap;
032import java.util.Iterator;
033import java.util.List;
034import java.util.Map;
035import java.util.Set;
036import java.util.concurrent.Callable;
037import java.util.concurrent.ExecutionException;
038import java.util.concurrent.Future;
039import java.util.concurrent.TimeUnit;
040import java.util.concurrent.TimeoutException;
041import java.util.concurrent.atomic.AtomicInteger;
042import java.util.concurrent.atomic.AtomicReference;
043import java.util.function.Supplier;
044import java.util.regex.Pattern;
045import java.util.stream.Collectors;
046import java.util.stream.Stream;
047import org.apache.hadoop.conf.Configuration;
048import org.apache.hadoop.hbase.CacheEvictionStats;
049import org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
050import org.apache.hadoop.hbase.ClusterMetrics;
051import org.apache.hadoop.hbase.ClusterMetrics.Option;
052import org.apache.hadoop.hbase.ClusterMetricsBuilder;
053import org.apache.hadoop.hbase.DoNotRetryIOException;
054import org.apache.hadoop.hbase.HBaseConfiguration;
055import org.apache.hadoop.hbase.HConstants;
056import org.apache.hadoop.hbase.HRegionInfo;
057import org.apache.hadoop.hbase.HRegionLocation;
058import org.apache.hadoop.hbase.HTableDescriptor;
059import org.apache.hadoop.hbase.MasterNotRunningException;
060import org.apache.hadoop.hbase.MetaTableAccessor;
061import org.apache.hadoop.hbase.NamespaceDescriptor;
062import org.apache.hadoop.hbase.NamespaceNotFoundException;
063import org.apache.hadoop.hbase.NotServingRegionException;
064import org.apache.hadoop.hbase.RegionLocations;
065import org.apache.hadoop.hbase.RegionMetrics;
066import org.apache.hadoop.hbase.RegionMetricsBuilder;
067import org.apache.hadoop.hbase.ServerName;
068import org.apache.hadoop.hbase.TableExistsException;
069import org.apache.hadoop.hbase.TableName;
070import org.apache.hadoop.hbase.TableNotDisabledException;
071import org.apache.hadoop.hbase.TableNotFoundException;
072import org.apache.hadoop.hbase.UnknownRegionException;
073import org.apache.hadoop.hbase.ZooKeeperConnectionException;
074import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
075import org.apache.hadoop.hbase.client.replication.TableCFs;
076import org.apache.hadoop.hbase.client.security.SecurityCapability;
077import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
078import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
079import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
080import org.apache.hadoop.hbase.ipc.HBaseRpcController;
081import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
082import org.apache.hadoop.hbase.quotas.QuotaFilter;
083import org.apache.hadoop.hbase.quotas.QuotaRetriever;
084import org.apache.hadoop.hbase.quotas.QuotaSettings;
085import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
086import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
087import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
088import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
089import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
090import org.apache.hadoop.hbase.security.access.Permission;
091import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
092import org.apache.hadoop.hbase.security.access.UserPermission;
093import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
094import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
095import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
096import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
097import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
098import org.apache.hadoop.hbase.util.Addressing;
099import org.apache.hadoop.hbase.util.Bytes;
100import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
101import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
102import org.apache.hadoop.hbase.util.Pair;
103import org.apache.hadoop.ipc.RemoteException;
104import org.apache.hadoop.util.StringUtils;
105import org.apache.yetus.audience.InterfaceAudience;
106import org.apache.yetus.audience.InterfaceStability;
107import org.slf4j.Logger;
108import org.slf4j.LoggerFactory;
109
110import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
111import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
112import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
113
114import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
115import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
116import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;
117import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest;
118import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest;
119import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest;
120import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
121import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
122import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
123import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
124import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
125import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
126import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest;
127import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse;
128import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
129import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
130import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
131import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
132import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
133import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
134import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
135import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
136import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
137import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
138import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
139import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
140import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
141import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
155import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
156import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
157import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
158import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
159import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
160import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
161import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
162import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
163import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
164import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
165import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
166import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreRequest;
167import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
168import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
169import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
170import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
171import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
172import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
173import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
174import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
175import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
176import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
177import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
178import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
179import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
180import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
183import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
184import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
185import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsRpcThrottleEnabledRequest;
186import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
187import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
188import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
189import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
190import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
191import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesRequest;
192import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
193import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
194import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
195import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
196import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
197import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
198import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
199import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
200import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerRequest;
201import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerResponse;
202import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
203import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
204import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
205import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
206import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerRequest;
207import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerResponse;
208import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
209import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
210import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
211import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
212import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
213import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
214import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
215import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
216import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
217import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
218import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
219import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
220import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
221import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
222import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
223import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
224import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
225import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
226import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
227import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
228import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
229import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
230import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
231import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
232import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
233import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
234import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
235import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
236import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
237
238/**
239 * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that this
240 * is an HBase-internal class as defined in
241 * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
242 * There are no guarantees for backwards source / binary compatibility and methods or class can
243 * change or go away without deprecation. Use {@link Connection#getAdmin()} to obtain an instance of
244 * {@link Admin} instead of constructing an HBaseAdmin directly.
245 * <p>
246 * Connection should be an <i>unmanaged</i> connection obtained via
247 * {@link ConnectionFactory#createConnection(Configuration)}
248 * @see ConnectionFactory
249 * @see Connection
250 * @see Admin
251 */
252@InterfaceAudience.Private
253public class HBaseAdmin implements Admin {
254  private static final Logger LOG = LoggerFactory.getLogger(HBaseAdmin.class);
255
256  private ClusterConnection connection;
257
258  private final Configuration conf;
259  private final long pause;
260  private final int numRetries;
261  private final int syncWaitTimeout;
262  private boolean aborted;
263  private int operationTimeout;
264  private int rpcTimeout;
265  private int getProcedureTimeout;
266
267  private RpcRetryingCallerFactory rpcCallerFactory;
268  private RpcControllerFactory rpcControllerFactory;
269
270  private NonceGenerator ng;
271
272  @Override
273  public int getOperationTimeout() {
274    return operationTimeout;
275  }
276
277  @Override
278  public int getSyncWaitTimeout() {
279    return syncWaitTimeout;
280  }
281
282  HBaseAdmin(ClusterConnection connection) throws IOException {
283    this.conf = connection.getConfiguration();
284    this.connection = connection;
285
286    // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time.
287    this.pause =
288      this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
289    this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
290      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
291    this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
292      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
293    this.rpcTimeout =
294      this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
295    this.syncWaitTimeout = this.conf.getInt("hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
296    this.getProcedureTimeout =
297      this.conf.getInt("hbase.client.procedure.future.get.timeout.msec", 10 * 60000); // 10min
298
299    this.rpcCallerFactory = connection.getRpcRetryingCallerFactory();
300    this.rpcControllerFactory = connection.getRpcControllerFactory();
301
302    this.ng = this.connection.getNonceGenerator();
303  }
304
305  @Override
306  public void abort(String why, Throwable e) {
307    // Currently does nothing but throw the passed message and exception
308    this.aborted = true;
309    throw new RuntimeException(why, e);
310  }
311
312  @Override
313  public boolean isAborted() {
314    return this.aborted;
315  }
316
317  @Override
318  public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
319    throws IOException {
320    return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
321      TimeUnit.MILLISECONDS);
322  }
323
324  @Override
325  public Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
326    throws IOException {
327    Boolean abortProcResponse = executeCallable(
328      new MasterCallable<AbortProcedureResponse>(getConnection(), getRpcControllerFactory()) {
329        @Override
330        protected AbortProcedureResponse rpcCall() throws Exception {
331          AbortProcedureRequest abortProcRequest =
332            AbortProcedureRequest.newBuilder().setProcId(procId).build();
333          return master.abortProcedure(getRpcController(), abortProcRequest);
334        }
335      }).getIsProcedureAborted();
336    return new AbortProcedureFuture(this, procId, abortProcResponse);
337  }
338
339  @Override
340  public List<TableDescriptor> listTableDescriptors() throws IOException {
341    return listTableDescriptors((Pattern) null, false);
342  }
343
344  @Override
345  public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
346    throws IOException {
347    return executeCallable(
348      new MasterCallable<List<TableDescriptor>>(getConnection(), getRpcControllerFactory()) {
349        @Override
350        protected List<TableDescriptor> rpcCall() throws Exception {
351          GetTableDescriptorsRequest req =
352            RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
353          return ProtobufUtil
354            .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req));
355        }
356      });
357  }
358
359  @Override
360  public TableDescriptor getDescriptor(TableName tableName)
361    throws TableNotFoundException, IOException {
362    return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory,
363      operationTimeout, rpcTimeout);
364  }
365
366  @Override
367  public Future<Void> modifyTableAsync(TableDescriptor td) throws IOException {
368    ModifyTableResponse response = executeCallable(
369      new MasterCallable<ModifyTableResponse>(getConnection(), getRpcControllerFactory()) {
370        long nonceGroup = ng.getNonceGroup();
371        long nonce = ng.newNonce();
372
373        @Override
374        protected ModifyTableResponse rpcCall() throws Exception {
375          setPriority(td.getTableName());
376          ModifyTableRequest request =
377            RequestConverter.buildModifyTableRequest(td.getTableName(), td, nonceGroup, nonce);
378          return master.modifyTable(getRpcController(), request);
379        }
380      });
381    return new ModifyTableFuture(this, td.getTableName(), response);
382  }
383
384  @Override
385  public Future<Void> modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT)
386    throws IOException {
387    ModifyTableStoreFileTrackerResponse response =
388      executeCallable(new MasterCallable<ModifyTableStoreFileTrackerResponse>(getConnection(),
389        getRpcControllerFactory()) {
390        long nonceGroup = ng.getNonceGroup();
391        long nonce = ng.newNonce();
392
393        @Override
394        protected ModifyTableStoreFileTrackerResponse rpcCall() throws Exception {
395          setPriority(tableName);
396          ModifyTableStoreFileTrackerRequest request = RequestConverter
397            .buildModifyTableStoreFileTrackerRequest(tableName, dstSFT, nonceGroup, nonce);
398          return master.modifyTableStoreFileTracker(getRpcController(), request);
399        }
400      });
401    return new ModifyTablerStoreFileTrackerFuture(this, tableName, response);
402  }
403
404  private static class ModifyTablerStoreFileTrackerFuture extends ModifyTableFuture {
405    public ModifyTablerStoreFileTrackerFuture(HBaseAdmin admin, TableName tableName,
406      ModifyTableStoreFileTrackerResponse response) {
407      super(admin, tableName,
408        (response != null && response.hasProcId()) ? response.getProcId() : null);
409    }
410
411    @Override
412    public String getOperationType() {
413      return "MODIFY_TABLE_STORE_FILE_TRACKER";
414    }
415  }
416
417  @Override
418  public List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException {
419    return executeCallable(
420      new MasterCallable<List<TableDescriptor>>(getConnection(), getRpcControllerFactory()) {
421        @Override
422        protected List<TableDescriptor> rpcCall() throws Exception {
423          return master
424            .listTableDescriptorsByNamespace(getRpcController(),
425              ListTableDescriptorsByNamespaceRequest.newBuilder()
426                .setNamespaceName(Bytes.toString(name)).build())
427            .getTableSchemaList().stream().map(ProtobufUtil::toTableDescriptor)
428            .collect(Collectors.toList());
429        }
430      });
431  }
432
433  @Override
434  public List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException {
435    return executeCallable(
436      new MasterCallable<List<TableDescriptor>>(getConnection(), getRpcControllerFactory()) {
437        @Override
438        protected List<TableDescriptor> rpcCall() throws Exception {
439          GetTableDescriptorsRequest req =
440            RequestConverter.buildGetTableDescriptorsRequest(tableNames);
441          return ProtobufUtil
442            .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req));
443        }
444      });
445  }
446
447  @Override
448  public List<RegionInfo> getRegions(final ServerName sn) throws IOException {
449    AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
450    // TODO: There is no timeout on this controller. Set one!
451    HBaseRpcController controller = rpcControllerFactory.newController();
452    return ProtobufUtil.getOnlineRegions(controller, admin);
453  }
454
455  @Override
456  public List<RegionInfo> getRegions(TableName tableName) throws IOException {
457    if (TableName.isMetaTableName(tableName)) {
458      return Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
459    } else {
460      return MetaTableAccessor.getTableRegions(connection, tableName, true);
461    }
462  }
463
464  private static class AbortProcedureFuture extends ProcedureFuture<Boolean> {
465    private boolean isAbortInProgress;
466
467    public AbortProcedureFuture(final HBaseAdmin admin, final Long procId,
468      final Boolean abortProcResponse) {
469      super(admin, procId);
470      this.isAbortInProgress = abortProcResponse;
471    }
472
473    @Override
474    public Boolean get(long timeout, TimeUnit unit)
475      throws InterruptedException, ExecutionException, TimeoutException {
476      if (!this.isAbortInProgress) {
477        return false;
478      }
479      super.get(timeout, unit);
480      return true;
481    }
482  }
483
484  /** Returns Connection used by this object. */
485  @Override
486  public Connection getConnection() {
487    return connection;
488  }
489
490  @Override
491  public boolean tableExists(final TableName tableName) throws IOException {
492    return executeCallable(new RpcRetryingCallable<Boolean>() {
493      @Override
494      protected Boolean rpcCall(int callTimeout) throws Exception {
495        return MetaTableAccessor.getTableState(getConnection(), tableName) != null;
496      }
497    });
498  }
499
500  @Override
501  public HTableDescriptor[] listTables() throws IOException {
502    return listTables((Pattern) null, false);
503  }
504
505  @Override
506  public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
507    return listTables(pattern, false);
508  }
509
510  @Override
511  public HTableDescriptor[] listTables(String regex) throws IOException {
512    return listTables(Pattern.compile(regex), false);
513  }
514
515  @Override
516  public HTableDescriptor[] listTables(final Pattern pattern, final boolean includeSysTables)
517    throws IOException {
518    return executeCallable(
519      new MasterCallable<HTableDescriptor[]>(getConnection(), getRpcControllerFactory()) {
520        @Override
521        protected HTableDescriptor[] rpcCall() throws Exception {
522          GetTableDescriptorsRequest req =
523            RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
524          return ProtobufUtil
525            .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req)).stream()
526            .map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new);
527        }
528      });
529  }
530
531  @Override
532  public HTableDescriptor[] listTables(String regex, boolean includeSysTables) throws IOException {
533    return listTables(Pattern.compile(regex), includeSysTables);
534  }
535
536  @Override
537  public TableName[] listTableNames() throws IOException {
538    return listTableNames((Pattern) null, false);
539  }
540
541  @Override
542  public TableName[] listTableNames(String regex) throws IOException {
543    return listTableNames(Pattern.compile(regex), false);
544  }
545
546  @Override
547  public TableName[] listTableNames(final Pattern pattern, final boolean includeSysTables)
548    throws IOException {
549    return executeCallable(
550      new MasterCallable<TableName[]>(getConnection(), getRpcControllerFactory()) {
551        @Override
552        protected TableName[] rpcCall() throws Exception {
553          GetTableNamesRequest req =
554            RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables);
555          return ProtobufUtil
556            .getTableNameArray(master.getTableNames(getRpcController(), req).getTableNamesList());
557        }
558      });
559  }
560
561  @Override
562  public TableName[] listTableNames(final String regex, final boolean includeSysTables)
563    throws IOException {
564    return listTableNames(Pattern.compile(regex), includeSysTables);
565  }
566
567  @Override
568  public HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException {
569    return getHTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory,
570      operationTimeout, rpcTimeout);
571  }
572
573  static TableDescriptor getTableDescriptor(final TableName tableName, Connection connection,
574    RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
575    int operationTimeout, int rpcTimeout) throws IOException {
576    if (tableName == null) return null;
577    TableDescriptor td =
578      executeCallable(new MasterCallable<TableDescriptor>(connection, rpcControllerFactory) {
579        @Override
580        protected TableDescriptor rpcCall() throws Exception {
581          GetTableDescriptorsRequest req =
582            RequestConverter.buildGetTableDescriptorsRequest(tableName);
583          GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
584          if (!htds.getTableSchemaList().isEmpty()) {
585            return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0));
586          }
587          return null;
588        }
589      }, rpcCallerFactory, operationTimeout, rpcTimeout);
590    if (td != null) {
591      return td;
592    }
593    throw new TableNotFoundException(tableName.getNameAsString());
594  }
595
596  /**
597   * @deprecated since 2.0 version and will be removed in 3.0 version. use
598   *             {@link #getTableDescriptor(TableName, Connection, RpcRetryingCallerFactory,RpcControllerFactory,int,int)}
599   */
600  @Deprecated
601  static HTableDescriptor getHTableDescriptor(final TableName tableName, Connection connection,
602    RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
603    int operationTimeout, int rpcTimeout) throws IOException {
604    if (tableName == null) {
605      return null;
606    }
607    HTableDescriptor htd =
608      executeCallable(new MasterCallable<HTableDescriptor>(connection, rpcControllerFactory) {
609        @Override
610        protected HTableDescriptor rpcCall() throws Exception {
611          GetTableDescriptorsRequest req =
612            RequestConverter.buildGetTableDescriptorsRequest(tableName);
613          GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
614          if (!htds.getTableSchemaList().isEmpty()) {
615            return new ImmutableHTableDescriptor(
616              ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)));
617          }
618          return null;
619        }
620      }, rpcCallerFactory, operationTimeout, rpcTimeout);
621    if (htd != null) {
622      return new ImmutableHTableDescriptor(htd);
623    }
624    throw new TableNotFoundException(tableName.getNameAsString());
625  }
626
627  private long getPauseTime(int tries) {
628    int triesCount = tries;
629    if (triesCount >= HConstants.RETRY_BACKOFF.length) {
630      triesCount = HConstants.RETRY_BACKOFF.length - 1;
631    }
632    return this.pause * HConstants.RETRY_BACKOFF[triesCount];
633  }
634
635  @Override
636  public void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions)
637    throws IOException {
638    if (numRegions < 3) {
639      throw new IllegalArgumentException("Must create at least three regions");
640    } else if (Bytes.compareTo(startKey, endKey) >= 0) {
641      throw new IllegalArgumentException("Start key must be smaller than end key");
642    }
643    if (numRegions == 3) {
644      createTable(desc, new byte[][] { startKey, endKey });
645      return;
646    }
647    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
648    if (splitKeys == null || splitKeys.length != numRegions - 1) {
649      throw new IllegalArgumentException("Unable to split key range into enough regions");
650    }
651    createTable(desc, splitKeys);
652  }
653
654  @Override
655  public Future<Void> createTableAsync(final TableDescriptor desc, final byte[][] splitKeys)
656    throws IOException {
657    if (desc.getTableName() == null) {
658      throw new IllegalArgumentException("TableName cannot be null");
659    }
660    if (splitKeys != null && splitKeys.length > 0) {
661      Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
662      // Verify there are no duplicate split keys
663      byte[] lastKey = null;
664      for (byte[] splitKey : splitKeys) {
665        if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
666          throw new IllegalArgumentException(
667            "Empty split key must not be passed in the split keys.");
668        }
669        if (lastKey != null && Bytes.equals(splitKey, lastKey)) {
670          throw new IllegalArgumentException("All split keys must be unique, " + "found duplicate: "
671            + Bytes.toStringBinary(splitKey) + ", " + Bytes.toStringBinary(lastKey));
672        }
673        lastKey = splitKey;
674      }
675    }
676
677    CreateTableResponse response = executeCallable(
678      new MasterCallable<CreateTableResponse>(getConnection(), getRpcControllerFactory()) {
679        Long nonceGroup = ng.getNonceGroup();
680        Long nonce = ng.newNonce();
681
682        @Override
683        protected CreateTableResponse rpcCall() throws Exception {
684          setPriority(desc.getTableName());
685          CreateTableRequest request =
686            RequestConverter.buildCreateTableRequest(desc, splitKeys, nonceGroup, nonce);
687          return master.createTable(getRpcController(), request);
688        }
689      });
690    return new CreateTableFuture(this, desc, splitKeys, response);
691  }
692
693  private static class CreateTableFuture extends TableFuture<Void> {
694    private final TableDescriptor desc;
695    private final byte[][] splitKeys;
696
697    public CreateTableFuture(final HBaseAdmin admin, final TableDescriptor desc,
698      final byte[][] splitKeys, final CreateTableResponse response) {
699      super(admin, desc.getTableName(),
700        (response != null && response.hasProcId()) ? response.getProcId() : null);
701      this.splitKeys = splitKeys;
702      this.desc = desc;
703    }
704
705    @Override
706    protected TableDescriptor getTableDescriptor() {
707      return desc;
708    }
709
710    @Override
711    public String getOperationType() {
712      return "CREATE";
713    }
714
715    @Override
716    protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
717      waitForTableEnabled(deadlineTs);
718      waitForAllRegionsOnline(deadlineTs, splitKeys);
719      return null;
720    }
721  }
722
723  @Override
724  public Future<Void> deleteTableAsync(final TableName tableName) throws IOException {
725    DeleteTableResponse response = executeCallable(
726      new MasterCallable<DeleteTableResponse>(getConnection(), getRpcControllerFactory()) {
727        Long nonceGroup = ng.getNonceGroup();
728        Long nonce = ng.newNonce();
729
730        @Override
731        protected DeleteTableResponse rpcCall() throws Exception {
732          setPriority(tableName);
733          DeleteTableRequest req =
734            RequestConverter.buildDeleteTableRequest(tableName, nonceGroup, nonce);
735          return master.deleteTable(getRpcController(), req);
736        }
737      });
738    return new DeleteTableFuture(this, tableName, response);
739  }
740
741  private static class DeleteTableFuture extends TableFuture<Void> {
742    public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName,
743      final DeleteTableResponse response) {
744      super(admin, tableName,
745        (response != null && response.hasProcId()) ? response.getProcId() : null);
746    }
747
748    @Override
749    public String getOperationType() {
750      return "DELETE";
751    }
752
753    @Override
754    protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
755      waitTableNotFound(deadlineTs);
756      return null;
757    }
758
759    @Override
760    protected Void postOperationResult(final Void result, final long deadlineTs)
761      throws IOException, TimeoutException {
762      // Delete cached information to prevent clients from using old locations
763      ((ClusterConnection) getAdmin().getConnection()).clearRegionCache(getTableName());
764      return super.postOperationResult(result, deadlineTs);
765    }
766  }
767
768  @Override
769  public HTableDescriptor[] deleteTables(String regex) throws IOException {
770    return deleteTables(Pattern.compile(regex));
771  }
772
773  /**
774   * Delete tables matching the passed in pattern and wait on completion. Warning: Use this method
775   * carefully, there is no prompting and the effect is immediate. Consider using
776   * {@link #listTables(java.util.regex.Pattern) } and {@link #deleteTable(TableName)}
777   * @param pattern The pattern to match table names against
778   * @return Table descriptors for tables that couldn't be deleted
779   * @throws IOException if a remote or network exception occurs
780   */
781  @Override
782  public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException {
783    List<HTableDescriptor> failed = new ArrayList<>();
784    for (HTableDescriptor table : listTables(pattern)) {
785      try {
786        deleteTable(table.getTableName());
787      } catch (IOException ex) {
788        LOG.info("Failed to delete table " + table.getTableName(), ex);
789        failed.add(table);
790      }
791    }
792    return failed.toArray(new HTableDescriptor[failed.size()]);
793  }
794
795  @Override
796  public Future<Void> truncateTableAsync(final TableName tableName, final boolean preserveSplits)
797    throws IOException {
798    TruncateTableResponse response = executeCallable(
799      new MasterCallable<TruncateTableResponse>(getConnection(), getRpcControllerFactory()) {
800        Long nonceGroup = ng.getNonceGroup();
801        Long nonce = ng.newNonce();
802
803        @Override
804        protected TruncateTableResponse rpcCall() throws Exception {
805          setPriority(tableName);
806          LOG.info("Started truncating " + tableName);
807          TruncateTableRequest req = RequestConverter.buildTruncateTableRequest(tableName,
808            preserveSplits, nonceGroup, nonce);
809          return master.truncateTable(getRpcController(), req);
810        }
811      });
812    return new TruncateTableFuture(this, tableName, preserveSplits, response);
813  }
814
815  private static class TruncateTableFuture extends TableFuture<Void> {
816    private final boolean preserveSplits;
817
818    public TruncateTableFuture(final HBaseAdmin admin, final TableName tableName,
819      final boolean preserveSplits, final TruncateTableResponse response) {
820      super(admin, tableName,
821        (response != null && response.hasProcId()) ? response.getProcId() : null);
822      this.preserveSplits = preserveSplits;
823    }
824
825    @Override
826    public String getOperationType() {
827      return "TRUNCATE";
828    }
829
830    @Override
831    protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
832      waitForTableEnabled(deadlineTs);
833      // once the table is enabled, we know the operation is done. so we can fetch the splitKeys
834      byte[][] splitKeys = preserveSplits ? getAdmin().getTableSplits(getTableName()) : null;
835      waitForAllRegionsOnline(deadlineTs, splitKeys);
836      return null;
837    }
838  }
839
840  private byte[][] getTableSplits(final TableName tableName) throws IOException {
841    byte[][] splits = null;
842    try (RegionLocator locator = getConnection().getRegionLocator(tableName)) {
843      byte[][] startKeys = locator.getStartKeys();
844      if (startKeys.length == 1) {
845        return splits;
846      }
847      splits = new byte[startKeys.length - 1][];
848      for (int i = 1; i < startKeys.length; i++) {
849        splits[i - 1] = startKeys[i];
850      }
851    }
852    return splits;
853  }
854
855  @Override
856  public Future<Void> enableTableAsync(final TableName tableName) throws IOException {
857    TableName.isLegalFullyQualifiedTableName(tableName.getName());
858    EnableTableResponse response = executeCallable(
859      new MasterCallable<EnableTableResponse>(getConnection(), getRpcControllerFactory()) {
860        Long nonceGroup = ng.getNonceGroup();
861        Long nonce = ng.newNonce();
862
863        @Override
864        protected EnableTableResponse rpcCall() throws Exception {
865          setPriority(tableName);
866          LOG.info("Started enable of " + tableName);
867          EnableTableRequest req =
868            RequestConverter.buildEnableTableRequest(tableName, nonceGroup, nonce);
869          return master.enableTable(getRpcController(), req);
870        }
871      });
872    return new EnableTableFuture(this, tableName, response);
873  }
874
875  private static class EnableTableFuture extends TableFuture<Void> {
876    public EnableTableFuture(final HBaseAdmin admin, final TableName tableName,
877      final EnableTableResponse response) {
878      super(admin, tableName,
879        (response != null && response.hasProcId()) ? response.getProcId() : null);
880    }
881
882    @Override
883    public String getOperationType() {
884      return "ENABLE";
885    }
886
887    @Override
888    protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
889      waitForTableEnabled(deadlineTs);
890      return null;
891    }
892  }
893
894  @Override
895  public HTableDescriptor[] enableTables(String regex) throws IOException {
896    return enableTables(Pattern.compile(regex));
897  }
898
899  @Override
900  public HTableDescriptor[] enableTables(Pattern pattern) throws IOException {
901    List<HTableDescriptor> failed = new ArrayList<>();
902    for (HTableDescriptor table : listTables(pattern)) {
903      if (isTableDisabled(table.getTableName())) {
904        try {
905          enableTable(table.getTableName());
906        } catch (IOException ex) {
907          LOG.info("Failed to enable table " + table.getTableName(), ex);
908          failed.add(table);
909        }
910      }
911    }
912    return failed.toArray(new HTableDescriptor[failed.size()]);
913  }
914
915  @Override
916  public Future<Void> disableTableAsync(final TableName tableName) throws IOException {
917    TableName.isLegalFullyQualifiedTableName(tableName.getName());
918    DisableTableResponse response = executeCallable(
919      new MasterCallable<DisableTableResponse>(getConnection(), getRpcControllerFactory()) {
920        Long nonceGroup = ng.getNonceGroup();
921        Long nonce = ng.newNonce();
922
923        @Override
924        protected DisableTableResponse rpcCall() throws Exception {
925          setPriority(tableName);
926          LOG.info("Started disable of " + tableName);
927          DisableTableRequest req =
928            RequestConverter.buildDisableTableRequest(tableName, nonceGroup, nonce);
929          return master.disableTable(getRpcController(), req);
930        }
931      });
932    return new DisableTableFuture(this, tableName, response);
933  }
934
935  private static class DisableTableFuture extends TableFuture<Void> {
936    public DisableTableFuture(final HBaseAdmin admin, final TableName tableName,
937      final DisableTableResponse response) {
938      super(admin, tableName,
939        (response != null && response.hasProcId()) ? response.getProcId() : null);
940    }
941
942    @Override
943    public String getOperationType() {
944      return "DISABLE";
945    }
946
947    @Override
948    protected Void waitOperationResult(long deadlineTs) throws IOException, TimeoutException {
949      waitForTableDisabled(deadlineTs);
950      return null;
951    }
952  }
953
954  @Override
955  public HTableDescriptor[] disableTables(String regex) throws IOException {
956    return disableTables(Pattern.compile(regex));
957  }
958
959  @Override
960  public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
961    List<HTableDescriptor> failed = new ArrayList<>();
962    for (HTableDescriptor table : listTables(pattern)) {
963      if (isTableEnabled(table.getTableName())) {
964        try {
965          disableTable(table.getTableName());
966        } catch (IOException ex) {
967          LOG.info("Failed to disable table " + table.getTableName(), ex);
968          failed.add(table);
969        }
970      }
971    }
972    return failed.toArray(new HTableDescriptor[failed.size()]);
973  }
974
975  @Override
976  public boolean isTableEnabled(final TableName tableName) throws IOException {
977    checkTableExists(tableName);
978    return executeCallable(new RpcRetryingCallable<Boolean>() {
979      @Override
980      protected Boolean rpcCall(int callTimeout) throws Exception {
981        TableState tableState = MetaTableAccessor.getTableState(getConnection(), tableName);
982        if (tableState == null) {
983          throw new TableNotFoundException(tableName);
984        }
985        return tableState.inStates(TableState.State.ENABLED);
986      }
987    });
988  }
989
990  @Override
991  public boolean isTableDisabled(TableName tableName) throws IOException {
992    checkTableExists(tableName);
993    return connection.isTableDisabled(tableName);
994  }
995
996  @Override
997  public boolean isTableAvailable(TableName tableName) throws IOException {
998    return connection.isTableAvailable(tableName, null);
999  }
1000
1001  @Override
1002  public boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException {
1003    return connection.isTableAvailable(tableName, splitKeys);
1004  }
1005
1006  @Override
1007  public Pair<Integer, Integer> getAlterStatus(final TableName tableName) throws IOException {
1008    return executeCallable(
1009      new MasterCallable<Pair<Integer, Integer>>(getConnection(), getRpcControllerFactory()) {
1010        @Override
1011        protected Pair<Integer, Integer> rpcCall() throws Exception {
1012          setPriority(tableName);
1013          GetSchemaAlterStatusRequest req =
1014            RequestConverter.buildGetSchemaAlterStatusRequest(tableName);
1015          GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(getRpcController(), req);
1016          Pair<Integer, Integer> pair =
1017            new Pair<>(ret.getYetToUpdateRegions(), ret.getTotalRegions());
1018          return pair;
1019        }
1020      });
1021  }
1022
1023  @Override
1024  public Pair<Integer, Integer> getAlterStatus(final byte[] tableName) throws IOException {
1025    return getAlterStatus(TableName.valueOf(tableName));
1026  }
1027
1028  @Override
1029  public Future<Void> addColumnFamilyAsync(final TableName tableName,
1030    final ColumnFamilyDescriptor columnFamily) throws IOException {
1031    AddColumnResponse response = executeCallable(
1032      new MasterCallable<AddColumnResponse>(getConnection(), getRpcControllerFactory()) {
1033        Long nonceGroup = ng.getNonceGroup();
1034        Long nonce = ng.newNonce();
1035
1036        @Override
1037        protected AddColumnResponse rpcCall() throws Exception {
1038          setPriority(tableName);
1039          AddColumnRequest req =
1040            RequestConverter.buildAddColumnRequest(tableName, columnFamily, nonceGroup, nonce);
1041          return master.addColumn(getRpcController(), req);
1042        }
1043      });
1044    return new AddColumnFamilyFuture(this, tableName, response);
1045  }
1046
1047  private static class AddColumnFamilyFuture extends ModifyTableFuture {
1048    public AddColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName,
1049      final AddColumnResponse response) {
1050      super(admin, tableName,
1051        (response != null && response.hasProcId()) ? response.getProcId() : null);
1052    }
1053
1054    @Override
1055    public String getOperationType() {
1056      return "ADD_COLUMN_FAMILY";
1057    }
1058  }
1059
1060  /**
1061   * {@inheritDoc}
1062   * @deprecated Since 2.0. Will be removed in 3.0. Use
1063   *             {@link #deleteColumnFamily(TableName, byte[])} instead.
1064   */
1065  @Override
1066  @Deprecated
1067  public void deleteColumn(final TableName tableName, final byte[] columnFamily)
1068    throws IOException {
1069    deleteColumnFamily(tableName, columnFamily);
1070  }
1071
1072  @Override
1073  public Future<Void> deleteColumnFamilyAsync(final TableName tableName, final byte[] columnFamily)
1074    throws IOException {
1075    DeleteColumnResponse response = executeCallable(
1076      new MasterCallable<DeleteColumnResponse>(getConnection(), getRpcControllerFactory()) {
1077        Long nonceGroup = ng.getNonceGroup();
1078        Long nonce = ng.newNonce();
1079
1080        @Override
1081        protected DeleteColumnResponse rpcCall() throws Exception {
1082          setPriority(tableName);
1083          DeleteColumnRequest req =
1084            RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, nonceGroup, nonce);
1085          return master.deleteColumn(getRpcController(), req);
1086        }
1087      });
1088    return new DeleteColumnFamilyFuture(this, tableName, response);
1089  }
1090
1091  private static class DeleteColumnFamilyFuture extends ModifyTableFuture {
1092    public DeleteColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName,
1093      final DeleteColumnResponse response) {
1094      super(admin, tableName,
1095        (response != null && response.hasProcId()) ? response.getProcId() : null);
1096    }
1097
1098    @Override
1099    public String getOperationType() {
1100      return "DELETE_COLUMN_FAMILY";
1101    }
1102  }
1103
1104  @Override
1105  public Future<Void> modifyColumnFamilyAsync(final TableName tableName,
1106    final ColumnFamilyDescriptor columnFamily) throws IOException {
1107    ModifyColumnResponse response = executeCallable(
1108      new MasterCallable<ModifyColumnResponse>(getConnection(), getRpcControllerFactory()) {
1109        long nonceGroup = ng.getNonceGroup();
1110        long nonce = ng.newNonce();
1111
1112        @Override
1113        protected ModifyColumnResponse rpcCall() throws Exception {
1114          setPriority(tableName);
1115          ModifyColumnRequest req =
1116            RequestConverter.buildModifyColumnRequest(tableName, columnFamily, nonceGroup, nonce);
1117          return master.modifyColumn(getRpcController(), req);
1118        }
1119      });
1120    return new ModifyColumnFamilyFuture(this, tableName, response);
1121  }
1122
1123  private static class ModifyColumnFamilyFuture extends ModifyTableFuture {
1124    public ModifyColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName,
1125      final ModifyColumnResponse response) {
1126      super(admin, tableName,
1127        (response != null && response.hasProcId()) ? response.getProcId() : null);
1128    }
1129
1130    @Override
1131    public String getOperationType() {
1132      return "MODIFY_COLUMN_FAMILY";
1133    }
1134  }
1135
1136  @Override
1137  public Future<Void> modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] family,
1138    String dstSFT) throws IOException {
1139    ModifyColumnStoreFileTrackerResponse response =
1140      executeCallable(new MasterCallable<ModifyColumnStoreFileTrackerResponse>(getConnection(),
1141        getRpcControllerFactory()) {
1142        long nonceGroup = ng.getNonceGroup();
1143        long nonce = ng.newNonce();
1144
1145        @Override
1146        protected ModifyColumnStoreFileTrackerResponse rpcCall() throws Exception {
1147          setPriority(tableName);
1148          ModifyColumnStoreFileTrackerRequest req = RequestConverter
1149            .buildModifyColumnStoreFileTrackerRequest(tableName, family, dstSFT, nonceGroup, nonce);
1150          return master.modifyColumnStoreFileTracker(getRpcController(), req);
1151        }
1152      });
1153    return new ModifyColumnFamilyStoreFileTrackerFuture(this, tableName, response);
1154  }
1155
1156  private static class ModifyColumnFamilyStoreFileTrackerFuture extends ModifyTableFuture {
1157    public ModifyColumnFamilyStoreFileTrackerFuture(HBaseAdmin admin, TableName tableName,
1158      final ModifyColumnStoreFileTrackerResponse response) {
1159      super(admin, tableName,
1160        (response != null && response.hasProcId()) ? response.getProcId() : null);
1161    }
1162
1163    @Override
1164    public String getOperationType() {
1165      return "MODIFY_COLUMN_FAMILY_STORE_FILE_TRACKER";
1166    }
1167  }
1168
1169  @Deprecated
1170  @Override
1171  public void closeRegion(final String regionName, final String unused) throws IOException {
1172    unassign(Bytes.toBytes(regionName), true);
1173  }
1174
1175  @Deprecated
1176  @Override
1177  public void closeRegion(final byte[] regionName, final String unused) throws IOException {
1178    unassign(regionName, true);
1179  }
1180
1181  @Deprecated
1182  @Override
1183  public boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
1184    final String unused) throws IOException {
1185    unassign(Bytes.toBytes(encodedRegionName), true);
1186    return true;
1187  }
1188
1189  @Deprecated
1190  @Override
1191  public void closeRegion(final ServerName unused, final HRegionInfo hri) throws IOException {
1192    unassign(hri.getRegionName(), true);
1193  }
1194
1195  /**
1196   * n * @return List of {@link HRegionInfo}.
1197   * @throws IOException if a remote or network exception occurs
1198   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
1199   *             {@link #getRegions(ServerName)}.
1200   */
1201  @Deprecated
1202  @Override
1203  public List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException {
1204    return getRegions(sn).stream().map(ImmutableHRegionInfo::new).collect(Collectors.toList());
1205  }
1206
1207  @Override
1208  public void flush(final TableName tableName) throws IOException {
1209    flush(tableName, null);
1210  }
1211
1212  @Override
1213  public void flush(final TableName tableName, byte[] columnFamily) throws IOException {
1214    checkTableExists(tableName);
1215    if (isTableDisabled(tableName)) {
1216      LOG.info("Table is disabled: " + tableName.getNameAsString());
1217      return;
1218    }
1219    Map<String, String> props = new HashMap<>();
1220    if (columnFamily != null) {
1221      props.put(HConstants.FAMILY_KEY_STR, Bytes.toString(columnFamily));
1222    }
1223    execProcedure("flush-table-proc", tableName.getNameAsString(), props);
1224  }
1225
1226  @Override
1227  public void flushRegion(final byte[] regionName) throws IOException {
1228    flushRegion(regionName, null);
1229  }
1230
1231  @Override
1232  public void flushRegion(final byte[] regionName, byte[] columnFamily) throws IOException {
1233    Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName);
1234    if (regionServerPair == null) {
1235      throw new IllegalArgumentException("Unknown regionname: " + Bytes.toStringBinary(regionName));
1236    }
1237    if (regionServerPair.getSecond() == null) {
1238      throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1239    }
1240    final RegionInfo regionInfo = regionServerPair.getFirst();
1241    ServerName serverName = regionServerPair.getSecond();
1242    flush(this.connection.getAdmin(serverName), regionInfo, columnFamily);
1243  }
1244
1245  private void flush(AdminService.BlockingInterface admin, final RegionInfo info,
1246    byte[] columnFamily) throws IOException {
1247    ProtobufUtil.call(() -> {
1248      // TODO: There is no timeout on this controller. Set one!
1249      HBaseRpcController controller = rpcControllerFactory.newController();
1250      FlushRegionRequest request =
1251        RequestConverter.buildFlushRegionRequest(info.getRegionName(), columnFamily, false);
1252      admin.flushRegion(controller, request);
1253      return null;
1254    });
1255  }
1256
1257  @Override
1258  public void flushRegionServer(ServerName serverName) throws IOException {
1259    for (RegionInfo region : getRegions(serverName)) {
1260      flush(this.connection.getAdmin(serverName), region, null);
1261    }
1262  }
1263
1264  /**
1265   * {@inheritDoc}
1266   */
1267  @Override
1268  public void compact(final TableName tableName) throws IOException {
1269    compact(tableName, null, false, CompactType.NORMAL);
1270  }
1271
1272  @Override
1273  public void compactRegion(final byte[] regionName) throws IOException {
1274    compactRegion(regionName, null, false);
1275  }
1276
1277  /**
1278   * {@inheritDoc}
1279   */
1280  @Override
1281  public void compact(final TableName tableName, final byte[] columnFamily) throws IOException {
1282    compact(tableName, columnFamily, false, CompactType.NORMAL);
1283  }
1284
1285  /**
1286   * {@inheritDoc}
1287   */
1288  @Override
1289  public void compactRegion(final byte[] regionName, final byte[] columnFamily) throws IOException {
1290    compactRegion(regionName, columnFamily, false);
1291  }
1292
1293  @Override
1294  public Map<ServerName, Boolean> compactionSwitch(boolean switchState,
1295    List<String> serverNamesList) throws IOException {
1296    List<ServerName> serverList = new ArrayList<>();
1297    if (serverNamesList.isEmpty()) {
1298      ClusterMetrics status = getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
1299      serverList.addAll(status.getLiveServerMetrics().keySet());
1300    } else {
1301      for (String regionServerName : serverNamesList) {
1302        ServerName serverName = null;
1303        try {
1304          serverName = ServerName.valueOf(regionServerName);
1305        } catch (Exception e) {
1306          throw new IllegalArgumentException(
1307            String.format("Invalid ServerName format: %s", regionServerName));
1308        }
1309        if (serverName == null) {
1310          throw new IllegalArgumentException(
1311            String.format("Null ServerName: %s", regionServerName));
1312        }
1313        serverList.add(serverName);
1314      }
1315    }
1316    Map<ServerName, Boolean> res = new HashMap<>(serverList.size());
1317    for (ServerName serverName : serverList) {
1318      boolean prev_state = switchCompact(this.connection.getAdmin(serverName), switchState);
1319      res.put(serverName, prev_state);
1320    }
1321    return res;
1322  }
1323
1324  private Boolean switchCompact(AdminService.BlockingInterface admin, boolean onOrOff)
1325    throws IOException {
1326    return executeCallable(new RpcRetryingCallable<Boolean>() {
1327      @Override
1328      protected Boolean rpcCall(int callTimeout) throws Exception {
1329        HBaseRpcController controller = rpcControllerFactory.newController();
1330        CompactionSwitchRequest request =
1331          CompactionSwitchRequest.newBuilder().setEnabled(onOrOff).build();
1332        CompactionSwitchResponse compactionSwitchResponse =
1333          admin.compactionSwitch(controller, request);
1334        return compactionSwitchResponse.getPrevState();
1335      }
1336    });
1337  }
1338
1339  @Override
1340  public void compactRegionServer(final ServerName serverName) throws IOException {
1341    for (RegionInfo region : getRegions(serverName)) {
1342      compact(this.connection.getAdmin(serverName), region, false, null);
1343    }
1344  }
1345
1346  @Override
1347  public void majorCompactRegionServer(final ServerName serverName) throws IOException {
1348    for (RegionInfo region : getRegions(serverName)) {
1349      compact(this.connection.getAdmin(serverName), region, true, null);
1350    }
1351  }
1352
1353  @Override
1354  public void majorCompact(final TableName tableName) throws IOException {
1355    compact(tableName, null, true, CompactType.NORMAL);
1356  }
1357
1358  @Override
1359  public void majorCompactRegion(final byte[] regionName) throws IOException {
1360    compactRegion(regionName, null, true);
1361  }
1362
1363  /**
1364   * {@inheritDoc}
1365   */
1366  @Override
1367  public void majorCompact(final TableName tableName, final byte[] columnFamily)
1368    throws IOException {
1369    compact(tableName, columnFamily, true, CompactType.NORMAL);
1370  }
1371
1372  @Override
1373  public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily)
1374    throws IOException {
1375    compactRegion(regionName, columnFamily, true);
1376  }
1377
1378  /**
1379   * Compact a table. Asynchronous operation.
1380   * @param tableName    table or region to compact
1381   * @param columnFamily column family within a table or region
1382   * @param major        True if we are to do a major compaction.
1383   * @param compactType  {@link org.apache.hadoop.hbase.client.CompactType}
1384   * @throws IOException if a remote or network exception occurs
1385   */
1386  private void compact(final TableName tableName, final byte[] columnFamily, final boolean major,
1387    CompactType compactType) throws IOException {
1388    switch (compactType) {
1389      case MOB:
1390        compact(this.connection.getAdminForMaster(), RegionInfo.createMobRegionInfo(tableName),
1391          major, columnFamily);
1392        break;
1393      case NORMAL:
1394        checkTableExists(tableName);
1395        for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) {
1396          ServerName sn = loc.getServerName();
1397          if (sn == null) {
1398            continue;
1399          }
1400          try {
1401            compact(this.connection.getAdmin(sn), loc.getRegion(), major, columnFamily);
1402          } catch (NotServingRegionException e) {
1403            if (LOG.isDebugEnabled()) {
1404              LOG.debug("Trying to" + (major ? " major" : "") + " compact " + loc.getRegion() + ": "
1405                + StringUtils.stringifyException(e));
1406            }
1407          }
1408        }
1409        break;
1410      default:
1411        throw new IllegalArgumentException("Unknown compactType: " + compactType);
1412    }
1413  }
1414
1415  /**
1416   * Compact an individual region. Asynchronous operation.
1417   * @param regionName   region to compact
1418   * @param columnFamily column family within a table or region
1419   * @param major        True if we are to do a major compaction.
1420   * @throws IOException if a remote or network exception occurs n
1421   */
1422  private void compactRegion(final byte[] regionName, final byte[] columnFamily,
1423    final boolean major) throws IOException {
1424    Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName);
1425    if (regionServerPair == null) {
1426      throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
1427    }
1428    if (regionServerPair.getSecond() == null) {
1429      throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1430    }
1431    compact(this.connection.getAdmin(regionServerPair.getSecond()), regionServerPair.getFirst(),
1432      major, columnFamily);
1433  }
1434
1435  private void compact(AdminService.BlockingInterface admin, RegionInfo hri, boolean major,
1436    byte[] family) throws IOException {
1437    Callable<Void> callable = new Callable<Void>() {
1438      @Override
1439      public Void call() throws Exception {
1440        // TODO: There is no timeout on this controller. Set one!
1441        HBaseRpcController controller = rpcControllerFactory.newController();
1442        CompactRegionRequest request =
1443          RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family);
1444        admin.compactRegion(controller, request);
1445        return null;
1446      }
1447    };
1448    ProtobufUtil.call(callable);
1449  }
1450
1451  @Override
1452  public void move(byte[] encodedRegionName) throws IOException {
1453    move(encodedRegionName, (ServerName) null);
1454  }
1455
1456  @Override
1457  public void move(final byte[] encodedRegionName, ServerName destServerName) throws IOException {
1458    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
1459      @Override
1460      protected Void rpcCall() throws Exception {
1461        setPriority(encodedRegionName);
1462        MoveRegionRequest request =
1463          RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName);
1464        master.moveRegion(getRpcController(), request);
1465        return null;
1466      }
1467    });
1468  }
1469
1470  @Override
1471  public void assign(final byte[] regionName)
1472    throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
1473    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
1474      @Override
1475      protected Void rpcCall() throws Exception {
1476        setPriority(regionName);
1477        AssignRegionRequest request =
1478          RequestConverter.buildAssignRegionRequest(getRegionName(regionName));
1479        master.assignRegion(getRpcController(), request);
1480        return null;
1481      }
1482    });
1483  }
1484
1485  @Override
1486  public void unassign(final byte[] regionName) throws IOException {
1487    final byte[] toBeUnassigned = getRegionName(regionName);
1488    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
1489      @Override
1490      protected Void rpcCall() throws Exception {
1491        setPriority(regionName);
1492        UnassignRegionRequest request = RequestConverter.buildUnassignRegionRequest(toBeUnassigned);
1493        master.unassignRegion(getRpcController(), request);
1494        return null;
1495      }
1496    });
1497  }
1498
1499  @Override
1500  public void offline(final byte[] regionName) throws IOException {
1501    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
1502      @Override
1503      protected Void rpcCall() throws Exception {
1504        setPriority(regionName);
1505        master.offlineRegion(getRpcController(),
1506          RequestConverter.buildOfflineRegionRequest(regionName));
1507        return null;
1508      }
1509    });
1510  }
1511
1512  @Override
1513  public boolean balancerSwitch(final boolean on, final boolean synchronous) throws IOException {
1514    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1515      @Override
1516      protected Boolean rpcCall() throws Exception {
1517        SetBalancerRunningRequest req =
1518          RequestConverter.buildSetBalancerRunningRequest(on, synchronous);
1519        return master.setBalancerRunning(getRpcController(), req).getPrevBalanceValue();
1520      }
1521    });
1522  }
1523
1524  @Override
1525  public BalanceResponse balance(BalanceRequest request) throws IOException {
1526    return executeCallable(
1527      new MasterCallable<BalanceResponse>(getConnection(), getRpcControllerFactory()) {
1528        @Override
1529        protected BalanceResponse rpcCall() throws Exception {
1530          MasterProtos.BalanceRequest req = ProtobufUtil.toBalanceRequest(request);
1531          return ProtobufUtil.toBalanceResponse(master.balance(getRpcController(), req));
1532        }
1533      });
1534  }
1535
1536  @Override
1537  public boolean isBalancerEnabled() throws IOException {
1538    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1539      @Override
1540      protected Boolean rpcCall() throws Exception {
1541        return master
1542          .isBalancerEnabled(getRpcController(), RequestConverter.buildIsBalancerEnabledRequest())
1543          .getEnabled();
1544      }
1545    });
1546  }
1547
1548  /**
1549   * {@inheritDoc}
1550   */
1551  @Override
1552  public CacheEvictionStats clearBlockCache(final TableName tableName) throws IOException {
1553    checkTableExists(tableName);
1554    CacheEvictionStatsBuilder cacheEvictionStats = CacheEvictionStats.builder();
1555    List<Pair<RegionInfo, ServerName>> pairs =
1556      MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
1557    Map<ServerName,
1558      List<RegionInfo>> regionInfoByServerName = pairs.stream()
1559        .filter(pair -> !pair.getFirst().isOffline()).filter(pair -> pair.getSecond() != null)
1560        .collect(Collectors.groupingBy(pair -> pair.getSecond(),
1561          Collectors.mapping(pair -> pair.getFirst(), Collectors.toList())));
1562
1563    for (Map.Entry<ServerName, List<RegionInfo>> entry : regionInfoByServerName.entrySet()) {
1564      CacheEvictionStats stats = clearBlockCache(entry.getKey(), entry.getValue());
1565      cacheEvictionStats = cacheEvictionStats.append(stats);
1566      if (stats.getExceptionCount() > 0) {
1567        for (Map.Entry<byte[], Throwable> exception : stats.getExceptions().entrySet()) {
1568          LOG.debug("Failed to clear block cache for " + Bytes.toStringBinary(exception.getKey())
1569            + " on " + entry.getKey() + ": ", exception.getValue());
1570        }
1571      }
1572    }
1573    return cacheEvictionStats.build();
1574  }
1575
1576  private CacheEvictionStats clearBlockCache(final ServerName sn, final List<RegionInfo> hris)
1577    throws IOException {
1578    HBaseRpcController controller = rpcControllerFactory.newController();
1579    AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1580    ClearRegionBlockCacheRequest request = RequestConverter.buildClearRegionBlockCacheRequest(hris);
1581    ClearRegionBlockCacheResponse response;
1582    try {
1583      response = admin.clearRegionBlockCache(controller, request);
1584      return ProtobufUtil.toCacheEvictionStats(response.getStats());
1585    } catch (ServiceException se) {
1586      throw ProtobufUtil.getRemoteException(se);
1587    }
1588  }
1589
1590  @Override
1591  public boolean normalize(NormalizeTableFilterParams ntfp) throws IOException {
1592    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1593      @Override
1594      protected Boolean rpcCall() throws Exception {
1595        return master.normalize(getRpcController(), RequestConverter.buildNormalizeRequest(ntfp))
1596          .getNormalizerRan();
1597      }
1598    });
1599  }
1600
1601  @Override
1602  public boolean isNormalizerEnabled() throws IOException {
1603    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1604      @Override
1605      protected Boolean rpcCall() throws Exception {
1606        return master.isNormalizerEnabled(getRpcController(),
1607          RequestConverter.buildIsNormalizerEnabledRequest()).getEnabled();
1608      }
1609    });
1610  }
1611
1612  @Override
1613  public boolean normalizerSwitch(final boolean on) throws IOException {
1614    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1615      @Override
1616      protected Boolean rpcCall() throws Exception {
1617        SetNormalizerRunningRequest req = RequestConverter.buildSetNormalizerRunningRequest(on);
1618        return master.setNormalizerRunning(getRpcController(), req).getPrevNormalizerValue();
1619      }
1620    });
1621  }
1622
1623  @Override
1624  public boolean catalogJanitorSwitch(final boolean enable) throws IOException {
1625    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1626      @Override
1627      protected Boolean rpcCall() throws Exception {
1628        return master.enableCatalogJanitor(getRpcController(),
1629          RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
1630      }
1631    });
1632  }
1633
1634  @Override
1635  public int runCatalogJanitor() throws IOException {
1636    return executeCallable(new MasterCallable<Integer>(getConnection(), getRpcControllerFactory()) {
1637      @Override
1638      protected Integer rpcCall() throws Exception {
1639        return master.runCatalogScan(getRpcController(), RequestConverter.buildCatalogScanRequest())
1640          .getScanResult();
1641      }
1642    });
1643  }
1644
1645  @Override
1646  public boolean isCatalogJanitorEnabled() throws IOException {
1647    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1648      @Override
1649      protected Boolean rpcCall() throws Exception {
1650        return master.isCatalogJanitorEnabled(getRpcController(),
1651          RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
1652      }
1653    });
1654  }
1655
1656  @Override
1657  public boolean cleanerChoreSwitch(final boolean on) throws IOException {
1658    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1659      @Override
1660      public Boolean rpcCall() throws Exception {
1661        return master.setCleanerChoreRunning(getRpcController(),
1662          RequestConverter.buildSetCleanerChoreRunningRequest(on)).getPrevValue();
1663      }
1664    });
1665  }
1666
1667  @Override
1668  public boolean runCleanerChore() throws IOException {
1669    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1670      @Override
1671      public Boolean rpcCall() throws Exception {
1672        return master
1673          .runCleanerChore(getRpcController(), RequestConverter.buildRunCleanerChoreRequest())
1674          .getCleanerChoreRan();
1675      }
1676    });
1677  }
1678
1679  @Override
1680  public boolean isCleanerChoreEnabled() throws IOException {
1681    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1682      @Override
1683      public Boolean rpcCall() throws Exception {
1684        return master.isCleanerChoreEnabled(getRpcController(),
1685          RequestConverter.buildIsCleanerChoreEnabledRequest()).getValue();
1686      }
1687    });
1688  }
1689
1690  /**
1691   * Merge two regions. Synchronous operation. Note: It is not feasible to predict the length of
1692   * merge. Therefore, this is for internal testing only.
1693   * @param nameOfRegionA encoded or full name of region a
1694   * @param nameOfRegionB encoded or full name of region b
1695   * @param forcible      true if do a compulsory merge, otherwise we will only merge two adjacent
1696   *                      regions
1697   * @throws IOException if a remote or network exception occurs
1698   */
1699  public void mergeRegionsSync(final byte[] nameOfRegionA, final byte[] nameOfRegionB,
1700    final boolean forcible) throws IOException {
1701    get(mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible), syncWaitTimeout,
1702      TimeUnit.MILLISECONDS);
1703  }
1704
1705  /**
1706   * Merge two regions. Asynchronous operation.
1707   * @param nameOfRegionA encoded or full name of region a
1708   * @param nameOfRegionB encoded or full name of region b
1709   * @param forcible      true if do a compulsory merge, otherwise we will only merge two adjacent
1710   *                      regions
1711   * @throws IOException if a remote or network exception occurs
1712   * @deprecated Since 2.0. Will be removed in 3.0. Use
1713   *             {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead.
1714   */
1715  @Deprecated
1716  @Override
1717  public void mergeRegions(final byte[] nameOfRegionA, final byte[] nameOfRegionB,
1718    final boolean forcible) throws IOException {
1719    mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible);
1720  }
1721
1722  /**
1723   * Merge two regions. Asynchronous operation.
1724   * @param nameofRegionsToMerge encoded or full name of daughter regions
1725   * @param forcible             true if do a compulsory merge, otherwise we will only merge
1726   *                             adjacent regions
1727   */
1728  @Override
1729  public Future<Void> mergeRegionsAsync(final byte[][] nameofRegionsToMerge, final boolean forcible)
1730    throws IOException {
1731    Preconditions.checkArgument(nameofRegionsToMerge.length >= 2, "Can not merge only %s region",
1732      nameofRegionsToMerge.length);
1733    byte[][] encodedNameofRegionsToMerge = new byte[nameofRegionsToMerge.length][];
1734    for (int i = 0; i < nameofRegionsToMerge.length; i++) {
1735      encodedNameofRegionsToMerge[i] = RegionInfo.isEncodedRegionName(nameofRegionsToMerge[i])
1736        ? nameofRegionsToMerge[i]
1737        : Bytes.toBytes(RegionInfo.encodeRegionName(nameofRegionsToMerge[i]));
1738    }
1739
1740    TableName tableName = null;
1741    Pair<RegionInfo, ServerName> pair;
1742
1743    for (int i = 0; i < nameofRegionsToMerge.length; i++) {
1744      pair = getRegion(nameofRegionsToMerge[i]);
1745
1746      if (pair != null) {
1747        if (pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
1748          throw new IllegalArgumentException("Can't invoke merge on non-default regions directly");
1749        }
1750        if (tableName == null) {
1751          tableName = pair.getFirst().getTable();
1752        } else if (!tableName.equals(pair.getFirst().getTable())) {
1753          throw new IllegalArgumentException("Cannot merge regions from two different tables "
1754            + tableName + " and " + pair.getFirst().getTable());
1755        }
1756      } else {
1757        throw new UnknownRegionException("Can't invoke merge on unknown region "
1758          + Bytes.toStringBinary(encodedNameofRegionsToMerge[i]));
1759      }
1760    }
1761
1762    MergeTableRegionsResponse response = executeCallable(
1763      new MasterCallable<MergeTableRegionsResponse>(getConnection(), getRpcControllerFactory()) {
1764        Long nonceGroup = ng.getNonceGroup();
1765        Long nonce = ng.newNonce();
1766
1767        @Override
1768        protected MergeTableRegionsResponse rpcCall() throws Exception {
1769          MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest(
1770            encodedNameofRegionsToMerge, forcible, nonceGroup, nonce);
1771          return master.mergeTableRegions(getRpcController(), request);
1772        }
1773      });
1774    return new MergeTableRegionsFuture(this, tableName, response);
1775  }
1776
1777  private static class MergeTableRegionsFuture extends TableFuture<Void> {
1778    public MergeTableRegionsFuture(final HBaseAdmin admin, final TableName tableName,
1779      final MergeTableRegionsResponse response) {
1780      super(admin, tableName,
1781        (response != null && response.hasProcId()) ? response.getProcId() : null);
1782    }
1783
1784    public MergeTableRegionsFuture(final HBaseAdmin admin, final TableName tableName,
1785      final Long procId) {
1786      super(admin, tableName, procId);
1787    }
1788
1789    @Override
1790    public String getOperationType() {
1791      return "MERGE_REGIONS";
1792    }
1793  }
1794
1795  /**
1796   * Split one region. Synchronous operation. Note: It is not feasible to predict the length of
1797   * split. Therefore, this is for internal testing only.
1798   * @param regionName encoded or full name of region
1799   * @param splitPoint key where region splits
1800   * @throws IOException if a remote or network exception occurs
1801   */
1802  public void splitRegionSync(byte[] regionName, byte[] splitPoint) throws IOException {
1803    splitRegionSync(regionName, splitPoint, syncWaitTimeout, TimeUnit.MILLISECONDS);
1804  }
1805
1806  /**
1807   * Split one region. Synchronous operation.
1808   * @param regionName region to be split
1809   * @param splitPoint split point
1810   * @param timeout    how long to wait on split
1811   * @param units      time units
1812   * @throws IOException if a remote or network exception occurs
1813   */
1814  public void splitRegionSync(byte[] regionName, byte[] splitPoint, final long timeout,
1815    final TimeUnit units) throws IOException {
1816    get(splitRegionAsync(regionName, splitPoint), timeout, units);
1817  }
1818
1819  @Override
1820  public Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException {
1821    byte[] encodedNameofRegionToSplit = HRegionInfo.isEncodedRegionName(regionName)
1822      ? regionName
1823      : Bytes.toBytes(HRegionInfo.encodeRegionName(regionName));
1824    Pair<RegionInfo, ServerName> pair = getRegion(regionName);
1825    if (pair != null) {
1826      if (
1827        pair.getFirst() != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID
1828      ) {
1829        throw new IllegalArgumentException("Can't invoke split on non-default regions directly");
1830      }
1831    } else {
1832      throw new UnknownRegionException(
1833        "Can't invoke merge on unknown region " + Bytes.toStringBinary(encodedNameofRegionToSplit));
1834    }
1835
1836    return splitRegionAsync(pair.getFirst(), splitPoint);
1837  }
1838
1839  Future<Void> splitRegionAsync(RegionInfo hri, byte[] splitPoint) throws IOException {
1840    TableName tableName = hri.getTable();
1841    if (
1842      hri.getStartKey() != null && splitPoint != null
1843        && Bytes.compareTo(hri.getStartKey(), splitPoint) == 0
1844    ) {
1845      throw new IOException("should not give a splitkey which equals to startkey!");
1846    }
1847
1848    SplitTableRegionResponse response = executeCallable(
1849      new MasterCallable<SplitTableRegionResponse>(getConnection(), getRpcControllerFactory()) {
1850        Long nonceGroup = ng.getNonceGroup();
1851        Long nonce = ng.newNonce();
1852
1853        @Override
1854        protected SplitTableRegionResponse rpcCall() throws Exception {
1855          setPriority(tableName);
1856          SplitTableRegionRequest request =
1857            RequestConverter.buildSplitTableRegionRequest(hri, splitPoint, nonceGroup, nonce);
1858          return master.splitRegion(getRpcController(), request);
1859        }
1860      });
1861    return new SplitTableRegionFuture(this, tableName, response);
1862  }
1863
1864  private static class SplitTableRegionFuture extends TableFuture<Void> {
1865    public SplitTableRegionFuture(final HBaseAdmin admin, final TableName tableName,
1866      final SplitTableRegionResponse response) {
1867      super(admin, tableName,
1868        (response != null && response.hasProcId()) ? response.getProcId() : null);
1869    }
1870
1871    public SplitTableRegionFuture(final HBaseAdmin admin, final TableName tableName,
1872      final Long procId) {
1873      super(admin, tableName, procId);
1874    }
1875
1876    @Override
1877    public String getOperationType() {
1878      return "SPLIT_REGION";
1879    }
1880  }
1881
1882  @Override
1883  public void split(final TableName tableName) throws IOException {
1884    split(tableName, null);
1885  }
1886
1887  @Override
1888  public void splitRegion(final byte[] regionName) throws IOException {
1889    splitRegion(regionName, null);
1890  }
1891
1892  @Override
1893  public void split(final TableName tableName, final byte[] splitPoint) throws IOException {
1894    checkTableExists(tableName);
1895    for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) {
1896      ServerName sn = loc.getServerName();
1897      if (sn == null) {
1898        continue;
1899      }
1900      RegionInfo r = loc.getRegion();
1901      // check for parents
1902      if (r.isSplitParent()) {
1903        continue;
1904      }
1905      // if a split point given, only split that particular region
1906      if (
1907        r.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID
1908          || (splitPoint != null && !r.containsRow(splitPoint))
1909      ) {
1910        continue;
1911      }
1912      // call out to master to do split now
1913      splitRegionAsync(r, splitPoint);
1914    }
1915  }
1916
1917  @Override
1918  public void splitRegion(final byte[] regionName, final byte[] splitPoint) throws IOException {
1919    Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName);
1920    if (regionServerPair == null) {
1921      throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
1922    }
1923    if (
1924      regionServerPair.getFirst() != null
1925        && regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID
1926    ) {
1927      throw new IllegalArgumentException(
1928        "Can't split replicas directly. " + "Replicas are auto-split when their primary is split.");
1929    }
1930    if (regionServerPair.getSecond() == null) {
1931      throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1932    }
1933    splitRegionAsync(regionServerPair.getFirst(), splitPoint);
1934  }
1935
1936  private static class ModifyTableFuture extends TableFuture<Void> {
1937    public ModifyTableFuture(final HBaseAdmin admin, final TableName tableName,
1938      final ModifyTableResponse response) {
1939      super(admin, tableName,
1940        (response != null && response.hasProcId()) ? response.getProcId() : null);
1941    }
1942
1943    public ModifyTableFuture(final HBaseAdmin admin, final TableName tableName, final Long procId) {
1944      super(admin, tableName, procId);
1945    }
1946
1947    @Override
1948    public String getOperationType() {
1949      return "MODIFY";
1950    }
1951
1952    @Override
1953    protected Void postOperationResult(final Void result, final long deadlineTs)
1954      throws IOException, TimeoutException {
1955      // The modify operation on the table is asynchronous on the server side irrespective
1956      // of whether Procedure V2 is supported or not. So, we wait in the client till
1957      // all regions get updated.
1958      waitForSchemaUpdate(deadlineTs);
1959      return result;
1960    }
1961  }
1962
1963  /**
1964   * @param regionName Name of a region.
1965   * @return a pair of HRegionInfo and ServerName if <code>regionName</code> is a verified region
1966   *         name (we call {@link MetaTableAccessor#getRegionLocation(Connection, byte[])} else
1967   *         null. Throw IllegalArgumentException if <code>regionName</code> is null.
1968   * @throws IOException if a remote or network exception occurs
1969   */
1970  Pair<RegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException {
1971    if (regionName == null) {
1972      throw new IllegalArgumentException("Pass a table name or region name");
1973    }
1974    Pair<RegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionName);
1975    if (pair == null) {
1976      final String encodedName = Bytes.toString(regionName);
1977      // When it is not a valid regionName, it is possible that it could be an encoded regionName.
1978      // To match the encoded regionName, it has to scan the meta table and compare entry by entry.
1979      // Since it scans meta table, so it has to be the MD5 hash, it can filter out
1980      // most of invalid cases.
1981      if (!RegionInfo.isMD5Hash(encodedName)) {
1982        return null;
1983      }
1984      final AtomicReference<Pair<RegionInfo, ServerName>> result = new AtomicReference<>(null);
1985      MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
1986        @Override
1987        public boolean visit(Result data) throws IOException {
1988          RegionInfo info = MetaTableAccessor.getRegionInfo(data);
1989          if (info == null) {
1990            LOG.warn("No serialized HRegionInfo in " + data);
1991            return true;
1992          }
1993          RegionLocations rl = MetaTableAccessor.getRegionLocations(data);
1994          boolean matched = false;
1995          ServerName sn = null;
1996          if (rl != null) {
1997            for (HRegionLocation h : rl.getRegionLocations()) {
1998              if (h != null && encodedName.equals(h.getRegionInfo().getEncodedName())) {
1999                sn = h.getServerName();
2000                info = h.getRegionInfo();
2001                matched = true;
2002              }
2003            }
2004          }
2005          if (!matched) return true;
2006          result.set(new Pair<>(info, sn));
2007          return false; // found the region, stop
2008        }
2009      };
2010
2011      MetaTableAccessor.fullScanRegions(connection, visitor);
2012      pair = result.get();
2013    }
2014    return pair;
2015  }
2016
2017  /**
2018   * If the input is a region name, it is returned as is. If it's an encoded region name, the
2019   * corresponding region is found from meta and its region name is returned. If we can't find any
2020   * region in meta matching the input as either region name or encoded region name, the input is
2021   * returned as is. We don't throw unknown region exception.
2022   */
2023  private byte[] getRegionName(final byte[] regionNameOrEncodedRegionName) throws IOException {
2024    if (
2025      Bytes.equals(regionNameOrEncodedRegionName, HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
2026        || Bytes.equals(regionNameOrEncodedRegionName,
2027          HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())
2028    ) {
2029      return HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
2030    }
2031    byte[] tmp = regionNameOrEncodedRegionName;
2032    Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName);
2033    if (regionServerPair != null && regionServerPair.getFirst() != null) {
2034      tmp = regionServerPair.getFirst().getRegionName();
2035    }
2036    return tmp;
2037  }
2038
2039  /**
2040   * Check if table exists or not
2041   * @param tableName Name of a table.
2042   * @return tableName instance
2043   * @throws IOException            if a remote or network exception occurs.
2044   * @throws TableNotFoundException if table does not exist.
2045   */
2046  private TableName checkTableExists(final TableName tableName) throws IOException {
2047    return executeCallable(new RpcRetryingCallable<TableName>() {
2048      @Override
2049      protected TableName rpcCall(int callTimeout) throws Exception {
2050        if (MetaTableAccessor.getTableState(getConnection(), tableName) == null) {
2051          throw new TableNotFoundException(tableName);
2052        }
2053        return tableName;
2054      }
2055    });
2056  }
2057
2058  @Override
2059  public synchronized void shutdown() throws IOException {
2060    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
2061      @Override
2062      protected Void rpcCall() throws Exception {
2063        setPriority(HConstants.HIGH_QOS);
2064        master.shutdown(getRpcController(), ShutdownRequest.newBuilder().build());
2065        return null;
2066      }
2067    });
2068  }
2069
2070  @Override
2071  public synchronized void stopMaster() throws IOException {
2072    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
2073      @Override
2074      protected Void rpcCall() throws Exception {
2075        setPriority(HConstants.HIGH_QOS);
2076        master.stopMaster(getRpcController(), StopMasterRequest.newBuilder().build());
2077        return null;
2078      }
2079    });
2080  }
2081
2082  @Override
2083  public synchronized void stopRegionServer(final String hostnamePort) throws IOException {
2084    String hostname = Addressing.parseHostname(hostnamePort);
2085    int port = Addressing.parsePort(hostnamePort);
2086    final AdminService.BlockingInterface admin =
2087      this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
2088    // TODO: There is no timeout on this controller. Set one!
2089    HBaseRpcController controller = rpcControllerFactory.newController();
2090    controller.setPriority(HConstants.HIGH_QOS);
2091    StopServerRequest request = RequestConverter
2092      .buildStopServerRequest("Called by admin client " + this.connection.toString());
2093    try {
2094      admin.stopServer(controller, request);
2095    } catch (Exception e) {
2096      throw ProtobufUtil.handleRemoteException(e);
2097    }
2098  }
2099
2100  @Override
2101  public boolean isMasterInMaintenanceMode() throws IOException {
2102    return executeCallable(
2103      new MasterCallable<IsInMaintenanceModeResponse>(getConnection(), this.rpcControllerFactory) {
2104        @Override
2105        protected IsInMaintenanceModeResponse rpcCall() throws Exception {
2106          return master.isMasterInMaintenanceMode(getRpcController(),
2107            IsInMaintenanceModeRequest.newBuilder().build());
2108        }
2109      }).getInMaintenanceMode();
2110  }
2111
2112  @Override
2113  public ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException {
2114    return executeCallable(
2115      new MasterCallable<ClusterMetrics>(getConnection(), this.rpcControllerFactory) {
2116        @Override
2117        protected ClusterMetrics rpcCall() throws Exception {
2118          GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest(options);
2119          return ClusterMetricsBuilder
2120            .toClusterMetrics(master.getClusterStatus(getRpcController(), req).getClusterStatus());
2121        }
2122      });
2123  }
2124
2125  @Override
2126  public List<RegionMetrics> getRegionMetrics(ServerName serverName, TableName tableName)
2127    throws IOException {
2128    AdminService.BlockingInterface admin = this.connection.getAdmin(serverName);
2129    HBaseRpcController controller = rpcControllerFactory.newController();
2130    AdminProtos.GetRegionLoadRequest request =
2131      RequestConverter.buildGetRegionLoadRequest(tableName);
2132    try {
2133      return admin.getRegionLoad(controller, request).getRegionLoadsList().stream()
2134        .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList());
2135    } catch (ServiceException se) {
2136      throw ProtobufUtil.getRemoteException(se);
2137    }
2138  }
2139
2140  @Override
2141  public Configuration getConfiguration() {
2142    return this.conf;
2143  }
2144
2145  /**
2146   * Do a get with a timeout against the passed in <code>future</code>.
2147   */
2148  private static <T> T get(final Future<T> future, final long timeout, final TimeUnit units)
2149    throws IOException {
2150    try {
2151      // TODO: how long should we wait? Spin forever?
2152      return future.get(timeout, units);
2153    } catch (InterruptedException e) {
2154      IOException ioe = new InterruptedIOException("Interrupt while waiting on " + future);
2155      ioe.initCause(e);
2156      throw ioe;
2157    } catch (TimeoutException e) {
2158      throw new TimeoutIOException(e);
2159    } catch (ExecutionException e) {
2160      if (e.getCause() instanceof IOException) {
2161        throw (IOException) e.getCause();
2162      } else {
2163        throw new IOException(e.getCause());
2164      }
2165    }
2166  }
2167
2168  @Override
2169  public Future<Void> createNamespaceAsync(final NamespaceDescriptor descriptor)
2170    throws IOException {
2171    CreateNamespaceResponse response = executeCallable(
2172      new MasterCallable<CreateNamespaceResponse>(getConnection(), getRpcControllerFactory()) {
2173        @Override
2174        protected CreateNamespaceResponse rpcCall() throws Exception {
2175          return master.createNamespace(getRpcController(), CreateNamespaceRequest.newBuilder()
2176            .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build());
2177        }
2178      });
2179    return new NamespaceFuture(this, descriptor.getName(), response.getProcId()) {
2180      @Override
2181      public String getOperationType() {
2182        return "CREATE_NAMESPACE";
2183      }
2184    };
2185  }
2186
2187  @Override
2188  public Future<Void> modifyNamespaceAsync(final NamespaceDescriptor descriptor)
2189    throws IOException {
2190    ModifyNamespaceResponse response = executeCallable(
2191      new MasterCallable<ModifyNamespaceResponse>(getConnection(), getRpcControllerFactory()) {
2192        @Override
2193        protected ModifyNamespaceResponse rpcCall() throws Exception {
2194          // TODO: set priority based on NS?
2195          return master.modifyNamespace(getRpcController(), ModifyNamespaceRequest.newBuilder()
2196            .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build());
2197        }
2198      });
2199    return new NamespaceFuture(this, descriptor.getName(), response.getProcId()) {
2200      @Override
2201      public String getOperationType() {
2202        return "MODIFY_NAMESPACE";
2203      }
2204    };
2205  }
2206
2207  @Override
2208  public Future<Void> deleteNamespaceAsync(final String name) throws IOException {
2209    DeleteNamespaceResponse response = executeCallable(
2210      new MasterCallable<DeleteNamespaceResponse>(getConnection(), getRpcControllerFactory()) {
2211        @Override
2212        protected DeleteNamespaceResponse rpcCall() throws Exception {
2213          // TODO: set priority based on NS?
2214          return master.deleteNamespace(getRpcController(),
2215            DeleteNamespaceRequest.newBuilder().setNamespaceName(name).build());
2216        }
2217      });
2218    return new NamespaceFuture(this, name, response.getProcId()) {
2219      @Override
2220      public String getOperationType() {
2221        return "DELETE_NAMESPACE";
2222      }
2223    };
2224  }
2225
2226  @Override
2227  public NamespaceDescriptor getNamespaceDescriptor(final String name)
2228    throws NamespaceNotFoundException, IOException {
2229    return executeCallable(
2230      new MasterCallable<NamespaceDescriptor>(getConnection(), getRpcControllerFactory()) {
2231        @Override
2232        protected NamespaceDescriptor rpcCall() throws Exception {
2233          return ProtobufUtil.toNamespaceDescriptor(master
2234            .getNamespaceDescriptor(getRpcController(),
2235              GetNamespaceDescriptorRequest.newBuilder().setNamespaceName(name).build())
2236            .getNamespaceDescriptor());
2237        }
2238      });
2239  }
2240
2241  /**
2242   * List available namespaces
2243   * @return List of namespace names
2244   * @throws IOException if a remote or network exception occurs
2245   */
2246  @Override
2247  public String[] listNamespaces() throws IOException {
2248    return executeCallable(
2249      new MasterCallable<String[]>(getConnection(), getRpcControllerFactory()) {
2250        @Override
2251        protected String[] rpcCall() throws Exception {
2252          List<String> list =
2253            master.listNamespaces(getRpcController(), ListNamespacesRequest.newBuilder().build())
2254              .getNamespaceNameList();
2255          return list.toArray(new String[list.size()]);
2256        }
2257      });
2258  }
2259
2260  /**
2261   * List available namespace descriptors
2262   * @return List of descriptors
2263   * @throws IOException if a remote or network exception occurs
2264   */
2265  @Override
2266  public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
2267    return executeCallable(
2268      new MasterCallable<NamespaceDescriptor[]>(getConnection(), getRpcControllerFactory()) {
2269        @Override
2270        protected NamespaceDescriptor[] rpcCall() throws Exception {
2271          List<
2272            HBaseProtos.NamespaceDescriptor> list =
2273              master
2274                .listNamespaceDescriptors(getRpcController(),
2275                  ListNamespaceDescriptorsRequest.newBuilder().build())
2276                .getNamespaceDescriptorList();
2277          NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()];
2278          for (int i = 0; i < list.size(); i++) {
2279            res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i));
2280          }
2281          return res;
2282        }
2283      });
2284  }
2285
2286  @Override
2287  public String getProcedures() throws IOException {
2288    return executeCallable(new MasterCallable<String>(getConnection(), getRpcControllerFactory()) {
2289      @Override
2290      protected String rpcCall() throws Exception {
2291        GetProceduresRequest request = GetProceduresRequest.newBuilder().build();
2292        GetProceduresResponse response = master.getProcedures(getRpcController(), request);
2293        return ProtobufUtil.toProcedureJson(response.getProcedureList());
2294      }
2295    });
2296  }
2297
2298  @Override
2299  public String getLocks() throws IOException {
2300    return executeCallable(new MasterCallable<String>(getConnection(), getRpcControllerFactory()) {
2301      @Override
2302      protected String rpcCall() throws Exception {
2303        GetLocksRequest request = GetLocksRequest.newBuilder().build();
2304        GetLocksResponse response = master.getLocks(getRpcController(), request);
2305        return ProtobufUtil.toLockJson(response.getLockList());
2306      }
2307    });
2308  }
2309
2310  @Override
2311  public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException {
2312    return executeCallable(
2313      new MasterCallable<HTableDescriptor[]>(getConnection(), getRpcControllerFactory()) {
2314        @Override
2315        protected HTableDescriptor[] rpcCall() throws Exception {
2316          List<TableSchema> list = master
2317            .listTableDescriptorsByNamespace(getRpcController(),
2318              ListTableDescriptorsByNamespaceRequest.newBuilder().setNamespaceName(name).build())
2319            .getTableSchemaList();
2320          HTableDescriptor[] res = new HTableDescriptor[list.size()];
2321          for (int i = 0; i < list.size(); i++) {
2322            res[i] = new ImmutableHTableDescriptor(ProtobufUtil.toTableDescriptor(list.get(i)));
2323          }
2324          return res;
2325        }
2326      });
2327  }
2328
2329  @Override
2330  public TableName[] listTableNamesByNamespace(final String name) throws IOException {
2331    return executeCallable(
2332      new MasterCallable<TableName[]>(getConnection(), getRpcControllerFactory()) {
2333        @Override
2334        protected TableName[] rpcCall() throws Exception {
2335          List<HBaseProtos.TableName> tableNames = master
2336            .listTableNamesByNamespace(getRpcController(),
2337              ListTableNamesByNamespaceRequest.newBuilder().setNamespaceName(name).build())
2338            .getTableNameList();
2339          TableName[] result = new TableName[tableNames.size()];
2340          for (int i = 0; i < tableNames.size(); i++) {
2341            result[i] = ProtobufUtil.toTableName(tableNames.get(i));
2342          }
2343          return result;
2344        }
2345      });
2346  }
2347
2348  /**
2349   * Is HBase available? Throw an exception if not.
2350   * @param conf system configuration
2351   * @throws MasterNotRunningException    if the master is not running.
2352   * @throws ZooKeeperConnectionException if unable to connect to zookeeper. // TODO do not expose
2353   *                                      ZKConnectionException.
2354   */
2355  public static void available(final Configuration conf)
2356    throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
2357    Configuration copyOfConf = HBaseConfiguration.create(conf);
2358    // We set it to make it fail as soon as possible if HBase is not available
2359    copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
2360    copyOfConf.setInt("zookeeper.recovery.retry", 0);
2361
2362    // Check ZK first.
2363    // If the connection exists, we may have a connection to ZK that does not work anymore
2364    try (ClusterConnection connection =
2365      (ClusterConnection) ConnectionFactory.createConnection(copyOfConf)) {
2366      // can throw MasterNotRunningException
2367      connection.isMasterRunning();
2368    }
2369  }
2370
2371  /**
2372   * n * @return List of {@link HRegionInfo}.
2373   * @throws IOException if a remote or network exception occurs
2374   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
2375   *             {@link #getRegions(TableName)}.
2376   */
2377  @Deprecated
2378  @Override
2379  public List<HRegionInfo> getTableRegions(final TableName tableName) throws IOException {
2380    return getRegions(tableName).stream().map(ImmutableHRegionInfo::new)
2381      .collect(Collectors.toList());
2382  }
2383
2384  @Override
2385  public synchronized void close() throws IOException {
2386  }
2387
2388  @Override
2389  public HTableDescriptor[] getTableDescriptorsByTableName(final List<TableName> tableNames)
2390    throws IOException {
2391    return executeCallable(
2392      new MasterCallable<HTableDescriptor[]>(getConnection(), getRpcControllerFactory()) {
2393        @Override
2394        protected HTableDescriptor[] rpcCall() throws Exception {
2395          GetTableDescriptorsRequest req =
2396            RequestConverter.buildGetTableDescriptorsRequest(tableNames);
2397          return ProtobufUtil
2398            .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req)).stream()
2399            .map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new);
2400        }
2401      });
2402  }
2403
2404  @Override
2405  public HTableDescriptor[] getTableDescriptors(List<String> names) throws IOException {
2406    List<TableName> tableNames = new ArrayList<>(names.size());
2407    for (String name : names) {
2408      tableNames.add(TableName.valueOf(name));
2409    }
2410    return getTableDescriptorsByTableName(tableNames);
2411  }
2412
2413  private RollWALWriterResponse rollWALWriterImpl(final ServerName sn)
2414    throws IOException, FailedLogCloseException {
2415    final AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2416    RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest();
2417    // TODO: There is no timeout on this controller. Set one!
2418    HBaseRpcController controller = rpcControllerFactory.newController();
2419    try {
2420      return admin.rollWALWriter(controller, request);
2421    } catch (ServiceException e) {
2422      throw ProtobufUtil.handleRemoteException(e);
2423    }
2424  }
2425
2426  @Override
2427  public synchronized void rollWALWriter(ServerName serverName)
2428    throws IOException, FailedLogCloseException {
2429    rollWALWriterImpl(serverName);
2430  }
2431
2432  @Override
2433  public CompactionState getCompactionState(final TableName tableName) throws IOException {
2434    return getCompactionState(tableName, CompactType.NORMAL);
2435  }
2436
2437  @Override
2438  public CompactionState getCompactionStateForRegion(final byte[] regionName) throws IOException {
2439    final Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName);
2440    if (regionServerPair == null) {
2441      throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2442    }
2443    if (regionServerPair.getSecond() == null) {
2444      throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2445    }
2446    ServerName sn = regionServerPair.getSecond();
2447    final AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2448    // TODO: There is no timeout on this controller. Set one!
2449    HBaseRpcController controller = rpcControllerFactory.newController();
2450    GetRegionInfoRequest request =
2451      RequestConverter.buildGetRegionInfoRequest(regionServerPair.getFirst().getRegionName(), true);
2452    GetRegionInfoResponse response;
2453    try {
2454      response = admin.getRegionInfo(controller, request);
2455    } catch (ServiceException e) {
2456      throw ProtobufUtil.handleRemoteException(e);
2457    }
2458    if (response.getCompactionState() != null) {
2459      return ProtobufUtil.createCompactionState(response.getCompactionState());
2460    }
2461    return null;
2462  }
2463
2464  @Override
2465  public void snapshot(SnapshotDescription snapshotDesc)
2466    throws IOException, SnapshotCreationException, IllegalArgumentException {
2467    // actually take the snapshot
2468    SnapshotProtos.SnapshotDescription snapshot =
2469      ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc);
2470    SnapshotResponse response = asyncSnapshot(snapshot);
2471    final IsSnapshotDoneRequest request =
2472      IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build();
2473    IsSnapshotDoneResponse done = null;
2474    long start = EnvironmentEdgeManager.currentTime();
2475    long max = response.getExpectedTimeout();
2476    long maxPauseTime = max / this.numRetries;
2477    int tries = 0;
2478    LOG.debug("Waiting a max of " + max + " ms for snapshot '"
2479      + ClientSnapshotDescriptionUtils.toString(snapshot) + "'' to complete. (max " + maxPauseTime
2480      + " ms per retry)");
2481    while (
2482      tries == 0 || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())
2483    ) {
2484      try {
2485        // sleep a backoff <= pauseTime amount
2486        long sleep = getPauseTime(tries++);
2487        sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
2488        LOG.debug(
2489          "(#" + tries + ") Sleeping: " + sleep + "ms while waiting for snapshot completion.");
2490        Thread.sleep(sleep);
2491      } catch (InterruptedException e) {
2492        throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
2493      }
2494      LOG.debug("Getting current status of snapshot from master...");
2495      done = executeCallable(
2496        new MasterCallable<IsSnapshotDoneResponse>(getConnection(), getRpcControllerFactory()) {
2497          @Override
2498          protected IsSnapshotDoneResponse rpcCall() throws Exception {
2499            return master.isSnapshotDone(getRpcController(), request);
2500          }
2501        });
2502    }
2503    if (!done.getDone()) {
2504      throw new SnapshotCreationException(
2505        "Snapshot '" + snapshot.getName() + "' wasn't completed in expectedTime:" + max + " ms",
2506        snapshotDesc);
2507    }
2508  }
2509
2510  @Override
2511  public Future<Void> snapshotAsync(SnapshotDescription snapshotDesc)
2512    throws IOException, SnapshotCreationException {
2513    asyncSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc));
2514    return new ProcedureFuture<Void>(this, null) {
2515
2516      @Override
2517      protected Void waitOperationResult(long deadlineTs) throws IOException, TimeoutException {
2518        waitForState(deadlineTs, new WaitForStateCallable() {
2519
2520          @Override
2521          public void throwInterruptedException() throws InterruptedIOException {
2522            throw new InterruptedIOException(
2523              "Interrupted while waiting for taking snapshot" + snapshotDesc);
2524          }
2525
2526          @Override
2527          public void throwTimeoutException(long elapsedTime) throws TimeoutException {
2528            throw new TimeoutException("Snapshot '" + snapshotDesc.getName()
2529              + "' wasn't completed in expectedTime:" + elapsedTime + " ms");
2530          }
2531
2532          @Override
2533          public boolean checkState(int tries) throws IOException {
2534            return isSnapshotFinished(snapshotDesc);
2535          }
2536        });
2537        return null;
2538      }
2539    };
2540  }
2541
2542  private SnapshotResponse asyncSnapshot(SnapshotProtos.SnapshotDescription snapshot)
2543    throws IOException {
2544    ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
2545    final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot).build();
2546    // run the snapshot on the master
2547    return executeCallable(
2548      new MasterCallable<SnapshotResponse>(getConnection(), getRpcControllerFactory()) {
2549        @Override
2550        protected SnapshotResponse rpcCall() throws Exception {
2551          return master.snapshot(getRpcController(), request);
2552        }
2553      });
2554  }
2555
2556  @Override
2557  public boolean isSnapshotFinished(final SnapshotDescription snapshotDesc)
2558    throws IOException, HBaseSnapshotException, UnknownSnapshotException {
2559    final SnapshotProtos.SnapshotDescription snapshot =
2560      ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc);
2561    return executeCallable(
2562      new MasterCallable<IsSnapshotDoneResponse>(getConnection(), getRpcControllerFactory()) {
2563        @Override
2564        protected IsSnapshotDoneResponse rpcCall() throws Exception {
2565          return master.isSnapshotDone(getRpcController(),
2566            IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build());
2567        }
2568      }).getDone();
2569  }
2570
2571  @Override
2572  public void restoreSnapshot(final byte[] snapshotName)
2573    throws IOException, RestoreSnapshotException {
2574    restoreSnapshot(Bytes.toString(snapshotName));
2575  }
2576
2577  @Override
2578  public void restoreSnapshot(final String snapshotName)
2579    throws IOException, RestoreSnapshotException {
2580    boolean takeFailSafeSnapshot =
2581      conf.getBoolean(HConstants.SNAPSHOT_RESTORE_TAKE_FAILSAFE_SNAPSHOT,
2582        HConstants.DEFAULT_SNAPSHOT_RESTORE_TAKE_FAILSAFE_SNAPSHOT);
2583    restoreSnapshot(snapshotName, takeFailSafeSnapshot);
2584  }
2585
2586  @Override
2587  public void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot)
2588    throws IOException, RestoreSnapshotException {
2589    restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
2590  }
2591
2592  /**
2593   * Check whether the snapshot exists and contains disabled table
2594   * @param snapshotName name of the snapshot to restore
2595   * @throws IOException              if a remote or network exception occurs
2596   * @throws RestoreSnapshotException if no valid snapshot is found
2597   */
2598  private TableName getTableNameBeforeRestoreSnapshot(final String snapshotName)
2599    throws IOException, RestoreSnapshotException {
2600    TableName tableName = null;
2601    for (SnapshotDescription snapshotInfo : listSnapshots()) {
2602      if (snapshotInfo.getName().equals(snapshotName)) {
2603        tableName = snapshotInfo.getTableName();
2604        break;
2605      }
2606    }
2607
2608    if (tableName == null) {
2609      throw new RestoreSnapshotException(
2610        "Unable to find the table name for snapshot=" + snapshotName);
2611    }
2612    return tableName;
2613  }
2614
2615  @Override
2616  public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot)
2617    throws IOException, RestoreSnapshotException {
2618    restoreSnapshot(snapshotName, takeFailSafeSnapshot, false);
2619  }
2620
2621  @Override
2622  public void restoreSnapshot(final String snapshotName, final boolean takeFailSafeSnapshot,
2623    final boolean restoreAcl) throws IOException, RestoreSnapshotException {
2624    TableName tableName = getTableNameBeforeRestoreSnapshot(snapshotName);
2625
2626    // The table does not exists, switch to clone.
2627    if (!tableExists(tableName)) {
2628      cloneSnapshot(snapshotName, tableName, restoreAcl);
2629      return;
2630    }
2631
2632    // Check if the table is disabled
2633    if (!isTableDisabled(tableName)) {
2634      throw new TableNotDisabledException(tableName);
2635    }
2636
2637    // Take a snapshot of the current state
2638    String failSafeSnapshotSnapshotName = null;
2639    if (takeFailSafeSnapshot) {
2640      failSafeSnapshotSnapshotName = conf.get("hbase.snapshot.restore.failsafe.name",
2641        "hbase-failsafe-{snapshot.name}-{restore.timestamp}");
2642      failSafeSnapshotSnapshotName =
2643        failSafeSnapshotSnapshotName.replace("{snapshot.name}", snapshotName)
2644          .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
2645          .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime()));
2646      LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
2647      snapshot(failSafeSnapshotSnapshotName, tableName);
2648    }
2649
2650    try {
2651      // Restore snapshot
2652      get(internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl, null), syncWaitTimeout,
2653        TimeUnit.MILLISECONDS);
2654    } catch (IOException e) {
2655      // Something went wrong during the restore...
2656      // if the pre-restore snapshot is available try to rollback
2657      if (takeFailSafeSnapshot) {
2658        try {
2659          get(
2660            internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName, restoreAcl, null),
2661            syncWaitTimeout, TimeUnit.MILLISECONDS);
2662          String msg = "Restore snapshot=" + snapshotName + " failed. Rollback to snapshot="
2663            + failSafeSnapshotSnapshotName + " succeeded.";
2664          LOG.error(msg, e);
2665          throw new RestoreSnapshotException(msg, e);
2666        } catch (IOException ex) {
2667          String msg = "Failed to restore and rollback to snapshot=" + failSafeSnapshotSnapshotName;
2668          LOG.error(msg, ex);
2669          throw new RestoreSnapshotException(msg, e);
2670        }
2671      } else {
2672        throw new RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e);
2673      }
2674    }
2675
2676    // If the restore is succeeded, delete the pre-restore snapshot
2677    if (takeFailSafeSnapshot) {
2678      try {
2679        LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
2680        deleteSnapshot(failSafeSnapshotSnapshotName);
2681      } catch (IOException e) {
2682        LOG.error("Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName, e);
2683      }
2684    }
2685  }
2686
2687  @Override
2688  public Future<Void> restoreSnapshotAsync(final String snapshotName)
2689    throws IOException, RestoreSnapshotException {
2690    TableName tableName = getTableNameBeforeRestoreSnapshot(snapshotName);
2691
2692    // The table does not exists, switch to clone.
2693    if (!tableExists(tableName)) {
2694      return cloneSnapshotAsync(snapshotName, tableName);
2695    }
2696
2697    // Check if the table is disabled
2698    if (!isTableDisabled(tableName)) {
2699      throw new TableNotDisabledException(tableName);
2700    }
2701
2702    return internalRestoreSnapshotAsync(snapshotName, tableName, false, null);
2703  }
2704
2705  @Override
2706  public Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName,
2707    boolean restoreAcl, String customSFT)
2708    throws IOException, TableExistsException, RestoreSnapshotException {
2709    if (tableExists(tableName)) {
2710      throw new TableExistsException(tableName);
2711    }
2712    return internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl, customSFT);
2713  }
2714
2715  @Override
2716  public byte[] execProcedureWithReturn(String signature, String instance,
2717    Map<String, String> props) throws IOException {
2718    ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props);
2719    final ExecProcedureRequest request =
2720      ExecProcedureRequest.newBuilder().setProcedure(desc).build();
2721    // run the procedure on the master
2722    ExecProcedureResponse response = executeCallable(
2723      new MasterCallable<ExecProcedureResponse>(getConnection(), getRpcControllerFactory()) {
2724        @Override
2725        protected ExecProcedureResponse rpcCall() throws Exception {
2726          return master.execProcedureWithRet(getRpcController(), request);
2727        }
2728      });
2729
2730    return response.hasReturnData() ? response.getReturnData().toByteArray() : null;
2731  }
2732
2733  @Override
2734  public void execProcedure(String signature, String instance, Map<String, String> props)
2735    throws IOException {
2736    ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props);
2737    final ExecProcedureRequest request =
2738      ExecProcedureRequest.newBuilder().setProcedure(desc).build();
2739    // run the procedure on the master
2740    ExecProcedureResponse response = executeCallable(
2741      new MasterCallable<ExecProcedureResponse>(getConnection(), getRpcControllerFactory()) {
2742        @Override
2743        protected ExecProcedureResponse rpcCall() throws Exception {
2744          return master.execProcedure(getRpcController(), request);
2745        }
2746      });
2747
2748    long start = EnvironmentEdgeManager.currentTime();
2749    long max = response.getExpectedTimeout();
2750    long maxPauseTime = max / this.numRetries;
2751    int tries = 0;
2752    LOG.debug("Waiting a max of " + max + " ms for procedure '" + signature + " : " + instance
2753      + "'' to complete. (max " + maxPauseTime + " ms per retry)");
2754    boolean done = false;
2755    while (tries == 0 || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
2756      try {
2757        // sleep a backoff <= pauseTime amount
2758        long sleep = getPauseTime(tries++);
2759        sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
2760        LOG.debug(
2761          "(#" + tries + ") Sleeping: " + sleep + "ms while waiting for procedure completion.");
2762        Thread.sleep(sleep);
2763      } catch (InterruptedException e) {
2764        throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
2765      }
2766      LOG.debug("Getting current status of procedure from master...");
2767      done = isProcedureFinished(signature, instance, props);
2768    }
2769    if (!done) {
2770      throw new IOException("Procedure '" + signature + " : " + instance
2771        + "' wasn't completed in expectedTime:" + max + " ms");
2772    }
2773  }
2774
2775  @Override
2776  public boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
2777    throws IOException {
2778    ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props);
2779    return executeCallable(
2780      new MasterCallable<IsProcedureDoneResponse>(getConnection(), getRpcControllerFactory()) {
2781        @Override
2782        protected IsProcedureDoneResponse rpcCall() throws Exception {
2783          return master.isProcedureDone(getRpcController(),
2784            IsProcedureDoneRequest.newBuilder().setProcedure(desc).build());
2785        }
2786      }).getDone();
2787  }
2788
2789  /**
2790   * Execute Restore/Clone snapshot and wait for the server to complete (blocking). To check if the
2791   * cloned table exists, use {@link #isTableAvailable} -- it is not safe to create an HTable
2792   * instance to this table before it is available.
2793   * @param snapshotName snapshot to restore
2794   * @param tableName    table name to restore the snapshot on
2795   * @throws IOException              if a remote or network exception occurs
2796   * @throws RestoreSnapshotException if snapshot failed to be restored
2797   * @throws IllegalArgumentException if the restore request is formatted incorrectly
2798   */
2799  private Future<Void> internalRestoreSnapshotAsync(final String snapshotName,
2800    final TableName tableName, final boolean restoreAcl, String customSFT)
2801    throws IOException, RestoreSnapshotException {
2802    final SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription
2803      .newBuilder().setName(snapshotName).setTable(tableName.getNameAsString()).build();
2804
2805    // actually restore the snapshot
2806    ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
2807
2808    RestoreSnapshotResponse response = executeCallable(
2809      new MasterCallable<RestoreSnapshotResponse>(getConnection(), getRpcControllerFactory()) {
2810        Long nonceGroup = ng.getNonceGroup();
2811        Long nonce = ng.newNonce();
2812
2813        @Override
2814        protected RestoreSnapshotResponse rpcCall() throws Exception {
2815          final RestoreSnapshotRequest.Builder builder =
2816            RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setNonceGroup(nonceGroup)
2817              .setNonce(nonce).setRestoreACL(restoreAcl);
2818          if (customSFT != null) {
2819            builder.setCustomSFT(customSFT);
2820          }
2821          return master.restoreSnapshot(getRpcController(), builder.build());
2822        }
2823      });
2824
2825    return new RestoreSnapshotFuture(this, snapshot, tableName, response);
2826  }
2827
2828  private static class RestoreSnapshotFuture extends TableFuture<Void> {
2829    public RestoreSnapshotFuture(final HBaseAdmin admin,
2830      final SnapshotProtos.SnapshotDescription snapshot, final TableName tableName,
2831      final RestoreSnapshotResponse response) {
2832      super(admin, tableName,
2833        (response != null && response.hasProcId()) ? response.getProcId() : null);
2834
2835      if (response != null && !response.hasProcId()) {
2836        throw new UnsupportedOperationException("Client could not call old version of Server");
2837      }
2838    }
2839
2840    public RestoreSnapshotFuture(final HBaseAdmin admin, final TableName tableName,
2841      final Long procId) {
2842      super(admin, tableName, procId);
2843    }
2844
2845    @Override
2846    public String getOperationType() {
2847      return "MODIFY";
2848    }
2849  }
2850
2851  @Override
2852  public List<SnapshotDescription> listSnapshots() throws IOException {
2853    return executeCallable(
2854      new MasterCallable<List<SnapshotDescription>>(getConnection(), getRpcControllerFactory()) {
2855        @Override
2856        protected List<SnapshotDescription> rpcCall() throws Exception {
2857          List<SnapshotProtos.SnapshotDescription> snapshotsList =
2858            master.getCompletedSnapshots(getRpcController(),
2859              GetCompletedSnapshotsRequest.newBuilder().build()).getSnapshotsList();
2860          List<SnapshotDescription> result = new ArrayList<>(snapshotsList.size());
2861          for (SnapshotProtos.SnapshotDescription snapshot : snapshotsList) {
2862            result.add(ProtobufUtil.createSnapshotDesc(snapshot));
2863          }
2864          return result;
2865        }
2866      });
2867  }
2868
2869  @Override
2870  public List<SnapshotDescription> listSnapshots(String regex) throws IOException {
2871    return listSnapshots(Pattern.compile(regex));
2872  }
2873
2874  @Override
2875  public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException {
2876    List<SnapshotDescription> matched = new ArrayList<>();
2877    List<SnapshotDescription> snapshots = listSnapshots();
2878    for (SnapshotDescription snapshot : snapshots) {
2879      if (pattern.matcher(snapshot.getName()).matches()) {
2880        matched.add(snapshot);
2881      }
2882    }
2883    return matched;
2884  }
2885
2886  @Override
2887  public List<SnapshotDescription> listTableSnapshots(String tableNameRegex,
2888    String snapshotNameRegex) throws IOException {
2889    return listTableSnapshots(Pattern.compile(tableNameRegex), Pattern.compile(snapshotNameRegex));
2890  }
2891
2892  @Override
2893  public List<SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
2894    Pattern snapshotNamePattern) throws IOException {
2895    TableName[] tableNames = listTableNames(tableNamePattern);
2896
2897    List<SnapshotDescription> tableSnapshots = new ArrayList<>();
2898    List<SnapshotDescription> snapshots = listSnapshots(snapshotNamePattern);
2899
2900    List<TableName> listOfTableNames = Arrays.asList(tableNames);
2901    for (SnapshotDescription snapshot : snapshots) {
2902      if (listOfTableNames.contains(snapshot.getTableName())) {
2903        tableSnapshots.add(snapshot);
2904      }
2905    }
2906    return tableSnapshots;
2907  }
2908
2909  @Override
2910  public void deleteSnapshot(final byte[] snapshotName) throws IOException {
2911    deleteSnapshot(Bytes.toString(snapshotName));
2912  }
2913
2914  @Override
2915  public void deleteSnapshot(final String snapshotName) throws IOException {
2916    // make sure the snapshot is possibly valid
2917    TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName));
2918    // do the delete
2919    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
2920      @Override
2921      protected Void rpcCall() throws Exception {
2922        master.deleteSnapshot(getRpcController(),
2923          DeleteSnapshotRequest.newBuilder()
2924            .setSnapshot(
2925              SnapshotProtos.SnapshotDescription.newBuilder().setName(snapshotName).build())
2926            .build());
2927        return null;
2928      }
2929    });
2930  }
2931
2932  @Override
2933  public void deleteSnapshots(final String regex) throws IOException {
2934    deleteSnapshots(Pattern.compile(regex));
2935  }
2936
2937  @Override
2938  public void deleteSnapshots(final Pattern pattern) throws IOException {
2939    List<SnapshotDescription> snapshots = listSnapshots(pattern);
2940    for (final SnapshotDescription snapshot : snapshots) {
2941      try {
2942        internalDeleteSnapshot(snapshot);
2943      } catch (IOException ex) {
2944        LOG.info("Failed to delete snapshot " + snapshot.getName() + " for table "
2945          + snapshot.getTableNameAsString(), ex);
2946      }
2947    }
2948  }
2949
2950  private void internalDeleteSnapshot(final SnapshotDescription snapshot) throws IOException {
2951    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
2952      @Override
2953      protected Void rpcCall() throws Exception {
2954        this.master.deleteSnapshot(getRpcController(), DeleteSnapshotRequest.newBuilder()
2955          .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build());
2956        return null;
2957      }
2958    });
2959  }
2960
2961  @Override
2962  public void deleteTableSnapshots(String tableNameRegex, String snapshotNameRegex)
2963    throws IOException {
2964    deleteTableSnapshots(Pattern.compile(tableNameRegex), Pattern.compile(snapshotNameRegex));
2965  }
2966
2967  @Override
2968  public void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern)
2969    throws IOException {
2970    List<SnapshotDescription> snapshots = listTableSnapshots(tableNamePattern, snapshotNamePattern);
2971    for (SnapshotDescription snapshot : snapshots) {
2972      try {
2973        internalDeleteSnapshot(snapshot);
2974        LOG.debug("Successfully deleted snapshot: " + snapshot.getName());
2975      } catch (IOException e) {
2976        LOG.error("Failed to delete snapshot: " + snapshot.getName(), e);
2977      }
2978    }
2979  }
2980
2981  @Override
2982  public void setQuota(final QuotaSettings quota) throws IOException {
2983    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
2984      @Override
2985      protected Void rpcCall() throws Exception {
2986        this.master.setQuota(getRpcController(), QuotaSettings.buildSetQuotaRequestProto(quota));
2987        return null;
2988      }
2989    });
2990  }
2991
2992  @Override
2993  public QuotaRetriever getQuotaRetriever(final QuotaFilter filter) throws IOException {
2994    return QuotaRetriever.open(conf, filter);
2995  }
2996
2997  @Override
2998  public List<QuotaSettings> getQuota(QuotaFilter filter) throws IOException {
2999    List<QuotaSettings> quotas = new ArrayList<>();
3000    try (QuotaRetriever retriever = QuotaRetriever.open(conf, filter)) {
3001      Iterator<QuotaSettings> iterator = retriever.iterator();
3002      while (iterator.hasNext()) {
3003        quotas.add(iterator.next());
3004      }
3005    }
3006    return quotas;
3007  }
3008
3009  private <C extends RetryingCallable<V> & Closeable, V> V executeCallable(C callable)
3010    throws IOException {
3011    return executeCallable(callable, rpcCallerFactory, operationTimeout, rpcTimeout);
3012  }
3013
3014  static private <C extends RetryingCallable<V> & Closeable, V> V executeCallable(C callable,
3015    RpcRetryingCallerFactory rpcCallerFactory, int operationTimeout, int rpcTimeout)
3016    throws IOException {
3017    RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller(rpcTimeout);
3018    try {
3019      return caller.callWithRetries(callable, operationTimeout);
3020    } finally {
3021      callable.close();
3022    }
3023  }
3024
3025  @Override
3026  // Coprocessor Endpoint against the Master.
3027  public CoprocessorRpcChannel coprocessorService() {
3028    return new SyncCoprocessorRpcChannel() {
3029      @Override
3030      protected Message callExecService(final RpcController controller,
3031        final Descriptors.MethodDescriptor method, final Message request,
3032        final Message responsePrototype) throws IOException {
3033        if (LOG.isTraceEnabled()) {
3034          LOG.trace("Call: " + method.getName() + ", " + request.toString());
3035        }
3036        // Try-with-resources so close gets called when we are done.
3037        try (MasterCallable<CoprocessorServiceResponse> callable =
3038          new MasterCallable<CoprocessorServiceResponse>(connection,
3039            connection.getRpcControllerFactory()) {
3040            @Override
3041            protected CoprocessorServiceResponse rpcCall() throws Exception {
3042              CoprocessorServiceRequest csr =
3043                CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request);
3044              return this.master.execMasterService(getRpcController(), csr);
3045            }
3046          }) {
3047          // TODO: Are we retrying here? Does not seem so. We should use RetryingRpcCaller
3048          callable.prepare(false);
3049          int operationTimeout = connection.getConnectionConfiguration().getOperationTimeout();
3050          CoprocessorServiceResponse result = callable.call(operationTimeout);
3051          return CoprocessorRpcUtils.getResponse(result, responsePrototype);
3052        }
3053      }
3054    };
3055  }
3056
3057  @Override
3058  public CoprocessorRpcChannel coprocessorService(final ServerName serverName) {
3059    return new SyncCoprocessorRpcChannel() {
3060      @Override
3061      protected Message callExecService(RpcController controller,
3062        Descriptors.MethodDescriptor method, Message request, Message responsePrototype)
3063        throws IOException {
3064        if (LOG.isTraceEnabled()) {
3065          LOG.trace("Call: " + method.getName() + ", " + request.toString());
3066        }
3067        CoprocessorServiceRequest csr =
3068          CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request);
3069        // TODO: Are we retrying here? Does not seem so. We should use RetryingRpcCaller
3070        // TODO: Make this same as RegionCoprocessorRpcChannel and MasterCoprocessorRpcChannel. They
3071        // are all different though should do same thing; e.g. RpcChannel setup.
3072        ClientProtos.ClientService.BlockingInterface stub = connection.getClient(serverName);
3073        CoprocessorServiceResponse result;
3074        try {
3075          result =
3076            stub.execRegionServerService(connection.getRpcControllerFactory().newController(), csr);
3077          return CoprocessorRpcUtils.getResponse(result, responsePrototype);
3078        } catch (ServiceException e) {
3079          throw ProtobufUtil.handleRemoteException(e);
3080        }
3081      }
3082    };
3083  }
3084
3085  @Override
3086  public void updateConfiguration(final ServerName server) throws IOException {
3087    final AdminService.BlockingInterface admin = this.connection.getAdmin(server);
3088    Callable<Void> callable = new Callable<Void>() {
3089      @Override
3090      public Void call() throws Exception {
3091        admin.updateConfiguration(null, UpdateConfigurationRequest.getDefaultInstance());
3092        return null;
3093      }
3094    };
3095    ProtobufUtil.call(callable);
3096  }
3097
3098  @Override
3099  public void updateConfiguration() throws IOException {
3100    ClusterMetrics status =
3101      getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.MASTER, Option.BACKUP_MASTERS));
3102    for (ServerName server : status.getLiveServerMetrics().keySet()) {
3103      updateConfiguration(server);
3104    }
3105
3106    updateConfiguration(status.getMasterName());
3107
3108    for (ServerName server : status.getBackupMasterNames()) {
3109      updateConfiguration(server);
3110    }
3111  }
3112
3113  @Override
3114  public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException {
3115    return executeCallable(new MasterCallable<Long>(getConnection(), getRpcControllerFactory()) {
3116      @Override
3117      protected Long rpcCall() throws Exception {
3118        MajorCompactionTimestampRequest req = MajorCompactionTimestampRequest.newBuilder()
3119          .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
3120        return master.getLastMajorCompactionTimestamp(getRpcController(), req)
3121          .getCompactionTimestamp();
3122      }
3123    });
3124  }
3125
3126  @Override
3127  public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException {
3128    return executeCallable(new MasterCallable<Long>(getConnection(), getRpcControllerFactory()) {
3129      @Override
3130      protected Long rpcCall() throws Exception {
3131        MajorCompactionTimestampForRegionRequest req =
3132          MajorCompactionTimestampForRegionRequest.newBuilder()
3133            .setRegion(
3134              RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName))
3135            .build();
3136        return master.getLastMajorCompactionTimestampForRegion(getRpcController(), req)
3137          .getCompactionTimestamp();
3138      }
3139    });
3140  }
3141
3142  /**
3143   * {@inheritDoc}
3144   */
3145  @Override
3146  public void compact(final TableName tableName, final byte[] columnFamily, CompactType compactType)
3147    throws IOException, InterruptedException {
3148    compact(tableName, columnFamily, false, compactType);
3149  }
3150
3151  /**
3152   * {@inheritDoc}
3153   */
3154  @Override
3155  public void compact(final TableName tableName, CompactType compactType)
3156    throws IOException, InterruptedException {
3157    compact(tableName, null, false, compactType);
3158  }
3159
3160  /**
3161   * {@inheritDoc}
3162   */
3163  @Override
3164  public void majorCompact(final TableName tableName, final byte[] columnFamily,
3165    CompactType compactType) throws IOException, InterruptedException {
3166    compact(tableName, columnFamily, true, compactType);
3167  }
3168
3169  /**
3170   * {@inheritDoc}
3171   */
3172  @Override
3173  public void majorCompact(final TableName tableName, CompactType compactType)
3174    throws IOException, InterruptedException {
3175    compact(tableName, null, true, compactType);
3176  }
3177
3178  /**
3179   * {@inheritDoc}
3180   */
3181  @Override
3182  public CompactionState getCompactionState(final TableName tableName, CompactType compactType)
3183    throws IOException {
3184    checkTableExists(tableName);
3185    if (!isTableEnabled(tableName)) {
3186      // If the table is disabled, the compaction state of the table should always be NONE
3187      return ProtobufUtil
3188        .createCompactionState(AdminProtos.GetRegionInfoResponse.CompactionState.NONE);
3189    }
3190
3191    AdminProtos.GetRegionInfoResponse.CompactionState state =
3192      AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
3193
3194    // TODO: There is no timeout on this controller. Set one!
3195    HBaseRpcController rpcController = rpcControllerFactory.newController();
3196    switch (compactType) {
3197      case MOB:
3198        final AdminProtos.AdminService.BlockingInterface masterAdmin =
3199          this.connection.getAdminForMaster();
3200        Callable<AdminProtos.GetRegionInfoResponse.CompactionState> callable =
3201          new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {
3202            @Override
3203            public AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
3204              RegionInfo info = RegionInfo.createMobRegionInfo(tableName);
3205              GetRegionInfoRequest request =
3206                RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
3207              GetRegionInfoResponse response = masterAdmin.getRegionInfo(rpcController, request);
3208              return response.getCompactionState();
3209            }
3210          };
3211        state = ProtobufUtil.call(callable);
3212        break;
3213      case NORMAL:
3214        for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) {
3215          ServerName sn = loc.getServerName();
3216          if (sn == null) {
3217            continue;
3218          }
3219          byte[] regionName = loc.getRegion().getRegionName();
3220          AdminService.BlockingInterface snAdmin = this.connection.getAdmin(sn);
3221          try {
3222            Callable<GetRegionInfoResponse> regionInfoCallable =
3223              new Callable<GetRegionInfoResponse>() {
3224                @Override
3225                public GetRegionInfoResponse call() throws Exception {
3226                  GetRegionInfoRequest request =
3227                    RequestConverter.buildGetRegionInfoRequest(regionName, true);
3228                  return snAdmin.getRegionInfo(rpcController, request);
3229                }
3230              };
3231            GetRegionInfoResponse response = ProtobufUtil.call(regionInfoCallable);
3232            switch (response.getCompactionState()) {
3233              case MAJOR_AND_MINOR:
3234                return CompactionState.MAJOR_AND_MINOR;
3235              case MAJOR:
3236                if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MINOR) {
3237                  return CompactionState.MAJOR_AND_MINOR;
3238                }
3239                state = AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR;
3240                break;
3241              case MINOR:
3242                if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR) {
3243                  return CompactionState.MAJOR_AND_MINOR;
3244                }
3245                state = AdminProtos.GetRegionInfoResponse.CompactionState.MINOR;
3246                break;
3247              case NONE:
3248              default: // nothing, continue
3249            }
3250          } catch (NotServingRegionException e) {
3251            if (LOG.isDebugEnabled()) {
3252              LOG.debug("Trying to get compaction state of " + loc.getRegion() + ": "
3253                + StringUtils.stringifyException(e));
3254            }
3255          } catch (RemoteException e) {
3256            if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) {
3257              if (LOG.isDebugEnabled()) {
3258                LOG.debug("Trying to get compaction state of " + loc.getRegion() + ": "
3259                  + StringUtils.stringifyException(e));
3260              }
3261            } else {
3262              throw e;
3263            }
3264          }
3265        }
3266        break;
3267      default:
3268        throw new IllegalArgumentException("Unknown compactType: " + compactType);
3269    }
3270    if (state != null) {
3271      return ProtobufUtil.createCompactionState(state);
3272    }
3273    return null;
3274  }
3275
3276  /**
3277   * Future that waits on a procedure result. Returned by the async version of the Admin calls, and
3278   * used internally by the sync calls to wait on the result of the procedure.
3279   */
3280  @InterfaceAudience.Private
3281  @InterfaceStability.Evolving
3282  protected static class ProcedureFuture<V> implements Future<V> {
3283    private ExecutionException exception = null;
3284    private boolean procResultFound = false;
3285    private boolean done = false;
3286    private boolean cancelled = false;
3287    private V result = null;
3288
3289    private final HBaseAdmin admin;
3290    protected final Long procId;
3291
3292    public ProcedureFuture(final HBaseAdmin admin, final Long procId) {
3293      this.admin = admin;
3294      this.procId = procId;
3295    }
3296
3297    @Override
3298    public boolean cancel(boolean mayInterruptIfRunning) {
3299      AbortProcedureRequest abortProcRequest = AbortProcedureRequest.newBuilder().setProcId(procId)
3300        .setMayInterruptIfRunning(mayInterruptIfRunning).build();
3301      try {
3302        cancelled = abortProcedureResult(abortProcRequest).getIsProcedureAborted();
3303        if (cancelled) {
3304          done = true;
3305        }
3306      } catch (IOException e) {
3307        // Cancell thrown exception for some reason. At this time, we are not sure whether
3308        // the cancell succeeds or fails. We assume that it is failed, but print out a warning
3309        // for debugging purpose.
3310        LOG.warn(
3311          "Cancelling the procedure with procId=" + procId + " throws exception " + e.getMessage(),
3312          e);
3313        cancelled = false;
3314      }
3315      return cancelled;
3316    }
3317
3318    @Override
3319    public boolean isCancelled() {
3320      return cancelled;
3321    }
3322
3323    protected AbortProcedureResponse abortProcedureResult(final AbortProcedureRequest request)
3324      throws IOException {
3325      return admin.executeCallable(new MasterCallable<AbortProcedureResponse>(admin.getConnection(),
3326        admin.getRpcControllerFactory()) {
3327        @Override
3328        protected AbortProcedureResponse rpcCall() throws Exception {
3329          return master.abortProcedure(getRpcController(), request);
3330        }
3331      });
3332    }
3333
3334    @Override
3335    public V get() throws InterruptedException, ExecutionException {
3336      // TODO: should we ever spin forever?
3337      // fix HBASE-21715. TODO: If the function call get() without timeout limit is not allowed,
3338      // is it possible to compose instead of inheriting from the class Future for this class?
3339      try {
3340        return get(admin.getProcedureTimeout, TimeUnit.MILLISECONDS);
3341      } catch (TimeoutException e) {
3342        LOG.warn("Failed to get the procedure with procId=" + procId + " throws exception "
3343          + e.getMessage(), e);
3344        return null;
3345      }
3346    }
3347
3348    @Override
3349    public V get(long timeout, TimeUnit unit)
3350      throws InterruptedException, ExecutionException, TimeoutException {
3351      if (!done) {
3352        long deadlineTs = EnvironmentEdgeManager.currentTime() + unit.toMillis(timeout);
3353        try {
3354          try {
3355            // if the master support procedures, try to wait the result
3356            if (procId != null) {
3357              result = waitProcedureResult(procId, deadlineTs);
3358            }
3359            // if we don't have a proc result, try the compatibility wait
3360            if (!procResultFound) {
3361              result = waitOperationResult(deadlineTs);
3362            }
3363            result = postOperationResult(result, deadlineTs);
3364            done = true;
3365          } catch (IOException e) {
3366            result = postOperationFailure(e, deadlineTs);
3367            done = true;
3368          }
3369        } catch (IOException e) {
3370          exception = new ExecutionException(e);
3371          done = true;
3372        }
3373      }
3374      if (exception != null) {
3375        throw exception;
3376      }
3377      return result;
3378    }
3379
3380    @Override
3381    public boolean isDone() {
3382      return done;
3383    }
3384
3385    protected HBaseAdmin getAdmin() {
3386      return admin;
3387    }
3388
3389    private V waitProcedureResult(long procId, long deadlineTs)
3390      throws IOException, TimeoutException, InterruptedException {
3391      GetProcedureResultRequest request =
3392        GetProcedureResultRequest.newBuilder().setProcId(procId).build();
3393
3394      int tries = 0;
3395      IOException serviceEx = null;
3396      while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
3397        GetProcedureResultResponse response = null;
3398        try {
3399          // Try to fetch the result
3400          response = getProcedureResult(request);
3401        } catch (IOException e) {
3402          serviceEx = unwrapException(e);
3403
3404          // the master may be down
3405          LOG.warn("failed to get the procedure result procId=" + procId, serviceEx);
3406
3407          // Not much to do, if we have a DoNotRetryIOException
3408          if (serviceEx instanceof DoNotRetryIOException) {
3409            // TODO: looks like there is no way to unwrap this exception and get the proper
3410            // UnsupportedOperationException aside from looking at the message.
3411            // anyway, if we fail here we just failover to the compatibility side
3412            // and that is always a valid solution.
3413            LOG.warn("Proc-v2 is unsupported on this master: " + serviceEx.getMessage(), serviceEx);
3414            procResultFound = false;
3415            return null;
3416          }
3417        }
3418
3419        // If the procedure is no longer running, we should have a result
3420        if (response != null && response.getState() != GetProcedureResultResponse.State.RUNNING) {
3421          procResultFound = response.getState() != GetProcedureResultResponse.State.NOT_FOUND;
3422          return convertResult(response);
3423        }
3424
3425        try {
3426          Thread.sleep(getAdmin().getPauseTime(tries++));
3427        } catch (InterruptedException e) {
3428          throw new InterruptedException(
3429            "Interrupted while waiting for the result of proc " + procId);
3430        }
3431      }
3432      if (serviceEx != null) {
3433        throw serviceEx;
3434      } else {
3435        throw new TimeoutException("The procedure " + procId + " is still running");
3436      }
3437    }
3438
3439    private static IOException unwrapException(IOException e) {
3440      if (e instanceof RemoteException) {
3441        return ((RemoteException) e).unwrapRemoteException();
3442      }
3443      return e;
3444    }
3445
3446    protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request)
3447      throws IOException {
3448      return admin.executeCallable(new MasterCallable<GetProcedureResultResponse>(
3449        admin.getConnection(), admin.getRpcControllerFactory()) {
3450        @Override
3451        protected GetProcedureResultResponse rpcCall() throws Exception {
3452          return master.getProcedureResult(getRpcController(), request);
3453        }
3454      });
3455    }
3456
3457    /**
3458     * Convert the procedure result response to a specified type.
3459     * @param response the procedure result object to parse
3460     * @return the result data of the procedure.
3461     */
3462    protected V convertResult(final GetProcedureResultResponse response) throws IOException {
3463      if (response.hasException()) {
3464        throw ForeignExceptionUtil.toIOException(response.getException());
3465      }
3466      return null;
3467    }
3468
3469    /**
3470     * Fallback implementation in case the procedure is not supported by the server. It should try
3471     * to wait until the operation is completed.
3472     * @param deadlineTs the timestamp after which this method should throw a TimeoutException
3473     * @return the result data of the operation
3474     */
3475    protected V waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
3476      return null;
3477    }
3478
3479    /**
3480     * Called after the operation is completed and the result fetched. this allows to perform extra
3481     * steps after the procedure is completed. it allows to apply transformations to the result that
3482     * will be returned by get().
3483     * @param result     the result of the procedure
3484     * @param deadlineTs the timestamp after which this method should throw a TimeoutException
3485     * @return the result of the procedure, which may be the same as the passed one
3486     */
3487    protected V postOperationResult(final V result, final long deadlineTs)
3488      throws IOException, TimeoutException {
3489      return result;
3490    }
3491
3492    /**
3493     * Called after the operation is terminated with a failure. this allows to perform extra steps
3494     * after the procedure is terminated. it allows to apply transformations to the result that will
3495     * be returned by get(). The default implementation will rethrow the exception
3496     * @param exception  the exception got from fetching the result
3497     * @param deadlineTs the timestamp after which this method should throw a TimeoutException
3498     * @return the result of the procedure, which may be the same as the passed one
3499     */
3500    protected V postOperationFailure(final IOException exception, final long deadlineTs)
3501      throws IOException, TimeoutException {
3502      throw exception;
3503    }
3504
3505    protected interface WaitForStateCallable {
3506      boolean checkState(int tries) throws IOException;
3507
3508      void throwInterruptedException() throws InterruptedIOException;
3509
3510      void throwTimeoutException(long elapsed) throws TimeoutException;
3511    }
3512
3513    protected void waitForState(final long deadlineTs, final WaitForStateCallable callable)
3514      throws IOException, TimeoutException {
3515      int tries = 0;
3516      IOException serverEx = null;
3517      long startTime = EnvironmentEdgeManager.currentTime();
3518      while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
3519        serverEx = null;
3520        try {
3521          if (callable.checkState(tries)) {
3522            return;
3523          }
3524        } catch (IOException e) {
3525          serverEx = e;
3526        }
3527        try {
3528          Thread.sleep(getAdmin().getPauseTime(tries++));
3529        } catch (InterruptedException e) {
3530          callable.throwInterruptedException();
3531        }
3532      }
3533      if (serverEx != null) {
3534        throw unwrapException(serverEx);
3535      } else {
3536        callable.throwTimeoutException(EnvironmentEdgeManager.currentTime() - startTime);
3537      }
3538    }
3539  }
3540
3541  @InterfaceAudience.Private
3542  @InterfaceStability.Evolving
3543  protected static abstract class TableFuture<V> extends ProcedureFuture<V> {
3544    private final TableName tableName;
3545
3546    public TableFuture(final HBaseAdmin admin, final TableName tableName, final Long procId) {
3547      super(admin, procId);
3548      this.tableName = tableName;
3549    }
3550
3551    @Override
3552    public String toString() {
3553      return getDescription();
3554    }
3555
3556    /** Returns the table name */
3557    protected TableName getTableName() {
3558      return tableName;
3559    }
3560
3561    /** Returns the table descriptor */
3562    protected TableDescriptor getTableDescriptor() throws IOException {
3563      return getAdmin().getDescriptor(getTableName());
3564    }
3565
3566    /** Returns the operation type like CREATE, DELETE, DISABLE etc. */
3567    public abstract String getOperationType();
3568
3569    /** Returns a description of the operation */
3570    protected String getDescription() {
3571      return "Operation: " + getOperationType() + ", " + "Table Name: "
3572        + tableName.getNameWithNamespaceInclAsString() + ", procId: " + procId;
3573    }
3574
3575    protected abstract class TableWaitForStateCallable implements WaitForStateCallable {
3576      @Override
3577      public void throwInterruptedException() throws InterruptedIOException {
3578        throw new InterruptedIOException("Interrupted while waiting for " + getDescription());
3579      }
3580
3581      @Override
3582      public void throwTimeoutException(long elapsedTime) throws TimeoutException {
3583        throw new TimeoutException(
3584          getDescription() + " has not completed after " + elapsedTime + "ms");
3585      }
3586    }
3587
3588    @Override
3589    protected V postOperationResult(final V result, final long deadlineTs)
3590      throws IOException, TimeoutException {
3591      LOG.info(getDescription() + " completed");
3592      return super.postOperationResult(result, deadlineTs);
3593    }
3594
3595    @Override
3596    protected V postOperationFailure(final IOException exception, final long deadlineTs)
3597      throws IOException, TimeoutException {
3598      LOG.info(getDescription() + " failed with " + exception.getMessage());
3599      return super.postOperationFailure(exception, deadlineTs);
3600    }
3601
3602    protected void waitForTableEnabled(final long deadlineTs) throws IOException, TimeoutException {
3603      waitForState(deadlineTs, new TableWaitForStateCallable() {
3604        @Override
3605        public boolean checkState(int tries) throws IOException {
3606          try {
3607            if (getAdmin().isTableAvailable(tableName)) {
3608              return true;
3609            }
3610          } catch (TableNotFoundException tnfe) {
3611            LOG.debug("Table " + tableName.getNameWithNamespaceInclAsString()
3612              + " was not enabled, sleeping. tries=" + tries);
3613          }
3614          return false;
3615        }
3616      });
3617    }
3618
3619    protected void waitForTableDisabled(final long deadlineTs)
3620      throws IOException, TimeoutException {
3621      waitForState(deadlineTs, new TableWaitForStateCallable() {
3622        @Override
3623        public boolean checkState(int tries) throws IOException {
3624          return getAdmin().isTableDisabled(tableName);
3625        }
3626      });
3627    }
3628
3629    protected void waitTableNotFound(final long deadlineTs) throws IOException, TimeoutException {
3630      waitForState(deadlineTs, new TableWaitForStateCallable() {
3631        @Override
3632        public boolean checkState(int tries) throws IOException {
3633          return !getAdmin().tableExists(tableName);
3634        }
3635      });
3636    }
3637
3638    protected void waitForSchemaUpdate(final long deadlineTs) throws IOException, TimeoutException {
3639      waitForState(deadlineTs, new TableWaitForStateCallable() {
3640        @Override
3641        public boolean checkState(int tries) throws IOException {
3642          return getAdmin().getAlterStatus(tableName).getFirst() == 0;
3643        }
3644      });
3645    }
3646
3647    protected void waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
3648      throws IOException, TimeoutException {
3649      final TableDescriptor desc = getTableDescriptor();
3650      final AtomicInteger actualRegCount = new AtomicInteger(0);
3651      final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
3652        @Override
3653        public boolean visit(Result rowResult) throws IOException {
3654          RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
3655          if (list == null) {
3656            LOG.warn("No serialized HRegionInfo in " + rowResult);
3657            return true;
3658          }
3659          HRegionLocation l = list.getRegionLocation();
3660          if (l == null) {
3661            return true;
3662          }
3663          if (!l.getRegionInfo().getTable().equals(desc.getTableName())) {
3664            return false;
3665          }
3666          if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true;
3667          HRegionLocation[] locations = list.getRegionLocations();
3668          for (HRegionLocation location : locations) {
3669            if (location == null) continue;
3670            ServerName serverName = location.getServerName();
3671            // Make sure that regions are assigned to server
3672            if (serverName != null && serverName.getAddress() != null) {
3673              actualRegCount.incrementAndGet();
3674            }
3675          }
3676          return true;
3677        }
3678      };
3679
3680      int tries = 0;
3681      int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication();
3682      while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
3683        actualRegCount.set(0);
3684        MetaTableAccessor.scanMetaForTableRegions(getAdmin().getConnection(), visitor,
3685          desc.getTableName());
3686        if (actualRegCount.get() == numRegs) {
3687          // all the regions are online
3688          return;
3689        }
3690
3691        try {
3692          Thread.sleep(getAdmin().getPauseTime(tries++));
3693        } catch (InterruptedException e) {
3694          throw new InterruptedIOException("Interrupted when opening" + " regions; "
3695            + actualRegCount.get() + " of " + numRegs + " regions processed so far");
3696        }
3697      }
3698      throw new TimeoutException("Only " + actualRegCount.get() + " of " + numRegs
3699        + " regions are online; retries exhausted.");
3700    }
3701  }
3702
3703  @InterfaceAudience.Private
3704  @InterfaceStability.Evolving
3705  protected static abstract class NamespaceFuture extends ProcedureFuture<Void> {
3706    private final String namespaceName;
3707
3708    public NamespaceFuture(final HBaseAdmin admin, final String namespaceName, final Long procId) {
3709      super(admin, procId);
3710      this.namespaceName = namespaceName;
3711    }
3712
3713    /** Returns the namespace name */
3714    protected String getNamespaceName() {
3715      return namespaceName;
3716    }
3717
3718    /** Returns the operation type like CREATE_NAMESPACE, DELETE_NAMESPACE, etc. */
3719    public abstract String getOperationType();
3720
3721    @Override
3722    public String toString() {
3723      return "Operation: " + getOperationType() + ", Namespace: " + getNamespaceName();
3724    }
3725  }
3726
3727  @InterfaceAudience.Private
3728  @InterfaceStability.Evolving
3729  private static class ReplicationFuture extends ProcedureFuture<Void> {
3730    private final String peerId;
3731    private final Supplier<String> getOperation;
3732
3733    public ReplicationFuture(HBaseAdmin admin, String peerId, Long procId,
3734      Supplier<String> getOperation) {
3735      super(admin, procId);
3736      this.peerId = peerId;
3737      this.getOperation = getOperation;
3738    }
3739
3740    @Override
3741    public String toString() {
3742      return "Operation: " + getOperation.get() + ", peerId: " + peerId;
3743    }
3744  }
3745
3746  @Override
3747  public List<SecurityCapability> getSecurityCapabilities() throws IOException {
3748    try {
3749      return executeCallable(
3750        new MasterCallable<List<SecurityCapability>>(getConnection(), getRpcControllerFactory()) {
3751          @Override
3752          protected List<SecurityCapability> rpcCall() throws Exception {
3753            SecurityCapabilitiesRequest req = SecurityCapabilitiesRequest.newBuilder().build();
3754            return ProtobufUtil.toSecurityCapabilityList(
3755              master.getSecurityCapabilities(getRpcController(), req).getCapabilitiesList());
3756          }
3757        });
3758    } catch (IOException e) {
3759      if (e instanceof RemoteException) {
3760        e = ((RemoteException) e).unwrapRemoteException();
3761      }
3762      throw e;
3763    }
3764  }
3765
3766  @Override
3767  public boolean splitSwitch(boolean enabled, boolean synchronous) throws IOException {
3768    return splitOrMergeSwitch(enabled, synchronous, MasterSwitchType.SPLIT);
3769  }
3770
3771  @Override
3772  public boolean mergeSwitch(boolean enabled, boolean synchronous) throws IOException {
3773    return splitOrMergeSwitch(enabled, synchronous, MasterSwitchType.MERGE);
3774  }
3775
3776  private boolean splitOrMergeSwitch(boolean enabled, boolean synchronous,
3777    MasterSwitchType switchType) throws IOException {
3778    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
3779      @Override
3780      protected Boolean rpcCall() throws Exception {
3781        MasterProtos.SetSplitOrMergeEnabledResponse response =
3782          master.setSplitOrMergeEnabled(getRpcController(),
3783            RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchType));
3784        return response.getPrevValueList().get(0);
3785      }
3786    });
3787  }
3788
3789  @Override
3790  public boolean isSplitEnabled() throws IOException {
3791    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
3792      @Override
3793      protected Boolean rpcCall() throws Exception {
3794        return master
3795          .isSplitOrMergeEnabled(getRpcController(),
3796            RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.SPLIT))
3797          .getEnabled();
3798      }
3799    });
3800  }
3801
3802  @Override
3803  public boolean isMergeEnabled() throws IOException {
3804    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
3805      @Override
3806      protected Boolean rpcCall() throws Exception {
3807        return master
3808          .isSplitOrMergeEnabled(getRpcController(),
3809            RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.MERGE))
3810          .getEnabled();
3811      }
3812    });
3813  }
3814
3815  private RpcControllerFactory getRpcControllerFactory() {
3816    return this.rpcControllerFactory;
3817  }
3818
3819  @Override
3820  public Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig,
3821    boolean enabled) throws IOException {
3822    AddReplicationPeerResponse response = executeCallable(
3823      new MasterCallable<AddReplicationPeerResponse>(getConnection(), getRpcControllerFactory()) {
3824        @Override
3825        protected AddReplicationPeerResponse rpcCall() throws Exception {
3826          return master.addReplicationPeer(getRpcController(),
3827            RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled));
3828        }
3829      });
3830    return new ReplicationFuture(this, peerId, response.getProcId(), () -> "ADD_REPLICATION_PEER");
3831  }
3832
3833  @Override
3834  public Future<Void> removeReplicationPeerAsync(String peerId) throws IOException {
3835    RemoveReplicationPeerResponse response =
3836      executeCallable(new MasterCallable<RemoveReplicationPeerResponse>(getConnection(),
3837        getRpcControllerFactory()) {
3838        @Override
3839        protected RemoveReplicationPeerResponse rpcCall() throws Exception {
3840          return master.removeReplicationPeer(getRpcController(),
3841            RequestConverter.buildRemoveReplicationPeerRequest(peerId));
3842        }
3843      });
3844    return new ReplicationFuture(this, peerId, response.getProcId(),
3845      () -> "REMOVE_REPLICATION_PEER");
3846  }
3847
3848  @Override
3849  public Future<Void> enableReplicationPeerAsync(final String peerId) throws IOException {
3850    EnableReplicationPeerResponse response =
3851      executeCallable(new MasterCallable<EnableReplicationPeerResponse>(getConnection(),
3852        getRpcControllerFactory()) {
3853        @Override
3854        protected EnableReplicationPeerResponse rpcCall() throws Exception {
3855          return master.enableReplicationPeer(getRpcController(),
3856            RequestConverter.buildEnableReplicationPeerRequest(peerId));
3857        }
3858      });
3859    return new ReplicationFuture(this, peerId, response.getProcId(),
3860      () -> "ENABLE_REPLICATION_PEER");
3861  }
3862
3863  @Override
3864  public Future<Void> disableReplicationPeerAsync(final String peerId) throws IOException {
3865    DisableReplicationPeerResponse response =
3866      executeCallable(new MasterCallable<DisableReplicationPeerResponse>(getConnection(),
3867        getRpcControllerFactory()) {
3868        @Override
3869        protected DisableReplicationPeerResponse rpcCall() throws Exception {
3870          return master.disableReplicationPeer(getRpcController(),
3871            RequestConverter.buildDisableReplicationPeerRequest(peerId));
3872        }
3873      });
3874    return new ReplicationFuture(this, peerId, response.getProcId(),
3875      () -> "DISABLE_REPLICATION_PEER");
3876  }
3877
3878  @Override
3879  public ReplicationPeerConfig getReplicationPeerConfig(final String peerId) throws IOException {
3880    return executeCallable(
3881      new MasterCallable<ReplicationPeerConfig>(getConnection(), getRpcControllerFactory()) {
3882        @Override
3883        protected ReplicationPeerConfig rpcCall() throws Exception {
3884          GetReplicationPeerConfigResponse response = master.getReplicationPeerConfig(
3885            getRpcController(), RequestConverter.buildGetReplicationPeerConfigRequest(peerId));
3886          return ReplicationPeerConfigUtil.convert(response.getPeerConfig());
3887        }
3888      });
3889  }
3890
3891  @Override
3892  public Future<Void> updateReplicationPeerConfigAsync(final String peerId,
3893    final ReplicationPeerConfig peerConfig) throws IOException {
3894    UpdateReplicationPeerConfigResponse response =
3895      executeCallable(new MasterCallable<UpdateReplicationPeerConfigResponse>(getConnection(),
3896        getRpcControllerFactory()) {
3897        @Override
3898        protected UpdateReplicationPeerConfigResponse rpcCall() throws Exception {
3899          return master.updateReplicationPeerConfig(getRpcController(),
3900            RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig));
3901        }
3902      });
3903    return new ReplicationFuture(this, peerId, response.getProcId(),
3904      () -> "UPDATE_REPLICATION_PEER_CONFIG");
3905  }
3906
3907  @Override
3908  public List<ReplicationPeerDescription> listReplicationPeers() throws IOException {
3909    return listReplicationPeers((Pattern) null);
3910  }
3911
3912  @Override
3913  public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws IOException {
3914    return executeCallable(new MasterCallable<List<ReplicationPeerDescription>>(getConnection(),
3915      getRpcControllerFactory()) {
3916      @Override
3917      protected List<ReplicationPeerDescription> rpcCall() throws Exception {
3918        List<ReplicationProtos.ReplicationPeerDescription> peersList =
3919          master.listReplicationPeers(getRpcController(),
3920            RequestConverter.buildListReplicationPeersRequest(pattern)).getPeerDescList();
3921        List<ReplicationPeerDescription> result = new ArrayList<>(peersList.size());
3922        for (ReplicationProtos.ReplicationPeerDescription peer : peersList) {
3923          result.add(ReplicationPeerConfigUtil.toReplicationPeerDescription(peer));
3924        }
3925        return result;
3926      }
3927    });
3928  }
3929
3930  @Override
3931  public void decommissionRegionServers(List<ServerName> servers, boolean offload)
3932    throws IOException {
3933    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
3934      @Override
3935      public Void rpcCall() throws ServiceException {
3936        master.decommissionRegionServers(getRpcController(),
3937          RequestConverter.buildDecommissionRegionServersRequest(servers, offload));
3938        return null;
3939      }
3940    });
3941  }
3942
3943  @Override
3944  public List<ServerName> listDecommissionedRegionServers() throws IOException {
3945    return executeCallable(
3946      new MasterCallable<List<ServerName>>(getConnection(), getRpcControllerFactory()) {
3947        @Override
3948        public List<ServerName> rpcCall() throws ServiceException {
3949          ListDecommissionedRegionServersRequest req =
3950            ListDecommissionedRegionServersRequest.newBuilder().build();
3951          List<ServerName> servers = new ArrayList<>();
3952          for (HBaseProtos.ServerName server : master
3953            .listDecommissionedRegionServers(getRpcController(), req).getServerNameList()) {
3954            servers.add(ProtobufUtil.toServerName(server));
3955          }
3956          return servers;
3957        }
3958      });
3959  }
3960
3961  @Override
3962  public void recommissionRegionServer(ServerName server, List<byte[]> encodedRegionNames)
3963    throws IOException {
3964    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
3965      @Override
3966      public Void rpcCall() throws ServiceException {
3967        master.recommissionRegionServer(getRpcController(),
3968          RequestConverter.buildRecommissionRegionServerRequest(server, encodedRegionNames));
3969        return null;
3970      }
3971    });
3972  }
3973
3974  @Override
3975  public List<TableCFs> listReplicatedTableCFs() throws IOException {
3976    List<TableCFs> replicatedTableCFs = new ArrayList<>();
3977    List<TableDescriptor> tables = listTableDescriptors();
3978    tables.forEach(table -> {
3979      Map<String, Integer> cfs = new HashMap<>();
3980      Stream.of(table.getColumnFamilies())
3981        .filter(column -> column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL)
3982        .forEach(column -> {
3983          cfs.put(column.getNameAsString(), column.getScope());
3984        });
3985      if (!cfs.isEmpty()) {
3986        replicatedTableCFs.add(new TableCFs(table.getTableName(), cfs));
3987      }
3988    });
3989    return replicatedTableCFs;
3990  }
3991
3992  @Override
3993  public void enableTableReplication(final TableName tableName) throws IOException {
3994    if (tableName == null) {
3995      throw new IllegalArgumentException("Table name cannot be null");
3996    }
3997    if (!tableExists(tableName)) {
3998      throw new TableNotFoundException(
3999        "Table '" + tableName.getNameAsString() + "' does not exists.");
4000    }
4001    byte[][] splits = getTableSplits(tableName);
4002    checkAndSyncTableDescToPeers(tableName, splits);
4003    setTableRep(tableName, true);
4004  }
4005
4006  @Override
4007  public void disableTableReplication(final TableName tableName) throws IOException {
4008    if (tableName == null) {
4009      throw new IllegalArgumentException("Table name is null");
4010    }
4011    if (!tableExists(tableName)) {
4012      throw new TableNotFoundException(
4013        "Table '" + tableName.getNameAsString() + "' does not exists.");
4014    }
4015    setTableRep(tableName, false);
4016  }
4017
4018  /**
4019   * Connect to peer and check the table descriptor on peer:
4020   * <ol>
4021   * <li>Create the same table on peer when not exist.</li>
4022   * <li>Throw an exception if the table already has replication enabled on any of the column
4023   * families.</li>
4024   * <li>Throw an exception if the table exists on peer cluster but descriptors are not same.</li>
4025   * </ol>
4026   * @param tableName name of the table to sync to the peer
4027   * @param splits    table split keys
4028   * @throws IOException if a remote or network exception occurs
4029   */
4030  private void checkAndSyncTableDescToPeers(final TableName tableName, final byte[][] splits)
4031    throws IOException {
4032    List<ReplicationPeerDescription> peers = listReplicationPeers();
4033    if (peers == null || peers.size() <= 0) {
4034      throw new IllegalArgumentException("Found no peer cluster for replication.");
4035    }
4036
4037    for (ReplicationPeerDescription peerDesc : peers) {
4038      if (peerDesc.getPeerConfig().needToReplicate(tableName)) {
4039        Configuration peerConf =
4040          ReplicationPeerConfigUtil.getPeerClusterConfiguration(this.conf, peerDesc);
4041        try (Connection conn = ConnectionFactory.createConnection(peerConf);
4042          Admin repHBaseAdmin = conn.getAdmin()) {
4043          TableDescriptor tableDesc = getDescriptor(tableName);
4044          TableDescriptor peerTableDesc = null;
4045          if (!repHBaseAdmin.tableExists(tableName)) {
4046            repHBaseAdmin.createTable(tableDesc, splits);
4047          } else {
4048            peerTableDesc = repHBaseAdmin.getDescriptor(tableName);
4049            if (peerTableDesc == null) {
4050              throw new IllegalArgumentException("Failed to get table descriptor for table "
4051                + tableName.getNameAsString() + " from peer cluster " + peerDesc.getPeerId());
4052            }
4053            if (
4054              TableDescriptor.COMPARATOR_IGNORE_REPLICATION.compare(peerTableDesc, tableDesc) != 0
4055            ) {
4056              throw new IllegalArgumentException("Table " + tableName.getNameAsString()
4057                + " exists in peer cluster " + peerDesc.getPeerId()
4058                + ", but the table descriptors are not same when compared with source cluster."
4059                + " Thus can not enable the table's replication switch.");
4060            }
4061          }
4062        }
4063      }
4064    }
4065  }
4066
4067  /**
4068   * Set the table's replication switch if the table's replication switch is already not set.
4069   * @param tableName name of the table
4070   * @param enableRep is replication switch enable or disable
4071   * @throws IOException if a remote or network exception occurs
4072   */
4073  private void setTableRep(final TableName tableName, boolean enableRep) throws IOException {
4074    TableDescriptor tableDesc = getDescriptor(tableName);
4075    if (!tableDesc.matchReplicationScope(enableRep)) {
4076      int scope =
4077        enableRep ? HConstants.REPLICATION_SCOPE_GLOBAL : HConstants.REPLICATION_SCOPE_LOCAL;
4078      modifyTable(TableDescriptorBuilder.newBuilder(tableDesc).setReplicationScope(scope).build());
4079    }
4080  }
4081
4082  @Override
4083  public void clearCompactionQueues(final ServerName sn, final Set<String> queues)
4084    throws IOException, InterruptedException {
4085    if (queues == null || queues.size() == 0) {
4086      throw new IllegalArgumentException("queues cannot be null or empty");
4087    }
4088    final AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
4089    Callable<Void> callable = new Callable<Void>() {
4090      @Override
4091      public Void call() throws Exception {
4092        // TODO: There is no timeout on this controller. Set one!
4093        HBaseRpcController controller = rpcControllerFactory.newController();
4094        ClearCompactionQueuesRequest request =
4095          RequestConverter.buildClearCompactionQueuesRequest(queues);
4096        admin.clearCompactionQueues(controller, request);
4097        return null;
4098      }
4099    };
4100    ProtobufUtil.call(callable);
4101  }
4102
4103  @Override
4104  public List<ServerName> clearDeadServers(List<ServerName> servers) throws IOException {
4105    return executeCallable(
4106      new MasterCallable<List<ServerName>>(getConnection(), getRpcControllerFactory()) {
4107        @Override
4108        protected List<ServerName> rpcCall() throws Exception {
4109          ClearDeadServersRequest req = RequestConverter
4110            .buildClearDeadServersRequest(servers == null ? Collections.EMPTY_LIST : servers);
4111          return ProtobufUtil
4112            .toServerNameList(master.clearDeadServers(getRpcController(), req).getServerNameList());
4113        }
4114      });
4115  }
4116
4117  @Override
4118  public void cloneTableSchema(final TableName tableName, final TableName newTableName,
4119    final boolean preserveSplits) throws IOException {
4120    checkTableExists(tableName);
4121    if (tableExists(newTableName)) {
4122      throw new TableExistsException(newTableName);
4123    }
4124    TableDescriptor htd = TableDescriptorBuilder.copy(newTableName, getTableDescriptor(tableName));
4125    if (preserveSplits) {
4126      createTable(htd, getTableSplits(tableName));
4127    } else {
4128      createTable(htd);
4129    }
4130  }
4131
4132  @Override
4133  public boolean switchRpcThrottle(final boolean enable) throws IOException {
4134    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
4135      @Override
4136      protected Boolean rpcCall() throws Exception {
4137        return this.master.switchRpcThrottle(getRpcController(),
4138          MasterProtos.SwitchRpcThrottleRequest.newBuilder().setRpcThrottleEnabled(enable).build())
4139          .getPreviousRpcThrottleEnabled();
4140      }
4141    });
4142  }
4143
4144  @Override
4145  public boolean isRpcThrottleEnabled() throws IOException {
4146    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
4147      @Override
4148      protected Boolean rpcCall() throws Exception {
4149        return this.master.isRpcThrottleEnabled(getRpcController(),
4150          IsRpcThrottleEnabledRequest.newBuilder().build()).getRpcThrottleEnabled();
4151      }
4152    });
4153  }
4154
4155  @Override
4156  public boolean exceedThrottleQuotaSwitch(final boolean enable) throws IOException {
4157    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
4158      @Override
4159      protected Boolean rpcCall() throws Exception {
4160        return this.master
4161          .switchExceedThrottleQuota(getRpcController(),
4162            MasterProtos.SwitchExceedThrottleQuotaRequest.newBuilder()
4163              .setExceedThrottleQuotaEnabled(enable).build())
4164          .getPreviousExceedThrottleQuotaEnabled();
4165      }
4166    });
4167  }
4168
4169  @Override
4170  public Map<TableName, Long> getSpaceQuotaTableSizes() throws IOException {
4171    return executeCallable(
4172      new MasterCallable<Map<TableName, Long>>(getConnection(), getRpcControllerFactory()) {
4173        @Override
4174        protected Map<TableName, Long> rpcCall() throws Exception {
4175          GetSpaceQuotaRegionSizesResponse resp = master.getSpaceQuotaRegionSizes(
4176            getRpcController(), RequestConverter.buildGetSpaceQuotaRegionSizesRequest());
4177          Map<TableName, Long> tableSizes = new HashMap<>();
4178          for (RegionSizes sizes : resp.getSizesList()) {
4179            TableName tn = ProtobufUtil.toTableName(sizes.getTableName());
4180            tableSizes.put(tn, sizes.getSize());
4181          }
4182          return tableSizes;
4183        }
4184      });
4185  }
4186
4187  @Override
4188  public Map<TableName, SpaceQuotaSnapshot>
4189    getRegionServerSpaceQuotaSnapshots(ServerName serverName) throws IOException {
4190    final AdminService.BlockingInterface admin = this.connection.getAdmin(serverName);
4191    Callable<GetSpaceQuotaSnapshotsResponse> callable =
4192      new Callable<GetSpaceQuotaSnapshotsResponse>() {
4193        @Override
4194        public GetSpaceQuotaSnapshotsResponse call() throws Exception {
4195          return admin.getSpaceQuotaSnapshots(rpcControllerFactory.newController(),
4196            RequestConverter.buildGetSpaceQuotaSnapshotsRequest());
4197        }
4198      };
4199    GetSpaceQuotaSnapshotsResponse resp = ProtobufUtil.call(callable);
4200    Map<TableName, SpaceQuotaSnapshot> snapshots = new HashMap<>();
4201    for (TableQuotaSnapshot snapshot : resp.getSnapshotsList()) {
4202      snapshots.put(ProtobufUtil.toTableName(snapshot.getTableName()),
4203        SpaceQuotaSnapshot.toSpaceQuotaSnapshot(snapshot.getSnapshot()));
4204    }
4205    return snapshots;
4206  }
4207
4208  @Override
4209  public SpaceQuotaSnapshot getCurrentSpaceQuotaSnapshot(String namespace) throws IOException {
4210    return executeCallable(
4211      new MasterCallable<SpaceQuotaSnapshot>(getConnection(), getRpcControllerFactory()) {
4212        @Override
4213        protected SpaceQuotaSnapshot rpcCall() throws Exception {
4214          GetQuotaStatesResponse resp = master.getQuotaStates(getRpcController(),
4215            RequestConverter.buildGetQuotaStatesRequest());
4216          for (GetQuotaStatesResponse.NamespaceQuotaSnapshot nsSnapshot : resp
4217            .getNsSnapshotsList()) {
4218            if (namespace.equals(nsSnapshot.getNamespace())) {
4219              return SpaceQuotaSnapshot.toSpaceQuotaSnapshot(nsSnapshot.getSnapshot());
4220            }
4221          }
4222          return null;
4223        }
4224      });
4225  }
4226
4227  @Override
4228  public SpaceQuotaSnapshot getCurrentSpaceQuotaSnapshot(TableName tableName) throws IOException {
4229    return executeCallable(
4230      new MasterCallable<SpaceQuotaSnapshot>(getConnection(), getRpcControllerFactory()) {
4231        @Override
4232        protected SpaceQuotaSnapshot rpcCall() throws Exception {
4233          GetQuotaStatesResponse resp = master.getQuotaStates(getRpcController(),
4234            RequestConverter.buildGetQuotaStatesRequest());
4235          HBaseProtos.TableName protoTableName = ProtobufUtil.toProtoTableName(tableName);
4236          for (GetQuotaStatesResponse.TableQuotaSnapshot tableSnapshot : resp
4237            .getTableSnapshotsList()) {
4238            if (protoTableName.equals(tableSnapshot.getTableName())) {
4239              return SpaceQuotaSnapshot.toSpaceQuotaSnapshot(tableSnapshot.getSnapshot());
4240            }
4241          }
4242          return null;
4243        }
4244      });
4245  }
4246
4247  @Override
4248  public void grant(UserPermission userPermission, boolean mergeExistingPermissions)
4249    throws IOException {
4250    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
4251      @Override
4252      protected Void rpcCall() throws Exception {
4253        GrantRequest req =
4254          ShadedAccessControlUtil.buildGrantRequest(userPermission, mergeExistingPermissions);
4255        this.master.grant(getRpcController(), req);
4256        return null;
4257      }
4258    });
4259  }
4260
4261  @Override
4262  public void revoke(UserPermission userPermission) throws IOException {
4263    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
4264      @Override
4265      protected Void rpcCall() throws Exception {
4266        RevokeRequest req = ShadedAccessControlUtil.buildRevokeRequest(userPermission);
4267        this.master.revoke(getRpcController(), req);
4268        return null;
4269      }
4270    });
4271  }
4272
4273  @Override
4274  public List<UserPermission>
4275    getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) throws IOException {
4276    return executeCallable(
4277      new MasterCallable<List<UserPermission>>(getConnection(), getRpcControllerFactory()) {
4278        @Override
4279        protected List<UserPermission> rpcCall() throws Exception {
4280          AccessControlProtos.GetUserPermissionsRequest req =
4281            ShadedAccessControlUtil.buildGetUserPermissionsRequest(getUserPermissionsRequest);
4282          AccessControlProtos.GetUserPermissionsResponse response =
4283            this.master.getUserPermissions(getRpcController(), req);
4284          return response.getUserPermissionList().stream()
4285            .map(userPermission -> ShadedAccessControlUtil.toUserPermission(userPermission))
4286            .collect(Collectors.toList());
4287        }
4288      });
4289  }
4290
4291  @Override
4292  public Future<Void> splitRegionAsync(byte[] regionName) throws IOException {
4293    return splitRegionAsync(regionName, null);
4294  }
4295
4296  @Override
4297  public Future<Void> createTableAsync(TableDescriptor desc) throws IOException {
4298    return createTableAsync(desc, null);
4299  }
4300
4301  @Override
4302  public List<Boolean> hasUserPermissions(String userName, List<Permission> permissions)
4303    throws IOException {
4304    return executeCallable(
4305      new MasterCallable<List<Boolean>>(getConnection(), getRpcControllerFactory()) {
4306        @Override
4307        protected List<Boolean> rpcCall() throws Exception {
4308          HasUserPermissionsRequest request =
4309            ShadedAccessControlUtil.buildHasUserPermissionsRequest(userName, permissions);
4310          return this.master.hasUserPermissions(getRpcController(), request)
4311            .getHasUserPermissionList();
4312        }
4313      });
4314  }
4315
4316  @Override
4317  public boolean snapshotCleanupSwitch(boolean on, boolean synchronous) throws IOException {
4318    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
4319
4320      @Override
4321      protected Boolean rpcCall() throws Exception {
4322        SetSnapshotCleanupRequest req =
4323          RequestConverter.buildSetSnapshotCleanupRequest(on, synchronous);
4324        return master.switchSnapshotCleanup(getRpcController(), req).getPrevSnapshotCleanup();
4325      }
4326    });
4327
4328  }
4329
4330  @Override
4331  public boolean isSnapshotCleanupEnabled() throws IOException {
4332    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
4333
4334      @Override
4335      protected Boolean rpcCall() throws Exception {
4336        IsSnapshotCleanupEnabledRequest req =
4337          RequestConverter.buildIsSnapshotCleanupEnabledRequest();
4338        return master.isSnapshotCleanupEnabled(getRpcController(), req).getEnabled();
4339      }
4340    });
4341
4342  }
4343
4344  private List<LogEntry> getSlowLogResponses(final Map<String, Object> filterParams,
4345    final Set<ServerName> serverNames, final int limit, final String logType) {
4346    if (CollectionUtils.isEmpty(serverNames)) {
4347      return Collections.emptyList();
4348    }
4349    return serverNames.stream().map(serverName -> {
4350      try {
4351        return getSlowLogResponseFromServer(serverName, filterParams, limit, logType);
4352      } catch (IOException e) {
4353        throw new RuntimeException(e);
4354      }
4355    }).flatMap(List::stream).collect(Collectors.toList());
4356  }
4357
4358  private List<LogEntry> getSlowLogResponseFromServer(ServerName serverName,
4359    Map<String, Object> filterParams, int limit, String logType) throws IOException {
4360    AdminService.BlockingInterface admin = this.connection.getAdmin(serverName);
4361    return executeCallable(new RpcRetryingCallable<List<LogEntry>>() {
4362      @Override
4363      protected List<LogEntry> rpcCall(int callTimeout) throws Exception {
4364        HBaseRpcController controller = rpcControllerFactory.newController();
4365        HBaseProtos.LogRequest logRequest =
4366          RequestConverter.buildSlowLogResponseRequest(filterParams, limit, logType);
4367        HBaseProtos.LogEntry logEntry = admin.getLogEntries(controller, logRequest);
4368        return ProtobufUtil.toSlowLogPayloads(logEntry);
4369      }
4370    });
4371  }
4372
4373  @Override
4374  public List<Boolean> clearSlowLogResponses(@Nullable final Set<ServerName> serverNames)
4375    throws IOException {
4376    if (CollectionUtils.isEmpty(serverNames)) {
4377      return Collections.emptyList();
4378    }
4379    return serverNames.stream().map(serverName -> {
4380      try {
4381        return clearSlowLogsResponses(serverName);
4382      } catch (IOException e) {
4383        throw new RuntimeException(e);
4384      }
4385    }).collect(Collectors.toList());
4386  }
4387
4388  @Override
4389  public List<LogEntry> getLogEntries(Set<ServerName> serverNames, String logType,
4390    ServerType serverType, int limit, Map<String, Object> filterParams) throws IOException {
4391    if (logType == null || serverType == null) {
4392      throw new IllegalArgumentException("logType and/or serverType cannot be empty");
4393    }
4394    if (logType.equals("SLOW_LOG") || logType.equals("LARGE_LOG")) {
4395      if (ServerType.MASTER.equals(serverType)) {
4396        throw new IllegalArgumentException("Slow/Large logs are not maintained by HMaster");
4397      }
4398      return getSlowLogResponses(filterParams, serverNames, limit, logType);
4399    } else if (logType.equals("BALANCER_DECISION")) {
4400      if (ServerType.REGION_SERVER.equals(serverType)) {
4401        throw new IllegalArgumentException(
4402          "Balancer Decision logs are not maintained by HRegionServer");
4403      }
4404      return getBalancerDecisions(limit);
4405    } else if (logType.equals("BALANCER_REJECTION")) {
4406      if (ServerType.REGION_SERVER.equals(serverType)) {
4407        throw new IllegalArgumentException(
4408          "Balancer Rejection logs are not maintained by HRegionServer");
4409      }
4410      return getBalancerRejections(limit);
4411    }
4412    return Collections.emptyList();
4413  }
4414
4415  @Override
4416  public void flushMasterStore() throws IOException {
4417    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
4418      @Override
4419      protected Void rpcCall() throws Exception {
4420        FlushMasterStoreRequest request = FlushMasterStoreRequest.newBuilder().build();
4421        master.flushMasterStore(getRpcController(), request);
4422        return null;
4423      }
4424    });
4425  }
4426
4427  private List<LogEntry> getBalancerDecisions(final int limit) throws IOException {
4428    return executeCallable(
4429      new MasterCallable<List<LogEntry>>(getConnection(), getRpcControllerFactory()) {
4430        @Override
4431        protected List<LogEntry> rpcCall() throws Exception {
4432          HBaseProtos.LogEntry logEntry =
4433            master.getLogEntries(getRpcController(), ProtobufUtil.toBalancerDecisionRequest(limit));
4434          return ProtobufUtil.toBalancerDecisionResponse(logEntry);
4435        }
4436      });
4437  }
4438
4439  private List<LogEntry> getBalancerRejections(final int limit) throws IOException {
4440    return executeCallable(
4441      new MasterCallable<List<LogEntry>>(getConnection(), getRpcControllerFactory()) {
4442        @Override
4443        protected List<LogEntry> rpcCall() throws Exception {
4444          HBaseProtos.LogEntry logEntry = master.getLogEntries(getRpcController(),
4445            ProtobufUtil.toBalancerRejectionRequest(limit));
4446          return ProtobufUtil.toBalancerRejectionResponse(logEntry);
4447        }
4448      });
4449  }
4450
4451  private Boolean clearSlowLogsResponses(final ServerName serverName) throws IOException {
4452    AdminService.BlockingInterface admin = this.connection.getAdmin(serverName);
4453    return executeCallable(new RpcRetryingCallable<Boolean>() {
4454      @Override
4455      protected Boolean rpcCall(int callTimeout) throws Exception {
4456        HBaseRpcController controller = rpcControllerFactory.newController();
4457        AdminProtos.ClearSlowLogResponses clearSlowLogResponses = admin
4458          .clearSlowLogsResponses(controller, RequestConverter.buildClearSlowLogResponseRequest());
4459        return ProtobufUtil.toClearSlowLogPayload(clearSlowLogResponses);
4460      }
4461    });
4462  }
4463}