001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.client;
019
020import static org.apache.hadoop.hbase.util.FutureUtils.get;
021
022import java.io.Closeable;
023import java.io.IOException;
024import java.util.Collection;
025import java.util.EnumSet;
026import java.util.HashMap;
027import java.util.List;
028import java.util.Map;
029import java.util.Set;
030import java.util.concurrent.Future;
031import java.util.concurrent.TimeUnit;
032import java.util.regex.Pattern;
033import java.util.stream.Collectors;
034import org.apache.hadoop.conf.Configuration;
035import org.apache.hadoop.hbase.Abortable;
036import org.apache.hadoop.hbase.CacheEvictionStats;
037import org.apache.hadoop.hbase.ClusterMetrics;
038import org.apache.hadoop.hbase.ClusterMetrics.Option;
039import org.apache.hadoop.hbase.NamespaceDescriptor;
040import org.apache.hadoop.hbase.NamespaceNotFoundException;
041import org.apache.hadoop.hbase.RegionMetrics;
042import org.apache.hadoop.hbase.ServerName;
043import org.apache.hadoop.hbase.TableExistsException;
044import org.apache.hadoop.hbase.TableName;
045import org.apache.hadoop.hbase.TableNotFoundException;
046import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
047import org.apache.hadoop.hbase.client.replication.TableCFs;
048import org.apache.hadoop.hbase.client.security.SecurityCapability;
049import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
050import org.apache.hadoop.hbase.net.Address;
051import org.apache.hadoop.hbase.quotas.QuotaFilter;
052import org.apache.hadoop.hbase.quotas.QuotaSettings;
053import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotView;
054import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
055import org.apache.hadoop.hbase.replication.ReplicationException;
056import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
057import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
058import org.apache.hadoop.hbase.replication.SyncReplicationState;
059import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
060import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
061import org.apache.hadoop.hbase.security.access.Permission;
062import org.apache.hadoop.hbase.security.access.UserPermission;
063import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
064import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
065import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
066import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
067import org.apache.hadoop.hbase.util.Bytes;
068import org.apache.hadoop.hbase.util.Pair;
069import org.apache.yetus.audience.InterfaceAudience;
070
071import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
072
073/**
074 * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and call
075 * {@link #close()} when done.
076 * <p>
077 * Admin can be used to create, drop, list, enable and disable and otherwise modify tables, as well
078 * as perform other administrative operations.
079 * @see ConnectionFactory
080 * @see Connection
081 * @see Table
082 * @since 0.99.0
083 */
084@InterfaceAudience.Public
085public interface Admin extends Abortable, Closeable {
086
087  /**
088   * Return the operation timeout for a rpc call.
089   * @see #getSyncWaitTimeout()
090   */
091  int getOperationTimeout();
092
093  /**
094   * Return the blocking wait time for an asynchronous operation. Can be configured by
095   * {@code hbase.client.sync.wait.timeout.msec}.
096   * <p/>
097   * For several operations, such as createTable, deleteTable, etc, the rpc call will finish right
098   * after we schedule a procedure at master side, so the timeout will not be controlled by the
099   * above {@link #getOperationTimeout()}. And timeout value here tells you how much time we will
100   * wait until the procedure at master side is finished.
101   * <p/>
102   * In general, you can consider that the implementation for XXXX method is just a
103   * XXXXAsync().get(getSyncWaitTimeout(), TimeUnit.MILLISECONDS).
104   * @see #getOperationTimeout()
105   */
106  int getSyncWaitTimeout();
107
108  @Override
109  void abort(String why, Throwable e);
110
111  @Override
112  boolean isAborted();
113
114  /** Returns Connection used by this object. */
115  Connection getConnection();
116
117  /**
118   * Check if a table exists.
119   * @param tableName Table to check.
120   * @return <code>true</code> if table exists already.
121   * @throws IOException if a remote or network exception occurs
122   */
123  boolean tableExists(TableName tableName) throws IOException;
124
125  /**
126   * List all the userspace tables.
127   * @return a list of TableDescriptors
128   * @throws IOException if a remote or network exception occurs
129   */
130  List<TableDescriptor> listTableDescriptors() throws IOException;
131
132  /**
133   * List all userspace tables and whether or not include system tables.
134   * @return a list of TableDescriptors
135   * @throws IOException if a remote or network exception occurs
136   */
137  List<TableDescriptor> listTableDescriptors(boolean includeSysTables) throws IOException;
138
139  /**
140   * List all the userspace tables that match the given pattern.
141   * @param pattern The compiled regular expression to match against
142   * @return a list of TableDescriptors
143   * @throws IOException if a remote or network exception occurs
144   * @see #listTableDescriptors()
145   */
146  default List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
147    return listTableDescriptors(pattern, false);
148  }
149
150  /**
151   * List all the tables matching the given pattern.
152   * @param pattern          The compiled regular expression to match against
153   * @param includeSysTables <code>false</code> to match only against userspace tables
154   * @return a list of TableDescriptors
155   * @throws IOException if a remote or network exception occurs
156   * @see #listTableDescriptors()
157   */
158  List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
159    throws IOException;
160
161  /**
162   * List all enabled or disabled tables
163   * @param isEnabled is true means return enabled tables, false means return disabled tables
164   * @return a list of enabled or disabled tables
165   */
166  List<TableDescriptor> listTableDescriptorsByState(boolean isEnabled) throws IOException;
167
168  /**
169   * List all of the names of userspace tables.
170   * @return TableName[] table names
171   * @throws IOException if a remote or network exception occurs
172   */
173  TableName[] listTableNames() throws IOException;
174
175  /**
176   * List all of the names of userspace tables.
177   * @param pattern The regular expression to match against
178   * @return array of table names
179   * @throws IOException if a remote or network exception occurs
180   */
181  default TableName[] listTableNames(Pattern pattern) throws IOException {
182    return listTableNames(pattern, false);
183  }
184
185  /**
186   * List all of the names of userspace tables.
187   * @param pattern          The regular expression to match against
188   * @param includeSysTables <code>false</code> to match only against userspace tables
189   * @return TableName[] table names
190   * @throws IOException if a remote or network exception occurs
191   */
192  TableName[] listTableNames(Pattern pattern, boolean includeSysTables) throws IOException;
193
194  /**
195   * List all enabled or disabled table names
196   * @param isEnabled is true means return enabled table names, false means return disabled table
197   *                  names
198   * @return a list of enabled or disabled table names
199   */
200  List<TableName> listTableNamesByState(boolean isEnabled) throws IOException;
201
202  /**
203   * Get a table descriptor.
204   * @param tableName as a {@link TableName}
205   * @return the tableDescriptor
206   * @throws TableNotFoundException if the table was not found
207   * @throws IOException            if a remote or network exception occurs
208   */
209  TableDescriptor getDescriptor(TableName tableName) throws TableNotFoundException, IOException;
210
211  /**
212   * Creates a new table. Synchronous operation.
213   * @param desc table descriptor for table
214   * @throws IllegalArgumentException                          if the table name is reserved
215   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
216   * @throws TableExistsException                              if table already exists (If
217   *                                                           concurrent threads, the table may
218   *                                                           have been created between
219   *                                                           test-for-existence and
220   *                                                           attempt-at-creation).
221   * @throws IOException                                       if a remote or network exception
222   *                                                           occurs
223   */
224  default void createTable(TableDescriptor desc) throws IOException {
225    get(createTableAsync(desc), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
226  }
227
228  /**
229   * Creates a new table with the specified number of regions. The start key specified will become
230   * the end key of the first region of the table, and the end key specified will become the start
231   * key of the last region of the table (the first region has a null start key and the last region
232   * has a null end key). BigInteger math will be used to divide the key range specified into enough
233   * segments to make the required number of total regions. Synchronous operation.
234   * @param desc       table descriptor for table
235   * @param startKey   beginning of key range
236   * @param endKey     end of key range
237   * @param numRegions the total number of regions to create
238   * @throws IOException                                       if a remote or network exception
239   *                                                           occurs
240   * @throws IllegalArgumentException                          if the table name is reserved
241   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
242   * @throws TableExistsException                              if table already exists (If
243   *                                                           concurrent threads, the table may
244   *                                                           have been created between
245   *                                                           test-for-existence and
246   *                                                           attempt-at-creation).
247   */
248  void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions)
249    throws IOException;
250
251  /**
252   * Creates a new table with an initial set of empty regions defined by the specified split keys.
253   * The total number of regions created will be the number of split keys plus one. Synchronous
254   * operation. Note : Avoid passing empty split key.
255   * @param desc      table descriptor for table
256   * @param splitKeys array of split keys for the initial regions of the table
257   * @throws IllegalArgumentException                          if the table name is reserved, if the
258   *                                                           split keys are repeated and if the
259   *                                                           split key has empty byte array.
260   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
261   * @throws TableExistsException                              if table already exists (If
262   *                                                           concurrent threads, the table may
263   *                                                           have been created between
264   *                                                           test-for-existence and
265   *                                                           attempt-at-creation).
266   * @throws IOException                                       if a remote or network exception
267   *                                                           occurs
268   */
269  default void createTable(TableDescriptor desc, byte[][] splitKeys) throws IOException {
270    get(createTableAsync(desc, splitKeys), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
271  }
272
273  /**
274   * Creates a new table but does not block and wait for it to come online. You can use
275   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
276   * ExecutionException if there was an error while executing the operation or TimeoutException in
277   * case the wait timeout was not long enough to allow the operation to complete.
278   * <p/>
279   * Throws IllegalArgumentException Bad table name, if the split keys are repeated and if the split
280   * key has empty byte array.
281   * @param desc table descriptor for table
282   * @throws IOException if a remote or network exception occurs
283   * @return the result of the async creation. You can use Future.get(long, TimeUnit) to wait on the
284   *         operation to complete.
285   */
286  Future<Void> createTableAsync(TableDescriptor desc) throws IOException;
287
288  /**
289   * Creates a new table but does not block and wait for it to come online. You can use
290   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
291   * ExecutionException if there was an error while executing the operation or TimeoutException in
292   * case the wait timeout was not long enough to allow the operation to complete. Throws
293   * IllegalArgumentException Bad table name, if the split keys are repeated and if the split key
294   * has empty byte array.
295   * @param desc      table descriptor for table
296   * @param splitKeys keys to check if the table has been created with all split keys
297   * @throws IOException if a remote or network exception occurs
298   * @return the result of the async creation. You can use Future.get(long, TimeUnit) to wait on the
299   *         operation to complete.
300   */
301  Future<Void> createTableAsync(TableDescriptor desc, byte[][] splitKeys) throws IOException;
302
303  /**
304   * Deletes a table. Synchronous operation.
305   * @param tableName name of table to delete
306   * @throws IOException if a remote or network exception occurs
307   */
308  default void deleteTable(TableName tableName) throws IOException {
309    get(deleteTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
310  }
311
312  /**
313   * Deletes the table but does not block and wait for it to be completely removed. You can use
314   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
315   * ExecutionException if there was an error while executing the operation or TimeoutException in
316   * case the wait timeout was not long enough to allow the operation to complete.
317   * @param tableName name of table to delete
318   * @throws IOException if a remote or network exception occurs
319   * @return the result of the async delete. You can use Future.get(long, TimeUnit) to wait on the
320   *         operation to complete.
321   */
322  Future<Void> deleteTableAsync(TableName tableName) throws IOException;
323
324  /**
325   * Truncate a table. Synchronous operation.
326   * @param tableName      name of table to truncate
327   * @param preserveSplits <code>true</code> if the splits should be preserved
328   * @throws IOException if a remote or network exception occurs
329   */
330  default void truncateTable(TableName tableName, boolean preserveSplits) throws IOException {
331    get(truncateTableAsync(tableName, preserveSplits), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
332  }
333
334  /**
335   * Truncate the table but does not block and wait for it to be completely enabled. You can use
336   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
337   * ExecutionException if there was an error while executing the operation or TimeoutException in
338   * case the wait timeout was not long enough to allow the operation to complete.
339   * @param tableName      name of table to delete
340   * @param preserveSplits <code>true</code> if the splits should be preserved
341   * @throws IOException if a remote or network exception occurs
342   * @return the result of the async truncate. You can use Future.get(long, TimeUnit) to wait on the
343   *         operation to complete.
344   */
345  Future<Void> truncateTableAsync(TableName tableName, boolean preserveSplits) throws IOException;
346
347  /**
348   * Enable a table. May timeout. Use {@link #enableTableAsync(org.apache.hadoop.hbase.TableName)}
349   * and {@link #isTableEnabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
350   * disabled state for it to be enabled.
351   * @param tableName name of the table
352   * @throws IOException There could be couple types of IOException TableNotFoundException means the
353   *                     table doesn't exist. TableNotDisabledException means the table isn't in
354   *                     disabled state.
355   * @see #isTableEnabled(org.apache.hadoop.hbase.TableName)
356   * @see #disableTable(org.apache.hadoop.hbase.TableName)
357   * @see #enableTableAsync(org.apache.hadoop.hbase.TableName)
358   */
359  default void enableTable(TableName tableName) throws IOException {
360    get(enableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
361  }
362
363  /**
364   * Enable the table but does not block and wait for it to be completely enabled. You can use
365   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
366   * ExecutionException if there was an error while executing the operation or TimeoutException in
367   * case the wait timeout was not long enough to allow the operation to complete.
368   * @param tableName name of table to delete
369   * @throws IOException if a remote or network exception occurs
370   * @return the result of the async enable. You can use Future.get(long, TimeUnit) to wait on the
371   *         operation to complete.
372   */
373  Future<Void> enableTableAsync(TableName tableName) throws IOException;
374
375  /**
376   * Disable the table but does not block and wait for it to be completely disabled. You can use
377   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
378   * ExecutionException if there was an error while executing the operation or TimeoutException in
379   * case the wait timeout was not long enough to allow the operation to complete.
380   * @param tableName name of table to delete
381   * @throws IOException if a remote or network exception occurs
382   * @return the result of the async disable. You can use Future.get(long, TimeUnit) to wait on the
383   *         operation to complete.
384   */
385  Future<Void> disableTableAsync(TableName tableName) throws IOException;
386
387  /**
388   * Disable table and wait on completion. May timeout eventually. Use
389   * {@link #disableTableAsync(org.apache.hadoop.hbase.TableName)} and
390   * {@link #isTableDisabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
391   * enabled state for it to be disabled.
392   * @throws IOException There could be couple types of IOException TableNotFoundException means the
393   *                     table doesn't exist. TableNotEnabledException means the table isn't in
394   *                     enabled state.
395   */
396  default void disableTable(TableName tableName) throws IOException {
397    get(disableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
398  }
399
400  /**
401   * Check if a table is enabled.
402   * @param tableName name of table to check
403   * @return <code>true</code> if table is on-line
404   * @throws IOException if a remote or network exception occurs
405   */
406  boolean isTableEnabled(TableName tableName) throws IOException;
407
408  /**
409   * Check if a table is disabled.
410   * @param tableName name of table to check
411   * @return <code>true</code> if table is off-line
412   * @throws IOException if a remote or network exception occurs
413   */
414  boolean isTableDisabled(TableName tableName) throws IOException;
415
416  /**
417   * Check if a table is available.
418   * @param tableName name of table to check
419   * @return <code>true</code> if all regions of the table are available
420   * @throws IOException if a remote or network exception occurs
421   */
422  boolean isTableAvailable(TableName tableName) throws IOException;
423
424  /**
425   * Add a column family to an existing table. Synchronous operation. Use
426   * {@link #addColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns a
427   * {@link Future} from which you can learn whether success or failure.
428   * @param tableName    name of the table to add column family to
429   * @param columnFamily column family descriptor of column family to be added
430   * @throws IOException if a remote or network exception occurs
431   */
432  default void addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
433    throws IOException {
434    get(addColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
435  }
436
437  /**
438   * Add a column family to an existing table. Asynchronous operation. You can use Future.get(long,
439   * TimeUnit) to wait on the operation to complete. It may throw ExecutionException if there was an
440   * error while executing the operation or TimeoutException in case the wait timeout was not long
441   * enough to allow the operation to complete.
442   * @param tableName    name of the table to add column family to
443   * @param columnFamily column family descriptor of column family to be added
444   * @throws IOException if a remote or network exception occurs
445   * @return the result of the async add column family. You can use Future.get(long, TimeUnit) to
446   *         wait on the operation to complete.
447   */
448  Future<Void> addColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily)
449    throws IOException;
450
451  /**
452   * Delete a column family from a table. Synchronous operation. Use
453   * {@link #deleteColumnFamily(TableName, byte[])} instead because it returns a {@link Future} from
454   * which you can learn whether success or failure.
455   * @param tableName    name of table
456   * @param columnFamily name of column family to be deleted
457   * @throws IOException if a remote or network exception occurs
458   */
459  default void deleteColumnFamily(TableName tableName, byte[] columnFamily) throws IOException {
460    get(deleteColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(),
461      TimeUnit.MILLISECONDS);
462  }
463
464  /**
465   * Delete a column family from a table. Asynchronous operation. You can use Future.get(long,
466   * TimeUnit) to wait on the operation to complete. It may throw ExecutionException if there was an
467   * error while executing the operation or TimeoutException in case the wait timeout was not long
468   * enough to allow the operation to complete.
469   * @param tableName    name of table
470   * @param columnFamily name of column family to be deleted
471   * @throws IOException if a remote or network exception occurs
472   * @return the result of the async delete column family. You can use Future.get(long, TimeUnit) to
473   *         wait on the operation to complete.
474   */
475  Future<Void> deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) throws IOException;
476
477  /**
478   * Modify an existing column family on a table. Synchronous operation. Use
479   * {@link #modifyColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns
480   * a {@link Future} from which you can learn whether success or failure.
481   * @param tableName    name of table
482   * @param columnFamily new column family descriptor to use
483   * @throws IOException if a remote or network exception occurs
484   */
485  default void modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
486    throws IOException {
487    get(modifyColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(),
488      TimeUnit.MILLISECONDS);
489  }
490
491  /**
492   * Modify an existing column family on a table. Asynchronous operation. You can use
493   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
494   * ExecutionException if there was an error while executing the operation or TimeoutException in
495   * case the wait timeout was not long enough to allow the operation to complete.
496   * @param tableName    name of table
497   * @param columnFamily new column family descriptor to use
498   * @throws IOException if a remote or network exception occurs
499   * @return the result of the async modify column family. You can use Future.get(long, TimeUnit) to
500   *         wait on the operation to complete.
501   */
502  Future<Void> modifyColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily)
503    throws IOException;
504
505  /**
506   * Change the store file tracker of the given table's given family.
507   * @param tableName the table you want to change
508   * @param family    the family you want to change
509   * @param dstSFT    the destination store file tracker
510   * @throws IOException if a remote or network exception occurs
511   */
512  default void modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] family, String dstSFT)
513    throws IOException {
514    get(modifyColumnFamilyStoreFileTrackerAsync(tableName, family, dstSFT), getSyncWaitTimeout(),
515      TimeUnit.MILLISECONDS);
516  }
517
518  /**
519   * Change the store file tracker of the given table's given family.
520   * @param tableName the table you want to change
521   * @param family    the family you want to change
522   * @param dstSFT    the destination store file tracker
523   * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the
524   *         operation to complete
525   * @throws IOException if a remote or network exception occurs
526   */
527  Future<Void> modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] family,
528    String dstSFT) throws IOException;
529
530  /**
531   * Get all the online regions on a region server.
532   * @return List of {@link RegionInfo}
533   * @throws IOException if a remote or network exception occurs
534   */
535  List<RegionInfo> getRegions(ServerName serverName) throws IOException;
536
537  /**
538   * Flush a table. Synchronous operation.
539   * @param tableName table to flush
540   * @throws IOException if a remote or network exception occurs
541   */
542  void flush(TableName tableName) throws IOException;
543
544  /**
545   * Flush the specified column family stores on all regions of the passed table. This runs as a
546   * synchronous operation.
547   * @param tableName    table to flush
548   * @param columnFamily column family within a table
549   * @throws IOException if a remote or network exception occurs
550   */
551  void flush(TableName tableName, byte[] columnFamily) throws IOException;
552
553  /**
554   * Flush an individual region. Synchronous operation.
555   * @param regionName region to flush
556   * @throws IOException if a remote or network exception occurs
557   */
558  void flushRegion(byte[] regionName) throws IOException;
559
560  /**
561   * Flush a column family within a region. Synchronous operation.
562   * @param regionName   region to flush
563   * @param columnFamily column family within a region
564   * @throws IOException if a remote or network exception occurs
565   */
566  void flushRegion(byte[] regionName, byte[] columnFamily) throws IOException;
567
568  /**
569   * Flush all regions on the region server. Synchronous operation.
570   * @param serverName the region server name to flush
571   * @throws IOException if a remote or network exception occurs
572   */
573  void flushRegionServer(ServerName serverName) throws IOException;
574
575  /**
576   * Compact a table. Asynchronous operation in that this method requests that a Compaction run and
577   * then it returns. It does not wait on the completion of Compaction (it can take a while).
578   * @param tableName table to compact
579   * @throws IOException if a remote or network exception occurs
580   */
581  void compact(TableName tableName) throws IOException;
582
583  /**
584   * Compact an individual region. Asynchronous operation in that this method requests that a
585   * Compaction run and then it returns. It does not wait on the completion of Compaction (it can
586   * take a while).
587   * @param regionName region to compact
588   * @throws IOException if a remote or network exception occurs
589   */
590  void compactRegion(byte[] regionName) throws IOException;
591
592  /**
593   * Compact a column family within a table. Asynchronous operation in that this method requests
594   * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it
595   * can take a while).
596   * @param tableName    table to compact
597   * @param columnFamily column family within a table
598   * @throws IOException if a remote or network exception occurs
599   */
600  void compact(TableName tableName, byte[] columnFamily) throws IOException;
601
602  /**
603   * Compact a column family within a region. Asynchronous operation in that this method requests
604   * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it
605   * can take a while).
606   * @param regionName   region to compact
607   * @param columnFamily column family within a region
608   * @throws IOException if a remote or network exception occurs
609   */
610  void compactRegion(byte[] regionName, byte[] columnFamily) throws IOException;
611
612  /**
613   * Compact a table. Asynchronous operation in that this method requests that a Compaction run and
614   * then it returns. It does not wait on the completion of Compaction (it can take a while).
615   * @param tableName   table to compact
616   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
617   * @throws IOException if a remote or network exception occurs
618   */
619  void compact(TableName tableName, CompactType compactType)
620    throws IOException, InterruptedException;
621
622  /**
623   * Compact a column family within a table. Asynchronous operation in that this method requests
624   * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it
625   * can take a while).
626   * @param tableName    table to compact
627   * @param columnFamily column family within a table
628   * @param compactType  {@link org.apache.hadoop.hbase.client.CompactType}
629   * @throws IOException if not a mob column family or if a remote or network exception occurs
630   */
631  void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
632    throws IOException, InterruptedException;
633
634  /**
635   * Major compact a table. Asynchronous operation in that this method requests that a Compaction
636   * run and then it returns. It does not wait on the completion of Compaction (it can take a
637   * while).
638   * @param tableName table to major compact
639   * @throws IOException if a remote or network exception occurs
640   */
641  void majorCompact(TableName tableName) throws IOException;
642
643  /**
644   * Major compact a table or an individual region. Asynchronous operation in that this method
645   * requests that a Compaction run and then it returns. It does not wait on the completion of
646   * Compaction (it can take a while).
647   * @param regionName region to major compact
648   * @throws IOException if a remote or network exception occurs
649   */
650  void majorCompactRegion(byte[] regionName) throws IOException;
651
652  /**
653   * Major compact a column family within a table. Asynchronous operation in that this method
654   * requests that a Compaction run and then it returns. It does not wait on the completion of
655   * Compaction (it can take a while).
656   * @param tableName    table to major compact
657   * @param columnFamily column family within a table
658   * @throws IOException if a remote or network exception occurs
659   */
660  void majorCompact(TableName tableName, byte[] columnFamily) throws IOException;
661
662  /**
663   * Major compact a column family within region. Asynchronous operation in that this method
664   * requests that a Compaction run and then it returns. It does not wait on the completion of
665   * Compaction (it can take a while).
666   * @param regionName   egion to major compact
667   * @param columnFamily column family within a region
668   * @throws IOException if a remote or network exception occurs
669   */
670  void majorCompactRegion(byte[] regionName, byte[] columnFamily) throws IOException;
671
672  /**
673   * Major compact a table. Asynchronous operation in that this method requests that a Compaction
674   * run and then it returns. It does not wait on the completion of Compaction (it can take a
675   * while).
676   * @param tableName   table to compact
677   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
678   * @throws IOException if a remote or network exception occurs
679   */
680  void majorCompact(TableName tableName, CompactType compactType)
681    throws IOException, InterruptedException;
682
683  /**
684   * Major compact a column family within a table. Asynchronous operation in that this method
685   * requests that a Compaction run and then it returns. It does not wait on the completion of
686   * Compaction (it can take a while).
687   * @param tableName    table to compact
688   * @param columnFamily column family within a table
689   * @param compactType  {@link org.apache.hadoop.hbase.client.CompactType}
690   * @throws IOException if not a mob column family or if a remote or network exception occurs
691   */
692  void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType)
693    throws IOException, InterruptedException;
694
695  /**
696   * Turn the compaction on or off. Disabling compactions will also interrupt any currently ongoing
697   * compactions. This state is ephemeral. The setting will be lost on restart. Compaction can also
698   * be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled in
699   * hbase-site.xml.
700   * @param switchState     Set to <code>true</code> to enable, <code>false</code> to disable.
701   * @param serverNamesList list of region servers.
702   * @return Previous compaction states for region servers
703   * @throws IOException if a remote or network exception occurs
704   */
705  Map<ServerName, Boolean> compactionSwitch(boolean switchState, List<String> serverNamesList)
706    throws IOException;
707
708  /**
709   * Compact all regions on the region server. Asynchronous operation in that this method requests
710   * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it
711   * can take a while).
712   * @param serverName the region server name
713   * @throws IOException if a remote or network exception occurs
714   */
715  void compactRegionServer(ServerName serverName) throws IOException;
716
717  /**
718   * Major compact all regions on the region server. Asynchronous operation in that this method
719   * requests that a Compaction run and then it returns. It does not wait on the completion of
720   * Compaction (it can take a while).
721   * @param serverName the region server name
722   * @throws IOException if a remote or network exception occurs
723   */
724  void majorCompactRegionServer(ServerName serverName) throws IOException;
725
726  /**
727   * Move the region <code>encodedRegionName</code> to a random server.
728   * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name
729   *                          suffix: e.g. if regionname is
730   *                          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
731   *                          then the encoded region name is:
732   *                          <code>527db22f95c8a9e0116f0cc13c680396</code>.
733   * @throws IOException if we can't find a region named <code>encodedRegionName</code>
734   */
735  void move(byte[] encodedRegionName) throws IOException;
736
737  /**
738   * Move the region <code>rencodedRegionName</code> to <code>destServerName</code>.
739   * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name
740   *                          suffix: e.g. if regionname is
741   *                          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
742   *                          then the encoded region name is:
743   *                          <code>527db22f95c8a9e0116f0cc13c680396</code>.
744   * @param destServerName    The servername of the destination regionserver. If passed the empty
745   *                          byte array we'll assign to a random server. A server name is made of
746   *                          host, port and startcode. Here is an example:
747   *                          <code> host187.example.com,60020,1289493121758</code>
748   * @throws IOException if we can't find a region named <code>encodedRegionName</code>
749   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use {@link #move(byte[], ServerName)}
750   *             instead. And if you want to move the region to a random server, please use
751   *             {@link #move(byte[])}.
752   * @see <a href="https://issues.apache.org/jira/browse/HBASE-22108">HBASE-22108</a>
753   */
754  @Deprecated
755  default void move(byte[] encodedRegionName, byte[] destServerName) throws IOException {
756    if (destServerName == null || destServerName.length == 0) {
757      move(encodedRegionName);
758    } else {
759      move(encodedRegionName, ServerName.valueOf(Bytes.toString(destServerName)));
760    }
761  }
762
763  /**
764   * Move the region <code>encodedRegionName</code> to <code>destServerName</code>.
765   * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name
766   *                          suffix: e.g. if regionname is
767   *                          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
768   *                          then the encoded region name is:
769   *                          <code>527db22f95c8a9e0116f0cc13c680396</code>.
770   * @param destServerName    The servername of the destination regionserver. A server name is made
771   *                          of host, port and startcode. Here is an example:
772   *                          <code> host187.example.com,60020,1289493121758</code>
773   * @throws IOException if we can't find a region named <code>encodedRegionName</code>
774   */
775  void move(byte[] encodedRegionName, ServerName destServerName) throws IOException;
776
777  /**
778   * Assign a Region.
779   * @param regionName Region name to assign.
780   * @throws IOException if a remote or network exception occurs
781   */
782  void assign(byte[] regionName) throws IOException;
783
784  /**
785   * Unassign a Region.
786   * @param regionName Region name to assign.
787   * @throws IOException if a remote or network exception occurs
788   */
789  void unassign(byte[] regionName) throws IOException;
790
791  /**
792   * Unassign a region from current hosting regionserver. Region will then be assigned to a
793   * regionserver chosen at random. Region could be reassigned back to the same server. Use
794   * {@link #move(byte[], ServerName)} if you want to control the region movement.
795   * @param regionName Region to unassign. Will clear any existing RegionPlan if one found.
796   * @param force      If <code>true</code>, force unassign (Will remove region from
797   *                   regions-in-transition too if present. If results in double assignment use
798   *                   hbck -fix to resolve. To be used by experts).
799   * @throws IOException if a remote or network exception occurs
800   * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} instead.
801   * @see <a href="https://issues.apache.org/jira/browse/HBASE-24875">HBASE-24875</a>
802   */
803  @Deprecated
804  default void unassign(byte[] regionName, boolean force) throws IOException {
805    unassign(regionName);
806  }
807
808  /**
809   * Offline specified region from master's in-memory state. It will not attempt to reassign the
810   * region as in unassign. This API can be used when a region not served by any region server and
811   * still online as per Master's in memory state. If this API is incorrectly used on active region
812   * then master will loose track of that region. This is a special method that should be used by
813   * experts or hbck.
814   * @param regionName Region to offline.
815   * @throws IOException if a remote or network exception occurs
816   */
817  void offline(byte[] regionName) throws IOException;
818
819  /**
820   * Turn the load balancer on or off.
821   * @param onOrOff     Set to <code>true</code> to enable, <code>false</code> to disable.
822   * @param synchronous If <code>true</code>, it waits until current balance() call, if outstanding,
823   *                    to return.
824   * @return Previous balancer value
825   * @throws IOException if a remote or network exception occurs
826   */
827  boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws IOException;
828
829  /**
830   * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the
831   * reassignments. Can NOT run for various reasons. Check logs.
832   * @return <code>true</code> if balancer ran, <code>false</code> otherwise.
833   * @throws IOException if a remote or network exception occurs
834   */
835  default boolean balance() throws IOException {
836    return balance(BalanceRequest.defaultInstance()).isBalancerRan();
837  }
838
839  /**
840   * Invoke the balancer with the given balance request. The BalanceRequest defines how the balancer
841   * will run. See {@link BalanceRequest} for more details.
842   * @param request defines how the balancer should run
843   * @return {@link BalanceResponse} with details about the results of the invocation.
844   * @throws IOException if a remote or network exception occurs
845   */
846  BalanceResponse balance(BalanceRequest request) throws IOException;
847
848  /**
849   * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the
850   * reassignments. If there is region in transition, force parameter of true would still run
851   * balancer. Can *not* run for other reasons. Check logs.
852   * @param force whether we should force balance even if there is region in transition
853   * @return <code>true</code> if balancer ran, <code>false</code> otherwise.
854   * @throws IOException if a remote or network exception occurs
855   * @deprecated Since 2.5.0. Will be removed in 4.0.0. Use {@link #balance(BalanceRequest)}
856   *             instead.
857   */
858  @Deprecated
859  default boolean balance(boolean force) throws IOException {
860    return balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(force).build())
861      .isBalancerRan();
862  }
863
864  /**
865   * Query the current state of the balancer.
866   * @return <code>true</code> if the balancer is enabled, <code>false</code> otherwise.
867   * @throws IOException if a remote or network exception occurs
868   */
869  boolean isBalancerEnabled() throws IOException;
870
871  /**
872   * Clear all the blocks corresponding to this table from BlockCache. For expert-admins. Calling
873   * this API will drop all the cached blocks specific to a table from BlockCache. This can
874   * significantly impact the query performance as the subsequent queries will have to retrieve the
875   * blocks from underlying filesystem.
876   * @param tableName table to clear block cache
877   * @return CacheEvictionStats related to the eviction
878   * @throws IOException if a remote or network exception occurs
879   */
880  CacheEvictionStats clearBlockCache(final TableName tableName) throws IOException;
881
882  /**
883   * Invoke region normalizer. Can NOT run for various reasons. Check logs. This is a non-blocking
884   * invocation to region normalizer. If return value is true, it means the request was submitted
885   * successfully. We need to check logs for the details of which regions were split/merged.
886   * @return {@code true} if region normalizer ran, {@code false} otherwise.
887   * @throws IOException if a remote or network exception occurs
888   */
889  default boolean normalize() throws IOException {
890    return normalize(new NormalizeTableFilterParams.Builder().build());
891  }
892
893  /**
894   * Invoke region normalizer. Can NOT run for various reasons. Check logs. This is a non-blocking
895   * invocation to region normalizer. If return value is true, it means the request was submitted
896   * successfully. We need to check logs for the details of which regions were split/merged.
897   * @param ntfp limit to tables matching the specified filter.
898   * @return {@code true} if region normalizer ran, {@code false} otherwise.
899   * @throws IOException if a remote or network exception occurs
900   */
901  boolean normalize(NormalizeTableFilterParams ntfp) throws IOException;
902
903  /**
904   * Query the current state of the region normalizer.
905   * @return <code>true</code> if region normalizer is enabled, <code>false</code> otherwise.
906   * @throws IOException if a remote or network exception occurs
907   */
908  boolean isNormalizerEnabled() throws IOException;
909
910  /**
911   * Turn region normalizer on or off.
912   * @return Previous normalizer value
913   * @throws IOException if a remote or network exception occurs
914   */
915  boolean normalizerSwitch(boolean on) throws IOException;
916
917  /**
918   * Enable/Disable the catalog janitor/
919   * @param onOrOff if <code>true</code> enables the catalog janitor
920   * @return the previous state
921   * @throws IOException if a remote or network exception occurs
922   */
923  boolean catalogJanitorSwitch(boolean onOrOff) throws IOException;
924
925  /**
926   * Ask for a scan of the catalog table.
927   * @return the number of entries cleaned. Returns -1 if previous run is in progress.
928   * @throws IOException if a remote or network exception occurs
929   */
930  int runCatalogJanitor() throws IOException;
931
932  /**
933   * Query on the catalog janitor state (Enabled/Disabled?).
934   * @throws IOException if a remote or network exception occurs
935   */
936  boolean isCatalogJanitorEnabled() throws IOException;
937
938  /**
939   * Enable/Disable the cleaner chore.
940   * @param onOrOff if <code>true</code> enables the cleaner chore
941   * @return the previous state
942   * @throws IOException if a remote or network exception occurs
943   */
944  boolean cleanerChoreSwitch(boolean onOrOff) throws IOException;
945
946  /**
947   * Ask for cleaner chore to run.
948   * @return <code>true</code> if cleaner chore ran, <code>false</code> otherwise
949   * @throws IOException if a remote or network exception occurs
950   */
951  boolean runCleanerChore() throws IOException;
952
953  /**
954   * Query on the cleaner chore state (Enabled/Disabled?).
955   * @throws IOException if a remote or network exception occurs
956   */
957  boolean isCleanerChoreEnabled() throws IOException;
958
959  /**
960   * Merge two regions. Asynchronous operation.
961   * @param nameOfRegionA encoded or full name of region a
962   * @param nameOfRegionB encoded or full name of region b
963   * @param forcible      <code>true</code> if do a compulsory merge, otherwise we will only merge
964   *                      two adjacent regions
965   * @throws IOException if a remote or network exception occurs
966   * @deprecated since 2.3.0 and will be removed in 4.0.0. Multi-region merge feature is now
967   *             supported. Use {@link #mergeRegionsAsync(byte[][], boolean)} instead.
968   */
969  @Deprecated
970  default Future<Void> mergeRegionsAsync(byte[] nameOfRegionA, byte[] nameOfRegionB,
971    boolean forcible) throws IOException {
972    byte[][] nameofRegionsToMerge = new byte[2][];
973    nameofRegionsToMerge[0] = nameOfRegionA;
974    nameofRegionsToMerge[1] = nameOfRegionB;
975    return mergeRegionsAsync(nameofRegionsToMerge, forcible);
976  }
977
978  /**
979   * Merge multiple regions (>=2). Asynchronous operation.
980   * @param nameofRegionsToMerge encoded or full name of daughter regions
981   * @param forcible             <code>true</code> if do a compulsory merge, otherwise we will only
982   *                             merge adjacent regions
983   * @throws IOException if a remote or network exception occurs
984   */
985  Future<Void> mergeRegionsAsync(byte[][] nameofRegionsToMerge, boolean forcible)
986    throws IOException;
987
988  /**
989   * Split a table. The method will execute split action for each region in table.
990   * @param tableName table to split
991   * @throws IOException if a remote or network exception occurs
992   */
993  void split(TableName tableName) throws IOException;
994
995  /**
996   * Split a table.
997   * @param tableName  table to split
998   * @param splitPoint the explicit position to split on
999   * @throws IOException if a remote or network exception occurs
1000   */
1001  void split(TableName tableName, byte[] splitPoint) throws IOException;
1002
1003  /**
1004   * Split an individual region. Asynchronous operation.
1005   * @param regionName region to split
1006   * @throws IOException if a remote or network exception occurs
1007   */
1008  Future<Void> splitRegionAsync(byte[] regionName) throws IOException;
1009
1010  /**
1011   * Split an individual region. Asynchronous operation.
1012   * @param regionName region to split
1013   * @param splitPoint the explicit position to split on
1014   * @throws IOException if a remote or network exception occurs
1015   */
1016  Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException;
1017
1018  /**
1019   * Modify an existing table, more IRB friendly version.
1020   * @param td modified description of the table
1021   * @throws IOException if a remote or network exception occurs
1022   */
1023  default void modifyTable(TableDescriptor td) throws IOException {
1024    get(modifyTableAsync(td), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
1025  }
1026
1027  /**
1028   * Modify an existing table, more IRB (ruby) friendly version. Asynchronous operation. This means
1029   * that it may be a while before your schema change is updated across all of the table. You can
1030   * use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
1031   * ExecutionException if there was an error while executing the operation or TimeoutException in
1032   * case the wait timeout was not long enough to allow the operation to complete.
1033   * @param td description of the table
1034   * @throws IOException if a remote or network exception occurs
1035   * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the
1036   *         operation to complete
1037   */
1038  Future<Void> modifyTableAsync(TableDescriptor td) throws IOException;
1039
1040  /**
1041   * Change the store file tracker of the given table.
1042   * @param tableName the table you want to change
1043   * @param dstSFT    the destination store file tracker
1044   * @throws IOException if a remote or network exception occurs
1045   */
1046  default void modifyTableStoreFileTracker(TableName tableName, String dstSFT) throws IOException {
1047    get(modifyTableStoreFileTrackerAsync(tableName, dstSFT), getSyncWaitTimeout(),
1048      TimeUnit.MILLISECONDS);
1049  }
1050
1051  /**
1052   * Change the store file tracker of the given table.
1053   * @param tableName the table you want to change
1054   * @param dstSFT    the destination store file tracker
1055   * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the
1056   *         operation to complete
1057   * @throws IOException if a remote or network exception occurs
1058   */
1059  Future<Void> modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT)
1060    throws IOException;
1061
1062  /**
1063   * Shuts down the HBase cluster.
1064   * <p/>
1065   * Notice that, a success shutdown call may ends with an error since the remote server has already
1066   * been shutdown.
1067   * @throws IOException if a remote or network exception occurs
1068   */
1069  void shutdown() throws IOException;
1070
1071  /**
1072   * Shuts down the current HBase master only. Does not shutdown the cluster.
1073   * <p/>
1074   * Notice that, a success stopMaster call may ends with an error since the remote server has
1075   * already been shutdown.
1076   * @throws IOException if a remote or network exception occurs
1077   * @see #shutdown()
1078   */
1079  void stopMaster() throws IOException;
1080
1081  /**
1082   * Check whether Master is in maintenance mode.
1083   * @throws IOException if a remote or network exception occurs
1084   */
1085  boolean isMasterInMaintenanceMode() throws IOException;
1086
1087  /**
1088   * Stop the designated regionserver.
1089   * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
1090   *                     <code>example.org:1234</code>
1091   * @throws IOException if a remote or network exception occurs
1092   */
1093  void stopRegionServer(String hostnamePort) throws IOException;
1094
1095  /**
1096   * Get whole cluster metrics, containing status about:
1097   *
1098   * <pre>
1099   * hbase version
1100   * cluster id
1101   * primary/backup master(s)
1102   * master's coprocessors
1103   * live/dead regionservers
1104   * balancer
1105   * regions in transition
1106   * </pre>
1107   *
1108   * @return cluster metrics
1109   * @throws IOException if a remote or network exception occurs
1110   */
1111  default ClusterMetrics getClusterMetrics() throws IOException {
1112    return getClusterMetrics(EnumSet.allOf(ClusterMetrics.Option.class));
1113  }
1114
1115  /**
1116   * Get cluster status with a set of {@link Option} to get desired status.
1117   * @return cluster status
1118   * @throws IOException if a remote or network exception occurs
1119   */
1120  ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException;
1121
1122  /**
1123   * Get the current active master.
1124   * @return current master server name
1125   * @throws IOException if a remote or network exception occurs
1126   */
1127  default ServerName getMaster() throws IOException {
1128    return getClusterMetrics(EnumSet.of(Option.MASTER)).getMasterName();
1129  }
1130
1131  /**
1132   * Get a list of current backup masters.
1133   * @return current backup master list
1134   * @throws IOException if a remote or network exception occurs
1135   */
1136  default Collection<ServerName> getBackupMasters() throws IOException {
1137    return getClusterMetrics(EnumSet.of(Option.BACKUP_MASTERS)).getBackupMasterNames();
1138  }
1139
1140  /**
1141   * Get the live server list.
1142   * @return current live region servers list
1143   * @throws IOException if a remote or network exception occurs
1144   */
1145  default Collection<ServerName> getRegionServers() throws IOException {
1146    return getRegionServers(false);
1147  }
1148
1149  /**
1150   * Retrieve all current live region servers, including decommissioned servers if
1151   * excludeDecommissionedRS is false; otherwise only non-decommissioned servers are returned.
1152   * @param excludeDecommissionedRS should we exclude decommissioned RS nodes
1153   * @return all current live region servers including/excluding decommissioned hosts
1154   * @throws IOException if a remote or network exception occurs
1155   */
1156  default Collection<ServerName> getRegionServers(boolean excludeDecommissionedRS)
1157    throws IOException {
1158    List<ServerName> allServers =
1159      getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)).getServersName();
1160    if (!excludeDecommissionedRS) {
1161      return allServers;
1162    }
1163    List<ServerName> decommissionedRegionServers = listDecommissionedRegionServers();
1164    return allServers.stream().filter(s -> !decommissionedRegionServers.contains(s))
1165      .collect(ImmutableList.toImmutableList());
1166  }
1167
1168  /**
1169   * Get {@link RegionMetrics} of all regions hosted on a regionserver.
1170   * @param serverName region server from which {@link RegionMetrics} is required.
1171   * @return a {@link RegionMetrics} list of all regions hosted on a region server
1172   * @throws IOException if a remote or network exception occurs
1173   */
1174  List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException;
1175
1176  /**
1177   * Get {@link RegionMetrics} of all regions hosted on a regionserver for a table.
1178   * @param serverName region server from which {@link RegionMetrics} is required.
1179   * @param tableName  get {@link RegionMetrics} of regions belonging to the table
1180   * @return region metrics map of all regions of a table hosted on a region server
1181   * @throws IOException if a remote or network exception occurs
1182   */
1183  List<RegionMetrics> getRegionMetrics(ServerName serverName, TableName tableName)
1184    throws IOException;
1185
1186  /** Returns Configuration used by the instance. */
1187  Configuration getConfiguration();
1188
1189  /**
1190   * Create a new namespace. Blocks until namespace has been successfully created or an exception is
1191   * thrown.
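   * <p/>
   * A short sketch (the namespace name is illustrative; assumes an existing <code>Admin</code>
   * instance named <code>admin</code>):
   *
   * <pre>
   * admin.createNamespace(NamespaceDescriptor.create("my_ns").build());
   * </pre>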
1192   * @param descriptor descriptor which describes the new namespace.
1193   * @throws IOException if a remote or network exception occurs
1194   */
1195  default void createNamespace(NamespaceDescriptor descriptor) throws IOException {
1196    get(createNamespaceAsync(descriptor), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
1197  }
1198
1199  /**
1200   * Create a new namespace.
1201   * @param descriptor descriptor which describes the new namespace
1202   * @return the result of the async create namespace operation. Use Future.get(long, TimeUnit) to
1203   *         wait on the operation to complete.
1204   * @throws IOException if a remote or network exception occurs
1205   */
1206  Future<Void> createNamespaceAsync(NamespaceDescriptor descriptor) throws IOException;
1207
1208  /**
1209   * Modify an existing namespace. Blocks until namespace has been successfully modified or an
1210   * exception is thrown.
1211   * @param descriptor descriptor which describes the new namespace
1212   * @throws IOException if a remote or network exception occurs
1213   */
1214  default void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
1215    get(modifyNamespaceAsync(descriptor), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
1216  }
1217
1218  /**
1219   * Modify an existing namespace.
1220   * @param descriptor descriptor which describes the new namespace
1221   * @return the result of the async modify namespace operation. Use Future.get(long, TimeUnit) to
1222   *         wait on the operation to complete.
1223   * @throws IOException if a remote or network exception occurs
1224   */
1225  Future<Void> modifyNamespaceAsync(NamespaceDescriptor descriptor) throws IOException;
1226
1227  /**
1228   * Delete an existing namespace. Only empty namespaces (no tables) can be removed. Blocks until
1229   * namespace has been successfully deleted or an exception is thrown.
1230   * @param name namespace name
1231   * @throws IOException if a remote or network exception occurs
1232   */
1233  default void deleteNamespace(String name) throws IOException {
1234    get(deleteNamespaceAsync(name), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
1235  }
1236
1237  /**
1238   * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
1239   * @param name namespace name
1240   * @return the result of the async delete namespace operation. Use Future.get(long, TimeUnit) to
1241   *         wait on the operation to complete.
1242   * @throws IOException if a remote or network exception occurs
1243   */
1244  Future<Void> deleteNamespaceAsync(String name) throws IOException;
1245
1246  /**
1247   * Get a namespace descriptor by name.
1248   * @param name name of namespace descriptor
1249   * @return A descriptor
1250   * @throws org.apache.hadoop.hbase.NamespaceNotFoundException if the namespace was not found
1251   * @throws IOException                                        if a remote or network exception
1252   *                                                            occurs
1253   */
1254  NamespaceDescriptor getNamespaceDescriptor(String name)
1255    throws NamespaceNotFoundException, IOException;
1256
1257  /**
1258   * List available namespaces
1259   * @return List of namespace names
1260   * @throws IOException if a remote or network exception occurs
1261   */
1262  String[] listNamespaces() throws IOException;
1263
1264  /**
1265   * List available namespace descriptors
1266   * @return List of descriptors
1267   * @throws IOException if a remote or network exception occurs
1268   */
1269  NamespaceDescriptor[] listNamespaceDescriptors() throws IOException;
1270
1271  /**
1272   * Get list of table descriptors by namespace.
1273   * @param name namespace name
1274   * @return a list of TableDescriptors
1275   * @throws IOException if a remote or network exception occurs
1276   */
1277  List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException;
1278
1279  /**
1280   * Get list of table names by namespace.
1281   * @param name namespace name
1282   * @return The list of table names in the namespace
1283   * @throws IOException if a remote or network exception occurs
1284   */
1285  TableName[] listTableNamesByNamespace(String name) throws IOException;
1286
1287  /**
1288   * Get the regions of a given table.
1289   * @param tableName the name of the table
1290   * @return List of {@link RegionInfo}.
1291   * @throws IOException if a remote or network exception occurs
1292   */
1293  List<RegionInfo> getRegions(TableName tableName) throws IOException;
1294
1295  @Override
1296  void close();
1297
1298  /**
1299   * Get table descriptors for the given list of table names.
1300   * @param tableNames List of table names
1301   * @return a list of TableDescriptors
1302   * @throws IOException if a remote or network exception occurs
1303   */
1304  List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException;
1305
1306  /**
1307   * Abort a procedure.
1308   * <p/>
1309   * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2.
1310   * @param procId                ID of the procedure to abort
1311   * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
1312   * @return <code>true</code> if aborted, <code>false</code> if procedure already completed or does
1313   *         not exist
1314   * @throws IOException if a remote or network exception occurs
1315   * @deprecated since 2.1.1 and will be removed in 4.0.0.
1316   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21223">HBASE-21223</a>
1317   */
1318  @Deprecated
1319  default boolean abortProcedure(long procId, boolean mayInterruptIfRunning) throws IOException {
1320    return get(abortProcedureAsync(procId, mayInterruptIfRunning), getSyncWaitTimeout(),
1321      TimeUnit.MILLISECONDS);
1322  }
1323
1324  /**
1325   * Abort a procedure but does not block and wait for completion. You can use Future.get(long,
1326   * TimeUnit) to wait on the operation to complete. It may throw ExecutionException if there was an
1327   * error while executing the operation or TimeoutException in case the wait timeout was not long
1328   * enough to allow the operation to complete. Do not use. Usually it is ignored but if not, it can
1329   * do more damage than good. See hbck2.
1330   * @param procId                ID of the procedure to abort
1331   * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
1332   * @return <code>true</code> if aborted, <code>false</code> if procedure already completed or does
1333   *         not exist
1334   * @throws IOException if a remote or network exception occurs
1335   * @deprecated since 2.1.1 and will be removed in 4.0.0.
1336   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21223">HBASE-21223</a>
1337   */
1338  @Deprecated
1339  Future<Boolean> abortProcedureAsync(long procId, boolean mayInterruptIfRunning)
1340    throws IOException;
1341
1342  /**
1343   * Get procedures.
1344   * @return procedure list in JSON
1345   * @throws IOException if a remote or network exception occurs
1346   */
1347  String getProcedures() throws IOException;
1348
1349  /**
1350   * Get locks.
1351   * @return lock list in JSON
1352   * @throws IOException if a remote or network exception occurs
1353   */
1354  String getLocks() throws IOException;
1355
1356  /**
1357   * Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new file.
1358   * Note that the actual rolling of the log writer is asynchronous and may not be complete when
1359   * this method returns. As a side effect of this call, the named region server may schedule store
1360   * flushes at the request of the wal.
1361   * @param serverName The servername of the regionserver.
1362   * @throws IOException             if a remote or network exception occurs
1363   * @throws FailedLogCloseException if we failed to close the WAL
1364   */
1365  void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException;
1366
1367  /**
1368   * Helper that delegates to getClusterMetrics().getMasterCoprocessorNames().
1369   * @return an array of master coprocessors
1370   * @see org.apache.hadoop.hbase.ClusterMetrics#getMasterCoprocessorNames()
1371   */
1372  default List<String> getMasterCoprocessorNames() throws IOException {
1373    return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS)).getMasterCoprocessorNames();
1374  }
1375
1376  /**
1377   * Get the current compaction state of a table. It could be in a major compaction, a minor
1378   * compaction, both, or none.
1379   * @param tableName table to examine
1380   * @return the current compaction state
1381   * @throws IOException if a remote or network exception occurs
1382   */
1383  CompactionState getCompactionState(TableName tableName) throws IOException;
1384
1385  /**
1386   * Get the current compaction state of a table. It could be in a compaction, or none.
1387   * @param tableName   table to examine
1388   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
1389   * @return the current compaction state
1390   * @throws IOException if a remote or network exception occurs
1391   */
1392  CompactionState getCompactionState(TableName tableName, CompactType compactType)
1393    throws IOException;
1394
1395  /**
1396   * Get the current compaction state of region. It could be in a major compaction, a minor
1397   * compaction, both, or none.
1398   * @param regionName region to examine
1399   * @return the current compaction state
1400   * @throws IOException if a remote or network exception occurs
1401   */
1402  CompactionState getCompactionStateForRegion(byte[] regionName) throws IOException;
1403
1404  /**
1405   * Get the timestamp of the last major compaction for the passed table. The timestamp of the oldest
1406   * HFile resulting from a major compaction of that table, or 0 if no such HFile could be found.
1407   * @param tableName table to examine
1408   * @return the last major compaction timestamp or 0
1409   * @throws IOException if a remote or network exception occurs
1410   */
1411  long getLastMajorCompactionTimestamp(TableName tableName) throws IOException;
1412
1413  /**
1414   * Get the timestamp of the last major compaction for the passed region. The timestamp of the
1415   * oldest HFile resulting from a major compaction of that region, or 0 if no such HFile could be
1416   * found.
1417   * @param regionName region to examine
1418   * @return the last major compaction timestamp or 0
1419   * @throws IOException if a remote or network exception occurs
1420   */
1421  long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException;
1422
1423  /**
1424   * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
1425   * taken. If the table is disabled, an offline snapshot is taken. Snapshots are taken sequentially
1426   * even when requested concurrently, across all tables. Snapshots are considered unique based on
1427   * <b>the name of the snapshot</b>. Attempts to take a snapshot with the same name (even a
1428   * different type or with different parameters) will fail with a
1429   * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate
1430   * naming. Snapshot names follow the same naming constraints as tables in HBase. See
1431   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
1432   * @param snapshotName name of the snapshot to be created
1433   * @param tableName    name of the table for which snapshot is created
1434   * @throws IOException                                                if a remote or network
1435   *                                                                    exception occurs
1436   * @throws org.apache.hadoop.hbase.snapshot.SnapshotCreationException if snapshot creation failed
1437   * @throws IllegalArgumentException                                   if the snapshot request is
1438   *                                                                    formatted incorrectly
1439   */
1440  default void snapshot(String snapshotName, TableName tableName)
1441    throws IOException, SnapshotCreationException, IllegalArgumentException {
1442    snapshot(snapshotName, tableName, SnapshotType.FLUSH);
1443  }
1444
1445  /**
1446   * Create typed snapshot of the table. Snapshots are considered unique based on <b>the name of the
1447   * snapshot</b>. Snapshots are taken sequentially even when requested concurrently, across all
1448   * tables. Attempts to take a snapshot with the same name (even a different type or with different
1449   * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming.
1450   * Snapshot names follow the same naming constraints as tables in HBase. See
1451   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
1452   * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
1453   *                     snapshots stored on the cluster
1454   * @param tableName    name of the table to snapshot
1455   * @param type         type of snapshot to take
1456   * @throws IOException               if we fail to reach the master
1457   * @throws SnapshotCreationException if snapshot creation failed
1458   * @throws IllegalArgumentException  if the snapshot request is formatted incorrectly
1459   */
1460  default void snapshot(String snapshotName, TableName tableName, SnapshotType type)
1461    throws IOException, SnapshotCreationException, IllegalArgumentException {
1462    snapshot(new SnapshotDescription(snapshotName, tableName, type));
1463  }
1464
1465  /**
1466   * Create typed snapshot of the table. Snapshots are considered unique based on <b>the name of the
1467   * snapshot</b>. Snapshots are taken sequentially even when requested concurrently, across all
1468   * tables. Attempts to take a snapshot with the same name (even a different type or with different
1469   * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming.
1470   * Snapshot names follow the same naming constraints as tables in HBase. See
1471   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. The snapshot
1472   * can be given a TTL, in seconds, through the snapshot properties.
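   * <p/>
   * For example, to take a flush snapshot with a one-day TTL (the property key and all values shown
   * are illustrative; assumes an existing <code>Admin</code> instance named <code>admin</code>):
   *
   * <pre>
   * Map&lt;String, Object&gt; props = new HashMap&lt;&gt;();
   * props.put("TTL", 86400L);
   * admin.snapshot("mysnapshot", TableName.valueOf("mytable"), SnapshotType.FLUSH, props);
   * </pre>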
1473   * @param snapshotName  name to give the snapshot on the filesystem. Must be unique from all other
1474   *                      snapshots stored on the cluster
1475   * @param tableName     name of the table to snapshot
1476   * @param type          type of snapshot to take
1477   * @param snapshotProps snapshot additional properties e.g. TTL
1478   * @throws IOException               if we fail to reach the master
1479   * @throws SnapshotCreationException if snapshot creation failed
1480   * @throws IllegalArgumentException  if the snapshot request is formatted incorrectly
1481   */
1482  default void snapshot(String snapshotName, TableName tableName, SnapshotType type,
1483    Map<String, Object> snapshotProps)
1484    throws IOException, SnapshotCreationException, IllegalArgumentException {
1485    snapshot(new SnapshotDescription(snapshotName, tableName, type, snapshotProps));
1486  }
1487
1488  /**
1489   * Create typed snapshot of the table. Snapshots are considered unique based on <b>the name of the
1490   * snapshot</b>. Snapshots are taken sequentially even when requested concurrently, across all
1491   * tables. Attempts to take a snapshot with the same name (even a different type or with different
1492   * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming.
1493   * Snapshot names follow the same naming constraints as tables in HBase. See
1494   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. The snapshot
1495   * can be given a TTL, in seconds, through the snapshot properties.
1496   * @param snapshotName  name to give the snapshot on the filesystem. Must be unique from all other
1497   *                      snapshots stored on the cluster
1498   * @param tableName     name of the table to snapshot
1499   * @param snapshotProps snapshot additional properties e.g. TTL
1500   * @throws IOException               if we fail to reach the master
1501   * @throws SnapshotCreationException if snapshot creation failed
1502   * @throws IllegalArgumentException  if the snapshot request is formatted incorrectly
1503   */
1504  default void snapshot(String snapshotName, TableName tableName, Map<String, Object> snapshotProps)
1505    throws IOException, SnapshotCreationException, IllegalArgumentException {
1506    snapshot(new SnapshotDescription(snapshotName, tableName, SnapshotType.FLUSH, snapshotProps));
1507  }
1508
1509  /**
1510   * Take a snapshot and wait for the server to complete that snapshot (blocking). Snapshots are
1511   * considered unique based on <b>the name of the snapshot</b>. Snapshots are taken sequentially
1512   * even when requested concurrently, across all tables. Attempts to take a snapshot with the same
1513   * name (even a different type or with different parameters) will fail with a
1514   * {@link SnapshotCreationException} indicating the duplicate naming. Snapshot names follow the
1515   * same naming constraints as tables in HBase. See
1516   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. You should
1517   * probably use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} unless you are sure
1518   * about the type of snapshot that you want to take.
1519   * @param snapshot snapshot to take
1520   * @throws IOException               if we lose contact with the master
1521   * @throws SnapshotCreationException if snapshot failed to be taken
1522   * @throws IllegalArgumentException  if the snapshot request is formatted incorrectly
1523   */
1524  void snapshot(SnapshotDescription snapshot)
1525    throws IOException, SnapshotCreationException, IllegalArgumentException;
1526
1527  /**
1528   * Take a snapshot without waiting for the server to complete that snapshot (asynchronous).
1529   * Snapshots are considered unique based on <b>the name of the snapshot</b>. Snapshots are taken
1530   * sequentially even when requested concurrently, across all tables.
1531   * @param snapshot snapshot to take
1532   * @throws IOException               if the snapshot did not succeed or we lose contact with the
1533   *                                   master.
1534   * @throws SnapshotCreationException if snapshot creation failed
1535   * @throws IllegalArgumentException  if the snapshot request is formatted incorrectly
1536   */
1537  Future<Void> snapshotAsync(SnapshotDescription snapshot)
1538    throws IOException, SnapshotCreationException;
1539
1540  /**
1541   * Check the current state of the passed snapshot. There are three possible states:
1542   * <ol>
1543   * <li>running - returns <tt>false</tt></li>
1544   * <li>finished - returns <tt>true</tt></li>
1545   * <li>finished with error - throws the exception that caused the snapshot to fail</li>
1546   * </ol>
1547   * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
1548   * run/started since the snapshot you are checking, you will receive an
1549   * {@link org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}.
1550   * @param snapshot description of the snapshot to check
1551   * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
1552   *         running
1553   * @throws IOException                                               if we have a network issue
1554   * @throws org.apache.hadoop.hbase.snapshot.HBaseSnapshotException   if the snapshot failed
1555   * @throws org.apache.hadoop.hbase.snapshot.UnknownSnapshotException if the requested snapshot is
1556   *                                                                   unknown
1557   */
1558  boolean isSnapshotFinished(SnapshotDescription snapshot)
1559    throws IOException, HBaseSnapshotException, UnknownSnapshotException;
1560
1561  /**
1562   * Restore the specified snapshot on the original table. (The table must be disabled) If the
1563   * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to
1564   * <code>true</code>, a snapshot of the current table is taken before executing the restore
1565   * operation. In case of restore failure, the failsafe snapshot will be restored. If the restore
1566   * completes without problem the failsafe snapshot is deleted.
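   * <p/>
   * A typical flow is sketched below (assumes an existing <code>Admin</code> instance named
   * <code>admin</code> and that the snapshot's table already exists):
   *
   * <pre>
   * admin.disableTable(tableName);
   * admin.restoreSnapshot("mysnapshot");
   * admin.enableTable(tableName);
   * </pre>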
1567   * @param snapshotName name of the snapshot to restore
1568   * @throws IOException              if a remote or network exception occurs
1569   * @throws RestoreSnapshotException if snapshot failed to be restored
1570   * @throws IllegalArgumentException if the restore request is formatted incorrectly
1571   */
1572  void restoreSnapshot(String snapshotName) throws IOException, RestoreSnapshotException;
1573
1574  /**
1575   * Restore the specified snapshot on the original table. (The table must be disabled) If
1576   * 'takeFailSafeSnapshot' is set to <code>true</code>, a snapshot of the current table is taken
1577   * before executing the restore operation. In case of restore failure, the failsafe snapshot will
1578   * be restored. If the restore completes without problem the failsafe snapshot is deleted. The
1579   * failsafe snapshot name is configurable by using the property
1580   * "hbase.snapshot.restore.failsafe.name".
1581   * @param snapshotName         name of the snapshot to restore
1582   * @param takeFailSafeSnapshot <code>true</code> if the failsafe snapshot should be taken
1583   * @throws IOException              if a remote or network exception occurs
1584   * @throws RestoreSnapshotException if snapshot failed to be restored
1585   * @throws IllegalArgumentException if the restore request is formatted incorrectly
1586   */
1587  default void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot)
1588    throws IOException, RestoreSnapshotException {
1589    restoreSnapshot(snapshotName, takeFailSafeSnapshot, false);
1590  }
1591
1592  /**
1593   * Restore the specified snapshot on the original table. (The table must be disabled) If
1594   * 'takeFailSafeSnapshot' is set to <code>true</code>, a snapshot of the current table is taken
1595   * before executing the restore operation. In case of restore failure, the failsafe snapshot will
1596   * be restored. If the restore completes without problem the failsafe snapshot is deleted. The
1597   * failsafe snapshot name is configurable by using the property
1598   * "hbase.snapshot.restore.failsafe.name".
1599   * @param snapshotName         name of the snapshot to restore
1600   * @param takeFailSafeSnapshot <code>true</code> if the failsafe snapshot should be taken
1601   * @param restoreAcl           <code>true</code> to restore acl of snapshot
1602   * @throws IOException              if a remote or network exception occurs
1603   * @throws RestoreSnapshotException if snapshot failed to be restored
1604   * @throws IllegalArgumentException if the restore request is formatted incorrectly
1605   */
1606  void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl)
1607    throws IOException, RestoreSnapshotException;
1608
1609  /**
1610   * Create a new table by cloning the snapshot content.
1611   * @param snapshotName name of the snapshot to be cloned
1612   * @param tableName    name of the table where the snapshot will be restored
1613   * @throws IOException              if a remote or network exception occurs
1614   * @throws TableExistsException     if table to be created already exists
1615   * @throws RestoreSnapshotException if snapshot failed to be cloned
1616   * @throws IllegalArgumentException if the specified table does not have a valid name
1617   */
1618  default void cloneSnapshot(String snapshotName, TableName tableName)
1619    throws IOException, TableExistsException, RestoreSnapshotException {
1620    cloneSnapshot(snapshotName, tableName, false, null);
1621  }
1622
1623  /**
1624   * Create a new table by cloning the snapshot content.
1625   * @param snapshotName name of the snapshot to be cloned
1626   * @param tableName    name of the table where the snapshot will be restored
1627   * @param restoreAcl   <code>true</code> to clone acl into newly created table
1628   * @param customSFT    specify the StoreFileTracker used for the table
1629   * @throws IOException              if a remote or network exception occurs
1630   * @throws TableExistsException     if table to be created already exists
1631   * @throws RestoreSnapshotException if snapshot failed to be cloned
1632   * @throws IllegalArgumentException if the specified table does not have a valid name
1633   */
1634  default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl,
1635    String customSFT) throws IOException, TableExistsException, RestoreSnapshotException {
1636    get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl, customSFT), getSyncWaitTimeout(),
1637      TimeUnit.MILLISECONDS);
1638  }
1639
1640  /**
1641   * Create a new table by cloning the snapshot content.
1642   * @param snapshotName name of the snapshot to be cloned
1643   * @param tableName    name of the table where the snapshot will be restored
1644   * @param restoreAcl   <code>true</code> to clone acl into newly created table
1645   * @throws IOException              if a remote or network exception occurs
1646   * @throws TableExistsException     if table to be created already exists
1647   * @throws RestoreSnapshotException if snapshot failed to be cloned
1648   * @throws IllegalArgumentException if the specified table does not have a valid name
1649   */
1650  default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl)
1651    throws IOException, TableExistsException, RestoreSnapshotException {
1652    get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl), getSyncWaitTimeout(),
1653      TimeUnit.MILLISECONDS);
1654  }
1655
1656  /**
1657   * Create a new table by cloning the snapshot content, but does not block and wait for it to be
1658   * completely cloned. You can use Future.get(long, TimeUnit) to wait on the operation to complete.
1659   * It may throw ExecutionException if there was an error while executing the operation or
1660   * TimeoutException in case the wait timeout was not long enough to allow the operation to
1661   * complete.
1662   * @param snapshotName name of the snapshot to be cloned
1663   * @param tableName    name of the table where the snapshot will be restored
1664   * @throws IOException          if a remote or network exception occurs
1665   * @throws TableExistsException if table to be cloned already exists
1666   * @return the result of the async clone snapshot. You can use Future.get(long, TimeUnit) to wait
1667   *         on the operation to complete.
1668   */
1669  default Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName)
1670    throws IOException, TableExistsException {
1671    return cloneSnapshotAsync(snapshotName, tableName, false);
1672  }
1673
1674  /**
1675   * Create a new table by cloning the snapshot content.
1676   * @param snapshotName name of the snapshot to be cloned
1677   * @param tableName    name of the table where the snapshot will be restored
1678   * @param restoreAcl   <code>true</code> to clone acl into newly created table
1679   * @throws IOException              if a remote or network exception occurs
1680   * @throws TableExistsException     if table to be created already exists
1681   * @throws RestoreSnapshotException if snapshot failed to be cloned
1682   * @throws IllegalArgumentException if the specified table does not have a valid name
1683   */
1684  default Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName,
1685    boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException {
1686    return cloneSnapshotAsync(snapshotName, tableName, restoreAcl, null);
1687  }
1688
1689  /**
1690   * Create a new table by cloning the snapshot content.
1691   * @param snapshotName name of the snapshot to be cloned
1692   * @param tableName    name of the table where the snapshot will be restored
1693   * @param restoreAcl   <code>true</code> to clone acl into newly created table
1694   * @param customSFT    specify the StoreFileTracker used for the table
1695   * @throws IOException              if a remote or network exception occurs
1696   * @throws TableExistsException     if table to be created already exists
1697   * @throws RestoreSnapshotException if snapshot failed to be cloned
1698   * @throws IllegalArgumentException if the specified table does not have a valid name
1699   */
1700  Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName, boolean restoreAcl,
1701    String customSFT) throws IOException, TableExistsException, RestoreSnapshotException;
1702
1703  /**
1704   * Execute a distributed procedure on a cluster.
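   * <p/>
   * For example, a flush-table procedure could be triggered roughly as follows (the signature and
   * instance values are illustrative; assumes an existing <code>Admin</code> instance named
   * <code>admin</code>):
   *
   * <pre>
   * admin.execProcedure("flush-table-proc", "mytable", new HashMap&lt;String, String&gt;());
   * </pre>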
1705   * @param signature A distributed procedure is uniquely identified by its signature (default the
1706   *                  root ZK node name of the procedure).
1707   * @param instance  The instance name of the procedure. For some procedures, this parameter is
1708   *                  optional.
1709   * @param props     Property/Value pairs of properties passing to the procedure
1710   * @throws IOException if a remote or network exception occurs
1711   */
1712  void execProcedure(String signature, String instance, Map<String, String> props)
1713    throws IOException;
1714
1715  /**
1716   * Execute a distributed procedure on a cluster.
1717   * @param signature A distributed procedure is uniquely identified by its signature (default the
1718   *                  root ZK node name of the procedure).
1719   * @param instance  The instance name of the procedure. For some procedures, this parameter is
1720   *                  optional.
1721   * @param props     Property/Value pairs of properties passing to the procedure
1722   * @return data returned after procedure execution. null if no return data.
1723   * @throws IOException if a remote or network exception occurs
1724   */
1725  byte[] execProcedureWithReturn(String signature, String instance, Map<String, String> props)
1726    throws IOException;
1727
1728  /**
1729   * Check the current state of the specified procedure. There are three possible states:
1730   * <ol>
1731   * <li>running - returns <tt>false</tt></li>
1732   * <li>finished - returns <tt>true</tt></li>
1733   * <li>finished with error - throws the exception that caused the procedure to fail</li>
1734   * </ol>
1735   * @param signature The signature that uniquely identifies a procedure
1736   * @param instance  The instance name of the procedure
1737   * @param props     Property/Value pairs of properties passing to the procedure
1738   * @return <code>true</code> if the specified procedure is finished successfully,
1739   *         <code>false</code> if it is still running
1740   * @throws IOException if the specified procedure finished with error
1741   */
1742  boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
1743    throws IOException;
1744
1745  /**
1746   * List completed snapshots.
1747   * @return a list of snapshot descriptors for completed snapshots
1748   * @throws IOException if a network error occurs
1749   */
1750  List<SnapshotDescription> listSnapshots() throws IOException;
1751
1752  /**
1753   * List all the completed snapshots matching the given pattern.
1754   * @param pattern The compiled regular expression to match against
1755   * @return list of SnapshotDescription
1756   * @throws IOException if a remote or network exception occurs
1757   */
1758  List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException;
1759
1760  /**
1761   * List all the completed snapshots matching the given table name regular expression and snapshot
1762   * name regular expression.
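   * <p/>
   * For example, to list completed snapshots of tables whose names start with <code>test</code> and
   * whose snapshot names start with <code>backup</code> (a sketch, assuming an existing
   * <code>Admin</code> instance named <code>admin</code>):
   *
   * <pre>
   * List&lt;SnapshotDescription&gt; snapshots =
   *   admin.listTableSnapshots(Pattern.compile("test.*"), Pattern.compile("backup.*"));
   * </pre>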
1763   * @param tableNamePattern    The compiled table name regular expression to match against
1764   * @param snapshotNamePattern The compiled snapshot name regular expression to match against
1765   * @return list of completed SnapshotDescription
1766   * @throws IOException if a remote or network exception occurs
1767   */
1768  List<SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
1769    Pattern snapshotNamePattern) throws IOException;
1770
1771  /**
1772   * Delete an existing snapshot.
1773   * @param snapshotName name of the snapshot
1774   * @throws IOException if a remote or network exception occurs
1775   */
1776  void deleteSnapshot(String snapshotName) throws IOException;
1777
1778  /**
1779   * Delete existing snapshots whose names match the pattern passed.
1780   * @param pattern pattern for names of the snapshot to match
1781   * @throws IOException if a remote or network exception occurs
1782   */
1783  void deleteSnapshots(Pattern pattern) throws IOException;
1784
1785  /**
1786   * Delete all existing snapshots matching the given table name regular expression and snapshot
1787   * name regular expression.
1788   * @param tableNamePattern    The compiled table name regular expression to match against
1789   * @param snapshotNamePattern The compiled snapshot name regular expression to match against
1790   * @throws IOException if a remote or network exception occurs
1791   */
1792  void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern)
1793    throws IOException;
1794
1795  /**
1796   * Apply the new quota settings.
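   * <p/>
   * A sketch of applying a throttle quota (the factory helper, table name and limit shown are
   * illustrative; assumes an existing <code>Admin</code> instance named <code>admin</code>):
   *
   * <pre>
   * admin.setQuota(QuotaSettingsFactory.throttleTable(TableName.valueOf("mytable"),
   *   ThrottleType.REQUEST_NUMBER, 1000, TimeUnit.SECONDS));
   * </pre>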
1797   * @param quota the quota settings
1798   * @throws IOException if a remote or network exception occurs
1799   */
1800  void setQuota(QuotaSettings quota) throws IOException;
1801
1802  /**
1803   * List the quotas based on the filter.
1804   * @param filter the quota settings filter
1805   * @return the QuotaSetting list
1806   * @throws IOException if a remote or network exception occurs
1807   */
1808  List<QuotaSettings> getQuota(QuotaFilter filter) throws IOException;
1809
1810  /**
1811   * Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel}
1812   * instance connected to the active master.
1813   * <p/>
1814   * The obtained {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} instance can be
1815   * used to access a published coprocessor
1816   * {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} using standard protobuf service
1817   * invocations:
1818   * <p/>
1819   * <div style="background-color: #cccccc; padding: 2px"> <blockquote>
1820   *
1821   * <pre>
1822   * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
1823   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
1824   * MyCallRequest request = MyCallRequest.newBuilder()
1825   *     ...
1826   *     .build();
1827   * MyCallResponse response = service.myCall(null, request);
1828   * </pre>
1829   *
1830   * </blockquote> </div>
1831   * @return A MasterCoprocessorRpcChannel instance
1832   * @deprecated since 3.0.0, will be removed in 4.0.0. This is too low level; please stop using it.
1833   *             Use the coprocessorService methods in {@link AsyncAdmin} instead.
1834   */
1835  @Deprecated
1836  CoprocessorRpcChannel coprocessorService();
1837
1838  /**
1839   * Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel}
1840   * instance connected to the passed region server.
1841   * <p/>
1842   * The obtained {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} instance can be
1843   * used to access a published coprocessor
1844   * {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} using standard protobuf service
1845   * invocations:
1846   * <p/>
1847   * <div style="background-color: #cccccc; padding: 2px"> <blockquote>
1848   *
1849   * <pre>
1850   * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
1851   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
1852   * MyCallRequest request = MyCallRequest.newBuilder()
1853   *     ...
1854   *     .build();
1855   * MyCallResponse response = service.myCall(null, request);
1856   * </pre>
1857   *
1858   * </blockquote> </div>
1859   * @param serverName the server name to which the endpoint call is made
1860   * @return A RegionServerCoprocessorRpcChannel instance
1861   * @deprecated since 3.0.0, will be removed in 4.0.0. This is too low level; please stop using it.
1862   *             Use the coprocessorService methods in {@link AsyncAdmin} instead.
1863   */
1864  @Deprecated
1865  CoprocessorRpcChannel coprocessorService(ServerName serverName);
1866
1867  /**
1868   * Update the configuration and trigger an online config change on the regionserver.
1869   * @param server The server whose config needs to be updated.
1870   * @throws IOException if a remote or network exception occurs
1871   */
1872  void updateConfiguration(ServerName server) throws IOException;
1873
1874  /**
1875   * Update the configuration and trigger an online config change on all the regionservers.
1876   * @throws IOException if a remote or network exception occurs
1877   */
1878  void updateConfiguration() throws IOException;
1879
1880  /**
1881   * Update the configuration and trigger an online config change on all the regionservers in the
1882   * RSGroup.
1883   * @param groupName the group name
1884   * @throws IOException if a remote or network exception occurs
1885   */
1886  void updateConfiguration(String groupName) throws IOException;
1887
1888  /**
1889   * Get the info port of the current master if one is available.
1890   * @return master info port
1891   * @throws IOException if a remote or network exception occurs
1892   */
1893  default int getMasterInfoPort() throws IOException {
1894    return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)).getMasterInfoPort();
1895  }
1896
1897  /**
1898   * Return the set of supported security capabilities.
1899   * @throws IOException if a remote or network exception occurs
1900   */
1901  List<SecurityCapability> getSecurityCapabilities() throws IOException;
1902
1903  /**
1904   * Turn the split switch on or off.
1905   * @param enabled     enabled or not
1906   * @param synchronous If <code>true</code>, wait for any outstanding split() call to return
1907   *                    before this method returns.
1908   * @return Previous switch value
1909   * @throws IOException if a remote or network exception occurs
1910   */
1911  boolean splitSwitch(boolean enabled, boolean synchronous) throws IOException;
1912
1913  /**
1914   * Turn the merge switch on or off.
1915   * @param enabled     enabled or not
1916   * @param synchronous If <code>true</code>, wait for any outstanding merge() call to return
1917   *                    before this method returns.
1918   * @return Previous switch value
1919   * @throws IOException if a remote or network exception occurs
1920   */
1921  boolean mergeSwitch(boolean enabled, boolean synchronous) throws IOException;
1922
1923  /**
1924   * Query the current state of the split switch.
1925   * @return <code>true</code> if the switch is enabled, <code>false</code> otherwise.
1926   * @throws IOException if a remote or network exception occurs
1927   */
1928  boolean isSplitEnabled() throws IOException;
1929
1930  /**
1931   * Query the current state of the merge switch.
1932   * @return <code>true</code> if the switch is enabled, <code>false</code> otherwise.
1933   * @throws IOException if a remote or network exception occurs
1934   */
1935  boolean isMergeEnabled() throws IOException;
1936
1937  /**
1938   * Add a new replication peer for replicating data to slave cluster.
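   * <p/>
   * A minimal sketch (the peer id and cluster key are illustrative; assumes an existing
   * <code>Admin</code> instance named <code>admin</code>):
   *
   * <pre>
   * ReplicationPeerConfig peerConfig =
   *   ReplicationPeerConfig.newBuilder().setClusterKey("zk1,zk2,zk3:2181:/hbase").build();
   * admin.addReplicationPeer("1", peerConfig);
   * </pre>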
1939   * @param peerId     a short name that identifies the peer
1940   * @param peerConfig configuration for the replication peer
1941   * @throws IOException if a remote or network exception occurs
1942   */
1943  default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
1944    throws IOException {
1945    addReplicationPeer(peerId, peerConfig, true);
1946  }
1947
1948  /**
1949   * Add a new replication peer for replicating data to slave cluster.
1950   * @param peerId     a short name that identifies the peer
1951   * @param peerConfig configuration for the replication peer
1952   * @param enabled    peer state, true if ENABLED and false if DISABLED
1953   * @throws IOException if a remote or network exception occurs
1954   */
1955  default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
1956    throws IOException {
1957    get(addReplicationPeerAsync(peerId, peerConfig, enabled), getSyncWaitTimeout(),
1958      TimeUnit.MILLISECONDS);
1959  }
1960
1961  /**
1962   * Add a new replication peer but does not block and wait for it.
1963   * <p/>
1964   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
1965   * ExecutionException if there was an error while executing the operation or TimeoutException in
1966   * case the wait timeout was not long enough to allow the operation to complete.
1967   * @param peerId     a short name that identifies the peer
1968   * @param peerConfig configuration for the replication peer
1969   * @return the result of the async operation
1970   * @throws IOException if a remote or network exception occurs
1971   */
1972  default Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig)
1973    throws IOException {
1974    return addReplicationPeerAsync(peerId, peerConfig, true);
1975  }
1976
1977  /**
1978   * Add a new replication peer but does not block and wait for it.
1979   * <p>
1980   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
1981   * ExecutionException if there was an error while executing the operation or TimeoutException in
1982   * case the wait timeout was not long enough to allow the operation to complete.
1983   * @param peerId     a short name that identifies the peer
1984   * @param peerConfig configuration for the replication peer
1985   * @param enabled    peer state, true if ENABLED and false if DISABLED
1986   * @return the result of the async operation
1987   * @throws IOException if a remote or network exception occurs
1988   */
1989  Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig,
1990    boolean enabled) throws IOException;
1991
1992  /**
1993   * Remove a peer and stop the replication.
1994   * @param peerId a short name that identifies the peer
1995   * @throws IOException if a remote or network exception occurs
1996   */
1997  default void removeReplicationPeer(String peerId) throws IOException {
1998    get(removeReplicationPeerAsync(peerId), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
1999  }
2000
2001  /**
2002   * Remove a replication peer but does not block and wait for it.
2003   * <p>
2004   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
2005   * ExecutionException if there was an error while executing the operation or TimeoutException in
2006   * case the wait timeout was not long enough to allow the operation to complete.
2007   * @param peerId a short name that identifies the peer
2008   * @return the result of the async operation
2009   * @throws IOException if a remote or network exception occurs
2010   */
2011  Future<Void> removeReplicationPeerAsync(String peerId) throws IOException;
2012
2013  /**
2014   * Restart the replication stream to the specified peer.
2015   * @param peerId a short name that identifies the peer
2016   * @throws IOException if a remote or network exception occurs
2017   */
2018  default void enableReplicationPeer(String peerId) throws IOException {
2019    get(enableReplicationPeerAsync(peerId), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
2020  }
2021
2022  /**
2023   * Enable a replication peer but does not block and wait for it.
2024   * <p>
2025   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
2026   * ExecutionException if there was an error while executing the operation or TimeoutException in
2027   * case the wait timeout was not long enough to allow the operation to complete.
2028   * @param peerId a short name that identifies the peer
2029   * @return the result of the async operation
2030   * @throws IOException if a remote or network exception occurs
2031   */
2032  Future<Void> enableReplicationPeerAsync(String peerId) throws IOException;
2033
2034  /**
2035   * Stop the replication stream to the specified peer.
2036   * @param peerId a short name that identifies the peer
2037   * @throws IOException if a remote or network exception occurs
2038   */
2039  default void disableReplicationPeer(String peerId) throws IOException {
2040    get(disableReplicationPeerAsync(peerId), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
2041  }
2042
2043  /**
2044   * Disable a replication peer but does not block and wait for it.
2045   * <p/>
2046   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
2047   * ExecutionException if there was an error while executing the operation or TimeoutException in
2048   * case the wait timeout was not long enough to allow the operation to complete.
2049   * @param peerId a short name that identifies the peer
2050   * @return the result of the async operation
2051   * @throws IOException if a remote or network exception occurs
2052   */
2053  Future<Void> disableReplicationPeerAsync(String peerId) throws IOException;
2054
2055  /**
2056   * Returns the configured ReplicationPeerConfig for the specified peer.
2057   * @param peerId a short name that identifies the peer
2058   * @return ReplicationPeerConfig for the peer
2059   * @throws IOException if a remote or network exception occurs
2060   */
2061  ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws IOException;
2062
2063  /**
2064   * Update the peerConfig for the specified peer.
2065   * @param peerId     a short name that identifies the peer
2066   * @param peerConfig new config for the replication peer
2067   * @throws IOException if a remote or network exception occurs
2068   */
2069  default void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
2070    throws IOException {
2071    get(updateReplicationPeerConfigAsync(peerId, peerConfig), getSyncWaitTimeout(),
2072      TimeUnit.MILLISECONDS);
2073  }
2074
2075  /**
2076   * Update the peerConfig for the specified peer but does not block and wait for it.
2077   * <p/>
2078   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
2079   * ExecutionException if there was an error while executing the operation or TimeoutException in
2080   * case the wait timeout was not long enough to allow the operation to complete.
2081   * @param peerId     a short name that identifies the peer
2082   * @param peerConfig new config for the replication peer
2083   * @return the result of the async operation
2084   * @throws IOException if a remote or network exception occurs
2085   */
2086  Future<Void> updateReplicationPeerConfigAsync(String peerId, ReplicationPeerConfig peerConfig)
2087    throws IOException;
2088
2089  /**
2090   * Append replicable table and column family entries to the config of the specified peer.
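   * <p/>
   * For example (the table and column family names are illustrative; assumes an existing
   * <code>Admin</code> instance named <code>admin</code>):
   *
   * <pre>
   * Map&lt;TableName, List&lt;String&gt;&gt; tableCfs = new HashMap&lt;&gt;();
   * tableCfs.put(TableName.valueOf("mytable"), Arrays.asList("cf1", "cf2"));
   * admin.appendReplicationPeerTableCFs("1", tableCfs);
   * </pre>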
2091   * @param id       a short name that identifies the cluster
2092   * @param tableCfs A map from tableName to column family names
2093   * @throws ReplicationException if tableCfs conflicts with the existing config
2094   * @throws IOException          if a remote or network exception occurs
2095   */
2096  default void appendReplicationPeerTableCFs(String id, Map<TableName, List<String>> tableCfs)
2097    throws ReplicationException, IOException {
2098    if (tableCfs == null) {
2099      throw new ReplicationException("tableCfs is null");
2100    }
2101    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
2102    ReplicationPeerConfig newPeerConfig =
2103      ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(tableCfs, peerConfig);
2104    updateReplicationPeerConfig(id, newPeerConfig);
2105  }
2106
2107  /**
2108   * Remove some table-cfs from config of the specified peer.
2109   * @param id       a short name that identifies the cluster
2110   * @param tableCfs A map from tableName to column family names
2111   * @throws ReplicationException if tableCfs conflicts with the existing config
2112   * @throws IOException          if a remote or network exception occurs
2113   */
2114  default void removeReplicationPeerTableCFs(String id, Map<TableName, List<String>> tableCfs)
2115    throws ReplicationException, IOException {
2116    if (tableCfs == null) {
2117      throw new ReplicationException("tableCfs is null");
2118    }
2119    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
2120    ReplicationPeerConfig newPeerConfig =
2121      ReplicationPeerConfigUtil.removeTableCFsFromReplicationPeerConfig(tableCfs, peerConfig, id);
2122    updateReplicationPeerConfig(id, newPeerConfig);
2123  }
2124
2125  /**
2126   * Return a list of replication peers.
2127   * @return a list of replication peers description
2128   * @throws IOException if a remote or network exception occurs
2129   */
2130  List<ReplicationPeerDescription> listReplicationPeers() throws IOException;
2131
2132  /**
2133   * Return a list of replication peers.
2134   * @param pattern The compiled regular expression to match peer id
2135   * @return a list of replication peers description
2136   * @throws IOException if a remote or network exception occurs
2137   */
2138  List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws IOException;
2139
2140  /**
2141   * Transit current cluster to a new state in a synchronous replication peer.
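   * <p/>
   * For example, to move this cluster to the standby role for a peer (the peer id is illustrative;
   * assumes an existing <code>Admin</code> instance named <code>admin</code>):
   *
   * <pre>
   * admin.transitReplicationPeerSyncReplicationState("1", SyncReplicationState.STANDBY);
   * </pre>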
2142   * @param peerId a short name that identifies the peer
2143   * @param state  a new state of current cluster
2144   * @throws IOException if a remote or network exception occurs
2145   */
2146  default void transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
2147    throws IOException {
2148    get(transitReplicationPeerSyncReplicationStateAsync(peerId, state), getSyncWaitTimeout(),
2149      TimeUnit.MILLISECONDS);
2150  }
2151
2152  /**
2153   * Transit current cluster to a new state in a synchronous replication peer. But does not block
2154   * and wait for it.
2155   * <p>
2156   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
2157   * ExecutionException if there was an error while executing the operation or TimeoutException in
2158   * case the wait timeout was not long enough to allow the operation to complete.
2159   * @param peerId a short name that identifies the peer
2160   * @param state  a new state of current cluster
2161   * @throws IOException if a remote or network exception occurs
2162   */
2163  Future<Void> transitReplicationPeerSyncReplicationStateAsync(String peerId,
2164    SyncReplicationState state) throws IOException;
2165
2166  /**
2167   * Get the current cluster state in a synchronous replication peer.
2168   * @param peerId a short name that identifies the peer
2169   * @return the current cluster state
2170   * @throws IOException if a remote or network exception occurs
2171   */
2172  default SyncReplicationState getReplicationPeerSyncReplicationState(String peerId)
2173    throws IOException {
2174    List<ReplicationPeerDescription> peers = listReplicationPeers(Pattern.compile(peerId));
2175    if (peers.isEmpty() || !peers.get(0).getPeerId().equals(peerId)) {
2176      throw new IOException("Replication peer " + peerId + " does not exist");
2177    }
2178    return peers.get(0).getSyncReplicationState();
2179  }
2180
2181  /**
2182   * Check if a replication peer is enabled.
2183   * @param peerId id of replication peer to check
2184   * @return <code>true</code> if replication peer is enabled
2185   * @throws IOException if a remote or network exception occurs
2186   */
2187  boolean isReplicationPeerEnabled(String peerId) throws IOException;
2188
2189  /**
2190   * Mark region server(s) as decommissioned to prevent additional regions from getting assigned to
2191   * them. Optionally unload the regions on the servers. If there are multiple servers to be
2192   * decommissioned, decommissioning them at the same time can prevent wasteful region movements.
2193   * Region unloading is asynchronous.
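   * <p/>
   * A short sketch (the server name is illustrative; assumes an existing <code>Admin</code> instance
   * named <code>admin</code>):
   *
   * <pre>
   * ServerName server = ServerName.valueOf("example.org,16020,1633072800000");
   * admin.decommissionRegionServers(Collections.singletonList(server), true);
   * </pre>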
2194   * @param servers The list of servers to decommission.
2195   * @param offload True to offload the regions from the decommissioned servers
2196   * @throws IOException if a remote or network exception occurs
2197   */
2198  void decommissionRegionServers(List<ServerName> servers, boolean offload) throws IOException;
2199
2200  /**
2201   * List region servers marked as decommissioned, which cannot be assigned regions.
2202   * @return List of decommissioned region servers.
2203   * @throws IOException if a remote or network exception occurs
2204   */
2205  List<ServerName> listDecommissionedRegionServers() throws IOException;
2206
2207  /**
2208   * Remove decommission marker from a region server to allow regions assignments. Load regions onto
2209   * the server if a list of regions is given. Region loading is asynchronous.
2210   * @param server             The server to recommission.
2211   * @param encodedRegionNames Regions to load onto the server.
2212   * @throws IOException if a remote or network exception occurs
2213   */
2214  void recommissionRegionServer(ServerName server, List<byte[]> encodedRegionNames)
2215    throws IOException;
2216
2217  /**
2218   * Find all tables and column families that are replicated from this cluster.
2219   * @return the replicated table-cfs list of this cluster.
2220   * @throws IOException if a remote or network exception occurs
2221   */
2222  List<TableCFs> listReplicatedTableCFs() throws IOException;
2223
2224  /**
2225   * Enable a table's replication switch.
2226   * @param tableName name of the table
2227   * @throws IOException if a remote or network exception occurs
2228   */
2229  void enableTableReplication(TableName tableName) throws IOException;
2230
2231  /**
2232   * Disable a table's replication switch.
2233   * @param tableName name of the table
2234   * @throws IOException if a remote or network exception occurs
2235   */
2236  void disableTableReplication(TableName tableName) throws IOException;
2237
2238  /**
2239   * Clear compacting queues on a regionserver.
2240   * @param serverName the region server name
   * @param queues     the set of queue names
2242   * @throws IOException if a remote or network exception occurs
2243   */
2244  void clearCompactionQueues(ServerName serverName, Set<String> queues)
2245    throws IOException, InterruptedException;
2246
2247  /**
2248   * List dead region servers.
   * @return List of dead region servers.
   * @throws IOException if a remote or network exception occurs
   */
2251  default List<ServerName> listDeadServers() throws IOException {
2252    return getClusterMetrics(EnumSet.of(Option.DEAD_SERVERS)).getDeadServerNames();
2253  }
2254
2255  /**
2256   * List unknown region servers.
   * @return List of unknown region servers.
   * @throws IOException if a remote or network exception occurs
   */
2259  default List<ServerName> listUnknownServers() throws IOException {
2260    return getClusterMetrics(EnumSet.of(Option.UNKNOWN_SERVERS)).getUnknownServerNames();
2261  }
2262
2263  /**
2264   * Clear dead region servers from master.
2265   * @param servers list of dead region servers.
2266   * @throws IOException if a remote or network exception occurs
2267   * @return List of servers that are not cleared
2268   */
2269  List<ServerName> clearDeadServers(List<ServerName> servers) throws IOException;
2270
2271  /**
   * Create a new table by cloning the existing table's schema.
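   * <p>
   * A minimal sketch (assuming {@code admin} is an instance of this interface; the table names are
   * illustrative):
   *
   * <pre>{@code
   * // Create "orders_clone" with the same schema as "orders", keeping the original split points.
   * admin.cloneTableSchema(TableName.valueOf("orders"), TableName.valueOf("orders_clone"), true);
   * }</pre>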
2273   * @param tableName      name of the table to be cloned
2274   * @param newTableName   name of the new table where the table will be created
2275   * @param preserveSplits True if the splits should be preserved
2276   * @throws IOException if a remote or network exception occurs
2277   */
2278  void cloneTableSchema(TableName tableName, TableName newTableName, boolean preserveSplits)
2279    throws IOException;
2280
2281  /**
2282   * Switch the rpc throttle enable state.
2283   * @param enable Set to <code>true</code> to enable, <code>false</code> to disable.
2284   * @return Previous rpc throttle enabled value
2285   * @throws IOException if a remote or network exception occurs
2286   */
2287  boolean switchRpcThrottle(boolean enable) throws IOException;
2288
2289  /**
   * Check whether the rpc throttle is enabled.
2291   * @return True if rpc throttle is enabled
2292   * @throws IOException if a remote or network exception occurs
2293   */
2294  boolean isRpcThrottleEnabled() throws IOException;
2295
2296  /**
   * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quotas can be
   * exceeded if the region server has available quota.
2299   * @param enable Set to <code>true</code> to enable, <code>false</code> to disable.
2300   * @return Previous exceed throttle enabled value
2301   * @throws IOException if a remote or network exception occurs
2302   */
2303  boolean exceedThrottleQuotaSwitch(final boolean enable) throws IOException;
2304
2305  /**
2306   * Fetches the table sizes on the filesystem as tracked by the HBase Master.
   * @return a map from table name to the size of that table on the filesystem
   * @throws IOException if a remote or network exception occurs
2308   */
2309  Map<TableName, Long> getSpaceQuotaTableSizes() throws IOException;
2310
2311  /**
   * Fetches the {@link SpaceQuotaSnapshotView}s observed by a RegionServer.
   * @param serverName the RegionServer to fetch the space quota snapshots from
   * @return a map from table name to the space quota snapshot observed by that RegionServer
   * @throws IOException if a remote or network exception occurs
2314   */
2315  Map<TableName, ? extends SpaceQuotaSnapshotView>
2316    getRegionServerSpaceQuotaSnapshots(ServerName serverName) throws IOException;
2317
2318  /**
2319   * Returns the Master's view of a quota on the given {@code namespace} or null if the Master has
2320   * no quota information on that namespace.
2321   * @throws IOException if a remote or network exception occurs
2322   */
2323  SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(String namespace) throws IOException;
2324
2325  /**
2326   * Returns the Master's view of a quota on the given {@code tableName} or null if the Master has
2327   * no quota information on that table.
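   * <p>
   * A minimal sketch (assuming {@code admin} is an instance of this interface; the table name is
   * illustrative):
   *
   * <pre>{@code
   * SpaceQuotaSnapshotView snapshot =
   *   admin.getCurrentSpaceQuotaSnapshot(TableName.valueOf("orders"));
   * if (snapshot != null) {
   *   // Compare the current usage against the configured limit.
   *   long usage = snapshot.getUsage();
   *   long limit = snapshot.getLimit();
   * }
   * }</pre>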
2328   * @throws IOException if a remote or network exception occurs
2329   */
2330  SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(TableName tableName) throws IOException;
2331
2332  /**
2333   * Grants user specific permissions
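   * <p>
   * A minimal sketch (assuming {@code admin} is an instance of this interface; the user and table
   * names are illustrative):
   *
   * <pre>{@code
   * UserPermission permission = new UserPermission("bob",
   *   Permission.newBuilder(TableName.valueOf("orders")).withActions(Permission.Action.READ).build());
   * // Merge the READ grant with any permissions "bob" already has on the table.
   * admin.grant(permission, true);
   * }</pre>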
2334   * @param userPermission           user name and the specific permission
   * @param mergeExistingPermissions If set to false, later granted permissions will override
   *                                 previously granted permissions. Otherwise, they will be merged
   *                                 with previously granted permissions.
2338   * @throws IOException if a remote or network exception occurs
2339   */
2340  void grant(UserPermission userPermission, boolean mergeExistingPermissions) throws IOException;
2341
2342  /**
2343   * Revokes user specific permissions
2344   * @param userPermission user name and the specific permission
2345   * @throws IOException if a remote or network exception occurs
2346   */
2347  void revoke(UserPermission userPermission) throws IOException;
2348
2349  /**
   * Get the global, namespace or table permissions for a user.
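   * <p>
   * A minimal sketch (assuming {@code admin} is an instance of this interface; the user and table
   * names are illustrative):
   *
   * <pre>{@code
   * List<UserPermission> permissions = admin.getUserPermissions(
   *   GetUserPermissionsRequest.newBuilder(TableName.valueOf("orders")).withUserName("bob").build());
   * }</pre>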
   * @param getUserPermissionsRequest A request specifying which user's global, namespace or table
   *                                  permissions are needed
2353   * @return The user and permission list
2354   * @throws IOException if a remote or network exception occurs
2355   */
2356  List<UserPermission> getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest)
2357    throws IOException;
2358
2359  /**
2360   * Check if the user has specific permissions
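   * <p>
   * A minimal sketch (assuming {@code admin} is an instance of this interface; the user and table
   * names are illustrative):
   *
   * <pre>{@code
   * List<Permission> permissions = ImmutableList.of(
   *   Permission.newBuilder(TableName.valueOf("orders")).withActions(Permission.Action.WRITE).build());
   * // Each entry in the result corresponds to the permission at the same index.
   * List<Boolean> hasPermissions = admin.hasUserPermissions("bob", permissions);
   * }</pre>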
2361   * @param userName    the user name
2362   * @param permissions the specific permission list
2363   * @return True if user has the specific permissions
2364   * @throws IOException if a remote or network exception occurs
2365   */
2366  List<Boolean> hasUserPermissions(String userName, List<Permission> permissions)
2367    throws IOException;
2368
2369  /**
   * Check if the calling user has specific permissions
2371   * @param permissions the specific permission list
2372   * @return True if user has the specific permissions
2373   * @throws IOException if a remote or network exception occurs
2374   */
2375  default List<Boolean> hasUserPermissions(List<Permission> permissions) throws IOException {
2376    return hasUserPermissions(null, permissions);
2377  }
2378
2379  /**
2380   * Turn on or off the auto snapshot cleanup based on TTL.
2381   * @param on          Set to <code>true</code> to enable, <code>false</code> to disable.
2382   * @param synchronous If <code>true</code>, it waits until current snapshot cleanup is completed,
2383   *                    if outstanding.
2384   * @return Previous auto snapshot cleanup value
2385   * @throws IOException if a remote or network exception occurs
2386   */
2387  boolean snapshotCleanupSwitch(final boolean on, final boolean synchronous) throws IOException;
2388
2389  /**
2390   * Query the current state of the auto snapshot cleanup based on TTL.
2391   * @return <code>true</code> if the auto snapshot cleanup is enabled, <code>false</code>
2392   *         otherwise.
2393   * @throws IOException if a remote or network exception occurs
2394   */
2395  boolean isSnapshotCleanupEnabled() throws IOException;
2396
2397  /**
2398   * Retrieves online slow/large RPC logs from the provided list of RegionServers
2399   * @param serverNames    Server names to get slowlog responses from
2400   * @param logQueryFilter filter to be used if provided (determines slow / large RPC logs)
2401   * @return online slowlog response list
2402   * @throws IOException if a remote or network exception occurs
2403   * @deprecated since 2.4.0 and will be removed in 4.0.0. Use
2404   *             {@link #getLogEntries(Set, String, ServerType, int, Map)} instead.
2405   */
2406  @Deprecated
2407  default List<OnlineLogRecord> getSlowLogResponses(final Set<ServerName> serverNames,
2408    final LogQueryFilter logQueryFilter) throws IOException {
2409    String logType;
2410    if (LogQueryFilter.Type.LARGE_LOG.equals(logQueryFilter.getType())) {
2411      logType = "LARGE_LOG";
2412    } else {
2413      logType = "SLOW_LOG";
2414    }
2415    Map<String, Object> filterParams = new HashMap<>();
2416    filterParams.put("regionName", logQueryFilter.getRegionName());
2417    filterParams.put("clientAddress", logQueryFilter.getClientAddress());
2418    filterParams.put("tableName", logQueryFilter.getTableName());
2419    filterParams.put("userName", logQueryFilter.getUserName());
2420    filterParams.put("filterByOperator", logQueryFilter.getFilterByOperator().toString());
2421    List<LogEntry> logEntries = getLogEntries(serverNames, logType, ServerType.REGION_SERVER,
2422      logQueryFilter.getLimit(), filterParams);
2423    return logEntries.stream().map(logEntry -> (OnlineLogRecord) logEntry)
2424      .collect(Collectors.toList());
2425  }
2426
2427  /**
2428   * Clears online slow/large RPC logs from the provided list of RegionServers
   * @param serverNames Set of server names to clear slowlog responses from
2430   * @return List of booleans representing if online slowlog response buffer is cleaned from each
2431   *         RegionServer
2432   * @throws IOException if a remote or network exception occurs
2433   */
2434  List<Boolean> clearSlowLogResponses(final Set<ServerName> serverNames) throws IOException;
2435
2436  /**
2437   * Creates a new RegionServer group with the given name
2438   * @param groupName the name of the group
2439   * @throws IOException if a remote or network exception occurs
2440   */
2441  void addRSGroup(String groupName) throws IOException;
2442
2443  /**
2444   * Get group info for the given group name
2445   * @param groupName the group name
2446   * @return group info
2447   * @throws IOException if a remote or network exception occurs
2448   */
2449  RSGroupInfo getRSGroup(String groupName) throws IOException;
2450
2451  /**
2452   * Get group info for the given hostPort
   * @param hostPort HostPort to get RSGroupInfo for
   * @return group info
   * @throws IOException if a remote or network exception occurs
2455   */
2456  RSGroupInfo getRSGroup(Address hostPort) throws IOException;
2457
2458  /**
2459   * Get group info for the given table
   * @param tableName table name to get RSGroupInfo for
   * @return group info
   * @throws IOException if a remote or network exception occurs
2462   */
2463  RSGroupInfo getRSGroup(TableName tableName) throws IOException;
2464
2465  /**
2466   * Lists current set of RegionServer groups
2467   * @throws IOException if a remote or network exception occurs
2468   */
2469  List<RSGroupInfo> listRSGroups() throws IOException;
2470
2471  /**
2472   * Get all tables in this RegionServer group.
2473   * @param groupName the group name
2474   * @throws IOException if a remote or network exception occurs
2475   * @see #getConfiguredNamespacesAndTablesInRSGroup(String)
2476   */
2477  List<TableName> listTablesInRSGroup(String groupName) throws IOException;
2478
2479  /**
2480   * Get the namespaces and tables which have this RegionServer group in descriptor.
2481   * <p/>
   * The difference between this method and {@link #listTablesInRSGroup(String)} is that this
   * method will not include a table which is actually in this RegionServer group but does not have
   * the RegionServer group configuration in its {@link TableDescriptor}. For example, if we have a
   * group 'A' and we place namespace 'nsA' in this group, then all the tables under this namespace
   * will be in group 'A', but this method will return only the namespace 'nsA' and not these
   * tables, while {@link #listTablesInRSGroup(String)} will return all of them.
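   * <p>
   * A minimal sketch (assuming {@code admin} is an instance of this interface; the group name is
   * illustrative):
   *
   * <pre>{@code
   * Pair<List<String>, List<TableName>> configured =
   *   admin.getConfiguredNamespacesAndTablesInRSGroup("groupA");
   * List<String> namespaces = configured.getFirst();
   * List<TableName> tables = configured.getSecond();
   * }</pre>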
2488   * @param groupName the group name
2489   * @throws IOException if a remote or network exception occurs
2490   * @see #listTablesInRSGroup(String)
2491   */
2492  Pair<List<String>, List<TableName>> getConfiguredNamespacesAndTablesInRSGroup(String groupName)
2493    throws IOException;
2494
2495  /**
2496   * Remove RegionServer group associated with the given name
2497   * @param groupName the group name
2498   * @throws IOException if a remote or network exception occurs
2499   */
2500  void removeRSGroup(String groupName) throws IOException;
2501
2502  /**
   * Remove decommissioned servers from the RegionServer group.
   * <ol>
   * <li>Sometimes a server aborts due to a hardware failure and must be taken offline for repair,
   * or servers need to be moved to join another cluster; such servers should be removed from the
   * group.</li>
   * <li>Dead, recovering and live servers will be disallowed.</li>
   * </ol>
2507   * @param servers set of servers to remove
2508   * @throws IOException if a remote or network exception occurs
2509   */
2510  void removeServersFromRSGroup(Set<Address> servers) throws IOException;
2511
2512  /**
2513   * Move given set of servers to the specified target RegionServer group
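   * <p>
   * A minimal sketch (assuming {@code admin} is an instance of this interface; the host, port and
   * group name are illustrative):
   *
   * <pre>{@code
   * Set<Address> servers = Collections.singleton(Address.fromParts("rs1.example.com", 16020));
   * admin.moveServersToRSGroup(servers, "groupA");
   * }</pre>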
2514   * @param servers     set of servers to move
2515   * @param targetGroup the group to move servers to
2516   * @throws IOException if a remote or network exception occurs
2517   */
2518  void moveServersToRSGroup(Set<Address> servers, String targetGroup) throws IOException;
2519
2520  /**
2521   * Set the RegionServer group for tables
2522   * @param tables    tables to set group for
2523   * @param groupName group name for tables
2524   * @throws IOException if a remote or network exception occurs
2525   */
2526  void setRSGroup(Set<TableName> tables, String groupName) throws IOException;
2527
2528  /**
2529   * Balance regions in the given RegionServer group
2530   * @param groupName the group name
2531   * @return BalanceResponse details about the balancer run
2532   * @throws IOException if a remote or network exception occurs
2533   */
2534  default BalanceResponse balanceRSGroup(String groupName) throws IOException {
2535    return balanceRSGroup(groupName, BalanceRequest.defaultInstance());
2536  }
2537
2538  /**
2539   * Balance regions in the given RegionServer group, running based on the given
2540   * {@link BalanceRequest}.
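   * <p>
   * A minimal sketch (assuming {@code admin} is an instance of this interface; the group name is
   * illustrative):
   *
   * <pre>{@code
   * // Dry-run the balancer for the group to see what it would do without moving regions.
   * BalanceRequest request = BalanceRequest.newBuilder().setDryRun(true).build();
   * BalanceResponse response = admin.balanceRSGroup("groupA", request);
   * }</pre>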
   * @param groupName the group name
   * @param request   the {@link BalanceRequest} options to apply to the balancer run
   * @return BalanceResponse details about the balancer run
   * @throws IOException if a remote or network exception occurs
2542   */
2543  BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException;
2544
2545  /**
2546   * Rename rsgroup
2547   * @param oldName old rsgroup name
2548   * @param newName new rsgroup name
2549   * @throws IOException if a remote or network exception occurs
2550   */
2551  void renameRSGroup(String oldName, String newName) throws IOException;
2552
2553  /**
2554   * Update RSGroup configuration
2555   * @param groupName     the group name
2556   * @param configuration new configuration of the group name to be set
2557   * @throws IOException if a remote or network exception occurs
2558   */
2559  void updateRSGroupConfig(String groupName, Map<String, String> configuration) throws IOException;
2560
2561  /**
2562   * Retrieve recent online records from HMaster / RegionServers. Examples include slow/large RPC
2563   * logs, balancer decisions by master.
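   * <p>
   * A minimal sketch (assuming {@code admin} is an instance of this interface and
   * {@code serverNames} is a set of RegionServer names; the filter value is illustrative):
   *
   * <pre>{@code
   * Map<String, Object> filterParams = new HashMap<>();
   * filterParams.put("tableName", "orders");
   * // Fetch up to 100 slow RPC log records from the given RegionServers.
   * List<LogEntry> slowLogs =
   *   admin.getLogEntries(serverNames, "SLOW_LOG", ServerType.REGION_SERVER, 100, filterParams);
   * }</pre>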
   * @param serverNames  servers to retrieve records from, useful for records maintained by
   *                     RegionServers as a specific server can be selected. For
   *                     serverType=MASTER, logs will only come from the currently active master.
2567   * @param logType      string representing type of log records
2568   * @param serverType   enum for server type: HMaster or RegionServer
2569   * @param limit        put a limit to list of records that server should send in response
2570   * @param filterParams additional filter params
2571   * @return Log entries representing online records from servers
2572   * @throws IOException if a remote or network exception occurs
2573   */
2574  List<LogEntry> getLogEntries(Set<ServerName> serverNames, String logType, ServerType serverType,
2575    int limit, Map<String, Object> filterParams) throws IOException;
2576
2577  /**
   * Flush the master local region.
   * @throws IOException if a remote or network exception occurs
2579   */
2580  void flushMasterStore() throws IOException;
2581}