1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.client;
20  
21  import java.io.Closeable;
22  import java.io.IOException;
23  import java.io.InterruptedIOException;
24  import java.net.SocketTimeoutException;
25  import java.util.ArrayList;
26  import java.util.Arrays;
27  import java.util.HashMap;
28  import java.util.LinkedList;
29  import java.util.List;
30  import java.util.Map;
31  import java.util.Map.Entry;
32  import java.util.concurrent.atomic.AtomicInteger;
33  import java.util.concurrent.atomic.AtomicReference;
34  import java.util.concurrent.ExecutionException;
35  import java.util.concurrent.Future;
36  import java.util.concurrent.TimeUnit;
37  import java.util.concurrent.TimeoutException;
38  import java.util.regex.Pattern;
39  
40  import org.apache.commons.logging.Log;
41  import org.apache.commons.logging.LogFactory;
42  import org.apache.hadoop.conf.Configuration;
43  import org.apache.hadoop.hbase.Abortable;
44  import org.apache.hadoop.hbase.ClusterStatus;
45  import org.apache.hadoop.hbase.DoNotRetryIOException;
46  import org.apache.hadoop.hbase.HBaseConfiguration;
47  import org.apache.hadoop.hbase.HColumnDescriptor;
48  import org.apache.hadoop.hbase.HConstants;
49  import org.apache.hadoop.hbase.HRegionInfo;
50  import org.apache.hadoop.hbase.HRegionLocation;
51  import org.apache.hadoop.hbase.HTableDescriptor;
52  import org.apache.hadoop.hbase.MasterNotRunningException;
53  import org.apache.hadoop.hbase.MetaTableAccessor;
54  import org.apache.hadoop.hbase.NamespaceDescriptor;
55  import org.apache.hadoop.hbase.NotServingRegionException;
56  import org.apache.hadoop.hbase.ProcedureInfo;
57  import org.apache.hadoop.hbase.RegionException;
58  import org.apache.hadoop.hbase.RegionLocations;
59  import org.apache.hadoop.hbase.ServerName;
60  import org.apache.hadoop.hbase.TableExistsException;
61  import org.apache.hadoop.hbase.TableName;
62  import org.apache.hadoop.hbase.TableNotDisabledException;
63  import org.apache.hadoop.hbase.TableNotEnabledException;
64  import org.apache.hadoop.hbase.TableNotFoundException;
65  import org.apache.hadoop.hbase.UnknownRegionException;
66  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
67  import org.apache.hadoop.hbase.classification.InterfaceAudience;
68  import org.apache.hadoop.hbase.classification.InterfaceStability;
69  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
70  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
71  import org.apache.hadoop.hbase.exceptions.DeserializationException;
72  import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
73  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
74  import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel;
75  import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
76  import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel;
77  import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
78  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
79  import org.apache.hadoop.hbase.protobuf.RequestConverter;
80  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
81  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
82  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
83  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
84  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
85  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
86  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
87  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
88  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
89  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
90  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
91  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
92  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
93  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
94  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
95  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
96  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
97  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
98  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
99  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse;
100 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
101 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
102 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
103 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
104 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
105 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
106 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
107 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
108 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
109 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
110 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
111 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
112 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
113 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
114 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
115 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
116 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
117 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
118 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
119 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
120 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
121 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;
122 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
123 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
124 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
125 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
126 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
127 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
128 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
129 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
130 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
131 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
132 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
133 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
134 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest;
135 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
136 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
137 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
138 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
139 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
140 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
141 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
142 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
143 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
144 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
145 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
146 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
147 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
148 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
149 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
150 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
151 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
152 import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos;
153 import org.apache.hadoop.hbase.quotas.QuotaFilter;
154 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
155 import org.apache.hadoop.hbase.quotas.QuotaSettings;
156 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
157 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
158 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
159 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
160 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
161 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
162 import org.apache.hadoop.hbase.util.Addressing;
163 import org.apache.hadoop.hbase.util.Bytes;
164 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
165 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
166 import org.apache.hadoop.hbase.util.Pair;
167 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
168 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
169 import org.apache.hadoop.ipc.RemoteException;
170 import org.apache.hadoop.util.StringUtils;
171 import org.apache.zookeeper.KeeperException;
172 
173 import com.google.common.annotations.VisibleForTesting;
174 import com.google.protobuf.ByteString;
175 import com.google.protobuf.ServiceException;
176 
177 /**
178  * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
179  * this is an HBase-internal class as defined in
180  * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
181  * There are no guarantees of backwards source / binary compatibility, and methods or the class can
182  * change or go away without deprecation.
183  * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing
184  * an HBaseAdmin directly.
185  *
186  * <p>Connection should be an <i>unmanaged</i> connection obtained via
187  * {@link ConnectionFactory#createConnection(Configuration)}
188  *
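 * <p>A minimal usage sketch (illustrative only; assumes a valid {@link Configuration}):
 * <pre>{@code
 * Configuration conf = HBaseConfiguration.create();
 * try (Connection connection = ConnectionFactory.createConnection(conf);
 *      Admin admin = connection.getAdmin()) {
 *   // use admin, e.g. admin.listTableNames();
 * }
 * }</pre>
 *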
189  * @see ConnectionFactory
190  * @see Connection
191  * @see Admin
192  */
193 @InterfaceAudience.Private
194 @InterfaceStability.Evolving
195 public class HBaseAdmin implements Admin {
196   private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);
197 
198   private static final String ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";
199 
200   private ClusterConnection connection;
201 
202   private volatile Configuration conf;
203   private final long pause;
204   private final int numRetries;
205   // Some operations, such as disabling a big table, can take a long time.
206   // numRetries is for 'normal' operations; multiply by this factor when
207   // you want to wait a long time.
208   private final int retryLongerMultiplier;
209   private final int syncWaitTimeout;
210   private boolean aborted;
211   private boolean cleanupConnectionOnClose = false; // close the connection in close()
212   private boolean closed = false;
213   private int operationTimeout;
214   private int rpcTimeout;
215 
216   private RpcRetryingCallerFactory rpcCallerFactory;
217   private RpcControllerFactory rpcControllerFactory;
218 
219   private NonceGenerator ng;
220 
221   /**
222    * Constructor.
223    * See {@link #HBaseAdmin(Connection)}
224    *
225    * @param c Configuration object. Copied internally.
226    * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
227    * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
228    */
229   @Deprecated
230   public HBaseAdmin(Configuration c)
231   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
232     // Will not leak connections, as the new implementation of the constructor
233     // does not throw exceptions anymore.
234     this(ConnectionManager.getConnectionInternal(new Configuration(c)));
235     this.cleanupConnectionOnClose = true;
236   }
237 
238   @Override
239   public int getOperationTimeout() {
240     return operationTimeout;
241   }
242 
243 
244   /**
245    * Constructor for externally managed Connections.
246    * The connection to master will be created when required by admin functions.
247    *
248    * @param connection The Connection instance to use
249    * @throws MasterNotRunningException, ZooKeeperConnectionException are no longer
250    *  thrown but are kept in the signature for backward API compatibility
251    * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
252    * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
253    */
254   @Deprecated
255   public HBaseAdmin(Connection connection)
256       throws MasterNotRunningException, ZooKeeperConnectionException {
257     this((ClusterConnection)connection);
258   }
259 
260   HBaseAdmin(ClusterConnection connection) {
261     this.conf = connection.getConfiguration();
262     this.connection = connection;
263 
264     // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time.
265     this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
266         HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
267     this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
268         HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
269     this.retryLongerMultiplier = this.conf.getInt(
270         "hbase.client.retries.longer.multiplier", 10);
271     this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
272         HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
273     this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
274         HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
275     this.syncWaitTimeout = this.conf.getInt(
276       "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
277 
278     this.rpcCallerFactory = connection.getRpcRetryingCallerFactory();
279     this.rpcControllerFactory = connection.getRpcControllerFactory();
280 
281     this.ng = this.connection.getNonceGenerator();
282   }
283 
284   @Override
285   public void abort(String why, Throwable e) {
286     // Currently does nothing but throw the passed message and exception
287     this.aborted = true;
288     throw new RuntimeException(why, e);
289   }
290 
291   @Override
292   public boolean isAborted(){
293     return this.aborted;
294   }
295 
296   /**
297    * Abort a procedure
298    * @param procId ID of the procedure to abort
299    * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
300    * @return true if aborted, false if procedure already completed or does not exist
301    * @throws IOException
302    */
303   @Override
304   public boolean abortProcedure(
305       final long procId,
306       final boolean mayInterruptIfRunning) throws IOException {
307     Future<Boolean> future = abortProcedureAsync(procId, mayInterruptIfRunning);
308     try {
309       return future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
310     } catch (InterruptedException e) {
311       throw new InterruptedIOException("Interrupted when waiting for procedure to be cancelled");
312     } catch (TimeoutException e) {
313       throw new TimeoutIOException(e);
314     } catch (ExecutionException e) {
315       if (e.getCause() instanceof IOException) {
316         throw (IOException)e.getCause();
317       } else {
318         throw new IOException(e.getCause());
319       }
320     }
321   }
322 
323   /**
324    * Aborts a procedure but does not block or wait for it to be completely removed.
325    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
326    * It may throw ExecutionException if there was an error while executing the operation
327    * or TimeoutException in case the wait timeout was not long enough to allow the
328    * operation to complete.
329    *
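   * <p>A rough usage sketch (assumes an existing {@code admin} instance and a known
   * {@code procId}; the timeout value is arbitrary):
   * <pre>{@code
   * Future<Boolean> f = admin.abortProcedureAsync(procId, true);
   * boolean aborted = f.get(30, TimeUnit.SECONDS);
   * }</pre>
   *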
330    * @param procId ID of the procedure to abort
331    * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
332    * @return true if aborted, false if procedure already completed or does not exist
333    * @throws IOException
334    */
335   @Override
336   public Future<Boolean> abortProcedureAsync(
337       final long procId,
338       final boolean mayInterruptIfRunning) throws IOException {
339     Boolean abortProcResponse = executeCallable(
340       new MasterCallable<AbortProcedureResponse>(getConnection()) {
341         @Override
342         public AbortProcedureResponse call(int callTimeout) throws ServiceException {
343           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
344           controller.setCallTimeout(callTimeout);
345           AbortProcedureRequest abortProcRequest =
346               AbortProcedureRequest.newBuilder().setProcId(procId).build();
347           return master.abortProcedure(controller, abortProcRequest);
348         }
349       }).getIsProcedureAborted();
350 
351     AbortProcedureFuture abortProcFuture =
352         new AbortProcedureFuture(this, procId, abortProcResponse);
353     return abortProcFuture;
354   }
355 
356   private static class AbortProcedureFuture extends ProcedureFuture<Boolean> {
357     private boolean isAbortInProgress;
358 
359     public AbortProcedureFuture(
360         final HBaseAdmin admin,
361         final Long procId,
362         final Boolean abortProcResponse) {
363       super(admin, procId);
364       this.isAbortInProgress = abortProcResponse;
365     }
366 
367     @Override
368     public Boolean get(long timeout, TimeUnit unit)
369         throws InterruptedException, ExecutionException, TimeoutException {
370       if (!this.isAbortInProgress) {
371         return false;
372       }
373       super.get(timeout, unit);
374       return true;
375     }
376   }
377 
378   /** @return HConnection used by this object. */
379   @Override
380   public HConnection getConnection() {
381     return connection;
382   }
383 
384   /** @return true if the master server is running; throws an exception
385    *  otherwise.
386    * @throws ZooKeeperConnectionException
387    * @throws MasterNotRunningException
388    * @deprecated this has been deprecated without a replacement
389    */
390   @Deprecated
391   public boolean isMasterRunning()
392   throws MasterNotRunningException, ZooKeeperConnectionException {
393     return connection.isMasterRunning();
394   }
395 
396   /**
397    * @param tableName Table to check.
398    * @return True if table exists already.
399    * @throws IOException
400    */
401   @Override
402   public boolean tableExists(final TableName tableName) throws IOException {
403     return MetaTableAccessor.tableExists(connection, tableName);
404   }
405 
406   public boolean tableExists(final byte[] tableName)
407   throws IOException {
408     return tableExists(TableName.valueOf(tableName));
409   }
410 
411   public boolean tableExists(final String tableName)
412   throws IOException {
413     return tableExists(TableName.valueOf(tableName));
414   }
415 
416   @Override
417   public HTableDescriptor[] listTables() throws IOException {
418     return listTables((Pattern)null, false);
419   }
420 
421   @Override
422   public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
423     return listTables(pattern, false);
424   }
425 
426   @Override
427   public HTableDescriptor[] listTables(String regex) throws IOException {
428     return listTables(Pattern.compile(regex), false);
429   }
430 
431   @Override
432   public HTableDescriptor[] listTables(final Pattern pattern, final boolean includeSysTables)
433       throws IOException {
434     return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
435       @Override
436       public HTableDescriptor[] call(int callTimeout) throws ServiceException {
437         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
438         controller.setCallTimeout(callTimeout);
439         GetTableDescriptorsRequest req =
440             RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
441         return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(controller, req));
442       }
443     });
444   }
445 
446   @Override
447   public HTableDescriptor[] listTables(String regex, boolean includeSysTables)
448       throws IOException {
449     return listTables(Pattern.compile(regex), includeSysTables);
450   }
451 
452   /**
453    * List all of the names of userspace tables.
454    * @return String[] table names
455    * @throws IOException if a remote or network exception occurs
456    * @deprecated Use {@link Admin#listTableNames()} instead
457    */
458   @Deprecated
459   public String[] getTableNames() throws IOException {
460     TableName[] tableNames = listTableNames();
461     String result[] = new String[tableNames.length];
462     for (int i = 0; i < tableNames.length; i++) {
463       result[i] = tableNames[i].getNameAsString();
464     }
465     return result;
466   }
467 
468   /**
469    * List all of the names of userspace tables matching the given regular expression.
470    * @param pattern The regular expression to match against
471    * @return String[] table names
472    * @throws IOException if a remote or network exception occurs
473    * @deprecated Use {@link Admin#listTableNames(Pattern)} instead.
474    */
475   @Deprecated
476   public String[] getTableNames(Pattern pattern) throws IOException {
477     TableName[] tableNames = listTableNames(pattern);
478     String result[] = new String[tableNames.length];
479     for (int i = 0; i < tableNames.length; i++) {
480       result[i] = tableNames[i].getNameAsString();
481     }
482     return result;
483   }
484 
485   /**
486    * List all of the names of userspace tables matching the given regular expression.
487    * @param regex The regular expression to match against
488    * @return String[] table names
489    * @throws IOException if a remote or network exception occurs
490    * @deprecated Use {@link Admin#listTableNames(Pattern)} instead.
491    */
492   @Deprecated
493   public String[] getTableNames(String regex) throws IOException {
494     return getTableNames(Pattern.compile(regex));
495   }
496 
497   @Override
498   public TableName[] listTableNames() throws IOException {
499     return listTableNames((Pattern)null, false);
500   }
501 
502   @Override
503   public TableName[] listTableNames(Pattern pattern) throws IOException {
504     return listTableNames(pattern, false);
505   }
506 
507   @Override
508   public TableName[] listTableNames(String regex) throws IOException {
509     return listTableNames(Pattern.compile(regex), false);
510   }
511 
512   @Override
513   public TableName[] listTableNames(final Pattern pattern, final boolean includeSysTables)
514       throws IOException {
515     return executeCallable(new MasterCallable<TableName[]>(getConnection()) {
516       @Override
517       public TableName[] call(int callTimeout) throws ServiceException {
518         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
519         controller.setCallTimeout(callTimeout);
520         GetTableNamesRequest req =
521             RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables);
522         return ProtobufUtil.getTableNameArray(master.getTableNames(controller, req)
523             .getTableNamesList());
524       }
525     });
526   }
527 
528   @Override
529   public TableName[] listTableNames(final String regex, final boolean includeSysTables)
530       throws IOException {
531     return listTableNames(Pattern.compile(regex), includeSysTables);
532   }
533 
534   /**
535    * Gets the descriptor for the given table.
536    * @param tableName name of the table
537    * @return the tableDescriptor
538    * @throws TableNotFoundException
539    * @throws IOException if a remote or network exception occurs
540    */
541   @Override
542   public HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException {
543      return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory,
544        operationTimeout, rpcTimeout);
545   }
546 
547   static HTableDescriptor getTableDescriptor(final TableName tableName, HConnection connection,
548       RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
549       int operationTimeout, int rpcTimeout) throws IOException {
550       if (tableName == null) return null;
551       HTableDescriptor htd = executeCallable(new MasterCallable<HTableDescriptor>(connection) {
552         @Override
553         public HTableDescriptor call(int callTimeout) throws ServiceException {
554         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
555         controller.setCallTimeout(callTimeout);
556           GetTableDescriptorsResponse htds;
557           GetTableDescriptorsRequest req =
558                   RequestConverter.buildGetTableDescriptorsRequest(tableName);
559           htds = master.getTableDescriptors(controller, req);
560 
561           if (!htds.getTableSchemaList().isEmpty()) {
562             return HTableDescriptor.convert(htds.getTableSchemaList().get(0));
563           }
564           return null;
565         }
566       }, rpcCallerFactory, operationTimeout, rpcTimeout);
567       if (htd != null) {
568         return htd;
569       }
570       throw new TableNotFoundException(tableName.getNameAsString());
571   }
572 
573   public HTableDescriptor getTableDescriptor(final byte[] tableName)
574   throws TableNotFoundException, IOException {
575     return getTableDescriptor(TableName.valueOf(tableName));
576   }
577 
578   private long getPauseTime(int tries) {
579     int triesCount = tries;
580     if (triesCount >= HConstants.RETRY_BACKOFF.length) {
581       triesCount = HConstants.RETRY_BACKOFF.length - 1;
582     }
583     return this.pause * HConstants.RETRY_BACKOFF[triesCount];
584   }
585 
586   /**
587    * Creates a new table.
588    * Synchronous operation.
589    *
590    * @param desc table descriptor for table
591    *
592    * @throws IllegalArgumentException if the table name is reserved
593    * @throws MasterNotRunningException if master is not running
594    * @throws TableExistsException if table already exists (If concurrent
595    * threads, the table may have been created between test-for-existence
596    * and attempt-at-creation).
597    * @throws IOException if a remote or network exception occurs
598    */
599   @Override
600   public void createTable(HTableDescriptor desc)
601   throws IOException {
602     createTable(desc, null);
603   }
604 
605   /**
606    * Creates a new table with the specified number of regions.  The start key
607    * specified will become the end key of the first region of the table, and
608    * the end key specified will become the start key of the last region of the
609    * table (the first region has a null start key and the last region has a
610    * null end key).
611    *
612    * BigInteger math will be used to divide the key range specified into
613    * enough segments to make the required number of total regions.
614    *
615    * Synchronous operation.
616    *
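   * <p>An illustrative call (key range and region count are arbitrary) that pre-splits
   * a table into ten regions over a fixed-width numeric rowkey space:
   * <pre>{@code
   * admin.createTable(desc, Bytes.toBytes("0000000000"), Bytes.toBytes("9999999999"), 10);
   * }</pre>
   *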
617    * @param desc table descriptor for table
618    * @param startKey beginning of key range
619    * @param endKey end of key range
620    * @param numRegions the total number of regions to create
621    *
622    * @throws IllegalArgumentException if the table name is reserved
623    * @throws MasterNotRunningException if master is not running
624    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
625    * threads, the table may have been created between test-for-existence
626    * and attempt-at-creation).
627    * @throws IOException
628    */
629   @Override
630   public void createTable(HTableDescriptor desc, byte [] startKey,
631       byte [] endKey, int numRegions)
632   throws IOException {
633     if(numRegions < 3) {
634       throw new IllegalArgumentException("Must create at least three regions");
635     } else if(Bytes.compareTo(startKey, endKey) >= 0) {
636       throw new IllegalArgumentException("Start key must be smaller than end key");
637     }
638     if (numRegions == 3) {
639       createTable(desc, new byte[][]{startKey, endKey});
640       return;
641     }
642     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
643     if(splitKeys == null || splitKeys.length != numRegions - 1) {
644       throw new IllegalArgumentException("Unable to split key range into enough regions");
645     }
646     createTable(desc, splitKeys);
647   }
648 
649   /**
650    * Creates a new table with an initial set of empty regions defined by the
651    * specified split keys.  The total number of regions created will be the
652    * number of split keys plus one. Synchronous operation.
653    * Note: Avoid passing an empty split key.
654    *
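   * <p>A small illustrative sketch (split points are arbitrary); two split keys yield
   * three regions:
   * <pre>{@code
   * byte[][] splits = new byte[][] { Bytes.toBytes("row-3000"), Bytes.toBytes("row-6000") };
   * admin.createTable(desc, splits);
   * }</pre>
   *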
655    * @param desc table descriptor for table
656    * @param splitKeys array of split keys for the initial regions of the table
657    *
658    * @throws IllegalArgumentException if the table name is reserved, if the split keys
659    * are repeated, or if a split key is an empty byte array.
660    * @throws MasterNotRunningException if master is not running
661    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
662    * threads, the table may have been created between test-for-existence
663    * and attempt-at-creation).
664    * @throws IOException
665    */
666   @Override
667   public void createTable(final HTableDescriptor desc, byte [][] splitKeys)
668       throws IOException {
669     Future<Void> future = createTableAsyncV2(desc, splitKeys);
670     try {
671       // TODO: how long should we wait? spin forever?
672       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
673     } catch (InterruptedException e) {
674       throw new InterruptedIOException("Interrupted when waiting" +
675           " for table to be enabled; meta scan was done");
676     } catch (TimeoutException e) {
677       throw new TimeoutIOException(e);
678     } catch (ExecutionException e) {
679       if (e.getCause() instanceof IOException) {
680         throw (IOException)e.getCause();
681       } else {
682         throw new IOException(e.getCause());
683       }
684     }
685   }
686 
687   /**
688    * Creates a new table but does not block and wait for it to come online.
689    * Asynchronous operation.  To check if the table exists, use
690    * {@link #isTableAvailable} -- it is not safe to create an HTable
691    * instance for this table before it is available.
692    * Note: Avoid passing an empty split key.
693    * @param desc table descriptor for table
694    *
695    * @throws IllegalArgumentException if the table name is invalid, if the split keys
696    * are repeated, or if a split key is an empty byte array.
697    * @throws MasterNotRunningException if master is not running
698    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
699    * threads, the table may have been created between test-for-existence
700    * and attempt-at-creation).
701    * @throws IOException
702    */
703   @Override
704   public void createTableAsync(final HTableDescriptor desc, final byte [][] splitKeys)
705       throws IOException {
706     createTableAsyncV2(desc, splitKeys);
707   }
708 
709   /**
710    * Creates a new table but does not block and wait for it to come online.
711    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
712    * It may throw ExecutionException if there was an error while executing the operation
713    * or TimeoutException in case the wait timeout was not long enough to allow the
714    * operation to complete.
715    *
716    * @param desc table descriptor for table
717    * @param splitKeys keys to check if the table has been created with all split keys
718    * @throws IllegalArgumentException if the table name is invalid, if the split keys
719    *    are repeated, or if a split key is an empty byte array.
720    * @throws IOException if a remote or network exception occurs
721    * @return the result of the async creation. You can use Future.get(long, TimeUnit)
722    *    to wait on the operation to complete.
723    */
724   // TODO: This should be called Async but it will break binary compatibility
725   private Future<Void> createTableAsyncV2(final HTableDescriptor desc, final byte[][] splitKeys)
726       throws IOException {
727     if (desc.getTableName() == null) {
728       throw new IllegalArgumentException("TableName cannot be null");
729     }
730     if (splitKeys != null && splitKeys.length > 0) {
731       Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
732       // Verify there are no duplicate split keys
733       byte[] lastKey = null;
734       for (byte[] splitKey : splitKeys) {
735         if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
736           throw new IllegalArgumentException(
737               "Empty split key must not be passed in the split keys.");
738         }
739         if (lastKey != null && Bytes.equals(splitKey, lastKey)) {
740           throw new IllegalArgumentException("All split keys must be unique, " +
741             "found duplicate: " + Bytes.toStringBinary(splitKey) +
742             ", " + Bytes.toStringBinary(lastKey));
743         }
744         lastKey = splitKey;
745       }
746     }
747 
748     CreateTableResponse response = executeCallable(
749       new MasterCallable<CreateTableResponse>(getConnection()) {
750         @Override
751         public CreateTableResponse call(int callTimeout) throws ServiceException {
752           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
753           controller.setCallTimeout(callTimeout);
754           controller.setPriority(desc.getTableName());
755           CreateTableRequest request = RequestConverter.buildCreateTableRequest(
756             desc, splitKeys, ng.getNonceGroup(), ng.newNonce());
757           return master.createTable(controller, request);
758         }
759       });
760     return new CreateTableFuture(this, desc, splitKeys, response);
761   }
762 
763   private static class CreateTableFuture extends ProcedureFuture<Void> {
764     private final HTableDescriptor desc;
765     private final byte[][] splitKeys;
766 
767     public CreateTableFuture(final HBaseAdmin admin, final HTableDescriptor desc,
768         final byte[][] splitKeys, final CreateTableResponse response) {
769       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
770       this.splitKeys = splitKeys;
771       this.desc = desc;
772     }
773 
774     @Override
775     protected Void waitOperationResult(final long deadlineTs)
776         throws IOException, TimeoutException {
777       waitForTableEnabled(deadlineTs);
778       waitForAllRegionsOnline(deadlineTs);
779       return null;
780     }
781 
782     @Override
783     protected Void postOperationResult(final Void result, final long deadlineTs)
784         throws IOException, TimeoutException {
785       LOG.info("Created " + desc.getTableName());
786       return result;
787     }
788 
789     private void waitForTableEnabled(final long deadlineTs)
790         throws IOException, TimeoutException {
791       waitForState(deadlineTs, new WaitForStateCallable() {
792         @Override
793         public boolean checkState(int tries) throws IOException {
794           try {
795             if (getAdmin().isTableAvailable(desc.getTableName())) {
796               return true;
797             }
798           } catch (TableNotFoundException tnfe) {
799             LOG.debug("Table "+ desc.getTableName() +" was not enabled, sleeping. tries="+  tries);
800           }
801           return false;
802         }
803 
804         @Override
805         public void throwInterruptedException() throws InterruptedIOException {
806           throw new InterruptedIOException("Interrupted when waiting for table " +
807               desc.getTableName() + " to be enabled");
808         }
809 
810         @Override
811         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
812           throw new TimeoutException("Table " + desc.getTableName() +
813             " not enabled after " + elapsedTime + "msec");
814         }
815       });
816     }
817 
818     private void waitForAllRegionsOnline(final long deadlineTs)
819         throws IOException, TimeoutException {
820       final AtomicInteger actualRegCount = new AtomicInteger(0);
821       final MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
822         @Override
823         public boolean processRow(Result rowResult) throws IOException {
824           RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
825           if (list == null) {
826             LOG.warn("No serialized HRegionInfo in " + rowResult);
827             return true;
828           }
829           HRegionLocation l = list.getRegionLocation();
830           if (l == null) {
831             return true;
832           }
833           if (!l.getRegionInfo().getTable().equals(desc.getTableName())) {
834             return false;
835           }
836           if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true;
837           HRegionLocation[] locations = list.getRegionLocations();
838           for (HRegionLocation location : locations) {
839             if (location == null) continue;
840             ServerName serverName = location.getServerName();
841             // Make sure that regions are assigned to server
842             if (serverName != null && serverName.getHostAndPort() != null) {
843               actualRegCount.incrementAndGet();
844             }
845           }
846           return true;
847         }
848       };
849 
850       int tries = 0;
851       IOException serverEx = null;
852       int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication();
853       while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
854         actualRegCount.set(0);
855         MetaScanner.metaScan(getAdmin().getConnection(), visitor, desc.getTableName());
856         if (actualRegCount.get() == numRegs) {
857           // all the regions are online
858           return;
859         }
860 
861         try {
862           Thread.sleep(getAdmin().getPauseTime(tries++));
863         } catch (InterruptedException e) {
864           throw new InterruptedIOException("Interrupted when opening" +
865             " regions; " + actualRegCount.get() + " of " + numRegs +
866             " regions processed so far");
867         }
868       }
869       throw new TimeoutException("Only " + actualRegCount.get() +
870               " of " + numRegs + " regions are online; retries exhausted.");
871     }
872   }
873 
874   public void deleteTable(final String tableName) throws IOException {
875     deleteTable(TableName.valueOf(tableName));
876   }
877 
878   public void deleteTable(final byte[] tableName) throws IOException {
879     deleteTable(TableName.valueOf(tableName));
880   }
881 
882   /**
883    * Deletes a table.
884    * Synchronous operation.
885    *
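   * <p>Illustrative usage (the table name is arbitrary; the table must be disabled before
   * it can be deleted):
   * <pre>{@code
   * admin.disableTable(TableName.valueOf("my_table"));
   * admin.deleteTable(TableName.valueOf("my_table"));
   * }</pre>
   *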
886    * @param tableName name of table to delete
887    * @throws IOException if a remote or network exception occurs
888    */
889   @Override
890   public void deleteTable(final TableName tableName) throws IOException {
891     Future<Void> future = deleteTableAsyncV2(tableName);
892     try {
893       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
894     } catch (InterruptedException e) {
895       throw new InterruptedIOException("Interrupted when waiting for table to be deleted");
896     } catch (TimeoutException e) {
897       throw new TimeoutIOException(e);
898     } catch (ExecutionException e) {
899       if (e.getCause() instanceof IOException) {
900         throw (IOException)e.getCause();
901       } else {
902         throw new IOException(e.getCause());
903       }
904     }
905   }
906 
907   /**
908    * Deletes the table but does not block or wait for it to be completely removed.
909    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
910    * It may throw ExecutionException if there was an error while executing the operation
911    * or TimeoutException in case the wait timeout was not long enough to allow the
912    * operation to complete.
913    *
915    * @param tableName name of table to delete
916    * @throws IOException if a remote or network exception occurs
917    * @return the result of the async delete. You can use Future.get(long, TimeUnit)
918    *    to wait on the operation to complete.
919    */
920   // TODO: This should be called Async but it will break binary compatibility
921   private Future<Void> deleteTableAsyncV2(final TableName tableName) throws IOException {
922     DeleteTableResponse response = executeCallable(
923       new MasterCallable<DeleteTableResponse>(getConnection()) {
924         @Override
925         public DeleteTableResponse call(int callTimeout) throws ServiceException {
926           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
927           controller.setCallTimeout(callTimeout);
928           controller.setPriority(tableName);
929           DeleteTableRequest req =
930               RequestConverter.buildDeleteTableRequest(tableName, ng.getNonceGroup(),ng.newNonce());
931           return master.deleteTable(controller,req);
932         }
933       });
934     return new DeleteTableFuture(this, tableName, response);
935   }
936 
937   private static class DeleteTableFuture extends ProcedureFuture<Void> {
938     private final TableName tableName;
939 
940     public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName,
941         final DeleteTableResponse response) {
942       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
943       this.tableName = tableName;
944     }
945 
946     @Override
947     protected Void waitOperationResult(final long deadlineTs)
948         throws IOException, TimeoutException {
949       waitTableNotFound(deadlineTs);
950       return null;
951     }
952 
953     @Override
954     protected Void postOperationResult(final Void result, final long deadlineTs)
955         throws IOException, TimeoutException {
956       // Delete cached information to prevent clients from using old locations
957       getAdmin().getConnection().clearRegionCache(tableName);
958       LOG.info("Deleted " + tableName);
959       return result;
960     }
961 
962     private void waitTableNotFound(final long deadlineTs)
963         throws IOException, TimeoutException {
964       waitForState(deadlineTs, new WaitForStateCallable() {
965         @Override
966         public boolean checkState(int tries) throws IOException {
967           return !getAdmin().tableExists(tableName);
968         }
969 
970         @Override
971         public void throwInterruptedException() throws InterruptedIOException {
972           throw new InterruptedIOException("Interrupted when waiting for table to be deleted");
973         }
974 
975         @Override
976         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
977           throw new TimeoutException("Table " + tableName + " not yet deleted after " +
978               elapsedTime + "msec");
979         }
980       });
981     }
982   }
983 
984   /**
985    * Deletes tables matching the passed in pattern and wait on completion.
986    *
987    * Warning: Use this method carefully, there is no prompting and the effect is
988    * immediate. Consider using {@link #listTables(java.lang.String)} and
989    * {@link #deleteTable(byte[])}
990    *
991    * @param regex The regular expression to match table names against
992    * @return Table descriptors for tables that couldn't be deleted
993    * @throws IOException
994    * @see #deleteTables(java.util.regex.Pattern)
995    * @see #deleteTable(java.lang.String)
996    */
997   @Override
998   public HTableDescriptor[] deleteTables(String regex) throws IOException {
999     return deleteTables(Pattern.compile(regex));
1000   }
1001 
1002   /**
1003    * Delete tables matching the passed in pattern and wait on completion.
1004    *
1005    * Warning: Use this method carefully, there is no prompting and the effect is
1006    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
1007    * {@link #deleteTable(byte[])}
1008    *
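   * <p>Illustrative example (the pattern is arbitrary); any tables that could not be
   * deleted are returned:
   * <pre>{@code
   * HTableDescriptor[] notDeleted = admin.deleteTables(Pattern.compile("test_.*"));
   * }</pre>
   *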
1009    * @param pattern The pattern to match table names against
1010    * @return Table descriptors for tables that couldn't be deleted
1011    * @throws IOException
1012    */
1013   @Override
1014   public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException {
1015     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1016     for (HTableDescriptor table : listTables(pattern)) {
1017       try {
1018         deleteTable(table.getTableName());
1019       } catch (IOException ex) {
1020         LOG.info("Failed to delete table " + table.getTableName(), ex);
1021         failed.add(table);
1022       }
1023     }
1024     return failed.toArray(new HTableDescriptor[failed.size()]);
1025   }
1026 
1027   /**
1028    * Truncate a table.
1029    * Synchronous operation.
1030    *
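   * <p>For example (the table name is illustrative); passing {@code true} keeps the
   * existing region boundaries:
   * <pre>{@code
   * admin.truncateTable(TableName.valueOf("my_table"), true);
   * }</pre>
   *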
1031    * @param tableName name of table to truncate
1032    * @param preserveSplits True if the splits should be preserved
1033    * @throws IOException if a remote or network exception occurs
1034    */
1035   @Override
1036   public void truncateTable(final TableName tableName, final boolean preserveSplits)
1037       throws IOException {
1038     executeCallable(new MasterCallable<Void>(getConnection()) {
1039       @Override
1040       public Void call(int callTimeout) throws ServiceException {
1041         TruncateTableRequest req = RequestConverter.buildTruncateTableRequest(
1042           tableName, preserveSplits, ng.getNonceGroup(), ng.newNonce());
1043         master.truncateTable(null, req);
1044         return null;
1045       }
1046     });
1047   }
1048 
1049   /**
1050    * Enable a table.  May timeout.  Use {@link #enableTableAsync(byte[])}
1051    * and {@link #isTableEnabled(byte[])} instead.
1052    * The table has to be in disabled state for it to be enabled.
1053    * @param tableName name of the table
1054    * @throws IOException if a remote or network exception occurs
1055   * There can be a couple of types of IOException:
1056   * TableNotFoundException means the table doesn't exist;
1057   * TableNotDisabledException means the table isn't in the disabled state.
1058    * @see #isTableEnabled(byte[])
1059    * @see #disableTable(byte[])
1060    * @see #enableTableAsync(byte[])
1061    */
1062   @Override
1063   public void enableTable(final TableName tableName)
1064   throws IOException {
1065     Future<Void> future = enableTableAsyncV2(tableName);
1066     try {
1067       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
1068     } catch (InterruptedException e) {
1069      throw new InterruptedIOException("Interrupted when waiting for table to be enabled");
1070     } catch (TimeoutException e) {
1071       throw new TimeoutIOException(e);
1072     } catch (ExecutionException e) {
1073       if (e.getCause() instanceof IOException) {
1074         throw (IOException)e.getCause();
1075       } else {
1076         throw new IOException(e.getCause());
1077       }
1078     }
1079   }
1080 
1081   public void enableTable(final byte[] tableName)
1082   throws IOException {
1083     enableTable(TableName.valueOf(tableName));
1084   }
1085 
1086   public void enableTable(final String tableName)
1087   throws IOException {
1088     enableTable(TableName.valueOf(tableName));
1089   }
1090 
1091   /**
1092   * Wait for the table to be enabled and available.
1093   * If enabling the table exceeds the retry period, an exception is thrown.
1094    * @param tableName name of the table
1095    * @throws IOException if a remote or network exception occurs or
1096    *    table is not enabled after the retries period.
1097    */
1098   private void waitUntilTableIsEnabled(final TableName tableName) throws IOException {
1099     boolean enabled = false;
1100     long start = EnvironmentEdgeManager.currentTime();
1101     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
1102       try {
1103         enabled = isTableEnabled(tableName);
1104       } catch (TableNotFoundException tnfe) {
1105         // wait for table to be created
1106         enabled = false;
1107       }
1108       enabled = enabled && isTableAvailable(tableName);
1109       if (enabled) {
1110         break;
1111       }
1112       long sleep = getPauseTime(tries);
1113       if (LOG.isDebugEnabled()) {
1114         LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
1115           "enabled in " + tableName);
1116       }
1117       try {
1118         Thread.sleep(sleep);
1119       } catch (InterruptedException e) {
1120         // Do this conversion rather than let it out because do not want to
1121         // change the method signature.
1122         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
1123       }
1124     }
1125     if (!enabled) {
1126       long msec = EnvironmentEdgeManager.currentTime() - start;
1127       throw new IOException("Table '" + tableName +
1128         "' not yet enabled, after " + msec + "ms.");
1129     }
1130   }
1131 
1132   /**
1133    * Brings a table on-line (enables it).  Method returns immediately though
1134    * enable of table may take some time to complete, especially if the table
1135    * is large (All regions are opened as part of enabling process).  Check
1136    * {@link #isTableEnabled(byte[])} to learn when table is fully online.  If
1137   * the table is taking too long to come online, check server logs.
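   * <p>An illustrative sketch of the intended pattern (assumes an existing
   * {@code tableName}):
   * <pre>{@code
   * admin.enableTableAsync(tableName);
   * // ... later, check whether the table is fully online:
   * boolean online = admin.isTableEnabled(tableName) && admin.isTableAvailable(tableName);
   * }</pre>
   *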
1138    * @param tableName
1139    * @throws IOException
1140    * @since 0.90.0
1141    */
1142   @Override
1143   public void enableTableAsync(final TableName tableName)
1144   throws IOException {
1145     enableTableAsyncV2(tableName);
1146   }
1147 
1148   public void enableTableAsync(final byte[] tableName)
1149   throws IOException {
1150     enableTable(TableName.valueOf(tableName));
1151   }
1152 
1153   public void enableTableAsync(final String tableName)
1154   throws IOException {
1155     enableTableAsync(TableName.valueOf(tableName));
1156   }
1157 
1158   /**
1159   * Enables the table but does not block or wait for it to be completely enabled.
1160    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
1161    * It may throw ExecutionException if there was an error while executing the operation
1162    * or TimeoutException in case the wait timeout was not long enough to allow the
1163    * operation to complete.
1164    *
1165   * @param tableName name of table to enable
1166    * @throws IOException if a remote or network exception occurs
1167    * @return the result of the async enable. You can use Future.get(long, TimeUnit)
1168    *    to wait on the operation to complete.
1169    */
1170   // TODO: This should be called Async but it will break binary compatibility
1171   private Future<Void> enableTableAsyncV2(final TableName tableName) throws IOException {
1172     TableName.isLegalFullyQualifiedTableName(tableName.getName());
1173     EnableTableResponse response = executeCallable(
1174       new MasterCallable<EnableTableResponse>(getConnection()) {
1175         @Override
1176         public EnableTableResponse call(int callTimeout) throws ServiceException {
1177           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1178           controller.setCallTimeout(callTimeout);
1179           controller.setPriority(tableName);
1180 
1181           LOG.info("Started enable of " + tableName);
1182           EnableTableRequest req =
1183               RequestConverter.buildEnableTableRequest(tableName, ng.getNonceGroup(),ng.newNonce());
1184           return master.enableTable(controller,req);
1185         }
1186       });
1187     return new EnableTableFuture(this, tableName, response);
1188   }
1189 
1190   private static class EnableTableFuture extends ProcedureFuture<Void> {
1191     private final TableName tableName;
1192 
1193     public EnableTableFuture(final HBaseAdmin admin, final TableName tableName,
1194         final EnableTableResponse response) {
1195       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
1196       this.tableName = tableName;
1197     }
1198 
1199     @Override
1200     protected Void waitOperationResult(final long deadlineTs)
1201         throws IOException, TimeoutException {
1202       waitTableEnabled(deadlineTs);
1203       return null;
1204     }
1205 
1206     @Override
1207     protected Void postOperationResult(final Void result, final long deadlineTs)
1208         throws IOException, TimeoutException {
1209       LOG.info("Enabled " + tableName);
1210       return result;
1211     }
1212 
1213     private void waitTableEnabled(final long deadlineTs)
1214         throws IOException, TimeoutException {
1215       waitForState(deadlineTs, new WaitForStateCallable() {
1216         @Override
1217         public boolean checkState(int tries) throws IOException {
1218           boolean enabled;
1219           try {
1220             enabled = getAdmin().isTableEnabled(tableName);
1221           } catch (TableNotFoundException tnfe) {
1222             return false;
1223           }
1224           return enabled && getAdmin().isTableAvailable(tableName);
1225         }
1226 
1227         @Override
1228         public void throwInterruptedException() throws InterruptedIOException {
1229           throw new InterruptedIOException("Interrupted when waiting for table to be enabled");
1230         }
1231 
1232         @Override
1233         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
1234           throw new TimeoutException("Table " + tableName + " not yet enabled after " +
1235               elapsedTime + "msec");
1236         }
1237       });
1238     }
1239   }
1240 
1241   /**
1242    * Enable tables matching the passed in pattern and wait on completion.
1243    *
1244    * Warning: Use this method carefully, there is no prompting and the effect is
1245    * immediate. Consider using {@link #listTables(java.lang.String)} and
1246    * {@link #enableTable(byte[])}
1247    *
1248    * @param regex The regular expression to match table names against
1249    * @throws IOException
1250    * @see #enableTables(java.util.regex.Pattern)
1251    * @see #enableTable(java.lang.String)
1252    */
1253   @Override
1254   public HTableDescriptor[] enableTables(String regex) throws IOException {
1255     return enableTables(Pattern.compile(regex));
1256   }
1257 
1258   /**
1259    * Enable tables matching the passed in pattern and wait on completion.
1260    *
1261    * Warning: Use this method carefully, there is no prompting and the effect is
1262    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
1263    * {@link #enableTable(byte[])}
1264    *
1265    * @param pattern The pattern to match table names against
1266    * @throws IOException
1267    */
1268   @Override
1269   public HTableDescriptor[] enableTables(Pattern pattern) throws IOException {
1270     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1271     for (HTableDescriptor table : listTables(pattern)) {
1272       if (isTableDisabled(table.getTableName())) {
1273         try {
1274           enableTable(table.getTableName());
1275         } catch (IOException ex) {
1276           LOG.info("Failed to enable table " + table.getTableName(), ex);
1277           failed.add(table);
1278         }
1279       }
1280     }
1281     return failed.toArray(new HTableDescriptor[failed.size()]);
1282   }
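
  // Hedged usage sketch for enableTables(Pattern): the "myapp_.*" prefix and the admin
  // handle are hypothetical. Tables that could not be enabled come back as descriptors.
  //
  //   HTableDescriptor[] couldNotEnable = admin.enableTables(Pattern.compile("myapp_.*"));
  //   for (HTableDescriptor htd : couldNotEnable) {
  //     // retry or report htd.getTableName() here
  //   }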
1283 
1284   /**
1285    * Starts the disable of a table.  If it is being served, the master
1286    * will tell the servers to stop serving it.  This method returns immediately.
1287    * The disable of a table can take some time if the table is large (all
1288    * regions are closed as part of the table disable operation).
1289    * Call {@link #isTableDisabled(byte[])} to check for when disable completes.
1290    * If the table is taking too long to go offline, check the server logs.
1291    * @param tableName name of table
1292    * @throws IOException if a remote or network exception occurs
1293    * @see #isTableDisabled(byte[])
1294    * @see #isTableEnabled(byte[])
1295    * @since 0.90.0
1296    */
1297   @Override
1298   public void disableTableAsync(final TableName tableName) throws IOException {
1299     disableTableAsyncV2(tableName);
1300   }
1301 
1302   public void disableTableAsync(final byte[] tableName) throws IOException {
1303     disableTableAsync(TableName.valueOf(tableName));
1304   }
1305 
1306   public void disableTableAsync(final String tableName) throws IOException {
1307     disableTableAsync(TableName.valueOf(tableName));
1308   }
1309 
1310   /**
1311    * Disable table and wait on completion.  May time out eventually.  Use
1312    * {@link #disableTableAsync(byte[])} and {@link #isTableDisabled(String)}
1313    * instead.
1314    * The table has to be in the enabled state for it to be disabled.
1315    * @param tableName name of table to disable
1316    * @throws IOException
1317    * There are a couple of types of IOException that may be thrown:
1318    * TableNotFoundException means the table doesn't exist;
1319    * TableNotEnabledException means the table isn't in the enabled state.
1320    */
1321   @Override
1322   public void disableTable(final TableName tableName)
1323   throws IOException {
1324     Future<Void> future = disableTableAsyncV2(tableName);
1325     try {
1326       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
1327     } catch (InterruptedException e) {
1328       throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
1329     } catch (TimeoutException e) {
1330       throw new TimeoutIOException(e);
1331     } catch (ExecutionException e) {
1332       if (e.getCause() instanceof IOException) {
1333         throw (IOException)e.getCause();
1334       } else {
1335         throw new IOException(e.getCause());
1336       }
1337     }
1338   }
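
  // Hedged caller-side sketch for disableTable(TableName); the table name is hypothetical.
  // Per the javadoc above, the thrown IOException may be a TableNotFoundException or a
  // TableNotEnabledException, which callers often want to treat differently.
  //
  //   try {
  //     admin.disableTable(TableName.valueOf("exampleTable"));
  //   } catch (TableNotEnabledException e) {
  //     // already disabled; nothing to do
  //   }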
1339 
1340   public void disableTable(final byte[] tableName)
1341   throws IOException {
1342     disableTable(TableName.valueOf(tableName));
1343   }
1344 
1345   public void disableTable(final String tableName)
1346   throws IOException {
1347     disableTable(TableName.valueOf(tableName));
1348   }
1349 
1350   /**
1351    * Disables the table but does not block and wait for it to be completely disabled.
1352    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
1353    * It may throw ExecutionException if there was an error while executing the operation
1354    * or TimeoutException if the wait timeout was not long enough to allow the
1355    * operation to complete.
1356    *
1357    * @param tableName name of table to disable
1358    * @throws IOException if a remote or network exception occurs
1359    * @return the result of the async disable. You can use Future.get(long, TimeUnit)
1360    *    to wait on the operation to complete.
1361    */
1362   // TODO: This should be called Async but it will break binary compatibility
1363   private Future<Void> disableTableAsyncV2(final TableName tableName) throws IOException {
1364     TableName.isLegalFullyQualifiedTableName(tableName.getName());
1365     DisableTableResponse response = executeCallable(
1366       new MasterCallable<DisableTableResponse>(getConnection()) {
1367         @Override
1368         public DisableTableResponse call(int callTimeout) throws ServiceException {
1369           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1370           controller.setCallTimeout(callTimeout);
1371           controller.setPriority(tableName);
1372 
1373           LOG.info("Started disable of " + tableName);
1374           DisableTableRequest req =
1375               RequestConverter.buildDisableTableRequest(
1376                 tableName, ng.getNonceGroup(), ng.newNonce());
1377           return master.disableTable(controller, req);
1378         }
1379       });
1380     return new DisableTableFuture(this, tableName, response);
1381   }
1382 
1383   private static class DisableTableFuture extends ProcedureFuture<Void> {
1384     private final TableName tableName;
1385 
1386     public DisableTableFuture(final HBaseAdmin admin, final TableName tableName,
1387         final DisableTableResponse response) {
1388       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
1389       this.tableName = tableName;
1390     }
1391 
1392     @Override
1393     protected Void waitOperationResult(final long deadlineTs)
1394         throws IOException, TimeoutException {
1395       waitTableDisabled(deadlineTs);
1396       return null;
1397     }
1398 
1399     @Override
1400     protected Void postOperationResult(final Void result, final long deadlineTs)
1401         throws IOException, TimeoutException {
1402       LOG.info("Disabled " + tableName);
1403       return result;
1404     }
1405 
1406     private void waitTableDisabled(final long deadlineTs)
1407         throws IOException, TimeoutException {
1408       waitForState(deadlineTs, new WaitForStateCallable() {
1409         @Override
1410         public boolean checkState(int tries) throws IOException {
1411           return getAdmin().isTableDisabled(tableName);
1412         }
1413 
1414         @Override
1415         public void throwInterruptedException() throws InterruptedIOException {
1416           throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
1417         }
1418 
1419         @Override
1420         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
1421           throw new TimeoutException("Table " + tableName + " not yet disabled after " +
1422               elapsedTime + "msec");
1423         }
1424       });
1425     }
1426   }
1427 
1428   /**
1429    * Disable tables matching the passed in pattern and wait on completion.
1430    *
1431    * Warning: Use this method carefully, there is no prompting and the effect is
1432    * immediate. Consider using {@link #listTables(java.lang.String)} and
1433    * {@link #disableTable(byte[])}
1434    *
1435    * @param regex The regular expression to match table names against
1436    * @return Table descriptors for tables that couldn't be disabled
1437    * @throws IOException
1438    * @see #disableTables(java.util.regex.Pattern)
1439    * @see #disableTable(java.lang.String)
1440    */
1441   @Override
1442   public HTableDescriptor[] disableTables(String regex) throws IOException {
1443     return disableTables(Pattern.compile(regex));
1444   }
1445 
1446   /**
1447    * Disable tables matching the passed in pattern and wait on completion.
1448    *
1449    * Warning: Use this method carefully, there is no prompting and the effect is
1450    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
1451    * {@link #disableTable(byte[])}
1452    *
1453    * @param pattern The pattern to match table names against
1454    * @return Table descriptors for tables that couldn't be disabled
1455    * @throws IOException
1456    */
1457   @Override
1458   public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
1459     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1460     for (HTableDescriptor table : listTables(pattern)) {
1461       if (isTableEnabled(table.getTableName())) {
1462         try {
1463           disableTable(table.getTableName());
1464         } catch (IOException ex) {
1465           LOG.info("Failed to disable table " + table.getTableName(), ex);
1466           failed.add(table);
1467         }
1468       }
1469     }
1470     return failed.toArray(new HTableDescriptor[failed.size()]);
1471   }
1472 
1473   /*
1474    * Checks whether table exists. If not, throws TableNotFoundException
1475    * @param tableName
1476    */
1477   private void checkTableExistence(TableName tableName) throws IOException {
1478     if (!tableExists(tableName)) {
1479       throw new TableNotFoundException(tableName);
1480     }
1481   }
1482 
1483   /**
1484    * @param tableName name of table to check
1485    * @return true if table is on-line
1486    * @throws IOException if a remote or network exception occurs
1487    */
1488   @Override
1489   public boolean isTableEnabled(TableName tableName) throws IOException {
1490     checkTableExistence(tableName);
1491     return connection.isTableEnabled(tableName);
1492   }
1493 
1494   public boolean isTableEnabled(byte[] tableName) throws IOException {
1495     return isTableEnabled(TableName.valueOf(tableName));
1496   }
1497 
1498   public boolean isTableEnabled(String tableName) throws IOException {
1499     return isTableEnabled(TableName.valueOf(tableName));
1500   }
1501 
1502 
1503 
1504   /**
1505    * @param tableName name of table to check
1506    * @return true if table is off-line
1507    * @throws IOException if a remote or network exception occurs
1508    */
1509   @Override
1510   public boolean isTableDisabled(TableName tableName) throws IOException {
1511     checkTableExistence(tableName);
1512     return connection.isTableDisabled(tableName);
1513   }
1514 
1515   public boolean isTableDisabled(byte[] tableName) throws IOException {
1516     return isTableDisabled(TableName.valueOf(tableName));
1517   }
1518 
1519   public boolean isTableDisabled(String tableName) throws IOException {
1520     return isTableDisabled(TableName.valueOf(tableName));
1521   }
1522 
1523   /**
1524    * @param tableName name of table to check
1525    * @return true if all regions of the table are available
1526    * @throws IOException if a remote or network exception occurs
1527    */
1528   @Override
1529   public boolean isTableAvailable(TableName tableName) throws IOException {
1530     return connection.isTableAvailable(tableName);
1531   }
1532 
1533   public boolean isTableAvailable(byte[] tableName) throws IOException {
1534     return isTableAvailable(TableName.valueOf(tableName));
1535   }
1536 
1537   public boolean isTableAvailable(String tableName) throws IOException {
1538     return isTableAvailable(TableName.valueOf(tableName));
1539   }
1540 
1541   /**
1542    * Use this API to check if the table has been created with the specified number of
1543    * split keys which were used while creating the given table.
1544    * Note: if this API is used after a table's region gets split, it may return
1545    * false.
1546    * @param tableName
1547    *          name of table to check
1548    * @param splitKeys
1549    *          keys to check if the table has been created with all split keys
1550    * @throws IOException
1551    *           if a remote or network exception occurs
1552    */
1553   @Override
1554   public boolean isTableAvailable(TableName tableName,
1555                                   byte[][] splitKeys) throws IOException {
1556     return connection.isTableAvailable(tableName, splitKeys);
1557   }
1558 
1559   public boolean isTableAvailable(byte[] tableName,
1560                                   byte[][] splitKeys) throws IOException {
1561     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1562   }
1563 
1564   public boolean isTableAvailable(String tableName,
1565                                   byte[][] splitKeys) throws IOException {
1566     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1567   }
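
  // Hedged usage sketch: check availability against the same split keys that were used
  // when the table was created. The table name and keys below are hypothetical.
  //
  //   byte[][] splitKeys = new byte[][] { Bytes.toBytes("row1000"), Bytes.toBytes("row2000") };
  //   boolean ready = admin.isTableAvailable(TableName.valueOf("exampleTable"), splitKeys);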
1568 
1569   /**
1570    * Get the status of an alter command - indicates how many regions have received
1571    * the updated schema. Asynchronous operation.
1572    *
1573    * @param tableName TableName instance
1574    * @return Pair indicating schema update progress: Pair.getFirst() is the number of
1575    *         regions that are yet to be updated; Pair.getSecond() is the total number
1576    *         of regions of the table
1577    * @throws IOException
1578    *           if a remote or network exception occurs
1579    */
1580   @Override
1581   public Pair<Integer, Integer> getAlterStatus(final TableName tableName)
1582   throws IOException {
1583     return executeCallable(new MasterCallable<Pair<Integer, Integer>>(getConnection()) {
1584       @Override
1585       public Pair<Integer, Integer> call(int callTimeout) throws ServiceException {
1586         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1587         controller.setCallTimeout(callTimeout);
1588         controller.setPriority(tableName);
1589 
1590         GetSchemaAlterStatusRequest req = RequestConverter
1591             .buildGetSchemaAlterStatusRequest(tableName);
1592         GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(controller, req);
1593         Pair<Integer, Integer> pair = new Pair<Integer, Integer>(Integer.valueOf(ret
1594             .getYetToUpdateRegions()), Integer.valueOf(ret.getTotalRegions()));
1595         return pair;
1596       }
1597     });
1598   }
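
  // Hedged usage sketch for reading the alter status; the table name is hypothetical.
  //
  //   Pair<Integer, Integer> status = admin.getAlterStatus(TableName.valueOf("exampleTable"));
  //   int pending = status.getFirst();   // regions that have not yet picked up the new schema
  //   int total = status.getSecond();    // total regions in the table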
1599 
1600   /**
1601    * Get the status of an alter command - indicates how many regions have received
1602    * the updated schema. Asynchronous operation.
1603    *
1604    * @param tableName
1605    *          name of the table to get the status of
1606    * @return Pair indicating schema update progress: Pair.getFirst() is the number of
1607    *         regions that are yet to be updated; Pair.getSecond() is the total number
1608    *         of regions of the table
1609    * @throws IOException
1610    *           if a remote or network exception occurs
1611    */
1612   @Override
1613   public Pair<Integer, Integer> getAlterStatus(final byte[] tableName)
1614    throws IOException {
1615     return getAlterStatus(TableName.valueOf(tableName));
1616   }
1617 
1618   /**
1619    * Add a column to an existing table.
1620    * Asynchronous operation.
1621    *
1622    * @param tableName name of the table to add column to
1623    * @param column column descriptor of column to be added
1624    * @throws IOException if a remote or network exception occurs
1625    */
1626   public void addColumn(final byte[] tableName, HColumnDescriptor column)
1627   throws IOException {
1628     addColumn(TableName.valueOf(tableName), column);
1629   }
1630 
1631   /**
1632    * Add a column to an existing table.
1633    * Asynchronous operation.
1634    *
1635    * @param tableName name of the table to add column to
1636    * @param column column descriptor of column to be added
1637    * @throws IOException if a remote or network exception occurs
1638    */
1639   public void addColumn(final String tableName, HColumnDescriptor column)
1640   throws IOException {
1641     addColumn(TableName.valueOf(tableName), column);
1642   }
1643 
1644   /**
1645    * Add a column to an existing table.
1646    * Asynchronous operation.
1647    *
1648    * @param tableName name of the table to add column to
1649    * @param column column descriptor of column to be added
1650    * @throws IOException if a remote or network exception occurs
1651    */
1652   @Override
1653   public void addColumn(final TableName tableName, final HColumnDescriptor column)
1654   throws IOException {
1655     executeCallable(new MasterCallable<Void>(getConnection()) {
1656       @Override
1657       public Void call(int callTimeout) throws ServiceException {
1658         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1659         controller.setCallTimeout(callTimeout);
1660         controller.setPriority(tableName);
1661         AddColumnRequest req = RequestConverter.buildAddColumnRequest(
1662           tableName, column, ng.getNonceGroup(), ng.newNonce());
1663         master.addColumn(controller,req);
1664         return null;
1665       }
1666     });
1667   }
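
  // Hedged usage sketch for the asynchronous addColumn; the names are hypothetical.
  // Callers that need to wait can poll getAlterStatus(...) until the pending count is zero.
  //
  //   admin.addColumn(TableName.valueOf("exampleTable"), new HColumnDescriptor("new_cf"));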
1668 
1669   /**
1670    * Delete a column from a table.
1671    * Asynchronous operation.
1672    *
1673    * @param tableName name of table
1674    * @param columnName name of column to be deleted
1675    * @throws IOException if a remote or network exception occurs
1676    */
1677   public void deleteColumn(final byte[] tableName, final String columnName)
1678   throws IOException {
1679     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1680   }
1681 
1682   /**
1683    * Delete a column from a table.
1684    * Asynchronous operation.
1685    *
1686    * @param tableName name of table
1687    * @param columnName name of column to be deleted
1688    * @throws IOException if a remote or network exception occurs
1689    */
1690   public void deleteColumn(final String tableName, final String columnName)
1691   throws IOException {
1692     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1693   }
1694 
1695   /**
1696    * Delete a column from a table.
1697    * Asynchronous operation.
1698    *
1699    * @param tableName name of table
1700    * @param columnName name of column to be deleted
1701    * @throws IOException if a remote or network exception occurs
1702    */
1703   @Override
1704   public void deleteColumn(final TableName tableName, final byte [] columnName)
1705   throws IOException {
1706     executeCallable(new MasterCallable<Void>(getConnection()) {
1707       @Override
1708       public Void call(int callTimeout) throws ServiceException {
1709         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1710         controller.setCallTimeout(callTimeout);
1711         controller.setPriority(tableName);
1712         DeleteColumnRequest req = RequestConverter.buildDeleteColumnRequest(
1713           tableName, columnName, ng.getNonceGroup(), ng.newNonce());
1714         master.deleteColumn(controller, req);
1715         return null;
1716       }
1717     });
1718   }
1719 
1720   /**
1721    * Modify an existing column family on a table.
1722    * Asynchronous operation.
1723    *
1724    * @param tableName name of table
1725    * @param descriptor new column descriptor to use
1726    * @throws IOException if a remote or network exception occurs
1727    */
1728   public void modifyColumn(final String tableName, HColumnDescriptor descriptor)
1729   throws IOException {
1730     modifyColumn(TableName.valueOf(tableName), descriptor);
1731   }
1732 
1733   /**
1734    * Modify an existing column family on a table.
1735    * Asynchronous operation.
1736    *
1737    * @param tableName name of table
1738    * @param descriptor new column descriptor to use
1739    * @throws IOException if a remote or network exception occurs
1740    */
1741   public void modifyColumn(final byte[] tableName, HColumnDescriptor descriptor)
1742   throws IOException {
1743     modifyColumn(TableName.valueOf(tableName), descriptor);
1744   }
1745 
1746   /**
1747    * Modify an existing column family on a table.
1748    * Asynchronous operation.
1749    *
1750    * @param tableName name of table
1751    * @param descriptor new column descriptor to use
1752    * @throws IOException if a remote or network exception occurs
1753    */
1754   @Override
1755   public void modifyColumn(final TableName tableName, final HColumnDescriptor descriptor)
1756   throws IOException {
1757     executeCallable(new MasterCallable<Void>(getConnection()) {
1758       @Override
1759       public Void call(int callTimeout) throws ServiceException {
1760         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1761         controller.setCallTimeout(callTimeout);
1762         controller.setPriority(tableName);
1763         ModifyColumnRequest req = RequestConverter.buildModifyColumnRequest(
1764           tableName, descriptor, ng.getNonceGroup(), ng.newNonce());
1765         master.modifyColumn(controller, req);
1766         return null;
1767       }
1768     });
1769   }
1770 
1771   /**
1772    * Close a region. For expert-admins.  Runs close on the regionserver.  The
1773    * master will not be informed of the close.
1774    * @param regionname region name to close
1775    * @param serverName If supplied, we'll use this location rather than
1776    * the one currently in <code>hbase:meta</code>
1777    * @throws IOException if a remote or network exception occurs
1778    */
1779   @Override
1780   public void closeRegion(final String regionname, final String serverName)
1781   throws IOException {
1782     closeRegion(Bytes.toBytes(regionname), serverName);
1783   }
1784 
1785   /**
1786    * Close a region.  For expert-admins.  Runs close on the regionserver.  The
1787    * master will not be informed of the close.
1788    * @param regionname region name to close
1789    * @param serverName The servername of the regionserver.  If passed null we
1790    * will use servername found in the hbase:meta table. A server name
1791    * is made of host, port and startcode.  Here is an example:
1792    * <code> host187.example.com,60020,1289493121758</code>
1793    * @throws IOException if a remote or network exception occurs
1794    */
1795   @Override
1796   public void closeRegion(final byte [] regionname, final String serverName)
1797       throws IOException {
1798     if (serverName != null) {
1799       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1800       if (pair == null || pair.getFirst() == null) {
1801         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1802       } else {
1803         closeRegion(ServerName.valueOf(serverName), pair.getFirst());
1804       }
1805     } else {
1806       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1807       if (pair == null) {
1808         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1809       } else if (pair.getSecond() == null) {
1810         throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
1811       } else {
1812         closeRegion(pair.getSecond(), pair.getFirst());
1813       }
1814     }
1815   }
1816 
1817   /**
1818    * For expert-admins. Runs close on the regionserver. Closes a region based on
1819    * the encoded region name. The region server name is mandatory; the specified
1820    * region is closed only if it is found among the online regions of that
1821    * regionserver. The master will not be
1822    * informed of the close. Note that the regionname is the encoded regionname.
1823    *
1824    * @param encodedRegionName
1825    *          The encoded region name; i.e. the hash that makes up the region
1826    *          name suffix: e.g. if regionname is
1827    *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>
1828    *          , then the encoded region name is:
1829    *          <code>527db22f95c8a9e0116f0cc13c680396</code>.
1830    * @param serverName
1831    *          The servername of the regionserver. A server name is made of host,
1832    *          port and startcode. This is mandatory. Here is an example:
1833    *          <code> host187.example.com,60020,1289493121758</code>
1834    * @return true if the region was closed, false if not.
1835    * @throws IOException
1836    *           if a remote or network exception occurs
1837    */
1838   @Override
1839   public boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
1840       final String serverName) throws IOException {
1841     if (null == serverName || ("").equals(serverName.trim())) {
1842       throw new IllegalArgumentException(
1843           "The servername cannot be null or empty.");
1844     }
1845     ServerName sn = ServerName.valueOf(serverName);
1846     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1847     // Close the region without updating zk state.
1848     CloseRegionRequest request =
1849       RequestConverter.buildCloseRegionRequest(sn, encodedRegionName, false);
1850     try {
1851       PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1852 
1853       // TODO: this does not do retries, it should. Set priority and timeout in controller
1854       CloseRegionResponse response = admin.closeRegion(controller, request);
1855       boolean isRegionClosed = response.getClosed();
1856       if (!isRegionClosed) {
1857         LOG.error("Not able to close the region " + encodedRegionName + ".");
1858       }
1859       return isRegionClosed;
1860     } catch (ServiceException se) {
1861       throw ProtobufUtil.getRemoteException(se);
1862     }
1863   }
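
  // Hedged usage sketch using the example values from the javadoc above; the server must
  // be given in host,port,startcode form and the region name must be the encoded name.
  //
  //   boolean closed = admin.closeRegionWithEncodedRegionName(
  //       "527db22f95c8a9e0116f0cc13c680396", "host187.example.com,60020,1289493121758");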
1864 
1865   /**
1866    * Close a region.  For expert-admins.  Runs close on the regionserver.  The
1867    * master will not be informed of the close.
1868    * @param sn the regionserver on which the region is currently running
1869    * @param hri the region to close
1870    * @throws IOException if a remote or network exception occurs
1871    */
1872   @Override
1873   public void closeRegion(final ServerName sn, final HRegionInfo hri)
1874   throws IOException {
1875     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1876     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1877 
1878     // Close the region without updating zk state.
1879     ProtobufUtil.closeRegion(controller, admin, sn, hri.getRegionName(), false);
1880   }
1881 
1882   /**
1883    * Get all the online regions on a region server.
1884    */
1885   @Override
1886   public List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException {
1887     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1888     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1889     return ProtobufUtil.getOnlineRegions(controller, admin);
1890   }
1891 
1892   /**
1893    * {@inheritDoc}
1894    */
1895   @Override
1896   public void flush(final TableName tableName) throws IOException {
1897     checkTableExists(tableName);
1898     if (isTableDisabled(tableName)) {
1899       LOG.info("Table is disabled: " + tableName.getNameAsString());
1900       return;
1901     }
1902     execProcedure("flush-table-proc", tableName.getNameAsString(),
1903       new HashMap<String, String>());
1904   }
1905 
1906   /**
1907    * {@inheritDoc}
1908    */
1909   @Override
1910   public void flushRegion(final byte[] regionName) throws IOException {
1911     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
1912     if (regionServerPair == null) {
1913       throw new IllegalArgumentException("Unknown regionname: " + Bytes.toStringBinary(regionName));
1914     }
1915     if (regionServerPair.getSecond() == null) {
1916       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1917     }
1918     flush(regionServerPair.getSecond(), regionServerPair.getFirst());
1919   }
1920 
1921   /**
1922    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or
1923    * {@link #flushRegion(byte[])} instead.
1924    */
1925   @Deprecated
1926   public void flush(final String tableNameOrRegionName)
1927   throws IOException, InterruptedException {
1928     flush(Bytes.toBytes(tableNameOrRegionName));
1929   }
1930 
1931   /**
1932    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or
1933    * {@link #flushRegion(byte[])} instead.
1934    */
1935   @Deprecated
1936   public void flush(final byte[] tableNameOrRegionName)
1937   throws IOException, InterruptedException {
1938     try {
1939       flushRegion(tableNameOrRegionName);
1940     } catch (IllegalArgumentException e) {
1941       // Unknown region.  Try table.
1942       flush(TableName.valueOf(tableNameOrRegionName));
1943     }
1944   }
1945 
1946   private void flush(final ServerName sn, final HRegionInfo hri)
1947   throws IOException {
1948     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1949     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1950     FlushRegionRequest request =
1951       RequestConverter.buildFlushRegionRequest(hri.getRegionName());
1952     try {
1953       admin.flushRegion(controller, request);
1954     } catch (ServiceException se) {
1955       throw ProtobufUtil.getRemoteException(se);
1956     }
1957   }
1958 
1959   /**
1960    * {@inheritDoc}
1961    */
1962   @Override
1963   public void compact(final TableName tableName)
1964     throws IOException {
1965     compact(tableName, null, false);
1966   }
1967 
1968   /**
1969    * {@inheritDoc}
1970    */
1971   @Override
1972   public void compactRegion(final byte[] regionName)
1973     throws IOException {
1974     compactRegion(regionName, null, false);
1975   }
1976 
1977   /**
1978    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or
1979    * {@link #compactRegion(byte[])} instead.
1980    */
1981   @Deprecated
1982   public void compact(final String tableNameOrRegionName)
1983   throws IOException {
1984     compact(Bytes.toBytes(tableNameOrRegionName));
1985   }
1986 
1987   /**
1988    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or
1989    * {@link #compactRegion(byte[])} instead.
1990    */
1991   @Deprecated
1992   public void compact(final byte[] tableNameOrRegionName)
1993   throws IOException {
1994     try {
1995       compactRegion(tableNameOrRegionName, null, false);
1996     } catch (IllegalArgumentException e) {
1997       compact(TableName.valueOf(tableNameOrRegionName), null, false);
1998     }
1999   }
2000 
2001   /**
2002    * {@inheritDoc}
2003    */
2004   @Override
2005   public void compact(final TableName tableName, final byte[] columnFamily)
2006     throws IOException {
2007     compact(tableName, columnFamily, false);
2008   }
2009 
2010   /**
2011    * {@inheritDoc}
2012    */
2013   @Override
2014   public void compactRegion(final byte[] regionName, final byte[] columnFamily)
2015     throws IOException {
2016     compactRegion(regionName, columnFamily, false);
2017   }
2018 
2019   /**
2020    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or
2021    * {@link #compactRegion(byte[], byte[])} instead.
2022    */
2023   @Deprecated
2024   public void compact(String tableOrRegionName, String columnFamily)
2025     throws IOException {
2026     compact(Bytes.toBytes(tableOrRegionName), Bytes.toBytes(columnFamily));
2027   }
2028 
2029   /**
2030    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or
2031    * {@link #compactRegion(byte[], byte[])} instead.
2032    */
2033   @Deprecated
2034   public void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
2035   throws IOException {
2036     try {
2037       compactRegion(tableNameOrRegionName, columnFamily, false);
2038     } catch (IllegalArgumentException e) {
2039       // Bad region, try table
2040       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, false);
2041     }
2042   }
2043 
2044   /**
2045    * {@inheritDoc}
2046    */
2047   @Override
2048   public void compactRegionServer(final ServerName sn, boolean major)
2049   throws IOException, InterruptedException {
2050     for (HRegionInfo region : getOnlineRegions(sn)) {
2051       compact(sn, region, major, null);
2052     }
2053   }
2054 
2055   /**
2056    * {@inheritDoc}
2057    */
2058   @Override
2059   public void majorCompact(final TableName tableName)
2060   throws IOException {
2061     compact(tableName, null, true);
2062   }
2063 
2064   /**
2065    * {@inheritDoc}
2066    */
2067   @Override
2068   public void majorCompactRegion(final byte[] regionName)
2069   throws IOException {
2070     compactRegion(regionName, null, true);
2071   }
2072 
2073   /**
2074    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
2075    * #majorCompactRegion(byte[])} instead.
2076    */
2077   @Deprecated
2078   public void majorCompact(final String tableNameOrRegionName)
2079   throws IOException {
2080     majorCompact(Bytes.toBytes(tableNameOrRegionName));
2081   }
2082 
2083   /**
2084    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
2085    * #majorCompactRegion(byte[])} instead.
2086    */
2087   @Deprecated
2088   public void majorCompact(final byte[] tableNameOrRegionName)
2089   throws IOException {
2090     try {
2091       compactRegion(tableNameOrRegionName, null, true);
2092     } catch (IllegalArgumentException e) {
2093       // Invalid region, try table
2094       compact(TableName.valueOf(tableNameOrRegionName), null, true);
2095     }
2096   }
2097 
2098   /**
2099    * {@inheritDoc}
2100    */
2101   @Override
2102   public void majorCompact(final TableName tableName, final byte[] columnFamily)
2103   throws IOException {
2104     compact(tableName, columnFamily, true);
2105   }
2106 
2107   /**
2108    * {@inheritDoc}
2109    */
2110   @Override
2111   public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily)
2112   throws IOException {
2113     compactRegion(regionName, columnFamily, true);
2114   }
2115 
2116   /**
2117    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
2118    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
2119    */
2120   @Deprecated
2121   public void majorCompact(final String tableNameOrRegionName, final String columnFamily)
2122   throws IOException {
2123     majorCompact(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(columnFamily));
2124   }
2125 
2126   /**
2127    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
2128    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
2129    */
2130   @Deprecated
2131   public void majorCompact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
2132   throws IOException {
2133     try {
2134       compactRegion(tableNameOrRegionName, columnFamily, true);
2135     } catch (IllegalArgumentException e) {
2136       // Invalid region, try table
2137       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, true);
2138     }
2139   }
2140 
2141   /**
2142    * Compact a table.
2143    * Asynchronous operation.
2144    *
2145    * @param tableName table to compact
2146    * @param columnFamily column family within a table or region
2147    * @param major True if we are to do a major compaction.
2148    * @throws IOException if a remote or network exception occurs
2149    * @throws InterruptedException
2150    */
2151   private void compact(final TableName tableName, final byte[] columnFamily,final boolean major)
2152   throws IOException {
2153     ZooKeeperWatcher zookeeper = null;
2154     try {
2155       checkTableExists(tableName);
2156       zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2157           new ThrowableAbortable());
2158       List<Pair<HRegionInfo, ServerName>> pairs =
2159         MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
2160       for (Pair<HRegionInfo, ServerName> pair: pairs) {
2161         if (pair.getFirst().isOffline()) continue;
2162         if (pair.getSecond() == null) continue;
2163         try {
2164           compact(pair.getSecond(), pair.getFirst(), major, columnFamily);
2165         } catch (NotServingRegionException e) {
2166           if (LOG.isDebugEnabled()) {
2167             LOG.debug("Trying to" + (major ? " major" : "") + " compact " +
2168               pair.getFirst() + ": " +
2169               StringUtils.stringifyException(e));
2170           }
2171         }
2172       }
2173     } finally {
2174       if (zookeeper != null) {
2175         zookeeper.close();
2176       }
2177     }
2178   }
2179 
2180   /**
2181    * Compact an individual region.
2182    * Asynchronous operation.
2183    *
2184    * @param regionName region to compact
2185    * @param columnFamily column family within a table or region
2186    * @param major True if we are to do a major compaction.
2187    * @throws IOException if a remote or network exception occurs
2188    * @throws InterruptedException
2189    */
2190   private void compactRegion(final byte[] regionName, final byte[] columnFamily,final boolean major)
2191   throws IOException {
2192     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2193     if (regionServerPair == null) {
2194       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2195     }
2196     if (regionServerPair.getSecond() == null) {
2197       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2198     }
2199     compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
2200   }
2201 
2202   private void compact(final ServerName sn, final HRegionInfo hri,
2203       final boolean major, final byte [] family)
2204   throws IOException {
2205     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2206     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2207     CompactRegionRequest request =
2208       RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family);
2209     try {
2210       // TODO: this does not do retries, it should. Set priority and timeout in controller
2211       admin.compactRegion(controller, request);
2212     } catch (ServiceException se) {
2213       throw ProtobufUtil.getRemoteException(se);
2214     }
2215   }
2216 
2217   /**
2218    * Move the region <code>r</code> to <code>dest</code>.
2219    * @param encodedRegionName The encoded region name; i.e. the hash that makes
2220    * up the region name suffix: e.g. if regionname is
2221    * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
2222    * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
2223    * @param destServerName The servername of the destination regionserver.  If
2224    * passed the empty byte array we'll assign to a random server.  A server name
2225    * is made of host, port and startcode.  Here is an example:
2226    * <code> host187.example.com,60020,1289493121758</code>
2227    * @throws UnknownRegionException Thrown if we can't find a region named
2228    * <code>encodedRegionName</code>
2229    */
2230   @Override
2231   public void move(final byte [] encodedRegionName, final byte [] destServerName)
2232       throws IOException {
2233 
2234     executeCallable(new MasterCallable<Void>(getConnection()) {
2235       @Override
2236       public Void call(int callTimeout) throws ServiceException {
2237         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2238         controller.setCallTimeout(callTimeout);
2239         // Hard to know the table name, at least check if meta
2240         if (isMetaRegion(encodedRegionName)) {
2241           controller.setPriority(TableName.META_TABLE_NAME);
2242         }
2243 
2244         try {
2245           MoveRegionRequest request =
2246               RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName);
2247             master.moveRegion(controller, request);
2248         } catch (DeserializationException de) {
2249           LOG.error("Could not parse destination server name: " + de);
2250           throw new ServiceException(new DoNotRetryIOException(de));
2251         }
2252         return null;
2253       }
2254     });
2255   }
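
  // Hedged usage sketch using the example values from the javadoc above. Passing
  // HConstants.EMPTY_BYTE_ARRAY as the destination lets the master pick a random server.
  //
  //   admin.move(Bytes.toBytes("527db22f95c8a9e0116f0cc13c680396"),
  //       Bytes.toBytes("host187.example.com,60020,1289493121758"));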
2256 
2257   private boolean isMetaRegion(final byte[] regionName) {
2258     return Bytes.equals(regionName, HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
2259         || Bytes.equals(regionName, HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes());
2260   }
2261 
2262   /**
2263    * @param regionName
2264    *          Region name to assign.
2265    * @throws MasterNotRunningException
2266    * @throws ZooKeeperConnectionException
2267    * @throws IOException
2268    */
2269   @Override
2270   public void assign(final byte[] regionName) throws MasterNotRunningException,
2271       ZooKeeperConnectionException, IOException {
2272     final byte[] toBeAssigned = getRegionName(regionName);
2273     executeCallable(new MasterCallable<Void>(getConnection()) {
2274       @Override
2275       public Void call(int callTimeout) throws ServiceException {
2276         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2277         controller.setCallTimeout(callTimeout);
2278         // Hard to know the table name, at least check if meta
2279         if (isMetaRegion(regionName)) {
2280           controller.setPriority(TableName.META_TABLE_NAME);
2281         }
2282 
2283         AssignRegionRequest request =
2284           RequestConverter.buildAssignRegionRequest(toBeAssigned);
2285         master.assignRegion(controller,request);
2286         return null;
2287       }
2288     });
2289   }
2290 
2291   /**
2292    * Unassign a region from current hosting regionserver.  Region will then be
2293    * assigned to a regionserver chosen at random.  Region could be reassigned
2294    * back to the same server.  Use {@link #move(byte[], byte[])} if you want
2295    * to control the region movement.
2296    * @param regionName Region to unassign. Will clear any existing RegionPlan
2297    * if one found.
2298    * @param force If true, force unassign (Will remove region from
2299    * regions-in-transition too if present. If results in double assignment
2300    * use hbck -fix to resolve. To be used by experts).
2301    * @throws MasterNotRunningException
2302    * @throws ZooKeeperConnectionException
2303    * @throws IOException
2304    */
2305   @Override
2306   public void unassign(final byte [] regionName, final boolean force)
2307   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
2308     final byte[] toBeUnassigned = getRegionName(regionName);
2309     executeCallable(new MasterCallable<Void>(getConnection()) {
2310       @Override
2311       public Void call(int callTimeout) throws ServiceException {
2312         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2313         controller.setCallTimeout(callTimeout);
2314         // Hard to know the table name, at least check if meta
2315         if (isMetaRegion(regionName)) {
2316           controller.setPriority(TableName.META_TABLE_NAME);
2317         }
2318         UnassignRegionRequest request =
2319           RequestConverter.buildUnassignRegionRequest(toBeUnassigned, force);
2320         master.unassignRegion(controller, request);
2321         return null;
2322       }
2323     });
2324   }
2325 
2326   /**
2327    * Offline specified region from master's in-memory state. It will not attempt to reassign the
2328    * region as in unassign. This API can be used when a region is not served by any region server
2329    * but is still online as per the Master's in-memory state. If this API is incorrectly used on an
2330    * active region then the master will lose track of that region.
2331    *
2332    * This is a special method that should be used by experts or hbck.
2333    *
2334    * @param regionName
2335    *          Region to offline.
2336    * @throws IOException
2337    */
2338   @Override
2339   public void offline(final byte [] regionName)
2340   throws IOException {
2341     executeCallable(new MasterCallable<Void>(getConnection()) {
2342       @Override
2343       public Void call(int callTimeout) throws ServiceException {
2344         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2345         controller.setCallTimeout(callTimeout);
2346         // Hard to know the table name, at least check if meta
2347         if (isMetaRegion(regionName)) {
2348           controller.setPriority(TableName.META_TABLE_NAME);
2349         }
2350         master.offlineRegion(controller, RequestConverter.buildOfflineRegionRequest(regionName));
2351         return null;
2352       }
2353     });
2354   }
2355 
2356   /**
2357    * Turn the load balancer on or off.
2358    * @param on If true, enable balancer. If false, disable balancer.
2359    * @param synchronous If true, waits for any outstanding balance() call to return before this call returns.
2360    * @return Previous balancer value
2361    */
2362   @Override
2363   public boolean setBalancerRunning(final boolean on, final boolean synchronous)
2364   throws IOException {
2365     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2366       @Override
2367       public Boolean call(int callTimeout) throws ServiceException {
2368         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2369         controller.setCallTimeout(callTimeout);
2370 
2371         SetBalancerRunningRequest req =
2372             RequestConverter.buildSetBalancerRunningRequest(on, synchronous);
2373         return master.setBalancerRunning(controller, req).getPrevBalanceValue();
2374       }
2375     });
2376   }
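
  // Hedged usage sketch: temporarily switch the balancer off (synchronously) and later
  // restore whatever state it was in before.
  //
  //   boolean previous = admin.setBalancerRunning(false, true);
  //   // ... perform maintenance that should not race with region moves ...
  //   admin.setBalancerRunning(previous, true);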
2377 
2378   /**
2379    * Invoke the balancer.  Will run the balancer and, if there are regions to move, it will
2380    * go ahead and do the reassignments.  It may be unable to run for various reasons; check
2381    * the logs.
2382    * @return True if balancer ran, false otherwise.
2383    */
2384   @Override
2385   public boolean balancer() throws IOException {
2386     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2387       @Override
2388       public Boolean call(int callTimeout) throws ServiceException {
2389         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2390         controller.setCallTimeout(callTimeout);
2391         return master.balance(controller, RequestConverter.buildBalanceRequest()).getBalancerRan();
2392       }
2393     });
2394   }
2395 
2396   /**
2397    * Query the state of the balancer from the Master. It's not a guarantee that the balancer is
2398    * actually running this very moment, but that it will run.
2399    *
2400    * @return True if the balancer is enabled, false otherwise.
2401    */
2402   @Override
2403   public boolean isBalancerEnabled() throws IOException {
2404     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2405       @Override
2406       public Boolean call(int callTimeout) throws ServiceException {
2407         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2408         controller.setCallTimeout(callTimeout);
2409 
2410         return master.isBalancerEnabled(controller,
2411           RequestConverter.buildIsBalancerEnabledRequest()).getEnabled();
2412       }
2413     });
2414   }
2415 
2416   /**
2417    * Enable/Disable the catalog janitor
2418    * @param enable if true enables the catalog janitor
2419    * @return the previous state
2420    * @throws MasterNotRunningException
2421    */
2422   @Override
2423   public boolean enableCatalogJanitor(final boolean enable)
2424       throws IOException {
2425     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2426       @Override
2427       public Boolean call(int callTimeout) throws ServiceException {
2428         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2429         controller.setCallTimeout(callTimeout);
2430 
2431         return master.enableCatalogJanitor(controller,
2432           RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
2433       }
2434     });
2435   }
2436 
2437   /**
2438    * Ask for a scan of the catalog table
2439    * @return the number of entries cleaned
2440    * @throws MasterNotRunningException
2441    */
2442   @Override
2443   public int runCatalogScan() throws IOException {
2444     return executeCallable(new MasterCallable<Integer>(getConnection()) {
2445       @Override
2446       public Integer call(int callTimeout) throws ServiceException {
2447         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2448         controller.setCallTimeout(callTimeout);
2449 
2450         return master.runCatalogScan(controller,
2451           RequestConverter.buildCatalogScanRequest()).getScanResult();
2452       }
2453     });
2454   }
2455 
2456   /**
2457    * Query on the catalog janitor state (Enabled/Disabled?)
2458    * @throws org.apache.hadoop.hbase.MasterNotRunningException
2459    */
2460   @Override
2461   public boolean isCatalogJanitorEnabled() throws IOException {
2462     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2463       @Override
2464       public Boolean call(int callTimeout) throws ServiceException {
2465         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2466         controller.setCallTimeout(callTimeout);
2467 
2468         return master.isCatalogJanitorEnabled(controller,
2469           RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
2470       }
2471     });
2472   }
2473 
2474   /**
2475    * Merge two regions. Asynchronous operation.
2476    * @param encodedNameOfRegionA encoded name of region a
2477    * @param encodedNameOfRegionB encoded name of region b
2478    * @param forcible true if do a compulsory merge, otherwise we will only merge
2479    *          two adjacent regions
2480    * @throws IOException
2481    */
2482   @Override
2483   public void mergeRegions(final byte[] encodedNameOfRegionA,
2484       final byte[] encodedNameOfRegionB, final boolean forcible)
2485       throws IOException {
2486     Pair<HRegionInfo, ServerName> pair = getRegion(encodedNameOfRegionA);
2487     if (pair != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID)
2488       throw new IllegalArgumentException("Can't invoke merge on non-default regions directly");
2489     pair = getRegion(encodedNameOfRegionB);
2490     if (pair != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID)
2491       throw new IllegalArgumentException("Can't invoke merge on non-default regions directly");
2492     executeCallable(new MasterCallable<Void>(getConnection()) {
2493       @Override
2494       public Void call(int callTimeout) throws ServiceException {
2495         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2496         controller.setCallTimeout(callTimeout);
2497 
2498         try {
2499           DispatchMergingRegionsRequest request = RequestConverter
2500               .buildDispatchMergingRegionsRequest(encodedNameOfRegionA,
2501                 encodedNameOfRegionB, forcible);
2502           master.dispatchMergingRegions(controller, request);
2503         } catch (DeserializationException de) {
2504           LOG.error("Could not parse destination server name: " + de);
2505         }
2506         return null;
2507       }
2508     });
2509   }
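
  // Hedged usage sketch; the encoded region names are hypothetical placeholders. With
  // forcible=false only adjacent regions will be merged.
  //
  //   admin.mergeRegions(Bytes.toBytes("<encodedNameOfRegionA>"),
  //       Bytes.toBytes("<encodedNameOfRegionB>"), false);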
2510 
2511   /**
2512    * {@inheritDoc}
2513    */
2514   @Override
2515   public void split(final TableName tableName)
2516     throws IOException {
2517     split(tableName, null);
2518   }
2519 
2520   /**
2521    * {@inheritDoc}
2522    */
2523   @Override
2524   public void splitRegion(final byte[] regionName)
2525     throws IOException {
2526     splitRegion(regionName, null);
2527   }
2528 
2529   /**
2530    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or
2531    * {@link #splitRegion(byte[])} instead.
2532    */
2533   @Deprecated
2534   public void split(final String tableNameOrRegionName)
2535   throws IOException, InterruptedException {
2536     split(Bytes.toBytes(tableNameOrRegionName));
2537   }
2538 
2539   /**
2540    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or
2541    * {@link #splitRegion(byte[])} instead.
2542    */
2543   @Deprecated
2544   public void split(final byte[] tableNameOrRegionName)
2545   throws IOException, InterruptedException {
2546     split(tableNameOrRegionName, null);
2547   }
2548 
2549   /**
2550    * {@inheritDoc}
2551    */
2552   @Override
2553   public void split(final TableName tableName, final byte [] splitPoint)
2554   throws IOException {
2555     ZooKeeperWatcher zookeeper = null;
2556     try {
2557       checkTableExists(tableName);
2558       zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2559         new ThrowableAbortable());
2560       List<Pair<HRegionInfo, ServerName>> pairs =
2561         MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
2562       for (Pair<HRegionInfo, ServerName> pair: pairs) {
2563         // May not be a server for a particular row
2564         if (pair.getSecond() == null) continue;
2565         HRegionInfo r = pair.getFirst();
2566         // check for parents
2567         if (r.isSplitParent()) continue;
2568         // if a split point given, only split that particular region
2569         if (r.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID ||
2570            (splitPoint != null && !r.containsRow(splitPoint))) continue;
2571         // call out to region server to do split now
2572         split(pair.getSecond(), pair.getFirst(), splitPoint);
2573       }
2574     } finally {
2575       if (zookeeper != null) {
2576         zookeeper.close();
2577       }
2578     }
2579   }
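
  // Hedged usage sketch: split the region of a table that contains the given row key at
  // that key. The table name and row key are hypothetical.
  //
  //   admin.split(TableName.valueOf("exampleTable"), Bytes.toBytes("row5000"));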
2580 
2581   /**
2582    * {@inheritDoc}
2583    */
2584   @Override
2585   public void splitRegion(final byte[] regionName, final byte [] splitPoint)
2586   throws IOException {
2587     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2588     if (regionServerPair == null) {
2589       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2590     }
2591     if (regionServerPair.getFirst() != null &&
2592         regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
2593       throw new IllegalArgumentException("Can't split replicas directly. "
2594           + "Replicas are auto-split when their primary is split.");
2595     }
2596     if (regionServerPair.getSecond() == null) {
2597       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2598     }
2599     split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
2600   }
2601 
2602   /**
2603    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2604    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2605    */
2606   @Deprecated
2607   public void split(final String tableNameOrRegionName,
2608     final String splitPoint) throws IOException {
2609     split(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(splitPoint));
2610   }
2611 
2612   /**
2613    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2614    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2615    */
2616   @Deprecated
2617   public void split(final byte[] tableNameOrRegionName,
2618       final byte [] splitPoint) throws IOException {
2619     try {
2620       splitRegion(tableNameOrRegionName, splitPoint);
2621     } catch (IllegalArgumentException e) {
2622       // Bad region, try table
2623       split(TableName.valueOf(tableNameOrRegionName), splitPoint);
2624     }
2625   }
2626 
2627   @VisibleForTesting
2628   public void split(final ServerName sn, final HRegionInfo hri,
2629       byte[] splitPoint) throws IOException {
2630     if (hri.getStartKey() != null && splitPoint != null &&
2631          Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
2632       throw new IOException("Should not provide a split key that equals the region start key.");
2633     }
2634     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2635     controller.setPriority(hri.getTable());
2636 
2637     // TODO: this does not do retries, it should. Set priority and timeout in controller
2638     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2639     ProtobufUtil.split(controller, admin, hri, splitPoint);
2640   }
2641 
2642   /**
2643    * Modify an existing table, more IRB friendly version.
2644    * Asynchronous operation.  This means that it may be a while before the
2645    * schema change has been propagated to all regions of the table.
2646    *
2647    * @param tableName name of table.
2648    * @param htd modified description of the table
2649    * @throws IOException if a remote or network exception occurs
2650    */
2651   @Override
2652   public void modifyTable(final TableName tableName, final HTableDescriptor htd)
2653   throws IOException {
2654     if (!tableName.equals(htd.getTableName())) {
2655       throw new IllegalArgumentException("The specified table name '" + tableName +
2656         "' does not match the table name in the HTableDescriptor: " + htd.getTableName());
2657     }
2658 
2659     executeCallable(new MasterCallable<Void>(getConnection()) {
2660       @Override
2661       public Void call(int callTimeout) throws ServiceException {
2662         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2663         controller.setCallTimeout(callTimeout);
2664         controller.setPriority(tableName);
2665         ModifyTableRequest request = RequestConverter.buildModifyTableRequest(
2666           tableName, htd, ng.getNonceGroup(), ng.newNonce());
2667         master.modifyTable(controller, request);
2668         return null;
2669       }
2670     });
2671   }
2672 
2673   public void modifyTable(final byte[] tableName, final HTableDescriptor htd)
2674   throws IOException {
2675     modifyTable(TableName.valueOf(tableName), htd);
2676   }
2677 
2678   public void modifyTable(final String tableName, final HTableDescriptor htd)
2679   throws IOException {
2680     modifyTable(TableName.valueOf(tableName), htd);
2681   }
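  /*
   * A small sketch of the asynchronous modifyTable flow above, assuming an Admin handle obtained
   * from a Connection as in the earlier sketch; the table and family names are illustrative. The
   * call returns before the schema change has reached every region.
   *
   *   TableName tn = TableName.valueOf("usertable");
   *   HTableDescriptor htd = admin.getTableDescriptor(tn);
   *   htd.addFamily(new HColumnDescriptor("new_cf"));   // add a column family to the schema
   *   admin.modifyTable(tn, htd);                       // asynchronous on the master
   */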
2682 
2683   /**
2684    * @param regionName Name of a region.
2685    * @return a pair of HRegionInfo and ServerName if <code>regionName</code> is
2686    *  a verified region name (we call {@link
2687    *  MetaTableAccessor#getRegion(HConnection, byte[])});
2688    *  otherwise null.
2689    * Throws IllegalArgumentException if <code>regionName</code> is null.
2690    * @throws IOException if a remote or network exception occurs
2691    */
2692   Pair<HRegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException {
2693     if (regionName == null) {
2694       throw new IllegalArgumentException("Pass a table name or region name");
2695     }
2696     Pair<HRegionInfo, ServerName> pair =
2697       MetaTableAccessor.getRegion(connection, regionName);
2698     if (pair == null) {
2699       final AtomicReference<Pair<HRegionInfo, ServerName>> result =
2700         new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
2701       final String encodedName = Bytes.toString(regionName);
2702       MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
2703         @Override
2704         public boolean processRow(Result data) throws IOException {
2705           HRegionInfo info = HRegionInfo.getHRegionInfo(data);
2706           if (info == null) {
2707             LOG.warn("No serialized HRegionInfo in " + data);
2708             return true;
2709           }
2710           RegionLocations rl = MetaTableAccessor.getRegionLocations(data);
2711           boolean matched = false;
2712           ServerName sn = null;
2713           for (HRegionLocation h : rl.getRegionLocations()) {
2714             if (h != null && encodedName.equals(h.getRegionInfo().getEncodedName())) {
2715               sn = h.getServerName();
2716               info = h.getRegionInfo();
2717               matched = true;
2718             }
2719           }
2720           if (!matched) return true;
2721           result.set(new Pair<HRegionInfo, ServerName>(info, sn));
2722           return false; // found the region, stop
2723         }
2724       };
2725 
2726       MetaScanner.metaScan(connection, visitor, null);
2727       pair = result.get();
2728     }
2729     return pair;
2730   }
2731 
2732   /**
2733    * If the input is a region name, it is returned as is. If it's an
2734    * encoded region name, the corresponding region is found from meta
2735    * and its region name is returned. If we can't find any region in
2736    * meta matching the input as either region name or encoded region
2737    * name, the input is returned as is. We don't throw unknown
2738    * region exception.
2739    */
2740   private byte[] getRegionName(
2741       final byte[] regionNameOrEncodedRegionName) throws IOException {
2742     if (Bytes.equals(regionNameOrEncodedRegionName,
2743         HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
2744           || Bytes.equals(regionNameOrEncodedRegionName,
2745             HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) {
2746       return HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
2747     }
2748     byte[] tmp = regionNameOrEncodedRegionName;
2749     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName);
2750     if (regionServerPair != null && regionServerPair.getFirst() != null) {
2751       tmp = regionServerPair.getFirst().getRegionName();
2752     }
2753     return tmp;
2754   }
2755 
2756   /**
2757    * Check that a table exists, throwing if it does not.
2758    * @param tableName Name of a table.
2759    * @return the passed-in tableName
2760    * @throws IOException if a remote or network exception occurs.
2761    * @throws TableNotFoundException if table does not exist.
2762    */
2763   private TableName checkTableExists(final TableName tableName)
2764       throws IOException {
2765     if (!MetaTableAccessor.tableExists(connection, tableName)) {
2766       throw new TableNotFoundException(tableName);
2767     }
2768     return tableName;
2769   }
2770 
2771   /**
2772    * Shuts down the HBase cluster
2773    * @throws IOException if a remote or network exception occurs
2774    */
2775   @Override
2776   public synchronized void shutdown() throws IOException {
2777     executeCallable(new MasterCallable<Void>(getConnection()) {
2778       @Override
2779       public Void call(int callTimeout) throws ServiceException {
2780         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2781         controller.setCallTimeout(callTimeout);
2782         controller.setPriority(HConstants.HIGH_QOS);
2783         master.shutdown(controller, ShutdownRequest.newBuilder().build());
2784         return null;
2785       }
2786     });
2787   }
2788 
2789   /**
2790    * Shuts down the current HBase master only.
2791    * Does not shutdown the cluster.
2792    * @see #shutdown()
2793    * @throws IOException if a remote or network exception occurs
2794    */
2795   @Override
2796   public synchronized void stopMaster() throws IOException {
2797     executeCallable(new MasterCallable<Void>(getConnection()) {
2798       @Override
2799       public Void call(int callTimeout) throws ServiceException {
2800         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2801         controller.setCallTimeout(callTimeout);
2802         controller.setPriority(HConstants.HIGH_QOS);
2803         master.stopMaster(controller, StopMasterRequest.newBuilder().build());
2804         return null;
2805       }
2806     });
2807   }
2808 
2809   /**
2810    * Stop the designated regionserver
2811    * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
2812    * <code>example.org:1234</code>
2813    * @throws IOException if a remote or network exception occurs
2814    */
2815   @Override
2816   public synchronized void stopRegionServer(final String hostnamePort)
2817   throws IOException {
2818     String hostname = Addressing.parseHostname(hostnamePort);
2819     int port = Addressing.parsePort(hostnamePort);
2820     AdminService.BlockingInterface admin =
2821       this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
2822     StopServerRequest request = RequestConverter.buildStopServerRequest(
2823       "Called by admin client " + this.connection.toString());
2824     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2825 
2826     controller.setPriority(HConstants.HIGH_QOS);
2827     try {
2828       // TODO: this does not do retries, it should. Set priority and timeout in controller
2829       admin.stopServer(controller, request);
2830     } catch (ServiceException se) {
2831       throw ProtobufUtil.getRemoteException(se);
2832     }
2833   }
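  /*
   * Usage sketch for stopRegionServer above, assuming an Admin handle; the host and port are
   * illustrative (the argument is "hostname:port", as the Javadoc notes).
   *
   *   admin.stopRegionServer("rs1.example.org:16020");
   */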
2834 
2835 
2836   /**
2837    * @return cluster status
2838    * @throws IOException if a remote or network exception occurs
2839    */
2840   @Override
2841   public ClusterStatus getClusterStatus() throws IOException {
2842     return executeCallable(new MasterCallable<ClusterStatus>(getConnection()) {
2843       @Override
2844       public ClusterStatus call(int callTimeout) throws ServiceException {
2845         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2846         controller.setCallTimeout(callTimeout);
2847         GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest();
2848         return ClusterStatus.convert(master.getClusterStatus(controller, req).getClusterStatus());
2849       }
2850     });
2851   }
2852 
2853   /**
2854    * @return Configuration used by the instance.
2855    */
2856   @Override
2857   public Configuration getConfiguration() {
2858     return this.conf;
2859   }
2860 
2861   /**
2862    * Create a new namespace
2863    * @param descriptor descriptor which describes the new namespace
2864    * @throws IOException
2865    */
2866   @Override
2867   public void createNamespace(final NamespaceDescriptor descriptor) throws IOException {
2868     executeCallable(new MasterCallable<Void>(getConnection()) {
2869       @Override
2870       public Void call(int callTimeout) throws Exception {
2871         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2872         controller.setCallTimeout(callTimeout);
2873         // TODO: set priority based on NS?
2874         master.createNamespace(controller,
2875           CreateNamespaceRequest.newBuilder()
2876             .setNamespaceDescriptor(ProtobufUtil
2877               .toProtoNamespaceDescriptor(descriptor)).build()
2878         );
2879         return null;
2880       }
2881     });
2882   }
2883 
2884   /**
2885    * Modify an existing namespace
2886    * @param descriptor descriptor which describes the new namespace
2887    * @throws IOException
2888    */
2889   @Override
2890   public void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException {
2891     executeCallable(new MasterCallable<Void>(getConnection()) {
2892       @Override
2893       public Void call(int callTimeout) throws Exception {
2894         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2895         controller.setCallTimeout(callTimeout);
2896         master.modifyNamespace(controller, ModifyNamespaceRequest.newBuilder().
2897           setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build());
2898         return null;
2899       }
2900     });
2901   }
2902 
2903   /**
2904    * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
2905    * @param name namespace name
2906    * @throws IOException
2907    */
2908   @Override
2909   public void deleteNamespace(final String name) throws IOException {
2910     executeCallable(new MasterCallable<Void>(getConnection()) {
2911       @Override
2912       public Void call(int callTimeout) throws Exception {
2913         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2914         controller.setCallTimeout(callTimeout);
2915         master.deleteNamespace(controller, DeleteNamespaceRequest.newBuilder().
2916           setNamespaceName(name).build());
2917         return null;
2918       }
2919     });
2920   }
2921 
2922   /**
2923    * Get a namespace descriptor by name
2924    * @param name name of namespace descriptor
2925    * @return A descriptor
2926    * @throws IOException
2927    */
2928   @Override
2929   public NamespaceDescriptor getNamespaceDescriptor(final String name) throws IOException {
2930     return
2931         executeCallable(new MasterCallable<NamespaceDescriptor>(getConnection()) {
2932           @Override
2933           public NamespaceDescriptor call(int callTimeout) throws Exception {
2934             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2935             controller.setCallTimeout(callTimeout);
2936             return ProtobufUtil.toNamespaceDescriptor(
2937               master.getNamespaceDescriptor(controller, GetNamespaceDescriptorRequest.newBuilder().
2938                 setNamespaceName(name).build()).getNamespaceDescriptor());
2939           }
2940         });
2941   }
2942 
2943   /**
2944    * List available namespace descriptors
2945    * @return List of descriptors
2946    * @throws IOException
2947    */
2948   @Override
2949   public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
2950     return
2951         executeCallable(new MasterCallable<NamespaceDescriptor[]>(getConnection()) {
2952           @Override
2953           public NamespaceDescriptor[] call(int callTimeout) throws Exception {
2954             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2955             controller.setCallTimeout(callTimeout);
2956             List<HBaseProtos.NamespaceDescriptor> list =
2957                 master.listNamespaceDescriptors(controller,
2958                   ListNamespaceDescriptorsRequest.newBuilder().build())
2959                 .getNamespaceDescriptorList();
2960             NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()];
2961             for(int i = 0; i < list.size(); i++) {
2962               res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i));
2963             }
2964             return res;
2965           }
2966         });
2967   }
2968 
2969   /**
2970    * List procedures
2971    * @return procedure list
2972    * @throws IOException
2973    */
2974   @Override
2975   public ProcedureInfo[] listProcedures() throws IOException {
2976     return
2977         executeCallable(new MasterCallable<ProcedureInfo[]>(getConnection()) {
2978           @Override
2979           public ProcedureInfo[] call(int callTimeout) throws Exception {
2980             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2981             controller.setCallTimeout(callTimeout);
2982             List<ProcedureProtos.Procedure> procList = master.listProcedures(
2983               controller, ListProceduresRequest.newBuilder().build()).getProcedureList();
2984             ProcedureInfo[] procInfoList = new ProcedureInfo[procList.size()];
2985             for (int i = 0; i < procList.size(); i++) {
2986               procInfoList[i] = ProcedureInfo.convert(procList.get(i));
2987             }
2988             return procInfoList;
2989           }
2990         });
2991   }
2992 
2993   /**
2994    * Get list of table descriptors by namespace
2995    * @param name namespace name
2996    * @return the table descriptors of all tables in the namespace
2997    * @throws IOException
2998    */
2999   @Override
3000   public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException {
3001     return
3002         executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
3003           @Override
3004           public HTableDescriptor[] call(int callTimeout) throws Exception {
3005             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3006             controller.setCallTimeout(callTimeout);
3007             List<TableSchema> list =
3008                 master.listTableDescriptorsByNamespace(controller,
3009                   ListTableDescriptorsByNamespaceRequest.newBuilder().setNamespaceName(name)
3010                   .build()).getTableSchemaList();
3011             HTableDescriptor[] res = new HTableDescriptor[list.size()];
3012             for (int i = 0; i < list.size(); i++) {
3013               res[i] = HTableDescriptor.convert(list.get(i));
3015             }
3016             return res;
3017           }
3018         });
3019   }
3020 
3021   /**
3022    * Get list of table names by namespace
3023    * @param name namespace name
3024    * @return The list of table names in the namespace
3025    * @throws IOException
3026    */
3027   @Override
3028   public TableName[] listTableNamesByNamespace(final String name) throws IOException {
3029     return
3030         executeCallable(new MasterCallable<TableName[]>(getConnection()) {
3031           @Override
3032           public TableName[] call(int callTimeout) throws Exception {
3033             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3034             controller.setCallTimeout(callTimeout);
3035             List<HBaseProtos.TableName> tableNames =
3036               master.listTableNamesByNamespace(controller, ListTableNamesByNamespaceRequest.
3037                 newBuilder().setNamespaceName(name).build())
3038                 .getTableNameList();
3039             TableName[] result = new TableName[tableNames.size()];
3040             for (int i = 0; i < tableNames.size(); i++) {
3041               result[i] = ProtobufUtil.toTableName(tableNames.get(i));
3042             }
3043             return result;
3044           }
3045         });
3046   }
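  /*
   * A combined sketch of the namespace calls above (create, list, delete), assuming an Admin
   * handle; the namespace name "analytics" is illustrative. Only namespaces containing no
   * tables can be deleted.
   *
   *   admin.createNamespace(NamespaceDescriptor.create("analytics").build());
   *   for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
   *     System.out.println(ns.getName());
   *   }
   *   for (TableName tn : admin.listTableNamesByNamespace("analytics")) {
   *     System.out.println(tn.getNameAsString());
   *   }
   *   admin.deleteNamespace("analytics");   // fails unless the namespace is empty
   */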
3047 
3048   /**
3049    * Check to see if HBase is running. Throw an exception if not.
3050    * @param conf system configuration
3051    * @throws MasterNotRunningException if the master is not running
3052    * @throws ZooKeeperConnectionException if unable to connect to zookeeper
3053    */
3054   // Used by tests and by the Merge tool. The Merge tool uses it to figure out whether HBase is up.
3055   public static void checkHBaseAvailable(Configuration conf)
3056   throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException {
3057     Configuration copyOfConf = HBaseConfiguration.create(conf);
3058     // We set it to make it fail as soon as possible if HBase is not available
3059     copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
3060     copyOfConf.setInt("zookeeper.recovery.retry", 0);
3061     try (ClusterConnection connection =
3062         (ClusterConnection)ConnectionFactory.createConnection(copyOfConf)) {
3063         // Check ZK first.
3064         // If the connection exists, we may have a connection to ZK that does not work anymore
3065         ZooKeeperKeepAliveConnection zkw = null;
3066         try {
3067           // This is NASTY. FIX!!!! Dependent on internal implementation! TODO
3068           zkw = ((ConnectionManager.HConnectionImplementation)connection).
3069             getKeepAliveZooKeeperWatcher();
3070           zkw.getRecoverableZooKeeper().getZooKeeper().exists(zkw.baseZNode, false);
3071         } catch (IOException e) {
3072           throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
3073         } catch (InterruptedException e) {
3074           throw (InterruptedIOException)
3075             new InterruptedIOException("Can't connect to ZooKeeper").initCause(e);
3076         } catch (KeeperException e) {
3077           throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
3078         } finally {
3079           if (zkw != null) {
3080             zkw.close();
3081           }
3082         }
3083       connection.isMasterRunning();
3084     }
3085   }
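  /*
   * A minimal probe using the static checkHBaseAvailable above; the catch list mirrors the
   * declared exceptions. This is a sketch, not a production health check.
   *
   *   Configuration conf = HBaseConfiguration.create();
   *   try {
   *     HBaseAdmin.checkHBaseAvailable(conf);
   *     System.out.println("HBase is up");
   *   } catch (MasterNotRunningException | ZooKeeperConnectionException e) {
   *     System.out.println("HBase is not available: " + e.getMessage());
   *   } catch (ServiceException | IOException e) {
   *     System.out.println("Could not reach HBase: " + e.getMessage());
   *   }
   */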
3086 
3087   /**
3088    * Get the regions of a given table.
3089    *
3090    * @param tableName the name of the table
3091    * @return Ordered list of {@link HRegionInfo}.
3092    * @throws IOException
3093    */
3094   @Override
3095   public List<HRegionInfo> getTableRegions(final TableName tableName)
3096   throws IOException {
3097     ZooKeeperWatcher zookeeper =
3098       new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
3099         new ThrowableAbortable());
3100     List<HRegionInfo> regions = null;
3101     try {
3102       regions = MetaTableAccessor.getTableRegions(zookeeper, connection, tableName, true);
3103     } finally {
3104       zookeeper.close();
3105     }
3106     return regions;
3107   }
3108 
3109   public List<HRegionInfo> getTableRegions(final byte[] tableName)
3110   throws IOException {
3111     return getTableRegions(TableName.valueOf(tableName));
3112   }
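  /*
   * Sketch of iterating the regions returned by getTableRegions above; assumes an Admin handle
   * and an existing table named "usertable" (illustrative).
   *
   *   for (HRegionInfo region : admin.getTableRegions(TableName.valueOf("usertable"))) {
   *     System.out.println(region.getRegionNameAsString()
   *         + " [" + Bytes.toStringBinary(region.getStartKey())
   *         + ", " + Bytes.toStringBinary(region.getEndKey()) + ")");
   *   }
   */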
3113 
3114   @Override
3115   public synchronized void close() throws IOException {
3116     if (cleanupConnectionOnClose && this.connection != null && !this.closed) {
3117       this.connection.close();
3118       this.closed = true;
3119     }
3120   }
3121 
3122   /**
3123    * Get tableDescriptors
3124    * @param tableNames List of table names
3125    * @return HTD[] the tableDescriptor
3126    * @throws IOException if a remote or network exception occurs
3127    */
3128   @Override
3129   public HTableDescriptor[] getTableDescriptorsByTableName(final List<TableName> tableNames)
3130   throws IOException {
3131     return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
3132       @Override
3133       public HTableDescriptor[] call(int callTimeout) throws Exception {
3134         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3135         controller.setCallTimeout(callTimeout);
3136         GetTableDescriptorsRequest req =
3137             RequestConverter.buildGetTableDescriptorsRequest(tableNames);
3138         return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(controller, req));
3139       }
3140     });
3141   }
3142 
3143   /**
3144    * Get tableDescriptor
3145    * @param tableName one table name
3146    * @return HTD the HTableDescriptor, or null if the table does not exist
3147    * @throws IOException if a remote or network exception occurs
3148    */
3149   private HTableDescriptor getTableDescriptorByTableName(TableName tableName)
3150       throws IOException {
3151     List<TableName> tableNames = new ArrayList<TableName>(1);
3152     tableNames.add(tableName);
3153 
3154     HTableDescriptor[] htdl = getTableDescriptorsByTableName(tableNames);
3155 
3156     if (htdl == null || htdl.length == 0) {
3157       return null;
3158     } else {
3160       return htdl[0];
3161     }
3162   }
3163 
3164   /**
3165    * Get tableDescriptors
3166    * @param names List of table names
3167    * @return HTD[] the tableDescriptor
3168    * @throws IOException if a remote or network exception occurs
3169    */
3170   @Override
3171   public HTableDescriptor[] getTableDescriptors(List<String> names)
3172   throws IOException {
3173     List<TableName> tableNames = new ArrayList<TableName>(names.size());
3174     for(String name : names) {
3175       tableNames.add(TableName.valueOf(name));
3176     }
3177     return getTableDescriptorsByTableName(tableNames);
3178   }
3179 
3180   private RollWALWriterResponse rollWALWriterImpl(final ServerName sn) throws IOException,
3181       FailedLogCloseException {
3182     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
3183     RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest();
3184     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3185 
3186     try {
3187       // TODO: this does not do retries, it should. Set priority and timeout in controller
3188       return admin.rollWALWriter(controller, request);
3189     } catch (ServiceException se) {
3190       throw ProtobufUtil.getRemoteException(se);
3191     }
3192   }
3193 
3194   /**
3195    * Roll the log writer. I.e. when using a file system based write ahead log,
3196    * start writing log messages to a new file.
3197    *
3198    * Note that when talking to a version 1.0+ HBase deployment, the rolling is asynchronous.
3199    * This method will return as soon as the roll is requested and the return value will
3200    * always be null. Additionally, the named region server may schedule store flushes at the
3201    * request of the WAL handling the roll request.
3202    *
3203    * When talking to a 0.98 or older HBase deployment, the rolling is synchronous and the
3204    * return value may be either null or a list of encoded region names.
3205    *
3206    * @param serverName
3207    *          The servername of the regionserver. A server name is made of host,
3208    *          port and startcode. This is mandatory. Here is an example:
3209    *          <code> host187.example.com,60020,1289493121758</code>
3210    * @return a set of {@link HRegionInfo#getEncodedName()} that would allow the wal to
3211    *         clean up some underlying files. null if there's nothing to flush.
3212    * @throws IOException if a remote or network exception occurs
3213    * @throws FailedLogCloseException
3214    * @deprecated use {@link #rollWALWriter(ServerName)}
3215    */
3216   @Deprecated
3217   public synchronized byte[][] rollHLogWriter(String serverName)
3218       throws IOException, FailedLogCloseException {
3219     ServerName sn = ServerName.valueOf(serverName);
3220     final RollWALWriterResponse response = rollWALWriterImpl(sn);
3221     int regionCount = response.getRegionToFlushCount();
3222     if (0 == regionCount) {
3223       return null;
3224     }
3225     byte[][] regionsToFlush = new byte[regionCount][];
3226     for (int i = 0; i < regionCount; i++) {
3227       ByteString region = response.getRegionToFlush(i);
3228       regionsToFlush[i] = region.toByteArray();
3229     }
3230     return regionsToFlush;
3231   }
3232 
3233   @Override
3234   public synchronized void rollWALWriter(ServerName serverName)
3235       throws IOException, FailedLogCloseException {
3236     rollWALWriterImpl(serverName);
3237   }
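  /*
   * Sketch of requesting a WAL roll on every live region server, combining getClusterStatus()
   * and rollWALWriter(ServerName) from above; assumes an Admin handle. On 1.0+ clusters the
   * roll is asynchronous, so this only schedules the rolls.
   *
   *   for (ServerName sn : admin.getClusterStatus().getServers()) {
   *     admin.rollWALWriter(sn);
   *   }
   */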
3238 
3239   @Override
3240   public String[] getMasterCoprocessors() {
3241     try {
3242       return getClusterStatus().getMasterCoprocessors();
3243     } catch (IOException e) {
3244       LOG.error("Could not getClusterStatus()",e);
3245       return null;
3246     }
3247   }
3248 
3249   /**
3250    * {@inheritDoc}
3251    */
3252   @Override
3253   public CompactionState getCompactionState(final TableName tableName)
3254   throws IOException {
3255     CompactionState state = CompactionState.NONE;
3256     ZooKeeperWatcher zookeeper =
3257       new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
3258         new ThrowableAbortable());
3259     try {
3260       checkTableExists(tableName);
3261       List<Pair<HRegionInfo, ServerName>> pairs =
3262         MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
3263       for (Pair<HRegionInfo, ServerName> pair: pairs) {
3264         if (pair.getFirst().isOffline()) continue;
3265         if (pair.getSecond() == null) continue;
3266         try {
3267           ServerName sn = pair.getSecond();
3268           AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
3269           GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
3270             pair.getFirst().getRegionName(), true);
3271           GetRegionInfoResponse response = admin.getRegionInfo(null, request);
3272           switch (response.getCompactionState()) {
3273           case MAJOR_AND_MINOR:
3274             return CompactionState.MAJOR_AND_MINOR;
3275           case MAJOR:
3276             if (state == CompactionState.MINOR) {
3277               return CompactionState.MAJOR_AND_MINOR;
3278             }
3279             state = CompactionState.MAJOR;
3280             break;
3281           case MINOR:
3282             if (state == CompactionState.MAJOR) {
3283               return CompactionState.MAJOR_AND_MINOR;
3284             }
3285             state = CompactionState.MINOR;
3286             break;
3287           case NONE:
3288           default: // nothing, continue
3289           }
3290         } catch (NotServingRegionException e) {
3291           if (LOG.isDebugEnabled()) {
3292             LOG.debug("Trying to get compaction state of " +
3293               pair.getFirst() + ": " +
3294               StringUtils.stringifyException(e));
3295           }
3296         } catch (RemoteException e) {
3297           if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) {
3298             if (LOG.isDebugEnabled()) {
3299               LOG.debug("Trying to get compaction state of " + pair.getFirst() + ": "
3300                 + StringUtils.stringifyException(e));
3301             }
3302           } else {
3303             throw e;
3304           }
3305         }
3306       }
3307     } catch (ServiceException se) {
3308       throw ProtobufUtil.getRemoteException(se);
3309     } finally {
3310       zookeeper.close();
3311     }
3312     return state;
3313   }
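  /*
   * Sketch of pairing a major compaction request with the compaction-state poll above; assumes
   * an Admin handle and uses the same protobuf-generated CompactionState enum this class
   * returns. The table name and sleep interval are illustrative.
   *
   *   TableName tn = TableName.valueOf("usertable");
   *   admin.majorCompact(tn);                          // the compaction request is asynchronous
   *   while (admin.getCompactionState(tn) != CompactionState.NONE) {
   *     Thread.sleep(5000);                            // poll until compactions finish
   *   }
   */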
3314 
3315   /**
3316    * {@inheritDoc}
3317    */
3318   @Override
3319   public CompactionState getCompactionStateForRegion(final byte[] regionName)
3320   throws IOException {
3321     try {
3322       Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
3323       if (regionServerPair == null) {
3324         throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
3325       }
3326       if (regionServerPair.getSecond() == null) {
3327         throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
3328       }
3329       ServerName sn = regionServerPair.getSecond();
3330       AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
3331       GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
3332         regionServerPair.getFirst().getRegionName(), true);
3333       PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3334       // TODO: this does not do retries, it should. Set priority and timeout in controller
3335       GetRegionInfoResponse response = admin.getRegionInfo(controller, request);
3336       return response.getCompactionState();
3337     } catch (ServiceException se) {
3338       throw ProtobufUtil.getRemoteException(se);
3339     }
3340   }
3341 
3342   /**
3343    * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
3344    * #getCompactionStateForRegion(byte[])} instead.
3345    */
3346   @Deprecated
3347   public CompactionState getCompactionState(final String tableNameOrRegionName)
3348   throws IOException, InterruptedException {
3349     return getCompactionState(Bytes.toBytes(tableNameOrRegionName));
3350   }
3351 
3352   /**
3353    * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
3354    * #getCompactionStateForRegion(byte[])} instead.
3355    */
3356   @Deprecated
3357   public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
3358   throws IOException, InterruptedException {
3359     try {
3360       return getCompactionStateForRegion(tableNameOrRegionName);
3361     } catch (IllegalArgumentException e) {
3362       // Invalid region, try table
3363       return getCompactionState(TableName.valueOf(tableNameOrRegionName));
3364     }
3365   }
3366 
3367   /**
3368    * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
3369    * taken. If the table is disabled, an offline snapshot is taken.
3370    * <p>
3371    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3372    * snapshot with the same name (even a different type or with different parameters) will fail with
3373    * a {@link SnapshotCreationException} indicating the duplicate naming.
3374    * <p>
3375    * Snapshot names follow the same naming constraints as tables in HBase. See
3376    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
3377    * @param snapshotName name of the snapshot to be created
3378    * @param tableName name of the table for which snapshot is created
3379    * @throws IOException if a remote or network exception occurs
3380    * @throws SnapshotCreationException if snapshot creation failed
3381    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3382    */
3383   @Override
3384   public void snapshot(final String snapshotName,
3385                        final TableName tableName) throws IOException,
3386       SnapshotCreationException, IllegalArgumentException {
3387     snapshot(snapshotName, tableName, SnapshotDescription.Type.FLUSH);
3388   }
3389 
3390   public void snapshot(final String snapshotName,
3391                        final String tableName) throws IOException,
3392       SnapshotCreationException, IllegalArgumentException {
3393     snapshot(snapshotName, TableName.valueOf(tableName),
3394         SnapshotDescription.Type.FLUSH);
3395   }
3396 
3397   /**
3398    * Create snapshot for the given table of given flush type.
3399    * <p>
3400    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3401    * snapshot with the same name (even a different type or with different parameters) will fail with
3402    * a {@link SnapshotCreationException} indicating the duplicate naming.
3403    * <p>
3404    * Snapshot names follow the same naming constraints as tables in HBase.
3405    * @param snapshotName name of the snapshot to be created
3406    * @param tableName name of the table for which snapshot is created
3407    * @param flushType the snapshot type, which determines whether the memstore is flushed before the snapshot is taken
3408    * @throws IOException if a remote or network exception occurs
3409    * @throws SnapshotCreationException if snapshot creation failed
3410    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3411    */
3412    public void snapshot(final byte[] snapshotName, final byte[] tableName,
3413                        final SnapshotDescription.Type flushType) throws
3414       IOException, SnapshotCreationException, IllegalArgumentException {
3415       snapshot(Bytes.toString(snapshotName), Bytes.toString(tableName), flushType);
3416   }
3417   /**
3418    * Create a timestamp consistent snapshot for the given table.
3419    * <p>
3420    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3421    * snapshot with the same name (even a different type or with different parameters) will fail with
3422    * a {@link SnapshotCreationException} indicating the duplicate naming.
3423    * <p>
3424    * Snapshot names follow the same naming constraints as tables in HBase.
3425    * @param snapshotName name of the snapshot to be created
3426    * @param tableName name of the table for which snapshot is created
3427    * @throws IOException if a remote or network exception occurs
3428    * @throws SnapshotCreationException if snapshot creation failed
3429    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3430    */
3433   @Override
3434   public void snapshot(final byte[] snapshotName,
3435                        final TableName tableName) throws IOException,
3436       SnapshotCreationException, IllegalArgumentException {
3437     snapshot(Bytes.toString(snapshotName), tableName, SnapshotDescription.Type.FLUSH);
3438   }
3439 
3440   public void snapshot(final byte[] snapshotName,
3441                        final byte[] tableName) throws IOException,
3442       SnapshotCreationException, IllegalArgumentException {
3443     snapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName),
3444       SnapshotDescription.Type.FLUSH);
3445   }
3446 
3447   /**
3448    * Create typed snapshot of the table.
3449    * <p>
3450    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3451    * snapshot with the same name (even a different type or with different parameters) will fail with
3452    * a {@link SnapshotCreationException} indicating the duplicate naming.
3453    * <p>
3454    * Snapshot names follow the same naming constraints as tables in HBase. See
3455    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
3456    * <p>
3457    * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
3458    *          snapshots stored on the cluster
3459    * @param tableName name of the table to snapshot
3460    * @param type type of snapshot to take
3461    * @throws IOException if we fail to reach the master
3462    * @throws SnapshotCreationException if snapshot creation failed
3463    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3464    */
3465   @Override
3466   public void snapshot(final String snapshotName,
3467                        final TableName tableName,
3468                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
3469       IllegalArgumentException {
3470     SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
3471     builder.setTable(tableName.getNameAsString());
3472     builder.setName(snapshotName);
3473     builder.setType(type);
3474     snapshot(builder.build());
3475   }
3476 
3477   public void snapshot(final String snapshotName,
3478                        final String tableName,
3479                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
3480       IllegalArgumentException {
3481     snapshot(snapshotName, TableName.valueOf(tableName), type);
3482   }
3483 
3484   public void snapshot(final String snapshotName,
3485                        final byte[] tableName,
3486                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
3487       IllegalArgumentException {
3488     snapshot(snapshotName, TableName.valueOf(tableName), type);
3489   }
3490 
3491   /**
3492    * Take a snapshot and wait for the server to complete that snapshot (blocking).
3493    * <p>
3494    * Only a single snapshot should be taken at a time for an instance of HBase, or results may be
3495    * undefined (you can tell multiple HBase clusters to snapshot at the same time, but only one at a
3496    * time for a single cluster).
3497    * <p>
3498    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3499    * snapshot with the same name (even a different type or with different parameters) will fail with
3500    * a {@link SnapshotCreationException} indicating the duplicate naming.
3501    * <p>
3502    * Snapshot names follow the same naming constraints as tables in HBase. See
3503    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
3504    * <p>
3505    * You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])}
3506    * unless you are sure about the type of snapshot that you want to take.
3507    * @param snapshot snapshot to take
3508    * @throws IOException if we lose contact with the master.
3509    * @throws SnapshotCreationException if snapshot failed to be taken
3510    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3511    */
3512   @Override
3513   public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException,
3514       IllegalArgumentException {
3515     // actually take the snapshot
3516     SnapshotResponse response = takeSnapshotAsync(snapshot);
3517     final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot)
3518         .build();
3519     IsSnapshotDoneResponse done = null;
3520     long start = EnvironmentEdgeManager.currentTime();
3521     long max = response.getExpectedTimeout();
3522     long maxPauseTime = max / this.numRetries;
3523     int tries = 0;
3524     LOG.debug("Waiting a max of " + max + " ms for snapshot '" +
3525         ClientSnapshotDescriptionUtils.toString(snapshot) + "' to complete. (max " +
3526         maxPauseTime + " ms per retry)");
3527     while (tries == 0
3528         || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) {
3529       try {
3530         // sleep a backoff <= pauseTime amount
3531         long sleep = getPauseTime(tries++);
3532         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
3533         LOG.debug("(#" + tries + ") Sleeping: " + sleep +
3534           "ms while waiting for snapshot completion.");
3535         Thread.sleep(sleep);
3536       } catch (InterruptedException e) {
3537         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
3538       }
3539       LOG.debug("Getting current status of snapshot from master...");
3540       done = executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
3541         @Override
3542         public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3543           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3544           controller.setCallTimeout(callTimeout);
3545           return master.isSnapshotDone(controller, request);
3546         }
3547       });
3548     }
3549     if (!done.getDone()) {
3550       throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
3551           + "' wasn't completed within the expected time: " + max + " ms", snapshot);
3552     }
3553   }
3554 
3555   /**
3556    * Take a snapshot without waiting for the server to complete that snapshot (asynchronous)
3557    * <p>
3558    * Only a single snapshot should be taken at a time, or results may be undefined.
3559    * @param snapshot snapshot to take
3560    * @return response from the server indicating the max time to wait for the snapshot
3561    * @throws IOException if the snapshot did not succeed or we lose contact with the master.
3562    * @throws SnapshotCreationException if snapshot creation failed
3563    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3564    */
3565   @Override
3566   public SnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot) throws IOException,
3567       SnapshotCreationException {
3568     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
3569     final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot)
3570         .build();
3571     // run the snapshot on the master
3572     return executeCallable(new MasterCallable<SnapshotResponse>(getConnection()) {
3573       @Override
3574       public SnapshotResponse call(int callTimeout) throws ServiceException {
3575         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3576         controller.setCallTimeout(callTimeout);
3577         return master.snapshot(controller, request);
3578       }
3579     });
3580   }
3581 
3582   /**
3583    * Check the current state of the passed snapshot.
3584    * <p>
3585    * There are three possible states:
3586    * <ol>
3587    * <li>running - returns <tt>false</tt></li>
3588    * <li>finished - returns <tt>true</tt></li>
3589    * <li>finished with error - throws the exception that caused the snapshot to fail</li>
3590    * </ol>
3591    * <p>
3592    * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
3593    * run/started since the snapshot you are checking, you will receive an
3594    * {@link UnknownSnapshotException}.
3595    * @param snapshot description of the snapshot to check
3596    * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
3597    *         running
3598    * @throws IOException if we have a network issue
3599    * @throws HBaseSnapshotException if the snapshot failed
3600    * @throws UnknownSnapshotException if the requested snapshot is unknown
3601    */
3602   @Override
3603   public boolean isSnapshotFinished(final SnapshotDescription snapshot)
3604       throws IOException, HBaseSnapshotException, UnknownSnapshotException {
3605 
3606     return executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
3607       @Override
3608       public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3609         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3610         controller.setCallTimeout(callTimeout);
3611         return master.isSnapshotDone(controller,
3612           IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build());
3613       }
3614     }).getDone();
3615   }
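  /*
   * Sketch of the asynchronous snapshot flow: build a SnapshotDescription, submit it with
   * takeSnapshotAsync, then poll isSnapshotFinished as documented above. Snapshot and table
   * names are illustrative, and a failed snapshot surfaces as an exception from the poll.
   *
   *   SnapshotDescription snap = SnapshotDescription.newBuilder()
   *       .setName("usertable-snap1")
   *       .setTable("usertable")
   *       .setType(SnapshotDescription.Type.FLUSH)
   *       .build();
   *   admin.takeSnapshotAsync(snap);
   *   while (!admin.isSnapshotFinished(snap)) {
   *     Thread.sleep(1000);                            // wait for the master to finish the snapshot
   *   }
   */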
3616 
3617   /**
3618    * Restore the specified snapshot on the original table. (The table must be disabled)
3619    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3620    * is set to true, a snapshot of the current table is taken
3621    * before executing the restore operation.
3622    * In case of restore failure, the failsafe snapshot will be restored.
3623    * If the restore completes without problem the failsafe snapshot is deleted.
3624    *
3625    * @param snapshotName name of the snapshot to restore
3626    * @throws IOException if a remote or network exception occurs
3627    * @throws RestoreSnapshotException if snapshot failed to be restored
3628    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3629    */
3630   @Override
3631   public void restoreSnapshot(final byte[] snapshotName)
3632       throws IOException, RestoreSnapshotException {
3633     restoreSnapshot(Bytes.toString(snapshotName));
3634   }
3635 
3636   /**
3637    * Restore the specified snapshot on the original table. (The table must be disabled)
3638    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3639    * is set to true, a snapshot of the current table is taken
3640    * before executing the restore operation.
3641    * In case of restore failure, the failsafe snapshot will be restored.
3642    * If the restore completes without problem the failsafe snapshot is deleted.
3643    *
3644    * @param snapshotName name of the snapshot to restore
3645    * @throws IOException if a remote or network exception occurs
3646    * @throws RestoreSnapshotException if snapshot failed to be restored
3647    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3648    */
3649   @Override
3650   public void restoreSnapshot(final String snapshotName)
3651       throws IOException, RestoreSnapshotException {
3652     boolean takeFailSafeSnapshot =
3653       conf.getBoolean("hbase.snapshot.restore.take.failsafe.snapshot", false);
3654     restoreSnapshot(snapshotName, takeFailSafeSnapshot);
3655   }
3656 
3657   /**
3658    * Restore the specified snapshot on the original table. (The table must be disabled)
3659    * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
3660    * before executing the restore operation.
3661    * In case of restore failure, the failsafe snapshot will be restored.
3662    * If the restore completes without problem the failsafe snapshot is deleted.
3663    *
3664    * The failsafe snapshot name is configurable by using the property
3665    * "hbase.snapshot.restore.failsafe.name".
3666    *
3667    * @param snapshotName name of the snapshot to restore
3668    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
3669    * @throws IOException if a remote or network exception occurs
3670    * @throws RestoreSnapshotException if snapshot failed to be restored
3671    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3672    */
3673   @Override
3674   public void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot)
3675       throws IOException, RestoreSnapshotException {
3676     restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
3677   }
3678 
3679   /**
3680    * Restore the specified snapshot on the original table. (The table must be disabled)
3681    * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
3682    * before executing the restore operation.
3683    * In case of restore failure, the failsafe snapshot will be restored.
3684    * If the restore completes without problem the failsafe snapshot is deleted.
3685    *
3686    * The failsafe snapshot name is configurable by using the property
3687    * "hbase.snapshot.restore.failsafe.name".
3688    *
3689    * @param snapshotName name of the snapshot to restore
3690    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
3691    * @throws IOException if a remote or network exception occurs
3692    * @throws RestoreSnapshotException if snapshot failed to be restored
3693    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3694    */
3695   @Override
3696   public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
3697       throws IOException, RestoreSnapshotException {
3698     TableName tableName = null;
3699     for (SnapshotDescription snapshotInfo: listSnapshots()) {
3700       if (snapshotInfo.getName().equals(snapshotName)) {
3701         tableName = TableName.valueOf(snapshotInfo.getTable());
3702         break;
3703       }
3704     }
3705 
3706     if (tableName == null) {
3707       throw new RestoreSnapshotException(
3708         "Unable to find the table name for snapshot=" + snapshotName);
3709     }
3710 
3711     // The table does not exist, switch to clone.
3712     if (!tableExists(tableName)) {
3713       cloneSnapshot(snapshotName, tableName);
3714       return;
3715     }
3716 
3717     // Check if the table is disabled
3718     if (!isTableDisabled(tableName)) {
3719       throw new TableNotDisabledException(tableName);
3720     }
3721 
3722     // Take a snapshot of the current state
3723     String failSafeSnapshotSnapshotName = null;
3724     if (takeFailSafeSnapshot) {
3725       failSafeSnapshotSnapshotName = conf.get("hbase.snapshot.restore.failsafe.name",
3726         "hbase-failsafe-{snapshot.name}-{restore.timestamp}");
3727       failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotName
3728         .replace("{snapshot.name}", snapshotName)
3729         .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
3730         .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime()));
3731       LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
3732       snapshot(failSafeSnapshotSnapshotName, tableName);
3733     }
3734 
3735     try {
3736       // Restore snapshot
3737       internalRestoreSnapshot(snapshotName, tableName);
3738     } catch (IOException e) {
3739       // Something went wrong during the restore...
3740       // if the pre-restore snapshot is available try to rollback
3741       if (takeFailSafeSnapshot) {
3742         try {
3743           internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName);
3744           String msg = "Restore snapshot=" + snapshotName +
3745             " failed. Rollback to snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
3746           LOG.error(msg, e);
3747           throw new RestoreSnapshotException(msg, e);
3748         } catch (IOException ex) {
3749           String msg = "Failed to restore and rollback to snapshot=" + failSafeSnapshotSnapshotName;
3750           LOG.error(msg, ex);
3751           throw new RestoreSnapshotException(msg, e);
3752         }
3753       } else {
3754         throw new RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e);
3755       }
3756     }
3757 
3758     // If the restore succeeded, delete the pre-restore snapshot
3759     if (takeFailSafeSnapshot) {
3760       try {
3761         LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
3762         deleteSnapshot(failSafeSnapshotSnapshotName);
3763       } catch (IOException e) {
3764         LOG.error("Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName, e);
3765       }
3766     }
3767   }
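  /*
   * Sketch of the restore flow above with the failsafe snapshot enabled; the table must be
   * disabled first, and the table and snapshot names are illustrative.
   *
   *   TableName tn = TableName.valueOf("usertable");
   *   admin.disableTable(tn);
   *   admin.restoreSnapshot("usertable-snap1", true);  // true = take a failsafe snapshot first
   *   admin.enableTable(tn);
   */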
3768 
3769   /**
3770    * Create a new table by cloning the snapshot content.
3771    *
3772    * @param snapshotName name of the snapshot to be cloned
3773    * @param tableName name of the table where the snapshot will be restored
3774    * @throws IOException if a remote or network exception occurs
3775    * @throws TableExistsException if table to be created already exists
3776    * @throws RestoreSnapshotException if snapshot failed to be cloned
3777    * @throws IllegalArgumentException if the specified table does not have a valid name
3778    */
3779   public void cloneSnapshot(final byte[] snapshotName, final byte[] tableName)
3780       throws IOException, TableExistsException, RestoreSnapshotException {
3781     cloneSnapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName));
3782   }
3783 
3784   /**
3785    * Create a new table by cloning the snapshot content.
3786    *
3787    * @param snapshotName name of the snapshot to be cloned
3788    * @param tableName name of the table where the snapshot will be restored
3789    * @throws IOException if a remote or network exception occurs
3790    * @throws TableExistsException if table to be created already exists
3791    * @throws RestoreSnapshotException if snapshot failed to be cloned
3792    * @throws IllegalArgumentException if the specified table does not have a valid name
3793    */
3794   @Override
3795   public void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
3796       throws IOException, TableExistsException, RestoreSnapshotException {
3797     cloneSnapshot(Bytes.toString(snapshotName), tableName);
3798   }
3799 
3800 
3801 
3802   /**
3803    * Create a new table by cloning the snapshot content.
3804    *
3805    * @param snapshotName name of the snapshot to be cloned
3806    * @param tableName name of the table where the snapshot will be restored
3807    * @throws IOException if a remote or network exception occurs
3808    * @throws TableExistsException if table to be created already exists
3809    * @throws RestoreSnapshotException if snapshot failed to be cloned
3810    * @throws IllegalArgumentException if the specified table does not have a valid name
3811    */
3812   public void cloneSnapshot(final String snapshotName, final String tableName)
3813       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
3814     cloneSnapshot(snapshotName, TableName.valueOf(tableName));
3815   }
3816 
3817   /**
3818    * Create a new table by cloning the snapshot content.
3819    *
3820    * @param snapshotName name of the snapshot to be cloned
3821    * @param tableName name of the table where the snapshot will be restored
3822    * @throws IOException if a remote or network exception occurs
3823    * @throws TableExistsException if table to be created already exists
3824    * @throws RestoreSnapshotException if snapshot failed to be cloned
3825    * @throws IllegalArgumentException if the specified table does not have a valid name
3826    */
3827   @Override
3828   public void cloneSnapshot(final String snapshotName, final TableName tableName)
3829       throws IOException, TableExistsException, RestoreSnapshotException {
3830     if (tableExists(tableName)) {
3831       throw new TableExistsException(tableName);
3832     }
3833     internalRestoreSnapshot(snapshotName, tableName);
3834     waitUntilTableIsEnabled(tableName);
3835   }
3836 
3837   /**
3838    * Execute a distributed procedure on a cluster synchronously with return data
3839    *
3840    * @param signature The signature that uniquely identifies the procedure
3841    * (by default, the root ZK node name of the procedure).
3842    * @param instance The instance name of the procedure. For some procedures, this parameter is
3843    * optional.
3844    * @param props Property/Value pairs of properties passed to the procedure
3845    * @return data returned after procedure execution. null if no return data.
3846    * @throws IOException if a remote or network exception occurs
3847    */
3848   @Override
3849   public byte[] execProcedureWithRet(String signature, String instance,
3850       Map<String, String> props) throws IOException {
3851     ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3852     builder.setSignature(signature).setInstance(instance);
3853     for (Entry<String, String> entry : props.entrySet()) {
3854       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3855           .setValue(entry.getValue()).build();
3856       builder.addConfiguration(pair);
3857     }
3858 
3859     final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
3860         .setProcedure(builder.build()).build();
3861     // run the procedure on the master
3862     ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
3863         getConnection()) {
3864       @Override
3865       public ExecProcedureResponse call(int callTimeout) throws ServiceException {
3866         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3867         controller.setCallTimeout(callTimeout);
3868         return master.execProcedureWithRet(controller, request);
3869       }
3870     });
3871 
3872     return response.hasReturnData() ? response.getReturnData().toByteArray() : null;
3873   }
3874   /**
3875    * Execute a distributed procedure on a cluster.
3876    *
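        * <p>
        * Illustrative usage (a sketch; the signature, instance, and property values below are
        * hypothetical and depend on the procedure being run):
        * <blockquote><pre>
        * Map&lt;String, String&gt; props = new HashMap&lt;String, String&gt;();
        * props.put("someProperty", "someValue");
        * admin.execProcedure("my-proc-signature", "my-instance", props);
        * </pre></blockquote>
        *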
3877    * @param signature The signature that uniquely identifies the procedure
3878    * (by default, the root ZK node name of the procedure).
3879    * @param instance The instance name of the procedure. For some procedures, this parameter is
3880    * optional.
3881    * @param props Property/Value pairs of properties passed to the procedure
3882    * @throws IOException if a remote or network exception occurs
3883    */
3884   @Override
3885   public void execProcedure(String signature, String instance,
3886       Map<String, String> props) throws IOException {
3887     ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3888     builder.setSignature(signature).setInstance(instance);
3889     for (Entry<String, String> entry : props.entrySet()) {
3890       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3891           .setValue(entry.getValue()).build();
3892       builder.addConfiguration(pair);
3893     }
3894 
3895     final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
3896         .setProcedure(builder.build()).build();
3897     // run the procedure on the master
3898     ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
3899         getConnection()) {
3900       @Override
3901       public ExecProcedureResponse call(int callTimeout) throws ServiceException {
3902         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3903         controller.setCallTimeout(callTimeout);
3904         return master.execProcedure(controller, request);
3905       }
3906     });
3907 
3908     long start = EnvironmentEdgeManager.currentTime();
3909     long max = response.getExpectedTimeout();
3910     long maxPauseTime = max / this.numRetries;
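         // Spread the expected timeout across the configured number of retries so that no single
         // backoff sleep consumes the whole waiting budget.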
3911     int tries = 0;
3912     LOG.debug("Waiting a max of " + max + " ms for procedure '" +
3913         signature + " : " + instance + "' to complete. (max " + maxPauseTime + " ms per retry)");
3914     boolean done = false;
3915     while (tries == 0
3916         || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
3917       try {
3918         // sleep a backoff <= pauseTime amount
3919         long sleep = getPauseTime(tries++);
3920         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
3921         LOG.debug("(#" + tries + ") Sleeping: " + sleep +
3922           "ms while waiting for procedure completion.");
3923         Thread.sleep(sleep);
3924       } catch (InterruptedException e) {
3925         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
3926       }
3927       LOG.debug("Getting current status of procedure from master...");
3928       done = isProcedureFinished(signature, instance, props);
3929     }
3930     if (!done) {
3931       throw new IOException("Procedure '" + signature + " : " + instance
3932           + "' wasn't completed in expectedTime:" + max + " ms");
3933     }
3934   }
3935 
3936   /**
3937    * Check the current state of the specified procedure.
3938    * <p>
3939    * There are three possible states:
3940    * <ol>
3941    * <li>running - returns <tt>false</tt></li>
3942    * <li>finished - returns <tt>true</tt></li>
3943    * <li>finished with error - throws the exception that caused the procedure to fail</li>
3944    * </ol>
3945    * <p>
3946    *
3947    * @param signature The signature that uniquely identifies a procedure
3948    * @param instance The instance name of the procedure
3949    * @param props Property/Value pairs of properties passed to the procedure
3950    * @return true if the specified procedure is finished successfully, false if it is still running
3951    * @throws IOException if the specified procedure finished with error
3952    */
3953   @Override
3954   public boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
3955       throws IOException {
3956     final ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3957     builder.setSignature(signature).setInstance(instance);
3958     for (Entry<String, String> entry : props.entrySet()) {
3959       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3960           .setValue(entry.getValue()).build();
3961       builder.addConfiguration(pair);
3962     }
3963     final ProcedureDescription desc = builder.build();
3964     return executeCallable(
3965         new MasterCallable<IsProcedureDoneResponse>(getConnection()) {
3966           @Override
3967           public IsProcedureDoneResponse call(int callTimeout) throws ServiceException {
3968             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3969             controller.setCallTimeout(callTimeout);
3970             return master.isProcedureDone(controller, IsProcedureDoneRequest
3971                 .newBuilder().setProcedure(desc).build());
3972           }
3973         }).getDone();
3974   }
3975 
3976   /**
3977    * Execute Restore/Clone snapshot and wait for the server to complete (blocking).
3978    * To check if the cloned table exists, use {@link #isTableAvailable} -- it is not safe to
3979    * create an HTable instance for this table before it is available.
3980    * @param snapshotName snapshot to restore
3981    * @param tableName table name to restore the snapshot on
3982    * @throws IOException if a remote or network exception occurs
3983    * @throws RestoreSnapshotException if snapshot failed to be restored
3984    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3985    */
3986   private void internalRestoreSnapshot(final String snapshotName, final TableName
3987       tableName)
3988       throws IOException, RestoreSnapshotException {
3989     SnapshotDescription snapshot = SnapshotDescription.newBuilder()
3990         .setName(snapshotName).setTable(tableName.getNameAsString()).build();
3991 
3992     // actually restore the snapshot
3993     internalRestoreSnapshotAsync(snapshot);
3994 
3995     final IsRestoreSnapshotDoneRequest request = IsRestoreSnapshotDoneRequest.newBuilder()
3996         .setSnapshot(snapshot).build();
3997     IsRestoreSnapshotDoneResponse done = IsRestoreSnapshotDoneResponse.newBuilder()
3998         .setDone(false).buildPartial();
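         // Poll the master until the restore reports done, with increasing backoff but capping
         // each sleep at maxPauseTime (5 seconds).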
3999     final long maxPauseTime = 5000;
4000     int tries = 0;
4001     while (!done.getDone()) {
4002       try {
4003         // sleep a backoff <= pauseTime amount
4004         long sleep = getPauseTime(tries++);
4005         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
4006         LOG.debug(tries + ") Sleeping: " + sleep + " ms while we wait for snapshot restore to complete.");
4007         Thread.sleep(sleep);
4008       } catch (InterruptedException e) {
4009         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
4010       }
4011       LOG.debug("Getting current status of snapshot restore from master...");
4012       done = executeCallable(new MasterCallable<IsRestoreSnapshotDoneResponse>(
4013           getConnection()) {
4014         @Override
4015         public IsRestoreSnapshotDoneResponse call(int callTimeout) throws ServiceException {
4016           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4017           controller.setCallTimeout(callTimeout);
4018           return master.isRestoreSnapshotDone(controller, request);
4019         }
4020       });
4021     }
4022     if (!done.getDone()) {
4023       throw new RestoreSnapshotException("Snapshot '" + snapshot.getName() + "' wasn't restored.");
4024     }
4025   }
4026 
4027   /**
4028    * Execute Restore/Clone snapshot asynchronously, without waiting for the operation to complete.
4029    * <p>
4030    * Only a single snapshot should be restored at a time, or results may be undefined.
4031    * @param snapshot snapshot to restore
4032    * @return response from the server indicating the max time to wait for the snapshot
4033    * @throws IOException if a remote or network exception occurs
4034    * @throws RestoreSnapshotException if snapshot failed to be restored
4035    * @throws IllegalArgumentException if the restore request is formatted incorrectly
4036    */
4037   private RestoreSnapshotResponse internalRestoreSnapshotAsync(final SnapshotDescription snapshot)
4038       throws IOException, RestoreSnapshotException {
4039     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
4040 
4041     final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot)
4042         .build();
4043 
4044     // run the snapshot restore on the master
4045     return executeCallable(new MasterCallable<RestoreSnapshotResponse>(getConnection()) {
4046       @Override
4047       public RestoreSnapshotResponse call(int callTimeout) throws ServiceException {
4048         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4049         controller.setCallTimeout(callTimeout);
4050         return master.restoreSnapshot(controller, request);
4051       }
4052     });
4053   }
4054 
4055   /**
4056    * List completed snapshots.
4057    * @return a list of snapshot descriptors for completed snapshots
4058    * @throws IOException if a network error occurs
4059    */
4060   @Override
4061   public List<SnapshotDescription> listSnapshots() throws IOException {
4062     return executeCallable(new MasterCallable<List<SnapshotDescription>>(getConnection()) {
4063       @Override
4064       public List<SnapshotDescription> call(int callTimeout) throws ServiceException {
4065         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4066         controller.setCallTimeout(callTimeout);
4067         return master.getCompletedSnapshots(controller,
4068           GetCompletedSnapshotsRequest.newBuilder().build()).getSnapshotsList();
4069       }
4070     });
4071   }
4072 
4073   /**
4074    * List all the completed snapshots matching the given regular expression.
4075    *
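        * <p>
        * Illustrative usage (a sketch; the snapshot name prefix is hypothetical):
        * <blockquote><pre>
        * List&lt;SnapshotDescription&gt; snapshots = admin.listSnapshots("prodTable-.*");
        * </pre></blockquote>
        *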
4076    * @param regex The regular expression to match against
4077    * @return a list of SnapshotDescription matching the given regular expression
4078    * @throws IOException if a remote or network exception occurs
4079    */
4080   @Override
4081   public List<SnapshotDescription> listSnapshots(String regex) throws IOException {
4082     return listSnapshots(Pattern.compile(regex));
4083   }
4084 
4085   /**
4086    * List all the completed snapshots matching the given pattern.
4087    *
4088    * @param pattern The compiled regular expression to match against
4089    * @return a list of SnapshotDescription matching the given pattern
4090    * @throws IOException if a remote or network exception occurs
4091    */
4092   @Override
4093   public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException {
4094     List<SnapshotDescription> matched = new LinkedList<SnapshotDescription>();
4095     List<SnapshotDescription> snapshots = listSnapshots();
4096     for (SnapshotDescription snapshot : snapshots) {
4097       if (pattern.matcher(snapshot.getName()).matches()) {
4098         matched.add(snapshot);
4099       }
4100     }
4101     return matched;
4102   }
4103 
4104   /**
4105    * Delete an existing snapshot.
4106    * @param snapshotName name of the snapshot
4107    * @throws IOException if a remote or network exception occurs
4108    */
4109   @Override
4110   public void deleteSnapshot(final byte[] snapshotName) throws IOException {
4111     deleteSnapshot(Bytes.toString(snapshotName));
4112   }
4113 
4114   /**
4115    * Delete an existing snapshot.
4116    * @param snapshotName name of the snapshot
4117    * @throws IOException if a remote or network exception occurs
4118    */
4119   @Override
4120   public void deleteSnapshot(final String snapshotName) throws IOException {
4121     // Validate the name first; snapshot names follow the same rules as fully-qualified table names
4122     TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName));
4123     // do the delete
4124     executeCallable(new MasterCallable<Void>(getConnection()) {
4125       @Override
4126       public Void call(int callTimeout) throws ServiceException {
4127         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4128         controller.setCallTimeout(callTimeout);
4129         master.deleteSnapshot(controller,
4130           DeleteSnapshotRequest.newBuilder().
4131             setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build()
4132         );
4133         return null;
4134       }
4135     });
4136   }
4137 
4138   /**
4139    * Delete existing snapshots whose names match the pattern passed.
4140    * @param regex The regular expression to match against
4141    * @throws IOException if a remote or network exception occurs
4142    */
4143   @Override
4144   public void deleteSnapshots(final String regex) throws IOException {
4145     deleteSnapshots(Pattern.compile(regex));
4146   }
4147 
4148   /**
4149    * Delete existing snapshots whose names match the pattern passed.
4150    * @param pattern pattern for names of the snapshot to match
4151    * @throws IOException if a remote or network exception occurs
4152    */
4153   @Override
4154   public void deleteSnapshots(final Pattern pattern) throws IOException {
4155     List<SnapshotDescription> snapshots = listSnapshots(pattern);
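         // Best effort: a failure to delete one snapshot is logged and skipped so that the
         // remaining matches are still processed.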
4156     for (final SnapshotDescription snapshot : snapshots) {
4157       try {
4158         internalDeleteSnapshot(snapshot);
4159       } catch (IOException ex) {
4160         LOG.info(
4161           "Failed to delete snapshot " + snapshot.getName() + " for table " + snapshot.getTable(),
4162           ex);
4163       }
4164     }
4165   }
4166 
4167   private void internalDeleteSnapshot(final SnapshotDescription snapshot) throws IOException {
4168     executeCallable(new MasterCallable<Void>(getConnection()) {
4169       @Override
4170       public Void call(int callTimeout) throws ServiceException {
4171         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4172         controller.setCallTimeout(callTimeout);
4173         this.master.deleteSnapshot(controller, DeleteSnapshotRequest.newBuilder()
4174           .setSnapshot(snapshot).build());
4175         return null;
4176       }
4177     });
4178   }
4179 
4180   /**
4181    * Apply the new quota settings.
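        * <p>
        * Illustrative usage (a sketch, assuming the QuotaSettingsFactory helper; the table name
        * and limit are hypothetical):
        * <blockquote><pre>
        * admin.setQuota(QuotaSettingsFactory.throttleTable(TableName.valueOf("t1"),
        *     ThrottleType.REQUEST_NUMBER, 100, TimeUnit.SECONDS));
        * </pre></blockquote>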
4182    * @param quota the quota settings
4183    * @throws IOException if a remote or network exception occurs
4184    */
4185   @Override
4186   public void setQuota(final QuotaSettings quota) throws IOException {
4187     executeCallable(new MasterCallable<Void>(getConnection()) {
4188       @Override
4189       public Void call(int callTimeout) throws ServiceException {
4190         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4191         controller.setCallTimeout(callTimeout);
4192         this.master.setQuota(controller, QuotaSettings.buildSetQuotaRequestProto(quota));
4193         return null;
4194       }
4195     });
4196   }
4197 
4198   /**
4199    * Return a Quota Scanner to list the quotas based on the filter.
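        * <p>
        * Illustrative usage (a sketch; remember to close the retriever when done):
        * <blockquote><pre>
        * QuotaRetriever scanner = admin.getQuotaRetriever(new QuotaFilter());
        * try {
        *   for (QuotaSettings settings : scanner) {
        *     // inspect each quota setting
        *   }
        * } finally {
        *   scanner.close();
        * }
        * </pre></blockquote>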
4200    * @param filter the quota settings filter
4201    * @return the quota scanner
4202    * @throws IOException if a remote or network exception occurs
4203    */
4204   @Override
4205   public QuotaRetriever getQuotaRetriever(final QuotaFilter filter) throws IOException {
4206     return QuotaRetriever.open(conf, filter);
4207   }
4208 
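       // Runs the callable against the master with the configured retry policy and operation
       // timeout; the callable is always closed afterwards.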
4209   private <V> V executeCallable(MasterCallable<V> callable) throws IOException {
4210     RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller();
4211     try {
4212       return caller.callWithRetries(callable, operationTimeout);
4213     } finally {
4214       callable.close();
4215     }
4216   }
4217 
4218   private static <C extends RetryingCallable<V> & Closeable, V> V executeCallable(C callable,
4219              RpcRetryingCallerFactory rpcCallerFactory, int operationTimeout, int rpcTimeout)
4220       throws IOException {
4221     RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller(rpcTimeout);
4222     try {
4223       return caller.callWithRetries(callable, operationTimeout);
4224     } finally {
4225       callable.close();
4226     }
4227   }
4228 
4229   /**
4230    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
4231    * connected to the active master.
4232    *
4233    * <p>
4234    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
4235    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
4236    * </p>
4237    *
4238    * <div style="background-color: #cccccc; padding: 2px">
4239    * <blockquote><pre>
4240    * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
4241    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
4242    * MyCallRequest request = MyCallRequest.newBuilder()
4243    *     ...
4244    *     .build();
4245    * MyCallResponse response = service.myCall(null, request);
4246    * </pre></blockquote></div>
4247    *
4248    * @return A MasterCoprocessorRpcChannel instance
4249    */
4250   @Override
4251   public CoprocessorRpcChannel coprocessorService() {
4252     return new MasterCoprocessorRpcChannel(connection);
4253   }
4254 
4255   /**
4256    * Simple {@link Abortable}, throwing RuntimeException on abort.
4257    */
4258   private static class ThrowableAbortable implements Abortable {
4259 
4260     @Override
4261     public void abort(String why, Throwable e) {
4262       throw new RuntimeException(why, e);
4263     }
4264 
4265     @Override
4266     public boolean isAborted() {
4267       return true;
4268     }
4269   }
4270 
4271   /**
4272    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
4273    * connected to the passed region server.
4274    *
4275    * <p>
4276    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
4277    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
4278    * </p>
4279    *
4280    * <div style="background-color: #cccccc; padding: 2px">
4281    * <blockquote><pre>
4282    * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
4283    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
4284    * MyCallRequest request = MyCallRequest.newBuilder()
4285    *     ...
4286    *     .build();
4287    * MyCallResponse response = service.myCall(null, request);
4288    * </pre></blockquote></div>
4289    *
4290    * @param sn the server name to which the endpoint call is made
4291    * @return A RegionServerCoprocessorRpcChannel instance
4292    */
4293   @Override
4294   public CoprocessorRpcChannel coprocessorService(ServerName sn) {
4295     return new RegionServerCoprocessorRpcChannel(connection, sn);
4296   }
4297 
4298   @Override
4299   public void updateConfiguration(ServerName server) throws IOException {
4300     try {
4301       this.connection.getAdmin(server).updateConfiguration(null,
4302         UpdateConfigurationRequest.getDefaultInstance());
4303     } catch (ServiceException e) {
4304       throw ProtobufUtil.getRemoteException(e);
4305     }
4306   }
4307 
4308   @Override
4309   public void updateConfiguration() throws IOException {
4310     for (ServerName server : this.getClusterStatus().getServers()) {
4311       updateConfiguration(server);
4312     }
4313   }
4314 
4315   @Override
4316   public int getMasterInfoPort() throws IOException {
4317     // TODO: Fix!  Reaching into internal implementation!!!!
4318     ConnectionManager.HConnectionImplementation connection =
4319         (ConnectionManager.HConnectionImplementation)this.connection;
4320     ZooKeeperKeepAliveConnection zkw = connection.getKeepAliveZooKeeperWatcher();
4321     try {
4322       return MasterAddressTracker.getMasterInfoPort(zkw);
4323     } catch (KeeperException e) {
4324       throw new IOException("Failed to get master info port from MasterAddressTracker", e);
4325     }
4326   }
4327 
4328   @Override
4329   public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException {
4330     return executeCallable(new MasterCallable<Long>(getConnection()) {
4331       @Override
4332       public Long call(int callTimeout) throws ServiceException {
4333         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4334         controller.setCallTimeout(callTimeout);
4335         MajorCompactionTimestampRequest req =
4336             MajorCompactionTimestampRequest.newBuilder()
4337                 .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
4338         return master.getLastMajorCompactionTimestamp(controller, req).getCompactionTimestamp();
4339       }
4340     });
4341   }
4342 
4343   @Override
4344   public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException {
4345     return executeCallable(new MasterCallable<Long>(getConnection()) {
4346       @Override
4347       public Long call(int callTimeout) throws ServiceException {
4348         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4349         controller.setCallTimeout(callTimeout);
4350         MajorCompactionTimestampForRegionRequest req =
4351             MajorCompactionTimestampForRegionRequest
4352                 .newBuilder()
4353                 .setRegion(
4354                   RequestConverter
4355                       .buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)).build();
4356         return master.getLastMajorCompactionTimestampForRegion(controller, req)
4357             .getCompactionTimestamp();
4358       }
4359     });
4360   }
4361 
4362   /**
4363    * Future that waits on a procedure result.
4364    * Returned by the async version of the Admin calls,
4365    * and used internally by the sync calls to wait on the result of the procedure.
4366    */
4367   @InterfaceAudience.Private
4368   @InterfaceStability.Evolving
4369   protected static class ProcedureFuture<V> implements Future<V> {
4370     private ExecutionException exception = null;
4371     private boolean procResultFound = false;
4372     private boolean done = false;
4373     private boolean cancelled = false;
4374     private V result = null;
4375 
4376     private final HBaseAdmin admin;
4377     private final Long procId;
4378 
4379     public ProcedureFuture(final HBaseAdmin admin, final Long procId) {
4380       this.admin = admin;
4381       this.procId = procId;
4382     }
4383 
4384     @Override
4385     public boolean cancel(boolean mayInterruptIfRunning) {
4386       AbortProcedureRequest abortProcRequest = AbortProcedureRequest.newBuilder()
4387           .setProcId(procId).setMayInterruptIfRunning(mayInterruptIfRunning).build();
4388       try {
4389         cancelled = abortProcedureResult(abortProcRequest).getIsProcedureAborted();
4390         if (cancelled) {
4391           done = true;
4392         }
4393       } catch (IOException e) {
4394         // Cancel threw an exception for some reason. At this point we are not sure whether
4395         // the cancel succeeded or failed, so we assume it failed and log a warning
4396         // for debugging purposes.
4397         LOG.warn(
4398           "Cancelling the procedure with procId=" + procId + " throws exception " + e.getMessage(),
4399           e);
4400         cancelled = false;
4401       }
4402       return cancelled;
4403     }
4404 
4405     @Override
4406     public boolean isCancelled() {
4407       return cancelled;
4408     }
4409 
4410     protected AbortProcedureResponse abortProcedureResult(
4411         final AbortProcedureRequest request) throws IOException {
4412       return admin.executeCallable(new MasterCallable<AbortProcedureResponse>(
4413           admin.getConnection()) {
4414         @Override
4415         public AbortProcedureResponse call(int callTimeout) throws ServiceException {
4416           PayloadCarryingRpcController controller = admin.getRpcControllerFactory().newController();
4417           controller.setCallTimeout(callTimeout);
4418           return master.abortProcedure(controller, request);
4419         }
4420       });
4421     }
4422 
4423     @Override
4424     public V get() throws InterruptedException, ExecutionException {
4425       // TODO: should we ever spin forever?
4426       throw new UnsupportedOperationException();
4427     }
4428 
4429     @Override
4430     public V get(long timeout, TimeUnit unit)
4431         throws InterruptedException, ExecutionException, TimeoutException {
4432       if (!done) {
4433         long deadlineTs = EnvironmentEdgeManager.currentTime() + unit.toMillis(timeout);
4434         try {
4435           try {
4436             // if the master supports procedures, try to wait for the result
4437             if (procId != null) {
4438               result = waitProcedureResult(procId, deadlineTs);
4439             }
4440             // if we don't have a proc result, try the compatibility wait
4441             if (!procResultFound) {
4442               result = waitOperationResult(deadlineTs);
4443             }
4444             result = postOperationResult(result, deadlineTs);
4445             done = true;
4446           } catch (IOException e) {
4447             result = postOpeartionFailure(e, deadlineTs);
4448             done = true;
4449           }
4450         } catch (IOException e) {
4451           exception = new ExecutionException(e);
4452           done = true;
4453         }
4454       }
4455       if (exception != null) {
4456         throw exception;
4457       }
4458       return result;
4459     }
4460 
4461     @Override
4462     public boolean isDone() {
4463       return done;
4464     }
4465 
4466     protected HBaseAdmin getAdmin() {
4467       return admin;
4468     }
4469 
4470     private V waitProcedureResult(long procId, long deadlineTs)
4471         throws IOException, TimeoutException, InterruptedException {
4472       GetProcedureResultRequest request = GetProcedureResultRequest.newBuilder()
4473           .setProcId(procId)
4474           .build();
4475 
4476       int tries = 0;
4477       IOException serviceEx = null;
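           // Poll the master for the procedure result until the deadline, remembering the last
           // service exception so it can be rethrown if we time out.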
4478       while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
4479         GetProcedureResultResponse response = null;
4480         try {
4481           // Try to fetch the result
4482           response = getProcedureResult(request);
4483         } catch (IOException e) {
4484           serviceEx = unwrapException(e);
4485 
4486           // the master may be down
4487           LOG.warn("Failed to get the procedure result, procId=" + procId, serviceEx);
4488 
4489           // Not much to do, if we have a DoNotRetryIOException
4490           if (serviceEx instanceof DoNotRetryIOException) {
4491             // TODO: looks like there is no way to unwrap this exception and get the proper
4492             // UnsupportedOperationException aside from looking at the message.
4493             // anyway, if we fail here we just failover to the compatibility side
4494             // and that is always a valid solution.
4495             LOG.warn("Proc-v2 is unsupported on this master: " + serviceEx.getMessage(), serviceEx);
4496             procResultFound = false;
4497             return null;
4498           }
4499         }
4500 
4501         // If the procedure is no longer running, we should have a result
4502         if (response != null && response.getState() != GetProcedureResultResponse.State.RUNNING) {
4503           procResultFound = response.getState() != GetProcedureResultResponse.State.NOT_FOUND;
4504           return convertResult(response);
4505         }
4506 
4507         try {
4508           Thread.sleep(getAdmin().getPauseTime(tries++));
4509         } catch (InterruptedException e) {
4510           throw new InterruptedException(
4511             "Interrupted while waiting for the result of proc " + procId);
4512         }
4513       }
4514       if (serviceEx != null) {
4515         throw serviceEx;
4516       } else {
4517         throw new TimeoutException("The procedure " + procId + " is still running");
4518       }
4519     }
4520 
4521     private static IOException unwrapException(IOException e) {
4522       if (e instanceof RemoteException) {
4523         return ((RemoteException)e).unwrapRemoteException();
4524       }
4525       return e;
4526     }
4527 
4528     protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request)
4529         throws IOException {
4530       return admin.executeCallable(new MasterCallable<GetProcedureResultResponse>(
4531           admin.getConnection()) {
4532         @Override
4533         public GetProcedureResultResponse call(int callTimeout) throws ServiceException {
4534           return master.getProcedureResult(null, request);
4535         }
4536       });
4537     }
4538 
4539     /**
4540      * Convert the procedure result response to a specified type.
4541      * @param response the procedure result object to parse
4542      * @return the result data of the procedure.
4543      */
4544     protected V convertResult(final GetProcedureResultResponse response) throws IOException {
4545       if (response.hasException()) {
4546         throw ForeignExceptionUtil.toIOException(response.getException());
4547       }
4548       return null;
4549     }
4550 
4551     /**
4552      * Fallback implementation in case the procedure is not supported by the server.
4553      * It should try to wait until the operation is completed.
4554      * @param deadlineTs the timestamp after which this method should throw a TimeoutException
4555      * @return the result data of the operation
4556      */
4557     protected V waitOperationResult(final long deadlineTs)
4558         throws IOException, TimeoutException {
4559       return null;
4560     }
4561 
4562     /**
4563      * Called after the operation is completed and the result fetched.
4564      * This allows extra steps to be performed after the procedure completes,
4565      * and transformations to be applied to the result that will be returned by get().
4566      * @param result the result of the procedure
4567      * @param deadlineTs the timestamp after which this method should throw a TimeoutException
4568      * @return the result of the procedure, which may be the same as the passed one
4569      */
4570     protected V postOperationResult(final V result, final long deadlineTs)
4571         throws IOException, TimeoutException {
4572       return result;
4573     }
4574 
4575     /**
4576      * Called after the operation is terminated with a failure.
4577      * This allows extra steps to be performed after the procedure fails,
4578      * and transformations to be applied to the result that will be returned by get().
4579      * The default implementation rethrows the exception.
4580      * @param exception the exception got from fetching the result
4581      * @param deadlineTs the timestamp after which this method should throw a TimeoutException
4582      * @return the result of the procedure, which may be the same as the passed one
4583      */
4584     protected V postOpeartionFailure(final IOException exception, final long deadlineTs)
4585         throws IOException, TimeoutException {
4586       throw exception;
4587     }
4588 
4589     protected interface WaitForStateCallable {
4590       boolean checkState(int tries) throws IOException;
4591       void throwInterruptedException() throws InterruptedIOException;
4592       void throwTimeoutException(long elapsed) throws TimeoutException;
4593     }
4594 
4595     protected void waitForState(final long deadlineTs, final WaitForStateCallable callable)
4596         throws IOException, TimeoutException {
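           // Repeatedly invoke checkState with increasing backoff until it reports success, the
           // deadline passes, or the wait is interrupted; the last server-side exception, if any,
           // is rethrown once the deadline is reached.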
4597       int tries = 0;
4598       IOException serverEx = null;
4599       long startTime = EnvironmentEdgeManager.currentTime();
4600       while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
4601         serverEx = null;
4602         try {
4603           if (callable.checkState(tries)) {
4604             return;
4605           }
4606         } catch (IOException e) {
4607           serverEx = e;
4608         }
4609         try {
4610           Thread.sleep(getAdmin().getPauseTime(tries++));
4611         } catch (InterruptedException e) {
4612           callable.throwInterruptedException();
4613         }
4614       }
4615       if (serverEx != null) {
4616         throw unwrapException(serverEx);
4617       } else {
4618         callable.throwTimeoutException(EnvironmentEdgeManager.currentTime() - startTime);
4619       }
4620     }
4621   }
4622 
4623   private RpcControllerFactory getRpcControllerFactory() {
4624     return rpcControllerFactory;
4625   }
4626 }