1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.client;
20  
21  import java.io.IOException;
22  import java.io.InterruptedIOException;
23  import java.net.SocketTimeoutException;
24  import java.util.ArrayList;
25  import java.util.Arrays;
26  import java.util.HashMap;
27  import java.util.LinkedList;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.Map.Entry;
31  import java.util.concurrent.atomic.AtomicInteger;
32  import java.util.concurrent.atomic.AtomicReference;
33  import java.util.concurrent.ExecutionException;
34  import java.util.concurrent.Future;
35  import java.util.concurrent.TimeUnit;
36  import java.util.concurrent.TimeoutException;
37  import java.util.regex.Pattern;
38  
39  import org.apache.commons.logging.Log;
40  import org.apache.commons.logging.LogFactory;
41  import org.apache.hadoop.conf.Configuration;
42  import org.apache.hadoop.hbase.Abortable;
43  import org.apache.hadoop.hbase.ClusterStatus;
44  import org.apache.hadoop.hbase.DoNotRetryIOException;
45  import org.apache.hadoop.hbase.HBaseConfiguration;
46  import org.apache.hadoop.hbase.HColumnDescriptor;
47  import org.apache.hadoop.hbase.HConstants;
48  import org.apache.hadoop.hbase.HRegionInfo;
49  import org.apache.hadoop.hbase.HRegionLocation;
50  import org.apache.hadoop.hbase.HTableDescriptor;
51  import org.apache.hadoop.hbase.MasterNotRunningException;
52  import org.apache.hadoop.hbase.MetaTableAccessor;
53  import org.apache.hadoop.hbase.NamespaceDescriptor;
54  import org.apache.hadoop.hbase.NotServingRegionException;
55  import org.apache.hadoop.hbase.ProcedureInfo;
56  import org.apache.hadoop.hbase.RegionException;
57  import org.apache.hadoop.hbase.RegionLocations;
58  import org.apache.hadoop.hbase.ServerName;
59  import org.apache.hadoop.hbase.TableExistsException;
60  import org.apache.hadoop.hbase.TableName;
61  import org.apache.hadoop.hbase.TableNotDisabledException;
62  import org.apache.hadoop.hbase.TableNotEnabledException;
63  import org.apache.hadoop.hbase.TableNotFoundException;
64  import org.apache.hadoop.hbase.UnknownRegionException;
65  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
66  import org.apache.hadoop.hbase.classification.InterfaceAudience;
67  import org.apache.hadoop.hbase.classification.InterfaceStability;
68  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
69  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
70  import org.apache.hadoop.hbase.client.security.SecurityCapability;
71  import org.apache.hadoop.hbase.exceptions.DeserializationException;
72  import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
73  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
74  import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel;
75  import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
76  import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel;
77  import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
78  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
79  import org.apache.hadoop.hbase.protobuf.RequestConverter;
80  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
81  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
82  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
83  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
84  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
85  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
86  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
87  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
88  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
89  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
90  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
91  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
92  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
93  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
94  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
95  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
96  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
97  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
98  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
99  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse;
100 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
101 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
102 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
103 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
104 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
105 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
106 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
107 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
108 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
109 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
110 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
111 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
112 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
113 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
114 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
115 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
116 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
117 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
118 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
119 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
120 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
121 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;
122 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
123 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
124 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
125 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
126 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
127 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
128 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
129 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
130 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
131 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
132 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
133 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
134 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest;
135 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
136 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
137 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
138 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
139 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
140 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
141 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
142 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
143 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
144 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
145 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
146 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
147 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
148 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
149 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
150 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
151 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
152 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
153 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
154 import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos;
155 import org.apache.hadoop.hbase.quotas.QuotaFilter;
156 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
157 import org.apache.hadoop.hbase.quotas.QuotaSettings;
158 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
159 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
160 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
161 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
162 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
163 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
164 import org.apache.hadoop.hbase.util.Addressing;
165 import org.apache.hadoop.hbase.util.Bytes;
166 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
167 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
168 import org.apache.hadoop.hbase.util.Pair;
169 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
170 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
171 import org.apache.hadoop.ipc.RemoteException;
172 import org.apache.hadoop.util.StringUtils;
173 import org.apache.zookeeper.KeeperException;
174 
175 import com.google.common.annotations.VisibleForTesting;
176 import com.google.protobuf.ByteString;
177 import com.google.protobuf.ServiceException;
178 
179 /**
180  * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
181  * this is an HBase-internal class as defined in
182  * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
183  * There are no guarantees for backwards source / binary compatibility, and methods or the class
184  * itself can change or go away without deprecation.
185  * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing
186  * an HBaseAdmin directly.
187  *
188  * <p>Connection should be an <i>unmanaged</i> connection obtained via
189  * {@link ConnectionFactory#createConnection(Configuration)}
190  *
191  * @see ConnectionFactory
192  * @see Connection
193  * @see Admin
194  */
195 @InterfaceAudience.Private
196 @InterfaceStability.Evolving
197 public class HBaseAdmin implements Admin {
198   private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);
199 
200   private static final String ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";
201 
202   private ClusterConnection connection;
203 
204   private volatile Configuration conf;
205   private final long pause;
206   private final int numRetries;
207   // Some operations can take a long time such as disable of big table.
208   // numRetries is for 'normal' stuff... Multiply by this factor when we
209   // want to wait a long time.
210   private final int retryLongerMultiplier;
211   private final int syncWaitTimeout;
212   private boolean aborted;
213   private boolean cleanupConnectionOnClose = false; // close the connection in close()
214   private boolean closed = false;
215   private int operationTimeout;
216   private int rpcTimeout;
217 
218   private RpcRetryingCallerFactory rpcCallerFactory;
219   private RpcControllerFactory rpcControllerFactory;
220 
221   private NonceGenerator ng;
222 
223   /**
224    * Constructor.
225    * See {@link #HBaseAdmin(Connection connection)}
226    *
227    * @param c Configuration object. Copied internally.
228    * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
229    * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
230    */
231   @Deprecated
232   public HBaseAdmin(Configuration c)
233   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
234     // Will not leak connections, as the new implementation of the constructor
235     // does not throw exceptions anymore.
236     this(ConnectionManager.getConnectionInternal(new Configuration(c)));
237     this.cleanupConnectionOnClose = true;
238   }
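  // Usage sketch (illustrative, not part of this class): the recommended replacement for this
  // deprecated constructor is an Admin obtained from an unmanaged Connection, as the class
  // javadoc describes. The Configuration is assumed to be supplied by the caller.
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //        Admin admin = conn.getAdmin()) {
  //     // use admin here; closing the Connection releases the underlying resources
  //   }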
239 
240   @Override
241   public int getOperationTimeout() {
242     return operationTimeout;
243   }
244 
245 
246   /**
247    * Constructor for externally managed Connections.
248    * The connection to master will be created when required by admin functions.
249    *
250    * @param connection The Connection instance to use
251    * @throws MasterNotRunningException no longer thrown; kept for backward API compatibility
252    * @throws ZooKeeperConnectionException no longer thrown; kept for backward API
253    *  compatibility
254    * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
255    * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
256    */
257   @Deprecated
258   public HBaseAdmin(Connection connection)
259       throws MasterNotRunningException, ZooKeeperConnectionException {
260     this((ClusterConnection)connection);
261   }
262 
263   HBaseAdmin(ClusterConnection connection) {
264     this.conf = connection.getConfiguration();
265     this.connection = connection;
266 
267     // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time.
268     this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
269         HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
270     this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
271         HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
272     this.retryLongerMultiplier = this.conf.getInt(
273         "hbase.client.retries.longer.multiplier", 10);
274     this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
275         HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
276     this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
277         HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
278     this.syncWaitTimeout = this.conf.getInt(
279       "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
280 
281     this.rpcCallerFactory = connection.getRpcRetryingCallerFactory();
282     this.rpcControllerFactory = connection.getRpcControllerFactory();
283 
284     this.ng = this.connection.getNonceGenerator();
285   }
286 
287   @Override
288   public void abort(String why, Throwable e) {
289     // Currently just records the abort and rethrows the passed message and exception
290     this.aborted = true;
291     throw new RuntimeException(why, e);
292   }
293 
294   @Override
295   public boolean isAborted(){
296     return this.aborted;
297   }
298 
299   /**
300    * Abort a procedure
301    * @param procId ID of the procedure to abort
302    * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
303    * @return true if aborted, false if procedure already completed or does not exist
304    * @throws IOException
305    */
306   @Override
307   public boolean abortProcedure(
308       final long procId,
309       final boolean mayInterruptIfRunning) throws IOException {
310     Future<Boolean> future = abortProcedureAsync(procId, mayInterruptIfRunning);
311     try {
312       return future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
313     } catch (InterruptedException e) {
314       throw new InterruptedIOException("Interrupted when waiting for procedure to be cancelled");
315     } catch (TimeoutException e) {
316       throw new TimeoutIOException(e);
317     } catch (ExecutionException e) {
318       if (e.getCause() instanceof IOException) {
319         throw (IOException)e.getCause();
320       } else {
321         throw new IOException(e.getCause());
322       }
323     }
324   }
325 
326   /**
327    * Abort a procedure, but do not block and wait for it to be completely removed.
328    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
329    * It may throw ExecutionException if there was an error while executing the operation
330    * or TimeoutException in case the wait timeout was not long enough to allow the
331    * operation to complete.
332    *
333    * @param procId ID of the procedure to abort
334    * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
335    * @return true if aborted, false if procedure already completed or does not exist
336    * @throws IOException
337    */
338   @Override
339   public Future<Boolean> abortProcedureAsync(
340       final long procId,
341       final boolean mayInterruptIfRunning) throws IOException {
342     Boolean abortProcResponse = executeCallable(
343       new MasterCallable<AbortProcedureResponse>(getConnection()) {
344         @Override
345         public AbortProcedureResponse call(int callTimeout) throws ServiceException {
346           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
347           controller.setCallTimeout(callTimeout);
348           AbortProcedureRequest abortProcRequest =
349               AbortProcedureRequest.newBuilder().setProcId(procId).build();
350           return master.abortProcedure(controller, abortProcRequest);
351         }
352       }).getIsProcedureAborted();
353 
354     AbortProcedureFuture abortProcFuture =
355         new AbortProcedureFuture(this, procId, abortProcResponse);
356     return abortProcFuture;
357   }
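  // Usage sketch (illustrative): waiting on the Future returned above with an explicit timeout,
  // as described in the method javadoc. "admin" and the procedure id are assumed/hypothetical.
  //
  //   Future<Boolean> abortFuture = admin.abortProcedureAsync(procId, true);
  //   try {
  //     boolean aborted = abortFuture.get(30, TimeUnit.SECONDS);
  //   } catch (InterruptedException e) {
  //     Thread.currentThread().interrupt();
  //   } catch (ExecutionException e) {
  //     // e.getCause() carries the server-side exception
  //   } catch (TimeoutException e) {
  //     // the wait timed out; the abort may still be in progress
  //   }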
358 
359   private static class AbortProcedureFuture extends ProcedureFuture<Boolean> {
360     private boolean isAbortInProgress;
361 
362     public AbortProcedureFuture(
363         final HBaseAdmin admin,
364         final Long procId,
365         final Boolean abortProcResponse) {
366       super(admin, procId);
367       this.isAbortInProgress = abortProcResponse;
368     }
369 
370     @Override
371     public Boolean get(long timeout, TimeUnit unit)
372         throws InterruptedException, ExecutionException, TimeoutException {
373       if (!this.isAbortInProgress) {
374         return false;
375       }
376       super.get(timeout, unit);
377       return true;
378     }
379   }
380 
381   /** @return HConnection used by this object. */
382   @Override
383   public HConnection getConnection() {
384     return connection;
385   }
386 
387   /** @return true if the master server is running; throws an exception
388    *  otherwise.
389    * @throws ZooKeeperConnectionException
390    * @throws MasterNotRunningException
391    * @deprecated this has been deprecated without a replacement
392    */
393   @Deprecated
394   public boolean isMasterRunning()
395   throws MasterNotRunningException, ZooKeeperConnectionException {
396     return connection.isMasterRunning();
397   }
398 
399   /**
400    * @param tableName Table to check.
401    * @return True if table exists already.
402    * @throws IOException
403    */
404   @Override
405   public boolean tableExists(final TableName tableName) throws IOException {
406     return MetaTableAccessor.tableExists(connection, tableName);
407   }
408 
409   public boolean tableExists(final byte[] tableName)
410   throws IOException {
411     return tableExists(TableName.valueOf(tableName));
412   }
413 
414   public boolean tableExists(final String tableName)
415   throws IOException {
416     return tableExists(TableName.valueOf(tableName));
417   }
418 
419   @Override
420   public HTableDescriptor[] listTables() throws IOException {
421     return listTables((Pattern)null, false);
422   }
423 
424   @Override
425   public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
426     return listTables(pattern, false);
427   }
428 
429   @Override
430   public HTableDescriptor[] listTables(String regex) throws IOException {
431     return listTables(Pattern.compile(regex), false);
432   }
433 
434   @Override
435   public HTableDescriptor[] listTables(final Pattern pattern, final boolean includeSysTables)
436       throws IOException {
437     return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
438       @Override
439       public HTableDescriptor[] call(int callTimeout) throws ServiceException {
440         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
441         controller.setCallTimeout(callTimeout);
442         GetTableDescriptorsRequest req =
443             RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
444         return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(controller, req));
445       }
446     });
447   }
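  // Usage sketch (illustrative): listing user tables whose names match a pattern; "admin" and the
  // namespace-qualified regex are hypothetical.
  //
  //   HTableDescriptor[] matched = admin.listTables(Pattern.compile("myapp:.*"), false);
  //   for (HTableDescriptor htd : matched) {
  //     System.out.println(htd.getTableName().getNameAsString());
  //   }
  //   // listTableNames(Pattern, boolean) is cheaper when only the names are needed.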
448 
449   @Override
450   public HTableDescriptor[] listTables(String regex, boolean includeSysTables)
451       throws IOException {
452     return listTables(Pattern.compile(regex), includeSysTables);
453   }
454 
455   /**
456    * List all of the names of userspace tables.
457    * @return String[] table names
458    * @throws IOException if a remote or network exception occurs
459    * @deprecated Use {@link Admin#listTableNames()} instead
460    */
461   @Deprecated
462   public String[] getTableNames() throws IOException {
463     TableName[] tableNames = listTableNames();
464     String result[] = new String[tableNames.length];
465     for (int i = 0; i < tableNames.length; i++) {
466       result[i] = tableNames[i].getNameAsString();
467     }
468     return result;
469   }
470 
471   /**
472    * List all of the names of userspace tables matching the given regular expression.
473    * @param pattern The regular expression to match against
474    * @return String[] table names
475    * @throws IOException if a remote or network exception occurs
476    * @deprecated Use {@link Admin#listTableNames(Pattern)} instead.
477    */
478   @Deprecated
479   public String[] getTableNames(Pattern pattern) throws IOException {
480     TableName[] tableNames = listTableNames(pattern);
481     String result[] = new String[tableNames.length];
482     for (int i = 0; i < tableNames.length; i++) {
483       result[i] = tableNames[i].getNameAsString();
484     }
485     return result;
486   }
487 
488   /**
489    * List all of the names of userspace tables matching the given regular expression.
490    * @param regex The regular expression to match against
491    * @return String[] table names
492    * @throws IOException if a remote or network exception occurs
493    * @deprecated Use {@link Admin#listTableNames(Pattern)} instead.
494    */
495   @Deprecated
496   public String[] getTableNames(String regex) throws IOException {
497     return getTableNames(Pattern.compile(regex));
498   }
499 
500   @Override
501   public TableName[] listTableNames() throws IOException {
502     return listTableNames((Pattern)null, false);
503   }
504 
505   @Override
506   public TableName[] listTableNames(Pattern pattern) throws IOException {
507     return listTableNames(pattern, false);
508   }
509 
510   @Override
511   public TableName[] listTableNames(String regex) throws IOException {
512     return listTableNames(Pattern.compile(regex), false);
513   }
514 
515   @Override
516   public TableName[] listTableNames(final Pattern pattern, final boolean includeSysTables)
517       throws IOException {
518     return executeCallable(new MasterCallable<TableName[]>(getConnection()) {
519       @Override
520       public TableName[] call(int callTimeout) throws ServiceException {
521         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
522         controller.setCallTimeout(callTimeout);
523         GetTableNamesRequest req =
524             RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables);
525         return ProtobufUtil.getTableNameArray(master.getTableNames(controller, req)
526             .getTableNamesList());
527       }
528     });
529   }
530 
531   @Override
532   public TableName[] listTableNames(final String regex, final boolean includeSysTables)
533       throws IOException {
534     return listTableNames(Pattern.compile(regex), includeSysTables);
535   }
536 
537   /**
538    * Get the descriptor of an existing table.
539    * @param tableName name of the table
540    * @return the tableDescriptor
541    * @throws TableNotFoundException
542    * @throws IOException if a remote or network exception occurs
543    */
544   @Override
545   public HTableDescriptor getTableDescriptor(final TableName tableName)
546   throws TableNotFoundException, IOException {
547      return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory,
548        operationTimeout, rpcTimeout);
549   }
550 
551   static HTableDescriptor getTableDescriptor(final TableName tableName, HConnection connection,
552       RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
553          int operationTimeout, int rpcTimeout) throws TableNotFoundException, IOException {
554 
555       if (tableName == null) return null;
556       HTableDescriptor htd = executeCallable(new MasterCallable<HTableDescriptor>(connection) {
557         @Override
558         public HTableDescriptor call(int callTimeout) throws ServiceException {
559           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
560           controller.setCallTimeout(callTimeout);
561           GetTableDescriptorsResponse htds;
562           GetTableDescriptorsRequest req =
563                   RequestConverter.buildGetTableDescriptorsRequest(tableName);
564           htds = master.getTableDescriptors(controller, req);
565 
566           if (!htds.getTableSchemaList().isEmpty()) {
567             return HTableDescriptor.convert(htds.getTableSchemaList().get(0));
568           }
569           return null;
570         }
571       }, rpcCallerFactory, operationTimeout, rpcTimeout);
572       if (htd != null) {
573         return htd;
574       }
575       throw new TableNotFoundException(tableName.getNameAsString());
576   }
577 
578   public HTableDescriptor getTableDescriptor(final byte[] tableName)
579   throws TableNotFoundException, IOException {
580     return getTableDescriptor(TableName.valueOf(tableName));
581   }
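  // Usage sketch (illustrative): fetching a descriptor and inspecting its column families;
  // "admin" and the table name "t1" are hypothetical.
  //
  //   HTableDescriptor htd = admin.getTableDescriptor(TableName.valueOf("t1"));
  //   for (HColumnDescriptor family : htd.getColumnFamilies()) {
  //     System.out.println(family.getNameAsString() + " maxVersions=" + family.getMaxVersions());
  //   }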
582 
583   private long getPauseTime(int tries) {
584     int triesCount = tries;
585     if (triesCount >= HConstants.RETRY_BACKOFF.length) {
586       triesCount = HConstants.RETRY_BACKOFF.length - 1;
587     }
588     return this.pause * HConstants.RETRY_BACKOFF[triesCount];
589   }
590 
591   /**
592    * Creates a new table.
593    * Synchronous operation.
594    *
595    * @param desc table descriptor for table
596    *
597    * @throws IllegalArgumentException if the table name is reserved
598    * @throws MasterNotRunningException if master is not running
599    * @throws TableExistsException if table already exists (with concurrent callers, the
600    * table may have been created between the test for existence
601    * and the attempt at creation).
602    * @throws IOException if a remote or network exception occurs
603    */
604   @Override
605   public void createTable(HTableDescriptor desc)
606   throws IOException {
607     createTable(desc, null);
608   }
609 
610   /**
611    * Creates a new table with the specified number of regions.  The start key
612    * specified will become the end key of the first region of the table, and
613    * the end key specified will become the start key of the last region of the
614    * table (the first region has a null start key and the last region has a
615    * null end key).
616    *
617    * BigInteger math will be used to divide the key range specified into
618    * enough segments to make the required number of total regions.
619    *
620    * Synchronous operation.
621    *
622    * @param desc table descriptor for table
623    * @param startKey beginning of key range
624    * @param endKey end of key range
625    * @param numRegions the total number of regions to create
626    *
627    * @throws IllegalArgumentException if the table name is reserved
628    * @throws MasterNotRunningException if master is not running
629    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (with concurrent
630    * callers, the table may have been created between the test for existence
631    * and the attempt at creation).
632    * @throws IOException
633    */
634   @Override
635   public void createTable(HTableDescriptor desc, byte [] startKey,
636       byte [] endKey, int numRegions)
637   throws IOException {
638     if(numRegions < 3) {
639       throw new IllegalArgumentException("Must create at least three regions");
640     } else if(Bytes.compareTo(startKey, endKey) >= 0) {
641       throw new IllegalArgumentException("Start key must be smaller than end key");
642     }
643     if (numRegions == 3) {
644       createTable(desc, new byte[][]{startKey, endKey});
645       return;
646     }
647     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
648     if(splitKeys == null || splitKeys.length != numRegions - 1) {
649       throw new IllegalArgumentException("Unable to split key range into enough regions");
650     }
651     createTable(desc, splitKeys);
652   }
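  // Usage sketch (illustrative): creating a pre-split table by dividing a key range into a fixed
  // number of regions, as described above. "admin", the table and the family are hypothetical.
  //
  //   HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("t1"));
  //   desc.addFamily(new HColumnDescriptor("cf"));
  //   // 10 regions: the first region ends at the start key, the last region starts at the end key
  //   admin.createTable(desc, Bytes.toBytes("row-00000"), Bytes.toBytes("row-99999"), 10);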
653 
654   /**
655    * Creates a new table with an initial set of empty regions defined by the
656    * specified split keys.  The total number of regions created will be the
657    * number of split keys plus one. Synchronous operation.
658    * Note: avoid passing an empty split key.
659    *
660    * @param desc table descriptor for table
661    * @param splitKeys array of split keys for the initial regions of the table
662    *
663    * @throws IllegalArgumentException if the table name is reserved, if the split keys
664    * are repeated, or if a split key is an empty byte array.
665    * @throws MasterNotRunningException if master is not running
666    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (with concurrent
667    * callers, the table may have been created between the test for existence
668    * and the attempt at creation).
669    * @throws IOException
670    */
671   @Override
672   public void createTable(final HTableDescriptor desc, byte [][] splitKeys)
673       throws IOException {
674     Future<Void> future = createTableAsyncV2(desc, splitKeys);
675     try {
676       // TODO: how long should we wait? spin forever?
677       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
678     } catch (InterruptedException e) {
679       throw new InterruptedIOException("Interrupted when waiting" +
680           " for table to be enabled; meta scan was done");
681     } catch (TimeoutException e) {
682       throw new TimeoutIOException(e);
683     } catch (ExecutionException e) {
684       if (e.getCause() instanceof IOException) {
685         throw (IOException)e.getCause();
686       } else {
687         throw new IOException(e.getCause());
688       }
689     }
690   }
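  // Usage sketch (illustrative): creating a table with explicit split keys; the region count is
  // the number of split keys plus one. "admin" and all names/keys are hypothetical.
  //
  //   HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("t2"));
  //   desc.addFamily(new HColumnDescriptor("cf"));
  //   byte[][] splits = { Bytes.toBytes("g"), Bytes.toBytes("n"), Bytes.toBytes("t") };
  //   admin.createTable(desc, splits); // four regions: (-inf,"g"), ["g","n"), ["n","t"), ["t",+inf)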
691 
692   /**
693    * Creates a new table but does not block and wait for it to come online.
694    * Asynchronous operation.  To check if the table exists, use
695    * {@link #isTableAvailable} -- it is not safe to create an HTable
696    * instance for this table before it is available.
697    * Note: avoid passing an empty split key.
698    * @param desc table descriptor for table
699    * @param splitKeys array of split keys for the initial regions of the table
700    * @throws IllegalArgumentException if the table name is invalid, if the split keys
701    * are repeated, or if a split key is an empty byte array.
702    * @throws MasterNotRunningException if master is not running
703    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (with concurrent
704    * callers, the table may have been created between the test for existence
705    * and the attempt at creation).
706    * @throws IOException
707    */
708   @Override
709   public void createTableAsync(final HTableDescriptor desc, final byte [][] splitKeys)
710       throws IOException {
711     createTableAsyncV2(desc, splitKeys);
712   }
713 
714   /**
715    * Creates a new table but does not block and wait for it to come online.
716    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
717    * It may throw ExecutionException if there was an error while executing the operation
718    * or TimeoutException in case the wait timeout was not long enough to allow the
719    * operation to complete.
720    *
721    * @param desc table descriptor for table
722    * @param splitKeys keys to check if the table has been created with all split keys
723    * @throws IllegalArgumentException if the table name is invalid, if the split keys
724    *    are repeated, or if a split key is an empty byte array.
725    * @throws IOException if a remote or network exception occurs
726    * @return the result of the async creation. You can use Future.get(long, TimeUnit)
727    *    to wait on the operation to complete.
728    */
729   // TODO: This should be called Async but it will break binary compatibility
730   private Future<Void> createTableAsyncV2(final HTableDescriptor desc, final byte[][] splitKeys)
731       throws IOException {
732     if (desc.getTableName() == null) {
733       throw new IllegalArgumentException("TableName cannot be null");
734     }
735     if (splitKeys != null && splitKeys.length > 0) {
736       Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
737       // Verify there are no duplicate split keys
738       byte[] lastKey = null;
739       for (byte[] splitKey : splitKeys) {
740         if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
741           throw new IllegalArgumentException(
742               "Empty split key must not be passed in the split keys.");
743         }
744         if (lastKey != null && Bytes.equals(splitKey, lastKey)) {
745           throw new IllegalArgumentException("All split keys must be unique, " +
746             "found duplicate: " + Bytes.toStringBinary(splitKey) +
747             ", " + Bytes.toStringBinary(lastKey));
748         }
749         lastKey = splitKey;
750       }
751     }
752 
753     CreateTableResponse response = executeCallable(
754       new MasterCallable<CreateTableResponse>(getConnection()) {
755         @Override
756         public CreateTableResponse call(int callTimeout) throws ServiceException {
757           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
758           controller.setCallTimeout(callTimeout);
759           controller.setPriority(desc.getTableName());
760           CreateTableRequest request = RequestConverter.buildCreateTableRequest(
761             desc, splitKeys, ng.getNonceGroup(), ng.newNonce());
762           return master.createTable(controller, request);
763         }
764       });
765     return new CreateTableFuture(this, desc, splitKeys, response);
766   }
767 
768   private static class CreateTableFuture extends ProcedureFuture<Void> {
769     private final HTableDescriptor desc;
770     private final byte[][] splitKeys;
771 
772     public CreateTableFuture(final HBaseAdmin admin, final HTableDescriptor desc,
773         final byte[][] splitKeys, final CreateTableResponse response) {
774       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null, true);
775       this.splitKeys = splitKeys;
776       this.desc = desc;
777     }
778 
779     @Override
780     protected Void waitOperationResult(final long deadlineTs)
781         throws IOException, TimeoutException {
782       waitForTableEnabled(deadlineTs);
783       waitForAllRegionsOnline(deadlineTs);
784       return null;
785     }
786 
787     @Override
788     protected Void postOperationResult(final Void result, final long deadlineTs)
789         throws IOException, TimeoutException {
790       LOG.info("Created " + desc.getTableName());
791       return result;
792     }
793 
794     private void waitForTableEnabled(final long deadlineTs)
795         throws IOException, TimeoutException {
796       waitForState(deadlineTs, new WaitForStateCallable() {
797         @Override
798         public boolean checkState(int tries) throws IOException {
799           try {
800             if (getAdmin().isTableAvailable(desc.getTableName())) {
801               return true;
802             }
803           } catch (TableNotFoundException tnfe) {
804             LOG.debug("Table "+ desc.getTableName() +" was not enabled, sleeping. tries="+  tries);
805           }
806           return false;
807         }
808 
809         @Override
810         public void throwInterruptedException() throws InterruptedIOException {
811           throw new InterruptedIOException("Interrupted when waiting for table " +
812               desc.getTableName() + " to be enabled");
813         }
814 
815         @Override
816         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
817           throw new TimeoutException("Table " + desc.getTableName() +
818             " not enabled after " + elapsedTime + "msec");
819         }
820       });
821     }
822 
823     private void waitForAllRegionsOnline(final long deadlineTs)
824         throws IOException, TimeoutException {
825       final AtomicInteger actualRegCount = new AtomicInteger(0);
826       final MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
827         @Override
828         public boolean processRow(Result rowResult) throws IOException {
829           RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
830           if (list == null) {
831             LOG.warn("No serialized HRegionInfo in " + rowResult);
832             return true;
833           }
834           HRegionLocation l = list.getRegionLocation();
835           if (l == null) {
836             return true;
837           }
838           if (!l.getRegionInfo().getTable().equals(desc.getTableName())) {
839             return false;
840           }
841           if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true;
842           HRegionLocation[] locations = list.getRegionLocations();
843           for (HRegionLocation location : locations) {
844             if (location == null) continue;
845             ServerName serverName = location.getServerName();
846             // Make sure that regions are assigned to server
847             if (serverName != null && serverName.getHostAndPort() != null) {
848               actualRegCount.incrementAndGet();
849             }
850           }
851           return true;
852         }
853       };
854 
855       int tries = 0;
856       IOException serverEx = null;
857       int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication();
858       while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
859         actualRegCount.set(0);
860         MetaScanner.metaScan(getAdmin().getConnection(), visitor, desc.getTableName());
861         if (actualRegCount.get() == numRegs) {
862           // all the regions are online
863           return;
864         }
865 
866         try {
867           Thread.sleep(getAdmin().getPauseTime(tries++));
868         } catch (InterruptedException e) {
869           throw new InterruptedIOException("Interrupted when opening" +
870             " regions; " + actualRegCount.get() + " of " + numRegs +
871             " regions processed so far");
872         }
873       }
874       throw new TimeoutException("Only " + actualRegCount.get() +
875               " of " + numRegs + " regions are online; retries exhausted.");
876     }
877   }
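  // Illustrative note: waitForAllRegionsOnline above expects
  //   numRegs = (number of split keys + 1) * desc.getRegionReplication()
  // region locations in hbase:meta. For example, a table created with 4 split keys and region
  // replication 2 is only considered fully online once (4 + 1) * 2 = 10 replicas are assigned.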
878 
879   public void deleteTable(final String tableName) throws IOException {
880     deleteTable(TableName.valueOf(tableName));
881   }
882 
883   public void deleteTable(final byte[] tableName) throws IOException {
884     deleteTable(TableName.valueOf(tableName));
885   }
886 
887   /**
888    * Deletes a table.
889    * Synchronous operation.
890    *
891    * @param tableName name of table to delete
892    * @throws IOException if a remote or network exception occurs
893    */
894   @Override
895   public void deleteTable(final TableName tableName) throws IOException {
896     Future<Void> future = deleteTableAsyncV2(tableName);
897     try {
898       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
899     } catch (InterruptedException e) {
900       throw new InterruptedIOException("Interrupted when waiting for table to be deleted");
901     } catch (TimeoutException e) {
902       throw new TimeoutIOException(e);
903     } catch (ExecutionException e) {
904       if (e.getCause() instanceof IOException) {
905         throw (IOException)e.getCause();
906       } else {
907         throw new IOException(e.getCause());
908       }
909     }
910   }
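  // Usage sketch (illustrative): a table has to be disabled before it can be deleted, otherwise
  // the master rejects the request (typically with a TableNotDisabledException). "admin" and the
  // table name are hypothetical.
  //
  //   TableName tn = TableName.valueOf("t1");
  //   if (admin.isTableEnabled(tn)) {
  //     admin.disableTable(tn);
  //   }
  //   admin.deleteTable(tn);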
911 
912   /**
913    * Deletes the table but does not block and wait for it to be completely removed.
914    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
915    * It may throw ExecutionException if there was an error while executing the operation
916    * or TimeoutException in case the wait timeout was not long enough to allow the
917    * operation to complete.
918    *
920    * @param tableName name of table to delete
921    * @throws IOException if a remote or network exception occurs
922    * @return the result of the async delete. You can use Future.get(long, TimeUnit)
923    *    to wait on the operation to complete.
924    */
925   // TODO: This should be called Async but it will break binary compatibility
926   private Future<Void> deleteTableAsyncV2(final TableName tableName) throws IOException {
927     DeleteTableResponse response = executeCallable(
928       new MasterCallable<DeleteTableResponse>(getConnection()) {
929         @Override
930         public DeleteTableResponse call(int callTimeout) throws ServiceException {
931           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
932           controller.setCallTimeout(callTimeout);
933           controller.setPriority(tableName);
934           DeleteTableRequest req =
935               RequestConverter.buildDeleteTableRequest(tableName, ng.getNonceGroup(),ng.newNonce());
936           return master.deleteTable(controller,req);
937         }
938       });
939     return new DeleteTableFuture(this, tableName, response);
940   }
941 
942   private static class DeleteTableFuture extends ProcedureFuture<Void> {
943     private final TableName tableName;
944 
945     public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName,
946         final DeleteTableResponse response) {
947       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
948       this.tableName = tableName;
949     }
950 
951     @Override
952     protected Void waitOperationResult(final long deadlineTs)
953         throws IOException, TimeoutException {
954       waitTableNotFound(deadlineTs);
955       return null;
956     }
957 
958     @Override
959     protected Void postOperationResult(final Void result, final long deadlineTs)
960         throws IOException, TimeoutException {
961       // Delete cached information to prevent clients from using old locations
962       getAdmin().getConnection().clearRegionCache(tableName);
963       LOG.info("Deleted " + tableName);
964       return result;
965     }
966 
967     private void waitTableNotFound(final long deadlineTs)
968         throws IOException, TimeoutException {
969       waitForState(deadlineTs, new WaitForStateCallable() {
970         @Override
971         public boolean checkState(int tries) throws IOException {
972           return !getAdmin().tableExists(tableName);
973         }
974 
975         @Override
976         public void throwInterruptedException() throws InterruptedIOException {
977           throw new InterruptedIOException("Interrupted when waiting for table to be deleted");
978         }
979 
980         @Override
981         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
982           throw new TimeoutException("Table " + tableName + " not yet deleted after " +
983               elapsedTime + "msec");
984         }
985       });
986     }
987   }
988 
989   /**
990    * Deletes tables matching the passed-in pattern and waits on completion.
991    *
992    * Warning: Use this method carefully, there is no prompting and the effect is
993    * immediate. Consider using {@link #listTables(java.lang.String)} and
994    * {@link #deleteTable(byte[])}
995    *
996    * @param regex The regular expression to match table names against
997    * @return Table descriptors for tables that couldn't be deleted
998    * @throws IOException
999    * @see #deleteTables(java.util.regex.Pattern)
1000    * @see #deleteTable(java.lang.String)
1001    */
1002   @Override
1003   public HTableDescriptor[] deleteTables(String regex) throws IOException {
1004     return deleteTables(Pattern.compile(regex));
1005   }
1006 
1007   /**
1008    * Deletes tables matching the passed-in pattern and waits on completion.
1009    *
1010    * Warning: Use this method carefully, there is no prompting and the effect is
1011    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
1012    * {@link #deleteTable(byte[])}
1013    *
1014    * @param pattern The pattern to match table names against
1015    * @return Table descriptors for tables that couldn't be deleted
1016    * @throws IOException
1017    */
1018   @Override
1019   public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException {
1020     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1021     for (HTableDescriptor table : listTables(pattern)) {
1022       try {
1023         deleteTable(table.getTableName());
1024       } catch (IOException ex) {
1025         LOG.info("Failed to delete table " + table.getTableName(), ex);
1026         failed.add(table);
1027       }
1028     }
1029     return failed.toArray(new HTableDescriptor[failed.size()]);
1030   }
1031 
1032   /**
1033    * Truncate a table.
1034    * Synchronous operation.
1035    *
1036    * @param tableName name of table to truncate
1037    * @param preserveSplits True if the splits should be preserved
1038    * @throws IOException if a remote or network exception occurs
1039    */
1040   @Override
1041   public void truncateTable(final TableName tableName, final boolean preserveSplits)
1042       throws IOException {
1043     executeCallable(new MasterCallable<Void>(getConnection()) {
1044       @Override
1045       public Void call(int callTimeout) throws ServiceException {
1046         TruncateTableRequest req = RequestConverter.buildTruncateTableRequest(
1047           tableName, preserveSplits, ng.getNonceGroup(), ng.newNonce());
1048         master.truncateTable(null, req);
1049         return null;
1050       }
1051     });
1052   }
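  // Usage sketch (illustrative): truncating a table while keeping its region boundaries; the
  // table is expected to be disabled first. With preserveSplits=false it is recreated as a
  // single region. "admin" and the table name are hypothetical.
  //
  //   TableName tn = TableName.valueOf("t1");
  //   admin.disableTable(tn);
  //   admin.truncateTable(tn, true); // keep the existing split points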
1053 
1054   /**
1055    * Enable a table.  May timeout.  Use {@link #enableTableAsync(byte[])}
1056    * and {@link #isTableEnabled(byte[])} instead.
1057    * The table has to be in disabled state for it to be enabled.
1058    * @param tableName name of the table
1059    * @throws IOException if a remote or network exception occurs
1060    * A couple of subtypes of IOException may be thrown:
1061    * TableNotFoundException means the table doesn't exist;
1062    * TableNotDisabledException means the table isn't in the disabled state.
1063    * @see #isTableEnabled(byte[])
1064    * @see #disableTable(byte[])
1065    * @see #enableTableAsync(byte[])
1066    */
1067   @Override
1068   public void enableTable(final TableName tableName)
1069   throws IOException {
1070     Future<Void> future = enableTableAsyncV2(tableName);
1071     try {
1072       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
1073     } catch (InterruptedException e) {
1074       throw new InterruptedIOException("Interrupted when waiting for table to be enabled");
1075     } catch (TimeoutException e) {
1076       throw new TimeoutIOException(e);
1077     } catch (ExecutionException e) {
1078       if (e.getCause() instanceof IOException) {
1079         throw (IOException)e.getCause();
1080       } else {
1081         throw new IOException(e.getCause());
1082       }
1083     }
1084   }
1085 
1086   public void enableTable(final byte[] tableName)
1087   throws IOException {
1088     enableTable(TableName.valueOf(tableName));
1089   }
1090 
1091   public void enableTable(final String tableName)
1092   throws IOException {
1093     enableTable(TableName.valueOf(tableName));
1094   }
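  // Usage sketch (illustrative): per the javadoc above, the table has to be in the disabled state
  // before enableTable will succeed. "admin" and the table name are hypothetical.
  //
  //   TableName tn = TableName.valueOf("t1");
  //   if (admin.isTableDisabled(tn)) {
  //     admin.enableTable(tn);
  //   }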
1095 
1096   /**
1097    * Wait for the table to be enabled and available.
1098    * If enabling the table exceeds the retry period, an exception is thrown.
1099    * @param tableName name of the table
1100    * @throws IOException if a remote or network exception occurs or
1101    *    table is not enabled after the retries period.
1102    */
1103   private void waitUntilTableIsEnabled(final TableName tableName) throws IOException {
1104     boolean enabled = false;
1105     long start = EnvironmentEdgeManager.currentTime();
1106     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
1107       try {
1108         enabled = isTableEnabled(tableName);
1109       } catch (TableNotFoundException tnfe) {
1110         // wait for table to be created
1111         enabled = false;
1112       }
1113       enabled = enabled && isTableAvailable(tableName);
1114       if (enabled) {
1115         break;
1116       }
1117       long sleep = getPauseTime(tries);
1118       if (LOG.isDebugEnabled()) {
1119         LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
1120           "enabled in " + tableName);
1121       }
1122       try {
1123         Thread.sleep(sleep);
1124       } catch (InterruptedException e) {
1125         // Do this conversion rather than let it out because do not want to
1126         // change the method signature.
1127         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
1128       }
1129     }
1130     if (!enabled) {
1131       long msec = EnvironmentEdgeManager.currentTime() - start;
1132       throw new IOException("Table '" + tableName +
1133         "' not yet enabled, after " + msec + "ms.");
1134     }
1135   }
1136 
1137   /**
1138    * Brings a table on-line (enables it).  Method returns immediately though
1139    * enable of table may take some time to complete, especially if the table
1140    * is large (all regions are opened as part of the enabling process).  Check
1141    * {@link #isTableEnabled(byte[])} to learn when the table is fully online.  If the
1142    * table is taking too long to come online, check the server logs.
1143    * @param tableName name of the table to enable
1144    * @throws IOException if a remote or network exception occurs
1145    * @since 0.90.0
1146    */
1147   @Override
1148   public void enableTableAsync(final TableName tableName)
1149   throws IOException {
1150     enableTableAsyncV2(tableName);
1151   }
1152 
1153   public void enableTableAsync(final byte[] tableName)
1154   throws IOException {
1155     enableTableAsync(TableName.valueOf(tableName));
1156   }
1157 
1158   public void enableTableAsync(final String tableName)
1159   throws IOException {
1160     enableTableAsync(TableName.valueOf(tableName));
1161   }
1162 
1163   /**
1164    * Enable the table, but do not block and wait for it to be completely enabled.
1165    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
1166    * It may throw ExecutionException if there was an error while executing the operation
1167    * or TimeoutException in case the wait timeout was not long enough to allow the
1168    * operation to complete.
1169    *
1170    * @param tableName name of table to enable
1171    * @throws IOException if a remote or network exception occurs
1172    * @return the result of the async enable. You can use Future.get(long, TimeUnit)
1173    *    to wait on the operation to complete.
1174    */
1175   // TODO: This should be called Async but it will break binary compatibility
1176   private Future<Void> enableTableAsyncV2(final TableName tableName) throws IOException {
1177     TableName.isLegalFullyQualifiedTableName(tableName.getName());
1178     EnableTableResponse response = executeCallable(
1179       new MasterCallable<EnableTableResponse>(getConnection()) {
1180         @Override
1181         public EnableTableResponse call(int callTimeout) throws ServiceException {
1182           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1183           controller.setCallTimeout(callTimeout);
1184           controller.setPriority(tableName);
1185 
1186           LOG.info("Started enable of " + tableName);
1187           EnableTableRequest req =
1188               RequestConverter.buildEnableTableRequest(tableName, ng.getNonceGroup(),ng.newNonce());
1189           return master.enableTable(controller,req);
1190         }
1191       });
1192     return new EnableTableFuture(this, tableName, response);
1193   }
1194 
1195   private static class EnableTableFuture extends ProcedureFuture<Void> {
1196     private final TableName tableName;
1197 
1198     public EnableTableFuture(final HBaseAdmin admin, final TableName tableName,
1199         final EnableTableResponse response) {
1200       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null, true);
1201       this.tableName = tableName;
1202     }
1203 
1204     @Override
1205     protected Void waitOperationResult(final long deadlineTs)
1206         throws IOException, TimeoutException {
1207       waitTableEnabled(deadlineTs);
1208       return null;
1209     }
1210 
1211     @Override
1212     protected Void postOperationResult(final Void result, final long deadlineTs)
1213         throws IOException, TimeoutException {
1214       LOG.info("Enabled " + tableName);
1215       return result;
1216     }
1217 
1218     private void waitTableEnabled(final long deadlineTs)
1219         throws IOException, TimeoutException {
1220       waitForState(deadlineTs, new WaitForStateCallable() {
1221         @Override
1222         public boolean checkState(int tries) throws IOException {
1223           boolean enabled;
1224           try {
1225             enabled = getAdmin().isTableEnabled(tableName);
1226           } catch (TableNotFoundException tnfe) {
1227             return false;
1228           }
1229           return enabled && getAdmin().isTableAvailable(tableName);
1230         }
1231 
1232         @Override
1233         public void throwInterruptedException() throws InterruptedIOException {
1234           throw new InterruptedIOException("Interrupted when waiting for table to be enabled");
1235         }
1236 
1237         @Override
1238         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
1239           throw new TimeoutException("Table " + tableName + " not yet enabled after " +
1240               elapsedTime + "msec");
1241         }
1242       });
1243     }
1244   }
1245 
1246   /**
1247    * Enable tables matching the passed in pattern and wait on completion.
1248    *
1249    * Warning: Use this method carefully; there is no prompting and the effect is
1250    * immediate. Consider using {@link #listTables(java.lang.String)} and
1251    * {@link #enableTable(byte[])}
1252    *
1253    * @param regex The regular expression to match table names against
1254    * @throws IOException
1255    * @see #enableTables(java.util.regex.Pattern)
1256    * @see #enableTable(java.lang.String)
1257    */
1258   @Override
1259   public HTableDescriptor[] enableTables(String regex) throws IOException {
1260     return enableTables(Pattern.compile(regex));
1261   }
1262 
1263   /**
1264    * Enable tables matching the passed in pattern and wait on completion.
1265    *
1266    * Warning: Use this method carefully; there is no prompting and the effect is
1267    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
1268    * {@link #enableTable(byte[])}
1269    *
1270    * @param pattern The pattern to match table names against
1271    * @throws IOException
1272    */
1273   @Override
1274   public HTableDescriptor[] enableTables(Pattern pattern) throws IOException {
1275     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1276     for (HTableDescriptor table : listTables(pattern)) {
1277       if (isTableDisabled(table.getTableName())) {
1278         try {
1279           enableTable(table.getTableName());
1280         } catch (IOException ex) {
1281           LOG.info("Failed to enable table " + table.getTableName(), ex);
1282           failed.add(table);
1283         }
1284       }
1285     }
1286     return failed.toArray(new HTableDescriptor[failed.size()]);
1287   }
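
  // Usage sketch (illustrative only): enabling every disabled table whose name matches a regex
  // and reporting the ones that could not be enabled. "admin" is an assumed, already-open Admin.
  //
  //   HTableDescriptor[] notEnabled = admin.enableTables(Pattern.compile("test_.*"));
  //   for (HTableDescriptor htd : notEnabled) {
  //     System.err.println("Could not enable " + htd.getTableName());
  //   }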
1288 
1289   /**
1290    * Starts the disable of a table.  If it is being served, the master
1291    * will tell the servers to stop serving it.  This method returns immediately.
1292    * The disable of a table can take some time if the table is large (all
1293    * regions are closed as part of the table disable operation).
1294    * Call {@link #isTableDisabled(byte[])} to check when the disable completes.
1295    * If the table is taking too long to go offline, check the server logs.
1296    * @param tableName name of table
1297    * @throws IOException if a remote or network exception occurs
1298    * @see #isTableDisabled(byte[])
1299    * @see #isTableEnabled(byte[])
1300    * @since 0.90.0
1301    */
1302   @Override
1303   public void disableTableAsync(final TableName tableName) throws IOException {
1304     disableTableAsyncV2(tableName);
1305   }
1306 
1307   public void disableTableAsync(final byte[] tableName) throws IOException {
1308     disableTableAsync(TableName.valueOf(tableName));
1309   }
1310 
1311   public void disableTableAsync(final String tableName) throws IOException {
1312     disableTableAsync(TableName.valueOf(tableName));
1313   }
1314 
1315   /**
1316    * Disable the table and wait on completion.  May time out.  Use
1317    * {@link #disableTableAsync(byte[])} and {@link #isTableDisabled(String)}
1318    * instead.
1319    * The table has to be in enabled state for it to be disabled.
1320    * @param tableName name of the table to disable
1321    * @throws IOException if a remote or network exception occurs.
1322    * There are a couple of possible types of IOException:
1323    * TableNotFoundException means the table doesn't exist;
1324    * TableNotEnabledException means the table is not in the enabled state.
1325    */
1326   @Override
1327   public void disableTable(final TableName tableName)
1328   throws IOException {
1329     Future<Void> future = disableTableAsyncV2(tableName);
1330     try {
1331       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
1332     } catch (InterruptedException e) {
1333       throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
1334     } catch (TimeoutException e) {
1335       throw new TimeoutIOException(e);
1336     } catch (ExecutionException e) {
1337       if (e.getCause() instanceof IOException) {
1338         throw (IOException)e.getCause();
1339       } else {
1340         throw new IOException(e.getCause());
1341       }
1342     }
1343   }
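
  // Usage sketch (illustrative only): synchronously disabling a table and distinguishing the
  // failure modes documented above. "admin" is an assumed, already-open Admin and
  // "example_table" is a placeholder name.
  //
  //   TableName table = TableName.valueOf("example_table");
  //   try {
  //     admin.disableTable(table);            // blocks until disabled or the sync wait times out
  //   } catch (TableNotFoundException e) {
  //     // the table does not exist
  //   } catch (TableNotEnabledException e) {
  //     // the table was not in the enabled state to begin with
  //   }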
1344 
1345   public void disableTable(final byte[] tableName)
1346   throws IOException {
1347     disableTable(TableName.valueOf(tableName));
1348   }
1349 
1350   public void disableTable(final String tableName)
1351   throws IOException {
1352     disableTable(TableName.valueOf(tableName));
1353   }
1354 
1355   /**
1356    * Disable the table but do not block and wait for it to be completely disabled.
1357    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
1358    * It may throw ExecutionException if there was an error while executing the operation
1359    * or TimeoutException in case the wait timeout was not long enough to allow the
1360    * operation to complete.
1361    *
1362    * @param tableName name of table to disable
1363    * @throws IOException if a remote or network exception occurs
1364    * @return the result of the async disable. You can use Future.get(long, TimeUnit)
1365    *    to wait on the operation to complete.
1366    */
1367   // TODO: This should be called Async but it will break binary compatibility
1368   private Future<Void> disableTableAsyncV2(final TableName tableName) throws IOException {
1369     TableName.isLegalFullyQualifiedTableName(tableName.getName());
1370     DisableTableResponse response = executeCallable(
1371       new MasterCallable<DisableTableResponse>(getConnection()) {
1372         @Override
1373         public DisableTableResponse call(int callTimeout) throws ServiceException {
1374           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1375           controller.setCallTimeout(callTimeout);
1376           controller.setPriority(tableName);
1377 
1378           LOG.info("Started disable of " + tableName);
1379           DisableTableRequest req =
1380               RequestConverter.buildDisableTableRequest(
1381                 tableName, ng.getNonceGroup(), ng.newNonce());
1382           return master.disableTable(controller, req);
1383         }
1384       });
1385     return new DisableTableFuture(this, tableName, response);
1386   }
1387 
1388   private static class DisableTableFuture extends ProcedureFuture<Void> {
1389     private final TableName tableName;
1390 
1391     public DisableTableFuture(final HBaseAdmin admin, final TableName tableName,
1392         final DisableTableResponse response) {
1393       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
1394       this.tableName = tableName;
1395     }
1396 
1397     @Override
1398     protected Void waitOperationResult(final long deadlineTs)
1399         throws IOException, TimeoutException {
1400       waitTableDisabled(deadlineTs);
1401       return null;
1402     }
1403 
1404     @Override
1405     protected Void postOperationResult(final Void result, final long deadlineTs)
1406         throws IOException, TimeoutException {
1407       LOG.info("Disabled " + tableName);
1408       return result;
1409     }
1410 
1411     private void waitTableDisabled(final long deadlineTs)
1412         throws IOException, TimeoutException {
1413       waitForState(deadlineTs, new WaitForStateCallable() {
1414         @Override
1415         public boolean checkState(int tries) throws IOException {
1416           return getAdmin().isTableDisabled(tableName);
1417         }
1418 
1419         @Override
1420         public void throwInterruptedException() throws InterruptedIOException {
1421           throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
1422         }
1423 
1424         @Override
1425         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
1426           throw new TimeoutException("Table " + tableName + " not yet disabled after " +
1427               elapsedTime + "msec");
1428         }
1429       });
1430     }
1431   }
1432 
1433   /**
1434    * Disable tables matching the passed in pattern and wait on completion.
1435    *
1436    * Warning: Use this method carefully; there is no prompting and the effect is
1437    * immediate. Consider using {@link #listTables(java.lang.String)} and
1438    * {@link #disableTable(byte[])}
1439    *
1440    * @param regex The regular expression to match table names against
1441    * @return Table descriptors for tables that couldn't be disabled
1442    * @throws IOException
1443    * @see #disableTables(java.util.regex.Pattern)
1444    * @see #disableTable(java.lang.String)
1445    */
1446   @Override
1447   public HTableDescriptor[] disableTables(String regex) throws IOException {
1448     return disableTables(Pattern.compile(regex));
1449   }
1450 
1451   /**
1452    * Disable tables matching the passed in pattern and wait on completion.
1453    *
1454    * Warning: Use this method carefully; there is no prompting and the effect is
1455    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
1456    * {@link #disableTable(byte[])}
1457    *
1458    * @param pattern The pattern to match table names against
1459    * @return Table descriptors for tables that couldn't be disabled
1460    * @throws IOException
1461    */
1462   @Override
1463   public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
1464     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1465     for (HTableDescriptor table : listTables(pattern)) {
1466       if (isTableEnabled(table.getTableName())) {
1467         try {
1468           disableTable(table.getTableName());
1469         } catch (IOException ex) {
1470           LOG.info("Failed to disable table " + table.getTableName(), ex);
1471           failed.add(table);
1472         }
1473       }
1474     }
1475     return failed.toArray(new HTableDescriptor[failed.size()]);
1476   }
1477 
1478   /*
1479    * Checks whether table exists. If not, throws TableNotFoundException
1480    * @param tableName
1481    */
1482   private void checkTableExistence(TableName tableName) throws IOException {
1483     if (!tableExists(tableName)) {
1484       throw new TableNotFoundException(tableName);
1485     }
1486   }
1487 
1488   /**
1489    * @param tableName name of table to check
1490    * @return true if table is on-line
1491    * @throws IOException if a remote or network exception occurs
1492    */
1493   @Override
1494   public boolean isTableEnabled(TableName tableName) throws IOException {
1495     checkTableExistence(tableName);
1496     return connection.isTableEnabled(tableName);
1497   }
1498 
1499   public boolean isTableEnabled(byte[] tableName) throws IOException {
1500     return isTableEnabled(TableName.valueOf(tableName));
1501   }
1502 
1503   public boolean isTableEnabled(String tableName) throws IOException {
1504     return isTableEnabled(TableName.valueOf(tableName));
1505   }
1506 
1507 
1508 
1509   /**
1510    * @param tableName name of table to check
1511    * @return true if table is off-line
1512    * @throws IOException if a remote or network exception occurs
1513    */
1514   @Override
1515   public boolean isTableDisabled(TableName tableName) throws IOException {
1516     checkTableExistence(tableName);
1517     return connection.isTableDisabled(tableName);
1518   }
1519 
1520   public boolean isTableDisabled(byte[] tableName) throws IOException {
1521     return isTableDisabled(TableName.valueOf(tableName));
1522   }
1523 
1524   public boolean isTableDisabled(String tableName) throws IOException {
1525     return isTableDisabled(TableName.valueOf(tableName));
1526   }
1527 
1528   /**
1529    * @param tableName name of table to check
1530    * @return true if all regions of the table are available
1531    * @throws IOException if a remote or network exception occurs
1532    */
1533   @Override
1534   public boolean isTableAvailable(TableName tableName) throws IOException {
1535     return connection.isTableAvailable(tableName);
1536   }
1537 
1538   public boolean isTableAvailable(byte[] tableName) throws IOException {
1539     return isTableAvailable(TableName.valueOf(tableName));
1540   }
1541 
1542   public boolean isTableAvailable(String tableName) throws IOException {
1543     return isTableAvailable(TableName.valueOf(tableName));
1544   }
1545 
1546   /**
1547    * Use this API to check if the table has been created with the specified number of
1548    * split keys which were used while creating the given table.
1549    * Note: If this API is used after a region of the table has been split, it may return
1550    * false.
1551    * @param tableName
1552    *          name of table to check
1553    * @param splitKeys
1554    *          keys to check if the table has been created with all split keys
1555    * @throws IOException
1556    *           if a remote or network exception occurs
1557    */
1558   @Override
1559   public boolean isTableAvailable(TableName tableName,
1560                                   byte[][] splitKeys) throws IOException {
1561     return connection.isTableAvailable(tableName, splitKeys);
1562   }
1563 
1564   public boolean isTableAvailable(byte[] tableName,
1565                                   byte[][] splitKeys) throws IOException {
1566     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1567   }
1568 
1569   public boolean isTableAvailable(String tableName,
1570                                   byte[][] splitKeys) throws IOException {
1571     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1572   }
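
  // Usage sketch (illustrative only): verifying that a pre-split table is fully available with
  // the same split keys that were passed to createTable. "admin" is an assumed, already-open
  // Admin; the table name and split keys are placeholders.
  //
  //   byte[][] splits = new byte[][] { Bytes.toBytes("row-1000"), Bytes.toBytes("row-2000") };
  //   boolean ready = admin.isTableAvailable(TableName.valueOf("example_table"), splits);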
1573 
1574   /**
1575    * Get the status of the alter command - indicates how many regions have received
1576    * the updated schema. Asynchronous operation.
1577    *
1578    * @param tableName TableName instance
1579    * @return Pair indicating the number of regions updated: Pair.getFirst() is the number of
1580    *         regions that are yet to be updated and Pair.getSecond() is the total number
1581    *         of regions of the table
1582    * @throws IOException
1583    *           if a remote or network exception occurs
1584    */
1585   @Override
1586   public Pair<Integer, Integer> getAlterStatus(final TableName tableName)
1587   throws IOException {
1588     return executeCallable(new MasterCallable<Pair<Integer, Integer>>(getConnection()) {
1589       @Override
1590       public Pair<Integer, Integer> call(int callTimeout) throws ServiceException {
1591         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1592         controller.setCallTimeout(callTimeout);
1593         controller.setPriority(tableName);
1594 
1595         GetSchemaAlterStatusRequest req = RequestConverter
1596             .buildGetSchemaAlterStatusRequest(tableName);
1597         GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(controller, req);
1598         Pair<Integer, Integer> pair = new Pair<Integer, Integer>(Integer.valueOf(ret
1599             .getYetToUpdateRegions()), Integer.valueOf(ret.getTotalRegions()));
1600         return pair;
1601       }
1602     });
1603   }
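
  // Usage sketch (illustrative only): polling the alter status after an asynchronous schema
  // change until every region has picked up the new schema. "admin" is an assumed Admin and
  // the enclosing code is expected to handle IOException and InterruptedException.
  //
  //   TableName table = TableName.valueOf("example_table");
  //   while (admin.getAlterStatus(table).getFirst() > 0) {   // getFirst() = regions yet to update
  //     Thread.sleep(500);
  //   }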
1604 
1605   /**
1606    * Get the status of the alter command - indicates how many regions have received
1607    * the updated schema. Asynchronous operation.
1608    *
1609    * @param tableName
1610    *          name of the table to get the status of
1611    * @return Pair indicating the number of regions updated: Pair.getFirst() is the number of
1612    *         regions that are yet to be updated and Pair.getSecond() is the total number
1613    *         of regions of the table
1614    * @throws IOException
1615    *           if a remote or network exception occurs
1616    */
1617   @Override
1618   public Pair<Integer, Integer> getAlterStatus(final byte[] tableName)
1619    throws IOException {
1620     return getAlterStatus(TableName.valueOf(tableName));
1621   }
1622 
1623   /**
1624    * Add a column to an existing table.
1625    * Asynchronous operation.
1626    *
1627    * @param tableName name of the table to add column to
1628    * @param column column descriptor of column to be added
1629    * @throws IOException if a remote or network exception occurs
1630    */
1631   public void addColumn(final byte[] tableName, HColumnDescriptor column)
1632   throws IOException {
1633     addColumn(TableName.valueOf(tableName), column);
1634   }
1635 
1636   /**
1637    * Add a column to an existing table.
1638    * Asynchronous operation.
1639    *
1640    * @param tableName name of the table to add column to
1641    * @param column column descriptor of column to be added
1642    * @throws IOException if a remote or network exception occurs
1643    */
1644   public void addColumn(final String tableName, HColumnDescriptor column)
1645   throws IOException {
1646     addColumn(TableName.valueOf(tableName), column);
1647   }
1648 
1649   /**
1650    * Add a column to an existing table.
1651    * Asynchronous operation.
1652    *
1653    * @param tableName name of the table to add column to
1654    * @param column column descriptor of column to be added
1655    * @throws IOException if a remote or network exception occurs
1656    */
1657   @Override
1658   public void addColumn(final TableName tableName, final HColumnDescriptor column)
1659   throws IOException {
1660     executeCallable(new MasterCallable<Void>(getConnection()) {
1661       @Override
1662       public Void call(int callTimeout) throws ServiceException {
1663         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1664         controller.setCallTimeout(callTimeout);
1665         controller.setPriority(tableName);
1666         AddColumnRequest req = RequestConverter.buildAddColumnRequest(
1667           tableName, column, ng.getNonceGroup(), ng.newNonce());
1668         master.addColumn(controller,req);
1669         return null;
1670       }
1671     });
1672   }
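
  // Usage sketch (illustrative only): adding a new column family to an existing table. The call
  // is asynchronous, so getAlterStatus(TableName) can be used to watch the rollout. "admin" is
  // an assumed Admin; the table and family names are placeholders.
  //
  //   HColumnDescriptor newFamily = new HColumnDescriptor("cf2");
  //   newFamily.setMaxVersions(3);
  //   admin.addColumn(TableName.valueOf("example_table"), newFamily);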
1673 
1674   /**
1675    * Delete a column from a table.
1676    * Asynchronous operation.
1677    *
1678    * @param tableName name of table
1679    * @param columnName name of column to be deleted
1680    * @throws IOException if a remote or network exception occurs
1681    */
1682   public void deleteColumn(final byte[] tableName, final String columnName)
1683   throws IOException {
1684     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1685   }
1686 
1687   /**
1688    * Delete a column from a table.
1689    * Asynchronous operation.
1690    *
1691    * @param tableName name of table
1692    * @param columnName name of column to be deleted
1693    * @throws IOException if a remote or network exception occurs
1694    */
1695   public void deleteColumn(final String tableName, final String columnName)
1696   throws IOException {
1697     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1698   }
1699 
1700   /**
1701    * Delete a column from a table.
1702    * Asynchronous operation.
1703    *
1704    * @param tableName name of table
1705    * @param columnName name of column to be deleted
1706    * @throws IOException if a remote or network exception occurs
1707    */
1708   @Override
1709   public void deleteColumn(final TableName tableName, final byte [] columnName)
1710   throws IOException {
1711     executeCallable(new MasterCallable<Void>(getConnection()) {
1712       @Override
1713       public Void call(int callTimeout) throws ServiceException {
1714         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1715         controller.setCallTimeout(callTimeout);
1716         controller.setPriority(tableName);
1717         DeleteColumnRequest req = RequestConverter.buildDeleteColumnRequest(
1718           tableName, columnName, ng.getNonceGroup(), ng.newNonce());
1719         master.deleteColumn(controller, req);
1720         return null;
1721       }
1722     });
1723   }
1724 
1725   /**
1726    * Modify an existing column family on a table.
1727    * Asynchronous operation.
1728    *
1729    * @param tableName name of table
1730    * @param descriptor new column descriptor to use
1731    * @throws IOException if a remote or network exception occurs
1732    */
1733   public void modifyColumn(final String tableName, HColumnDescriptor descriptor)
1734   throws IOException {
1735     modifyColumn(TableName.valueOf(tableName), descriptor);
1736   }
1737 
1738   /**
1739    * Modify an existing column family on a table.
1740    * Asynchronous operation.
1741    *
1742    * @param tableName name of table
1743    * @param descriptor new column descriptor to use
1744    * @throws IOException if a remote or network exception occurs
1745    */
1746   public void modifyColumn(final byte[] tableName, HColumnDescriptor descriptor)
1747   throws IOException {
1748     modifyColumn(TableName.valueOf(tableName), descriptor);
1749   }
1750 
1751   /**
1752    * Modify an existing column family on a table.
1753    * Asynchronous operation.
1754    *
1755    * @param tableName name of table
1756    * @param descriptor new column descriptor to use
1757    * @throws IOException if a remote or network exception occurs
1758    */
1759   @Override
1760   public void modifyColumn(final TableName tableName, final HColumnDescriptor descriptor)
1761   throws IOException {
1762     executeCallable(new MasterCallable<Void>(getConnection()) {
1763       @Override
1764       public Void call(int callTimeout) throws ServiceException {
1765         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1766         controller.setCallTimeout(callTimeout);
1767         controller.setPriority(tableName);
1768         ModifyColumnRequest req = RequestConverter.buildModifyColumnRequest(
1769           tableName, descriptor, ng.getNonceGroup(), ng.newNonce());
1770         master.modifyColumn(controller, req);
1771         return null;
1772       }
1773     });
1774   }
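
  // Usage sketch (illustrative only): reconfiguring one column family and deleting another.
  // Both calls are asynchronous. Note that the descriptor passed to modifyColumn replaces the
  // family's existing configuration. "admin" is an assumed Admin; names are placeholders.
  //
  //   TableName table = TableName.valueOf("example_table");
  //   HColumnDescriptor cf = new HColumnDescriptor("cf1");
  //   cf.setTimeToLive(86400);                        // one-day TTL
  //   admin.modifyColumn(table, cf);
  //   admin.deleteColumn(table, Bytes.toBytes("cf_old"));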
1775 
1776   /**
1777    * Close a region. For expert-admins.  Runs close on the regionserver.  The
1778    * master will not be informed of the close.
1779    * @param regionname region name to close
1780    * @param serverName If supplied, we'll use this location rather than
1781    * the one currently in <code>hbase:meta</code>
1782    * @throws IOException if a remote or network exception occurs
1783    */
1784   @Override
1785   public void closeRegion(final String regionname, final String serverName)
1786   throws IOException {
1787     closeRegion(Bytes.toBytes(regionname), serverName);
1788   }
1789 
1790   /**
1791    * Close a region.  For expert-admins.  Runs close on the regionserver.  The
1792    * master will not be informed of the close.
1793    * @param regionname region name to close
1794    * @param serverName The servername of the regionserver.  If passed null we
1795    * will use servername found in the hbase:meta table. A server name
1796    * is made of host, port and startcode.  Here is an example:
1797    * <code> host187.example.com,60020,1289493121758</code>
1798    * @throws IOException if a remote or network exception occurs
1799    */
1800   @Override
1801   public void closeRegion(final byte [] regionname, final String serverName)
1802       throws IOException {
1803     if (serverName != null) {
1804       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1805       if (pair == null || pair.getFirst() == null) {
1806         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1807       } else {
1808         closeRegion(ServerName.valueOf(serverName), pair.getFirst());
1809       }
1810     } else {
1811       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1812       if (pair == null) {
1813         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1814       } else if (pair.getSecond() == null) {
1815         throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
1816       } else {
1817         closeRegion(pair.getSecond(), pair.getFirst());
1818       }
1819     }
1820   }
1821 
1822   /**
1823    * For expert-admins. Runs close on the regionserver. Closes a region based on
1824    * the encoded region name. The region server name is mandatory. The region is
1825    * looked up among the online regions of the specified regionserver and will be
1826    * closed there. The master will not be
1827    * informed of the close. Note that the regionname is the encoded regionname.
1828    *
1829    * @param encodedRegionName
1830    *          The encoded region name; i.e. the hash that makes up the region
1831    *          name suffix: e.g. if regionname is
1832    *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>
1833    *          , then the encoded region name is:
1834    *          <code>527db22f95c8a9e0116f0cc13c680396</code>.
1835    * @param serverName
1836    *          The servername of the regionserver. A server name is made of host,
1837    *          port and startcode. This is mandatory. Here is an example:
1838    *          <code> host187.example.com,60020,1289493121758</code>
1839    * @return true if the region was closed, false if not.
1840    * @throws IOException
1841    *           if a remote or network exception occurs
1842    */
1843   @Override
1844   public boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
1845       final String serverName) throws IOException {
1846     if (null == serverName || ("").equals(serverName.trim())) {
1847       throw new IllegalArgumentException(
1848           "The servername cannot be null or empty.");
1849     }
1850     ServerName sn = ServerName.valueOf(serverName);
1851     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1852     // Close the region without updating zk state.
1853     CloseRegionRequest request =
1854       RequestConverter.buildCloseRegionRequest(sn, encodedRegionName, false);
1855     try {
1856       PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1857 
1858       // TODO: this does not do retries, it should. Set priority and timeout in controller
1859       CloseRegionResponse response = admin.closeRegion(controller, request);
1860       boolean isRegionClosed = response.getClosed();
1861       if (false == isRegionClosed) {
1862         LOG.error("Not able to close the region " + encodedRegionName + ".");
1863       }
1864       return isRegionClosed;
1865     } catch (ServiceException se) {
1866       throw ProtobufUtil.getRemoteException(se);
1867     }
1868   }
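
  // Usage sketch (illustrative only): closing a region directly on its hosting regionserver by
  // encoded region name. The encoded name and server name below are placeholders in the formats
  // described above.
  //
  //   boolean closed = admin.closeRegionWithEncodedRegionName(
  //       "527db22f95c8a9e0116f0cc13c680396",
  //       "host187.example.com,60020,1289493121758");
  //   if (!closed) {
  //     // the regionserver reported that it did not close the region
  //   }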
1869 
1870   /**
1871    * Close a region.  For expert-admins.  Runs close on the regionserver.  The
1872    * master will not be informed of the close.
1873    * @param sn
1874    * @param hri
1875    * @throws IOException
1876    */
1877   @Override
1878   public void closeRegion(final ServerName sn, final HRegionInfo hri)
1879   throws IOException {
1880     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1881     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1882 
1883     // Close the region without updating zk state.
1884     ProtobufUtil.closeRegion(controller, admin, sn, hri.getRegionName(), false);
1885   }
1886 
1887   /**
1888    * Get all the online regions on a region server.
1889    */
1890   @Override
1891   public List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException {
1892     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1893     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1894     return ProtobufUtil.getOnlineRegions(controller, admin);
1895   }
1896 
1897   /**
1898    * {@inheritDoc}
1899    */
1900   @Override
1901   public void flush(final TableName tableName) throws IOException {
1902     checkTableExists(tableName);
1903     if (isTableDisabled(tableName)) {
1904       LOG.info("Table is disabled: " + tableName.getNameAsString());
1905       return;
1906     }
1907     execProcedure("flush-table-proc", tableName.getNameAsString(),
1908       new HashMap<String, String>());
1909   }
1910 
1911   /**
1912    * {@inheritDoc}
1913    */
1914   @Override
1915   public void flushRegion(final byte[] regionName) throws IOException {
1916     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
1917     if (regionServerPair == null) {
1918       throw new IllegalArgumentException("Unknown regionname: " + Bytes.toStringBinary(regionName));
1919     }
1920     if (regionServerPair.getSecond() == null) {
1921       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1922     }
1923     flush(regionServerPair.getSecond(), regionServerPair.getFirst());
1924   }
1925 
1926   /**
1927    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or
1928    * {@link #flushRegion(byte[])} instead.
1929    */
1930   @Deprecated
1931   public void flush(final String tableNameOrRegionName)
1932   throws IOException, InterruptedException {
1933     flush(Bytes.toBytes(tableNameOrRegionName));
1934   }
1935 
1936   /**
1937    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or
1938    * {@link #flushRegion(byte[])} instead.
1939    */
1940   @Deprecated
1941   public void flush(final byte[] tableNameOrRegionName)
1942   throws IOException, InterruptedException {
1943     try {
1944       flushRegion(tableNameOrRegionName);
1945     } catch (IllegalArgumentException e) {
1946       // Unknown region.  Try table.
1947       flush(TableName.valueOf(tableNameOrRegionName));
1948     }
1949   }
1950 
1951   private void flush(final ServerName sn, final HRegionInfo hri)
1952   throws IOException {
1953     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1954     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1955     FlushRegionRequest request =
1956       RequestConverter.buildFlushRegionRequest(hri.getRegionName());
1957     try {
1958       admin.flushRegion(controller, request);
1959     } catch (ServiceException se) {
1960       throw ProtobufUtil.getRemoteException(se);
1961     }
1962   }
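
  // Usage sketch (illustrative only): flushing a whole table and a single region via the public
  // API. "admin" is an assumed, already-open Admin; "regionInfo" is an assumed HRegionInfo for
  // the region of interest.
  //
  //   admin.flush(TableName.valueOf("example_table"));   // runs flush-table-proc on all regions
  //   admin.flushRegion(regionInfo.getRegionName());     // flush just one region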
1963 
1964   /**
1965    * {@inheritDoc}
1966    */
1967   @Override
1968   public void compact(final TableName tableName)
1969     throws IOException {
1970     compact(tableName, null, false);
1971   }
1972 
1973   /**
1974    * {@inheritDoc}
1975    */
1976   @Override
1977   public void compactRegion(final byte[] regionName)
1978     throws IOException {
1979     compactRegion(regionName, null, false);
1980   }
1981 
1982   /**
1983    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or
1984    * {@link #compactRegion(byte[])} instead.
1985    */
1986   @Deprecated
1987   public void compact(final String tableNameOrRegionName)
1988   throws IOException {
1989     compact(Bytes.toBytes(tableNameOrRegionName));
1990   }
1991 
1992   /**
1993    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or
1994    * {@link #compactRegion(byte[])} instead.
1995    */
1996   @Deprecated
1997   public void compact(final byte[] tableNameOrRegionName)
1998   throws IOException {
1999     try {
2000       compactRegion(tableNameOrRegionName, null, false);
2001     } catch (IllegalArgumentException e) {
2002       compact(TableName.valueOf(tableNameOrRegionName), null, false);
2003     }
2004   }
2005 
2006   /**
2007    * {@inheritDoc}
2008    */
2009   @Override
2010   public void compact(final TableName tableName, final byte[] columnFamily)
2011     throws IOException {
2012     compact(tableName, columnFamily, false);
2013   }
2014 
2015   /**
2016    * {@inheritDoc}
2017    */
2018   @Override
2019   public void compactRegion(final byte[] regionName, final byte[] columnFamily)
2020     throws IOException {
2021     compactRegion(regionName, columnFamily, false);
2022   }
2023 
2024   /**
2025    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or
2026    * {@link #compactRegion(byte[], byte[])} instead.
2027    */
2028   @Deprecated
2029   public void compact(String tableOrRegionName, String columnFamily)
2030     throws IOException {
2031     compact(Bytes.toBytes(tableOrRegionName), Bytes.toBytes(columnFamily));
2032   }
2033 
2034   /**
2035    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or
2036    * {@link #compactRegion(byte[], byte[])} instead.
2037    */
2038   @Deprecated
2039   public void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
2040   throws IOException {
2041     try {
2042       compactRegion(tableNameOrRegionName, columnFamily, false);
2043     } catch (IllegalArgumentException e) {
2044       // Bad region, try table
2045       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, false);
2046     }
2047   }
2048 
2049   /**
2050    * {@inheritDoc}
2051    */
2052   @Override
2053   public void compactRegionServer(final ServerName sn, boolean major)
2054   throws IOException, InterruptedException {
2055     for (HRegionInfo region : getOnlineRegions(sn)) {
2056       compact(sn, region, major, null);
2057     }
2058   }
2059 
2060   /**
2061    * {@inheritDoc}
2062    */
2063   @Override
2064   public void majorCompact(final TableName tableName)
2065   throws IOException {
2066     compact(tableName, null, true);
2067   }
2068 
2069   /**
2070    * {@inheritDoc}
2071    */
2072   @Override
2073   public void majorCompactRegion(final byte[] regionName)
2074   throws IOException {
2075     compactRegion(regionName, null, true);
2076   }
2077 
2078   /**
2079    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
2080    * #majorCompactRegion(byte[])} instead.
2081    */
2082   @Deprecated
2083   public void majorCompact(final String tableNameOrRegionName)
2084   throws IOException {
2085     majorCompact(Bytes.toBytes(tableNameOrRegionName));
2086   }
2087 
2088   /**
2089    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
2090    * #majorCompactRegion(byte[])} instead.
2091    */
2092   @Deprecated
2093   public void majorCompact(final byte[] tableNameOrRegionName)
2094   throws IOException {
2095     try {
2096       compactRegion(tableNameOrRegionName, null, true);
2097     } catch (IllegalArgumentException e) {
2098       // Invalid region, try table
2099       compact(TableName.valueOf(tableNameOrRegionName), null, true);
2100     }
2101   }
2102 
2103   /**
2104    * {@inheritDoc}
2105    */
2106   @Override
2107   public void majorCompact(final TableName tableName, final byte[] columnFamily)
2108   throws IOException {
2109     compact(tableName, columnFamily, true);
2110   }
2111 
2112   /**
2113    * {@inheritDoc}
2114    */
2115   @Override
2116   public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily)
2117   throws IOException {
2118     compactRegion(regionName, columnFamily, true);
2119   }
2120 
2121   /**
2122    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
2123    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
2124    */
2125   @Deprecated
2126   public void majorCompact(final String tableNameOrRegionName, final String columnFamily)
2127   throws IOException {
2128     majorCompact(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(columnFamily));
2129   }
2130 
2131   /**
2132    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
2133    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
2134    */
2135   @Deprecated
2136   public void majorCompact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
2137   throws IOException {
2138     try {
2139       compactRegion(tableNameOrRegionName, columnFamily, true);
2140     } catch (IllegalArgumentException e) {
2141       // Invalid region, try table
2142       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, true);
2143     }
2144   }
2145 
2146   /**
2147    * Compact a table.
2148    * Asynchronous operation.
2149    *
2150    * @param tableName table or region to compact
2151    * @param columnFamily column family within a table or region
2152    * @param major True if we are to do a major compaction.
2153    * @throws IOException if a remote or network exception occurs
2154    * @throws InterruptedException
2155    */
2156   private void compact(final TableName tableName, final byte[] columnFamily,final boolean major)
2157   throws IOException {
2158     ZooKeeperWatcher zookeeper = null;
2159     try {
2160       checkTableExists(tableName);
2161       zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2162           new ThrowableAbortable());
2163       List<Pair<HRegionInfo, ServerName>> pairs =
2164         MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
2165       for (Pair<HRegionInfo, ServerName> pair: pairs) {
2166         if (pair.getFirst().isOffline()) continue;
2167         if (pair.getSecond() == null) continue;
2168         try {
2169           compact(pair.getSecond(), pair.getFirst(), major, columnFamily);
2170         } catch (NotServingRegionException e) {
2171           if (LOG.isDebugEnabled()) {
2172             LOG.debug("Trying to" + (major ? " major" : "") + " compact " +
2173               pair.getFirst() + ": " +
2174               StringUtils.stringifyException(e));
2175           }
2176         }
2177       }
2178     } finally {
2179       if (zookeeper != null) {
2180         zookeeper.close();
2181       }
2182     }
2183   }
2184 
2185   /**
2186    * Compact an individual region.
2187    * Asynchronous operation.
2188    *
2189    * @param regionName region to compact
2190    * @param columnFamily column family within a table or region
2191    * @param major True if we are to do a major compaction.
2192    * @throws IOException if a remote or network exception occurs
2193    * @throws InterruptedException
2194    */
2195   private void compactRegion(final byte[] regionName, final byte[] columnFamily,final boolean major)
2196   throws IOException {
2197     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2198     if (regionServerPair == null) {
2199       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2200     }
2201     if (regionServerPair.getSecond() == null) {
2202       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2203     }
2204     compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
2205   }
2206 
2207   private void compact(final ServerName sn, final HRegionInfo hri,
2208       final boolean major, final byte [] family)
2209   throws IOException {
2210     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2211     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2212     CompactRegionRequest request =
2213       RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family);
2214     try {
2215       // TODO: this does not do retries, it should. Set priority and timeout in controller
2216       admin.compactRegion(controller, request);
2217     } catch (ServiceException se) {
2218       throw ProtobufUtil.getRemoteException(se);
2219     }
2220   }
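
  // Usage sketch (illustrative only): requesting minor and major compactions through the public
  // API. All of these are asynchronous requests to the regionservers. "admin" is an assumed
  // Admin; the table name, family name, and "regionName" byte[] are placeholders.
  //
  //   TableName table = TableName.valueOf("example_table");
  //   admin.compact(table);                              // minor-compact every region
  //   admin.compact(table, Bytes.toBytes("cf1"));        // restrict to one column family
  //   admin.majorCompact(table);                         // major-compact every region
  //   admin.majorCompactRegion(regionName, Bytes.toBytes("cf1"));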
2221 
2222   /**
2223    * Move the region <code>r</code> to <code>dest</code>.
2224    * @param encodedRegionName The encoded region name; i.e. the hash that makes
2225    * up the region name suffix: e.g. if regionname is
2226    * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
2227    * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
2228    * @param destServerName The servername of the destination regionserver.  If
2229    * passed the empty byte array we'll assign to a random server.  A server name
2230    * is made of host, port and startcode.  Here is an example:
2231    * <code> host187.example.com,60020,1289493121758</code>
2232    * @throws UnknownRegionException Thrown if we can't find a region named
2233    * <code>encodedRegionName</code>
2234    */
2235   @Override
2236   public void move(final byte [] encodedRegionName, final byte [] destServerName)
2237       throws IOException {
2238 
2239     executeCallable(new MasterCallable<Void>(getConnection()) {
2240       @Override
2241       public Void call(int callTimeout) throws ServiceException {
2242         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2243         controller.setCallTimeout(callTimeout);
2244         // Hard to know the table name, at least check if meta
2245         if (isMetaRegion(encodedRegionName)) {
2246           controller.setPriority(TableName.META_TABLE_NAME);
2247         }
2248 
2249         try {
2250           MoveRegionRequest request =
2251               RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName);
2252             master.moveRegion(controller, request);
2253         } catch (DeserializationException de) {
2254           LOG.error("Could not parse destination server name: " + de);
2255           throw new ServiceException(new DoNotRetryIOException(de));
2256         }
2257         return null;
2258       }
2259     });
2260   }
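
  // Usage sketch (illustrative only): moving a region to a specific regionserver. The encoded
  // region name and destination server name below are placeholders in the formats documented
  // above; "admin" is an assumed, already-open Admin.
  //
  //   admin.move(Bytes.toBytes("527db22f95c8a9e0116f0cc13c680396"),
  //       Bytes.toBytes("host187.example.com,60020,1289493121758"));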
2261 
2262   private boolean isMetaRegion(final byte[] regionName) {
2263     return Bytes.equals(regionName, HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
2264         || Bytes.equals(regionName, HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes());
2265   }
2266 
2267   /**
2268    * @param regionName
2269    *          Region name to assign.
2270    * @throws MasterNotRunningException
2271    * @throws ZooKeeperConnectionException
2272    * @throws IOException
2273    */
2274   @Override
2275   public void assign(final byte[] regionName) throws MasterNotRunningException,
2276       ZooKeeperConnectionException, IOException {
2277     final byte[] toBeAssigned = getRegionName(regionName);
2278     executeCallable(new MasterCallable<Void>(getConnection()) {
2279       @Override
2280       public Void call(int callTimeout) throws ServiceException {
2281         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2282         controller.setCallTimeout(callTimeout);
2283         // Hard to know the table name, at least check if meta
2284         if (isMetaRegion(regionName)) {
2285           controller.setPriority(TableName.META_TABLE_NAME);
2286         }
2287 
2288         AssignRegionRequest request =
2289           RequestConverter.buildAssignRegionRequest(toBeAssigned);
2290         master.assignRegion(controller,request);
2291         return null;
2292       }
2293     });
2294   }
2295 
2296   /**
2297    * Unassign a region from current hosting regionserver.  Region will then be
2298    * assigned to a regionserver chosen at random.  Region could be reassigned
2299    * back to the same server.  Use {@link #move(byte[], byte[])} if you want
2300    * to control the region movement.
2301    * @param regionName Region to unassign. Will clear any existing RegionPlan
2302    * if one found.
2303    * @param force If true, force unassign (Will remove region from
2304    * regions-in-transition too if present. If results in double assignment
2305    * use hbck -fix to resolve. To be used by experts).
2306    * @throws MasterNotRunningException
2307    * @throws ZooKeeperConnectionException
2308    * @throws IOException
2309    */
2310   @Override
2311   public void unassign(final byte [] regionName, final boolean force)
2312   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
2313     final byte[] toBeUnassigned = getRegionName(regionName);
2314     executeCallable(new MasterCallable<Void>(getConnection()) {
2315       @Override
2316       public Void call(int callTimeout) throws ServiceException {
2317         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2318         controller.setCallTimeout(callTimeout);
2319         // Hard to know the table name, at least check if meta
2320         if (isMetaRegion(regionName)) {
2321           controller.setPriority(TableName.META_TABLE_NAME);
2322         }
2323         UnassignRegionRequest request =
2324           RequestConverter.buildUnassignRegionRequest(toBeUnassigned, force);
2325         master.unassignRegion(controller, request);
2326         return null;
2327       }
2328     });
2329   }
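
  // Usage sketch (illustrative only): explicitly assigning a region, and unassigning one so the
  // master re-places it on a randomly chosen server. "regionName" is an assumed byte[] region
  // name; "admin" is an assumed Admin.
  //
  //   admin.assign(regionName);            // assign a region that is stuck offline
  //   admin.unassign(regionName, false);   // normal unassign; the region is reassigned elsewhere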
2330 
2331   /**
2332    * Offline specified region from master's in-memory state. It will not attempt to reassign the
2333    * region as in unassign. This API can be used when a region is not served by any region server but is
2334    * still online as per the Master's in-memory state. If this API is incorrectly used on an active region
2335    * then the master will lose track of that region.
2336    *
2337    * This is a special method that should be used by experts or hbck.
2338    *
2339    * @param regionName
2340    *          Region to offline.
2341    * @throws IOException
2342    */
2343   @Override
2344   public void offline(final byte [] regionName)
2345   throws IOException {
2346     executeCallable(new MasterCallable<Void>(getConnection()) {
2347       @Override
2348       public Void call(int callTimeout) throws ServiceException {
2349         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2350         controller.setCallTimeout(callTimeout);
2351         // Hard to know the table name, at least check if meta
2352         if (isMetaRegion(regionName)) {
2353           controller.setPriority(TableName.META_TABLE_NAME);
2354         }
2355         master.offlineRegion(controller, RequestConverter.buildOfflineRegionRequest(regionName));
2356         return null;
2357       }
2358     });
2359   }
2360 
2361   /**
2362    * Turn the load balancer on or off.
2363    * @param on If true, enable balancer. If false, disable balancer.
2364    * @param synchronous If true, wait until any outstanding balance() call has returned.
2365    * @return Previous balancer value
2366    */
2367   @Override
2368   public boolean setBalancerRunning(final boolean on, final boolean synchronous)
2369   throws IOException {
2370     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2371       @Override
2372       public Boolean call(int callTimeout) throws ServiceException {
2373         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2374         controller.setCallTimeout(callTimeout);
2375 
2376         SetBalancerRunningRequest req =
2377             RequestConverter.buildSetBalancerRunningRequest(on, synchronous);
2378         return master.setBalancerRunning(controller, req).getPrevBalanceValue();
2379       }
2380     });
2381   }
2382 
2383   /**
2384    * Invoke the balancer.  Will run the balancer and, if there are regions to move, it will
2385    * go ahead and do the reassignments.  It may be unable to run for various reasons; check the
2386    * logs.
2387    * @return True if balancer ran, false otherwise.
2388    */
2389   @Override
2390   public boolean balancer() throws IOException {
2391     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2392       @Override
2393       public Boolean call(int callTimeout) throws ServiceException {
2394         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2395         controller.setCallTimeout(callTimeout);
2396         return master.balance(controller, RequestConverter.buildBalanceRequest()).getBalancerRan();
2397       }
2398     });
2399   }
2400 
2401   /**
2402    * Query the state of the balancer from the Master. It's not a guarantee that the balancer is
2403    * actually running this very moment, but that it will run.
2404    *
2405    * @return True if the balancer is enabled, false otherwise.
2406    */
2407   @Override
2408   public boolean isBalancerEnabled() throws IOException {
2409     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2410       @Override
2411       public Boolean call(int callTimeout) throws ServiceException {
2412         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2413         controller.setCallTimeout(callTimeout);
2414 
2415         return master.isBalancerEnabled(controller,
2416           RequestConverter.buildIsBalancerEnabledRequest()).getEnabled();
2417       }
2418     });
2419   }
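
  // Usage sketch (illustrative only): temporarily disabling the balancer around manual region
  // moves, then restoring its previous state. "admin" is an assumed, already-open Admin.
  //
  //   boolean previous = admin.setBalancerRunning(false, true);   // wait for in-flight balance()
  //   try {
  //     // ... manual admin.move(...) calls here ...
  //   } finally {
  //     admin.setBalancerRunning(previous, false);
  //   }
  //   if (admin.isBalancerEnabled()) {
  //     admin.balancer();                                         // request an immediate run
  //   }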
2420 
2421   /**
2422    * Invoke the region normalizer. It may be unable to run for various reasons; check the logs.
2423    *
2424    * @return True if region normalizer ran, false otherwise.
2425    */
2426   @Override
2427   public boolean normalize() throws IOException {
2428     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2429       @Override
2430       public Boolean call(int callTimeout) throws ServiceException {
2431         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2432         controller.setCallTimeout(callTimeout);
2433 
2434         return master.normalize(controller,
2435           RequestConverter.buildNormalizeRequest()).getNormalizerRan();
2436       }
2437     });
2438   }
2439 
2440   /**
2441    * Query the current state of the region normalizer
2442    *
2443    * @return true if region normalizer is enabled, false otherwise.
2444    */
2445   public boolean isNormalizerEnabled() throws IOException {
2446     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2447       @Override
2448       public Boolean call(int callTimeout) throws ServiceException {
2449         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2450         controller.setCallTimeout(callTimeout);
2451 
2452         return master.isNormalizerEnabled(controller,
2453           RequestConverter.buildIsNormalizerEnabledRequest()).getEnabled();
2454       }
2455     });
2456   }
2457 
2458   /**
2459    * Turn region normalizer on or off.
2460    *
2461    * @return Previous normalizer value
2462    */
2463   public boolean setNormalizerRunning(final boolean on) throws IOException {
2464     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2465       @Override
2466       public Boolean call(int callTimeout) throws ServiceException {
2467         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2468         controller.setCallTimeout(callTimeout);
2469 
2470         SetNormalizerRunningRequest req =
2471           RequestConverter.buildSetNormalizerRunningRequest(on);
2472         return master.setNormalizerRunning(controller, req).getPrevNormalizerValue();
2473       }
2474     });
2475   }
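
  // Usage sketch (illustrative only): checking and toggling the region normalizer, then asking
  // for an immediate normalization pass. "admin" is an assumed HBaseAdmin instance.
  //
  //   boolean wasEnabled = admin.setNormalizerRunning(true);   // returns the previous value
  //   if (admin.isNormalizerEnabled()) {
  //     admin.normalize();                                      // true if the normalizer ran
  //   }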
2476 
2477   /**
2478    * Enable/Disable the catalog janitor
2479    * @param enable if true enables the catalog janitor
2480    * @return the previous state
2481    * @throws MasterNotRunningException
2482    */
2483   @Override
2484   public boolean enableCatalogJanitor(final boolean enable)
2485       throws IOException {
2486     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2487       @Override
2488       public Boolean call(int callTimeout) throws ServiceException {
2489         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2490         controller.setCallTimeout(callTimeout);
2491 
2492         return master.enableCatalogJanitor(controller,
2493           RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
2494       }
2495     });
2496   }
2497 
2498   /**
2499    * Ask for a scan of the catalog table
2500    * @return the number of entries cleaned
2501    * @throws MasterNotRunningException
2502    */
2503   @Override
2504   public int runCatalogScan() throws IOException {
2505     return executeCallable(new MasterCallable<Integer>(getConnection()) {
2506       @Override
2507       public Integer call(int callTimeout) throws ServiceException {
2508         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2509         controller.setCallTimeout(callTimeout);
2510 
2511         return master.runCatalogScan(controller,
2512           RequestConverter.buildCatalogScanRequest()).getScanResult();
2513       }
2514     });
2515   }
2516 
2517   /**
2518    * Query the catalog janitor state (enabled or disabled).
2519    * @throws org.apache.hadoop.hbase.MasterNotRunningException
2520    */
2521   @Override
2522   public boolean isCatalogJanitorEnabled() throws IOException {
2523     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2524       @Override
2525       public Boolean call(int callTimeout) throws ServiceException {
2526         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2527         controller.setCallTimeout(callTimeout);
2528 
2529         return master.isCatalogJanitorEnabled(controller,
2530           RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
2531       }
2532     });
2533   }
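
  // Usage sketch (illustrative only): forcing a single catalog scan while the janitor is enabled,
  // then pausing the janitor. "admin" is an assumed, already-open Admin.
  //
  //   if (admin.isCatalogJanitorEnabled()) {
  //     int cleaned = admin.runCatalogScan();                   // number of entries cleaned
  //   }
  //   boolean previous = admin.enableCatalogJanitor(false);     // pause it; returns prior state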
2534 
2535   private boolean isEncodedRegionName(byte[] regionName) throws IOException {
2536     try {
2537       HRegionInfo.parseRegionName(regionName);
2538       return false;
2539     } catch (IOException e) {
2540       if (StringUtils.stringifyException(e)
2541         .contains(HRegionInfo.INVALID_REGION_NAME_FORMAT_MESSAGE)) {
2542         return true;
2543       }
2544       throw e;
2545     }
2546   }
2547 
2548   /**
2549    * Merge two regions. Asynchronous operation.
2550    * @param nameOfRegionA encoded or full name of region a
2551    * @param nameOfRegionB encoded or full name of region b
2552    * @param forcible true to force the merge; otherwise only two adjacent
2553    *          regions will be merged
2554    * @throws IOException
2555    */
2556   @Override
2557   public void mergeRegions(final byte[] nameOfRegionA,
2558       final byte[] nameOfRegionB, final boolean forcible)
2559       throws IOException {
2560     final byte[] encodedNameOfRegionA = isEncodedRegionName(nameOfRegionA) ?
2561       nameOfRegionA : HRegionInfo.encodeRegionName(nameOfRegionA).getBytes();
2562     final byte[] encodedNameOfRegionB = isEncodedRegionName(nameOfRegionB) ?
2563       nameOfRegionB : HRegionInfo.encodeRegionName(nameOfRegionB).getBytes();
2564 
2565     Pair<HRegionInfo, ServerName> pair = getRegion(nameOfRegionA);
2566     if (pair != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID)
2567       throw new IllegalArgumentException("Can't invoke merge on non-default regions directly");
2568     pair = getRegion(nameOfRegionB);
2569     if (pair != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID)
2570       throw new IllegalArgumentException("Can't invoke merge on non-default regions directly");
2571     executeCallable(new MasterCallable<Void>(getConnection()) {
2572       @Override
2573       public Void call(int callTimeout) throws ServiceException {
2574         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2575         controller.setCallTimeout(callTimeout);
2576 
2577         try {
2578           DispatchMergingRegionsRequest request = RequestConverter
2579               .buildDispatchMergingRegionsRequest(encodedNameOfRegionA,
2580                 encodedNameOfRegionB, forcible);
2581           master.dispatchMergingRegions(controller, request);
2582         } catch (DeserializationException de) {
2583           LOG.error("Could not parse destination server name: " + de);
2584         }
2585         return null;
2586       }
2587     });
2588   }
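
  // Usage sketch (illustrative only): merging two adjacent regions by encoded name. The call is
  // asynchronous; passing true for the last argument would force a merge of non-adjacent
  // regions. The encoded region names below are placeholders; "admin" is an assumed Admin.
  //
  //   admin.mergeRegions(Bytes.toBytes("527db22f95c8a9e0116f0cc13c680396"),
  //       Bytes.toBytes("aaaabbbbccccddddeeeeffff00001111"), false);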
2589 
2590   /**
2591    * {@inheritDoc}
2592    */
2593   @Override
2594   public void split(final TableName tableName)
2595     throws IOException {
2596     split(tableName, null);
2597   }
2598 
2599   /**
2600    * {@inheritDoc}
2601    */
2602   @Override
2603   public void splitRegion(final byte[] regionName)
2604     throws IOException {
2605     splitRegion(regionName, null);
2606   }
2607 
2608   /**
2609    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or
2610    * {@link #splitRegion(byte[])} instead.
2611    */
2612   @Deprecated
2613   public void split(final String tableNameOrRegionName)
2614   throws IOException, InterruptedException {
2615     split(Bytes.toBytes(tableNameOrRegionName));
2616   }
2617 
2618   /**
2619    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or
2620    * {@link #splitRegion(byte[])} instead.
2621    */
2622   @Deprecated
2623   public void split(final byte[] tableNameOrRegionName)
2624   throws IOException, InterruptedException {
2625     split(tableNameOrRegionName, null);
2626   }
2627 
2628   /**
2629    * {@inheritDoc}
2630    */
2631   @Override
2632   public void split(final TableName tableName, final byte [] splitPoint)
2633   throws IOException {
2634     ZooKeeperWatcher zookeeper = null;
2635     try {
2636       checkTableExists(tableName);
2637       zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2638         new ThrowableAbortable());
2639       List<Pair<HRegionInfo, ServerName>> pairs =
2640         MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
2641       for (Pair<HRegionInfo, ServerName> pair: pairs) {
2642         // May not be a server for a particular row
2643         if (pair.getSecond() == null) continue;
2644         HRegionInfo r = pair.getFirst();
2645         // check for parents
2646         if (r.isSplitParent()) continue;
2647         // if a split point given, only split that particular region
2648         if (r.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID ||
2649            (splitPoint != null && !r.containsRow(splitPoint))) continue;
2650         // call out to region server to do split now
2651         split(pair.getSecond(), pair.getFirst(), splitPoint);
2652       }
2653     } finally {
2654       if (zookeeper != null) {
2655         zookeeper.close();
2656       }
2657     }
2658   }
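  // A short sketch of requesting a table split at an explicit split point via the Admin
  // interface; the table name "t1" and the row key "row-5000" are hypothetical.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
  //        Admin admin = conn.getAdmin()) {
  //     // Only the region containing "row-5000" is split; pass null to split every region.
  //     admin.split(TableName.valueOf("t1"), Bytes.toBytes("row-5000"));
  //   }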
2659 
2660   /**
2661    * {@inheritDoc}
2662    */
2663   @Override
2664   public void splitRegion(final byte[] regionName, final byte [] splitPoint)
2665   throws IOException {
2666     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2667     if (regionServerPair == null) {
2668       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2669     }
2670     if (regionServerPair.getFirst() != null &&
2671         regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
2672       throw new IllegalArgumentException("Can't split replicas directly. "
2673           + "Replicas are auto-split when their primary is split.");
2674     }
2675     if (regionServerPair.getSecond() == null) {
2676       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2677     }
2678     split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
2679   }
2680 
2681   /**
2682    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2683    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2684    */
2685   @Deprecated
2686   public void split(final String tableNameOrRegionName,
2687     final String splitPoint) throws IOException {
2688     split(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(splitPoint));
2689   }
2690 
2691   /**
2692    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2693    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2694    */
2695   @Deprecated
2696   public void split(final byte[] tableNameOrRegionName,
2697       final byte [] splitPoint) throws IOException {
2698     try {
2699       splitRegion(tableNameOrRegionName, splitPoint);
2700     } catch (IllegalArgumentException e) {
2701       // Bad region, try table
2702       split(TableName.valueOf(tableNameOrRegionName), splitPoint);
2703     }
2704   }
2705 
2706   @VisibleForTesting
2707   public void split(final ServerName sn, final HRegionInfo hri,
2708       byte[] splitPoint) throws IOException {
2709     if (hri.getStartKey() != null && splitPoint != null &&
2710          Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
2711        throw new IOException("Should not provide a split key that equals the region start key");
2712     }
2713     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2714     controller.setPriority(hri.getTable());
2715 
2716     // TODO: this does not do retries, it should. Set priority and timeout in controller
2717     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2718     ProtobufUtil.split(controller, admin, hri, splitPoint);
2719   }
2720 
2721   /**
2722    * Modify an existing table, more IRB friendly version.
2723    * Asynchronous operation.  This means that it may be a while before the
2724    * schema change is propagated to all regions of the table.
2725    *
2726    * @param tableName name of table.
2727    * @param htd modified description of the table
2728    * @throws IOException if a remote or network exception occurs
2729    */
2730   @Override
2731   public void modifyTable(final TableName tableName, final HTableDescriptor htd)
2732   throws IOException {
2733     if (!tableName.equals(htd.getTableName())) {
2734       throw new IllegalArgumentException("the specified table name '" + tableName +
2735         "' doesn't match with the HTD one: " + htd.getTableName());
2736     }
2737 
2738     executeCallable(new MasterCallable<Void>(getConnection()) {
2739       @Override
2740       public Void call(int callTimeout) throws ServiceException {
2741         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2742         controller.setCallTimeout(callTimeout);
2743         controller.setPriority(tableName);
2744         ModifyTableRequest request = RequestConverter.buildModifyTableRequest(
2745           tableName, htd, ng.getNonceGroup(), ng.newNonce());
2746         master.modifyTable(controller, request);
2747         return null;
2748       }
2749     });
2750   }
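  // A hedged sketch of an asynchronous schema change through modifyTable(); the table "t1"
  // and column family "cf2" are illustrative names only.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
  //        Admin admin = conn.getAdmin()) {
  //     TableName tn = TableName.valueOf("t1");
  //     HTableDescriptor htd = admin.getTableDescriptor(tn);
  //     htd.addFamily(new HColumnDescriptor("cf2"));
  //     admin.modifyTable(tn, htd);   // returns before all regions have picked up the change
  //   }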
2751 
2752   public void modifyTable(final byte[] tableName, final HTableDescriptor htd)
2753   throws IOException {
2754     modifyTable(TableName.valueOf(tableName), htd);
2755   }
2756 
2757   public void modifyTable(final String tableName, final HTableDescriptor htd)
2758   throws IOException {
2759     modifyTable(TableName.valueOf(tableName), htd);
2760   }
2761 
2762   /**
2763    * @param regionName Name of a region.
2764    * @return a pair of HRegionInfo and ServerName if <code>regionName</code> is
2765    *  a verified region name (we call {@link
2766    *  MetaTableAccessor#getRegion(HConnection, byte[])});
2767    *  otherwise null.
2768    * @throws IllegalArgumentException if <code>regionName</code> is null.
2769    * @throws IOException
2770    */
2771   Pair<HRegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException {
2772     if (regionName == null) {
2773       throw new IllegalArgumentException("Pass a table name or region name");
2774     }
2775     Pair<HRegionInfo, ServerName> pair =
2776       MetaTableAccessor.getRegion(connection, regionName);
2777     if (pair == null) {
2778       final AtomicReference<Pair<HRegionInfo, ServerName>> result =
2779         new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
2780       final String encodedName = Bytes.toString(regionName);
2781       MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
2782         @Override
2783         public boolean processRow(Result data) throws IOException {
2784           HRegionInfo info = HRegionInfo.getHRegionInfo(data);
2785           if (info == null) {
2786             LOG.warn("No serialized HRegionInfo in " + data);
2787             return true;
2788           }
2789           RegionLocations rl = MetaTableAccessor.getRegionLocations(data);
2790           boolean matched = false;
2791           ServerName sn = null;
2792           for (HRegionLocation h : rl.getRegionLocations()) {
2793             if (h != null && encodedName.equals(h.getRegionInfo().getEncodedName())) {
2794               sn = h.getServerName();
2795               info = h.getRegionInfo();
2796               matched = true;
2797             }
2798           }
2799           if (!matched) return true;
2800           result.set(new Pair<HRegionInfo, ServerName>(info, sn));
2801           return false; // found the region, stop
2802         }
2803       };
2804 
2805       MetaScanner.metaScan(connection, visitor, null);
2806       pair = result.get();
2807     }
2808     return pair;
2809   }
2810 
2811   /**
2812    * If the input is a region name, it is returned as is. If it's an
2813    * encoded region name, the corresponding region is found from meta
2814    * and its region name is returned. If we can't find any region in
2815    * meta matching the input as either region name or encoded region
2816    * name, the input is returned as is. We don't throw unknown
2817    * region exception.
2818    */
2819   private byte[] getRegionName(
2820       final byte[] regionNameOrEncodedRegionName) throws IOException {
2821     if (Bytes.equals(regionNameOrEncodedRegionName,
2822         HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
2823           || Bytes.equals(regionNameOrEncodedRegionName,
2824             HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) {
2825       return HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
2826     }
2827     byte[] tmp = regionNameOrEncodedRegionName;
2828     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName);
2829     if (regionServerPair != null && regionServerPair.getFirst() != null) {
2830       tmp = regionServerPair.getFirst().getRegionName();
2831     }
2832     return tmp;
2833   }
2834 
2835   /**
2836    * Check whether a table exists.
2837    * @param tableName Name of a table.
2838    * @return the passed tableName instance
2839    * @throws IOException if a remote or network exception occurs.
2840    * @throws TableNotFoundException if table does not exist.
2841    */
2842   private TableName checkTableExists(final TableName tableName)
2843       throws IOException {
2844     if (!MetaTableAccessor.tableExists(connection, tableName)) {
2845       throw new TableNotFoundException(tableName);
2846     }
2847     return tableName;
2848   }
2849 
2850   /**
2851    * Shuts down the HBase cluster
2852    * @throws IOException if a remote or network exception occurs
2853    */
2854   @Override
2855   public synchronized void shutdown() throws IOException {
2856     executeCallable(new MasterCallable<Void>(getConnection()) {
2857       @Override
2858       public Void call(int callTimeout) throws ServiceException {
2859         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2860         controller.setCallTimeout(callTimeout);
2861         controller.setPriority(HConstants.HIGH_QOS);
2862         master.shutdown(controller, ShutdownRequest.newBuilder().build());
2863         return null;
2864       }
2865     });
2866   }
2867 
2868   /**
2869    * Shuts down the current HBase master only.
2870    * Does not shut down the cluster.
2871    * @see #shutdown()
2872    * @throws IOException if a remote or network exception occurs
2873    */
2874   @Override
2875   public synchronized void stopMaster() throws IOException {
2876     executeCallable(new MasterCallable<Void>(getConnection()) {
2877       @Override
2878       public Void call(int callTimeout) throws ServiceException {
2879         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2880         controller.setCallTimeout(callTimeout);
2881         controller.setPriority(HConstants.HIGH_QOS);
2882         master.stopMaster(controller, StopMasterRequest.newBuilder().build());
2883         return null;
2884       }
2885     });
2886   }
2887 
2888   /**
2889    * Stop the designated regionserver
2890    * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
2891    * <code>example.org:1234</code>
2892    * @throws IOException if a remote or network exception occurs
2893    */
2894   @Override
2895   public synchronized void stopRegionServer(final String hostnamePort)
2896   throws IOException {
2897     String hostname = Addressing.parseHostname(hostnamePort);
2898     int port = Addressing.parsePort(hostnamePort);
2899     AdminService.BlockingInterface admin =
2900       this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
2901     StopServerRequest request = RequestConverter.buildStopServerRequest(
2902       "Called by admin client " + this.connection.toString());
2903     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2904 
2905     controller.setPriority(HConstants.HIGH_QOS);
2906     try {
2907       // TODO: this does not do retries, it should. Set priority and timeout in controller
2908       admin.stopServer(controller, request);
2909     } catch (ServiceException se) {
2910       throw ProtobufUtil.getRemoteException(se);
2911     }
2912   }
2913 
2914 
2915   /**
2916    * @return cluster status
2917    * @throws IOException if a remote or network exception occurs
2918    */
2919   @Override
2920   public ClusterStatus getClusterStatus() throws IOException {
2921     return executeCallable(new MasterCallable<ClusterStatus>(getConnection()) {
2922       @Override
2923       public ClusterStatus call(int callTimeout) throws ServiceException {
2924         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2925         controller.setCallTimeout(callTimeout);
2926         GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest();
2927         return ClusterStatus.convert(master.getClusterStatus(controller, req).getClusterStatus());
2928       }
2929     });
2930   }
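  // A small sketch of reading a few cluster-level metrics from the ClusterStatus returned
  // above; the output formatting is illustrative only.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
  //        Admin admin = conn.getAdmin()) {
  //     ClusterStatus status = admin.getClusterStatus();
  //     System.out.println("servers=" + status.getServersSize()
  //         + " regions=" + status.getRegionsCount()
  //         + " avgLoad=" + status.getAverageLoad());
  //   }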
2931 
2932   /**
2933    * @return Configuration used by the instance.
2934    */
2935   @Override
2936   public Configuration getConfiguration() {
2937     return this.conf;
2938   }
2939 
2940   /**
2941    * Create a new namespace
2942    * @param descriptor descriptor which describes the new namespace
2943    * @throws IOException
2944    */
2945   @Override
2946   public void createNamespace(final NamespaceDescriptor descriptor) throws IOException {
2947     executeCallable(new MasterCallable<Void>(getConnection()) {
2948       @Override
2949       public Void call(int callTimeout) throws Exception {
2950         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2951         controller.setCallTimeout(callTimeout);
2952         // TODO: set priority based on NS?
2953         master.createNamespace(controller,
2954           CreateNamespaceRequest.newBuilder()
2955             .setNamespaceDescriptor(ProtobufUtil
2956               .toProtoNamespaceDescriptor(descriptor)).build()
2957         );
2958         return null;
2959       }
2960     });
2961   }
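  // A minimal sketch of creating a namespace and then a table inside it; the namespace
  // "ns1" and table "ns1:t1" are hypothetical names.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
  //        Admin admin = conn.getAdmin()) {
  //     admin.createNamespace(NamespaceDescriptor.create("ns1").build());
  //     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("ns1:t1"));
  //     htd.addFamily(new HColumnDescriptor("cf"));
  //     admin.createTable(htd);
  //   }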
2962 
2963   /**
2964    * Modify an existing namespace
2965    * @param descriptor descriptor which describes the updated namespace
2966    * @throws IOException
2967    */
2968   @Override
2969   public void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException {
2970     executeCallable(new MasterCallable<Void>(getConnection()) {
2971       @Override
2972       public Void call(int callTimeout) throws Exception {
2973         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2974         controller.setCallTimeout(callTimeout);
2975         master.modifyNamespace(controller, ModifyNamespaceRequest.newBuilder().
2976           setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build());
2977         return null;
2978       }
2979     });
2980   }
2981 
2982   /**
2983    * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
2984    * @param name namespace name
2985    * @throws IOException
2986    */
2987   @Override
2988   public void deleteNamespace(final String name) throws IOException {
2989     executeCallable(new MasterCallable<Void>(getConnection()) {
2990       @Override
2991       public Void call(int callTimeout) throws Exception {
2992         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2993         controller.setCallTimeout(callTimeout);
2994         master.deleteNamespace(controller, DeleteNamespaceRequest.newBuilder().
2995           setNamespaceName(name).build());
2996         return null;
2997       }
2998     });
2999   }
3000 
3001   /**
3002    * Get a namespace descriptor by name
3003    * @param name name of namespace descriptor
3004    * @return A descriptor
3005    * @throws IOException
3006    */
3007   @Override
3008   public NamespaceDescriptor getNamespaceDescriptor(final String name) throws IOException {
3009     return
3010         executeCallable(new MasterCallable<NamespaceDescriptor>(getConnection()) {
3011           @Override
3012           public NamespaceDescriptor call(int callTimeout) throws Exception {
3013             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3014             controller.setCallTimeout(callTimeout);
3015             return ProtobufUtil.toNamespaceDescriptor(
3016               master.getNamespaceDescriptor(controller, GetNamespaceDescriptorRequest.newBuilder().
3017                 setNamespaceName(name).build()).getNamespaceDescriptor());
3018           }
3019         });
3020   }
3021 
3022   /**
3023    * List available namespace descriptors
3024    * @return List of descriptors
3025    * @throws IOException
3026    */
3027   @Override
3028   public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
3029     return
3030         executeCallable(new MasterCallable<NamespaceDescriptor[]>(getConnection()) {
3031           @Override
3032           public NamespaceDescriptor[] call(int callTimeout) throws Exception {
3033             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3034             controller.setCallTimeout(callTimeout);
3035             List<HBaseProtos.NamespaceDescriptor> list =
3036                 master.listNamespaceDescriptors(controller,
3037                   ListNamespaceDescriptorsRequest.newBuilder().build())
3038                 .getNamespaceDescriptorList();
3039             NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()];
3040             for(int i = 0; i < list.size(); i++) {
3041               res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i));
3042             }
3043             return res;
3044           }
3045         });
3046   }
3047 
3048   /**
3049    * List procedures
3050    * @return procedure list
3051    * @throws IOException
3052    */
3053   @Override
3054   public ProcedureInfo[] listProcedures() throws IOException {
3055     return
3056         executeCallable(new MasterCallable<ProcedureInfo[]>(getConnection()) {
3057           @Override
3058           public ProcedureInfo[] call(int callTimeout) throws Exception {
3059             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3060             controller.setCallTimeout(callTimeout);
3061             List<ProcedureProtos.Procedure> procList = master.listProcedures(
3062               controller, ListProceduresRequest.newBuilder().build()).getProcedureList();
3063             ProcedureInfo[] procInfoList = new ProcedureInfo[procList.size()];
3064             for (int i = 0; i < procList.size(); i++) {
3065               procInfoList[i] = ProcedureInfo.convert(procList.get(i));
3066             }
3067             return procInfoList;
3068           }
3069         });
3070   }
3071 
3072   /**
3073    * Get list of table descriptors by namespace
3074    * @param name namespace name
3075    * @return HTD[] the table descriptors of the namespace
3076    * @throws IOException
3077    */
3078   @Override
3079   public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException {
3080     return
3081         executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
3082           @Override
3083           public HTableDescriptor[] call(int callTimeout) throws Exception {
3084             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3085             controller.setCallTimeout(callTimeout);
3086             List<TableSchema> list =
3087                 master.listTableDescriptorsByNamespace(controller,
3088                   ListTableDescriptorsByNamespaceRequest.newBuilder().setNamespaceName(name)
3089                   .build()).getTableSchemaList();
3090             HTableDescriptor[] res = new HTableDescriptor[list.size()];
3091             for (int i = 0; i < list.size(); i++) {
3092               res[i] = HTableDescriptor.convert(list.get(i));
3094             }
3095             return res;
3096           }
3097         });
3098   }
3099 
3100   /**
3101    * Get list of table names by namespace
3102    * @param name namespace name
3103    * @return The list of table names in the namespace
3104    * @throws IOException
3105    */
3106   @Override
3107   public TableName[] listTableNamesByNamespace(final String name) throws IOException {
3108     return
3109         executeCallable(new MasterCallable<TableName[]>(getConnection()) {
3110           @Override
3111           public TableName[] call(int callTimeout) throws Exception {
3112             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3113             controller.setCallTimeout(callTimeout);
3114             List<HBaseProtos.TableName> tableNames =
3115               master.listTableNamesByNamespace(controller, ListTableNamesByNamespaceRequest.
3116                 newBuilder().setNamespaceName(name).build())
3117                 .getTableNameList();
3118             TableName[] result = new TableName[tableNames.size()];
3119             for (int i = 0; i < tableNames.size(); i++) {
3120               result[i] = ProtobufUtil.toTableName(tableNames.get(i));
3121             }
3122             return result;
3123           }
3124         });
3125   }
3126 
3127   /**
3128    * Check to see if HBase is running. Throw an exception if not.
3129    * @param conf system configuration
3130    * @throws MasterNotRunningException if the master is not running
3131    * @throws ZooKeeperConnectionException if unable to connect to zookeeper
3132    */
3133   // Used by tests and by the Merge tool. Merge tool uses it to figure if HBase is up or not.
3134   public static void checkHBaseAvailable(Configuration conf)
3135   throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException {
3136     Configuration copyOfConf = HBaseConfiguration.create(conf);
3137     // We set it to make it fail as soon as possible if HBase is not available
3138     copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
3139     copyOfConf.setInt("zookeeper.recovery.retry", 0);
3140     try (ClusterConnection connection =
3141         (ClusterConnection)ConnectionFactory.createConnection(copyOfConf)) {
3142         // Check ZK first.
3143         // If the connection exists, we may have a connection to ZK that does not work anymore
3144         ZooKeeperKeepAliveConnection zkw = null;
3145         try {
3146           // This is NASTY. FIX!!!! Dependent on internal implementation! TODO
3147           zkw = ((ConnectionManager.HConnectionImplementation)connection).
3148             getKeepAliveZooKeeperWatcher();
3149           zkw.getRecoverableZooKeeper().getZooKeeper().exists(zkw.baseZNode, false);
3150         } catch (IOException e) {
3151           throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
3152         } catch (InterruptedException e) {
3153           throw (InterruptedIOException)
3154             new InterruptedIOException("Can't connect to ZooKeeper").initCause(e);
3155         } catch (KeeperException e) {
3156           throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
3157         } finally {
3158           if (zkw != null) {
3159             zkw.close();
3160           }
3161         }
3162       connection.isMasterRunning();
3163     }
3164   }
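  // A quick availability probe using the static helper above; it throws if the master or
  // ZooKeeper cannot be reached, so callers typically just catch and report.
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   try {
  //     HBaseAdmin.checkHBaseAvailable(conf);
  //     System.out.println("HBase is up");
  //   } catch (Exception e) {
  //     System.out.println("HBase is NOT available: " + e.getMessage());
  //   }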
3165 
3166   /**
3167    * Get the regions of a given table.
3168    *
3169    * @param tableName the name of the table
3170    * @return Ordered list of {@link HRegionInfo}.
3171    * @throws IOException
3172    */
3173   @Override
3174   public List<HRegionInfo> getTableRegions(final TableName tableName)
3175   throws IOException {
3176     ZooKeeperWatcher zookeeper =
3177       new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
3178         new ThrowableAbortable());
3179     List<HRegionInfo> regions = null;
3180     try {
3181       regions = MetaTableAccessor.getTableRegions(zookeeper, connection, tableName, true);
3182     } finally {
3183       zookeeper.close();
3184     }
3185     return regions;
3186   }
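  // A brief sketch of listing region boundaries for a table; "t1" is a placeholder name.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
  //        Admin admin = conn.getAdmin()) {
  //     for (HRegionInfo hri : admin.getTableRegions(TableName.valueOf("t1"))) {
  //       System.out.println(hri.getEncodedName() + " ["
  //           + Bytes.toStringBinary(hri.getStartKey()) + ", "
  //           + Bytes.toStringBinary(hri.getEndKey()) + ")");
  //     }
  //   }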
3187 
3188   public List<HRegionInfo> getTableRegions(final byte[] tableName)
3189   throws IOException {
3190     return getTableRegions(TableName.valueOf(tableName));
3191   }
3192 
3193   @Override
3194   public synchronized void close() throws IOException {
3195     if (cleanupConnectionOnClose && this.connection != null && !this.closed) {
3196       this.connection.close();
3197       this.closed = true;
3198     }
3199   }
3200 
3201   /**
3202    * Get tableDescriptors
3203    * @param tableNames List of table names
3204    * @return HTD[] the table descriptors
3205    * @throws IOException if a remote or network exception occurs
3206    */
3207   @Override
3208   public HTableDescriptor[] getTableDescriptorsByTableName(final List<TableName> tableNames)
3209   throws IOException {
3210     return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
3211       @Override
3212       public HTableDescriptor[] call(int callTimeout) throws Exception {
3213         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3214         controller.setCallTimeout(callTimeout);
3215         GetTableDescriptorsRequest req =
3216             RequestConverter.buildGetTableDescriptorsRequest(tableNames);
3217           return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(controller, req));
3218       }
3219     });
3220   }
3221 
3222   /**
3223    * Get tableDescriptor
3224    * @param tableName one table name
3225    * @return HTD the HTableDescriptor or null if the table does not exist
3226    * @throws IOException if a remote or network exception occurs
3227    */
3228   private HTableDescriptor getTableDescriptorByTableName(TableName tableName)
3229       throws IOException {
3230     List<TableName> tableNames = new ArrayList<TableName>(1);
3231     tableNames.add(tableName);
3232 
3233     HTableDescriptor[] htdl = getTableDescriptorsByTableName(tableNames);
3234 
3235     if (htdl == null || htdl.length == 0) {
3236       return null;
3237     }
3238     else {
3239       return htdl[0];
3240     }
3241   }
3242 
3243   /**
3244    * Get tableDescriptors
3245    * @param names List of table names
3246    * @return HTD[] the table descriptors
3247    * @throws IOException if a remote or network exception occurs
3248    */
3249   @Override
3250   public HTableDescriptor[] getTableDescriptors(List<String> names)
3251   throws IOException {
3252     List<TableName> tableNames = new ArrayList<TableName>(names.size());
3253     for(String name : names) {
3254       tableNames.add(TableName.valueOf(name));
3255     }
3256     return getTableDescriptorsByTableName(tableNames);
3257   }
3258 
3259   private RollWALWriterResponse rollWALWriterImpl(final ServerName sn) throws IOException,
3260       FailedLogCloseException {
3261     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
3262     RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest();
3263     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3264 
3265     try {
3266       // TODO: this does not do retries, it should. Set priority and timeout in controller
3267       return admin.rollWALWriter(controller, request);
3268     } catch (ServiceException se) {
3269       throw ProtobufUtil.getRemoteException(se);
3270     }
3271   }
3272 
3273   /**
3274    * Roll the log writer. I.e. when using a file system based write ahead log,
3275    * start writing log messages to a new file.
3276    *
3277    * Note that when talking to a version 1.0+ HBase deployment, the rolling is asynchronous.
3278    * This method will return as soon as the roll is requested and the return value will
3279    * always be null. Additionally, the named region server may schedule store flushes at the
3280    * request of the wal handling the roll request.
3281    *
3282    * When talking to a 0.98 or older HBase deployment, the rolling is synchronous and the
3283    * return value may be either null or a list of encoded region names.
3284    *
3285    * @param serverName
3286    *          The servername of the regionserver. A server name is made of host,
3287    *          port and startcode. This is mandatory. Here is an example:
3288    *          <code> host187.example.com,60020,1289493121758</code>
3289    * @return a set of {@link HRegionInfo#getEncodedName()} that would allow the wal to
3290    *         clean up some underlying files. null if there's nothing to flush.
3291    * @throws IOException if a remote or network exception occurs
3292    * @throws FailedLogCloseException
3293    * @deprecated use {@link #rollWALWriter(ServerName)}
3294    */
3295   @Deprecated
3296   public synchronized byte[][] rollHLogWriter(String serverName)
3297       throws IOException, FailedLogCloseException {
3298     ServerName sn = ServerName.valueOf(serverName);
3299     final RollWALWriterResponse response = rollWALWriterImpl(sn);
3300     int regionCount = response.getRegionToFlushCount();
3301     if (0 == regionCount) {
3302       return null;
3303     }
3304     byte[][] regionsToFlush = new byte[regionCount][];
3305     for (int i = 0; i < regionCount; i++) {
3306       ByteString region = response.getRegionToFlush(i);
3307       regionsToFlush[i] = region.toByteArray();
3308     }
3309     return regionsToFlush;
3310   }
3311 
3312   @Override
3313   public synchronized void rollWALWriter(ServerName serverName)
3314       throws IOException, FailedLogCloseException {
3315     rollWALWriterImpl(serverName);
3316   }
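  // A sketch of rolling the WAL on every live region server; iterating servers via
  // ClusterStatus is one possible approach, not the only one.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
  //        Admin admin = conn.getAdmin()) {
  //     for (ServerName sn : admin.getClusterStatus().getServers()) {
  //       admin.rollWALWriter(sn);   // asynchronous on 1.0+ clusters, see the javadoc above
  //     }
  //   }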
3317 
3318   @Override
3319   public String[] getMasterCoprocessors() {
3320     try {
3321       return getClusterStatus().getMasterCoprocessors();
3322     } catch (IOException e) {
3323       LOG.error("Could not getClusterStatus()", e);
3324       return null;
3325     }
3326   }
3327 
3328   /**
3329    * {@inheritDoc}
3330    */
3331   @Override
3332   public CompactionState getCompactionState(final TableName tableName)
3333   throws IOException {
3334     CompactionState state = CompactionState.NONE;
3335     ZooKeeperWatcher zookeeper =
3336       new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
3337         new ThrowableAbortable());
3338     try {
3339       checkTableExists(tableName);
3340       List<Pair<HRegionInfo, ServerName>> pairs =
3341         MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
3342       for (Pair<HRegionInfo, ServerName> pair: pairs) {
3343         if (pair.getFirst().isOffline()) continue;
3344         if (pair.getSecond() == null) continue;
3345         try {
3346           ServerName sn = pair.getSecond();
3347           AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
3348           GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
3349             pair.getFirst().getRegionName(), true);
3350           GetRegionInfoResponse response = admin.getRegionInfo(null, request);
3351           switch (response.getCompactionState()) {
3352           case MAJOR_AND_MINOR:
3353             return CompactionState.MAJOR_AND_MINOR;
3354           case MAJOR:
3355             if (state == CompactionState.MINOR) {
3356               return CompactionState.MAJOR_AND_MINOR;
3357             }
3358             state = CompactionState.MAJOR;
3359             break;
3360           case MINOR:
3361             if (state == CompactionState.MAJOR) {
3362               return CompactionState.MAJOR_AND_MINOR;
3363             }
3364             state = CompactionState.MINOR;
3365             break;
3366           case NONE:
3367           default: // nothing, continue
3368           }
3369         } catch (NotServingRegionException e) {
3370           if (LOG.isDebugEnabled()) {
3371             LOG.debug("Trying to get compaction state of " +
3372               pair.getFirst() + ": " +
3373               StringUtils.stringifyException(e));
3374           }
3375         } catch (RemoteException e) {
3376           if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) {
3377             if (LOG.isDebugEnabled()) {
3378               LOG.debug("Trying to get compaction state of " + pair.getFirst() + ": "
3379                 + StringUtils.stringifyException(e));
3380             }
3381           } else {
3382             throw e;
3383           }
3384         }
3385       }
3386     } catch (ServiceException se) {
3387       throw ProtobufUtil.getRemoteException(se);
3388     } finally {
3389       zookeeper.close();
3390     }
3391     return state;
3392   }
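  // A sketch of polling the aggregate compaction state after triggering a major compaction;
  // table "t1" and the 5 second poll interval are illustrative, and InterruptedException
  // handling is omitted for brevity.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
  //        Admin admin = conn.getAdmin()) {
  //     TableName tn = TableName.valueOf("t1");
  //     admin.majorCompact(tn);
  //     while (admin.getCompactionState(tn) != CompactionState.NONE) {
  //       Thread.sleep(5000);   // still compacting somewhere in the table
  //     }
  //   }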
3393 
3394   /**
3395    * {@inheritDoc}
3396    */
3397   @Override
3398   public CompactionState getCompactionStateForRegion(final byte[] regionName)
3399   throws IOException {
3400     try {
3401       Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
3402       if (regionServerPair == null) {
3403         throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
3404       }
3405       if (regionServerPair.getSecond() == null) {
3406         throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
3407       }
3408       ServerName sn = regionServerPair.getSecond();
3409       AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
3410       GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
3411         regionServerPair.getFirst().getRegionName(), true);
3412       PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3413       // TODO: this does not do retries, it should. Set priority and timeout in controller
3414       GetRegionInfoResponse response = admin.getRegionInfo(controller, request);
3415       return response.getCompactionState();
3416     } catch (ServiceException se) {
3417       throw ProtobufUtil.getRemoteException(se);
3418     }
3419   }
3420 
3421   /**
3422    * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
3423    * #getCompactionStateForRegion(byte[])} instead.
3424    */
3425   @Deprecated
3426   public CompactionState getCompactionState(final String tableNameOrRegionName)
3427   throws IOException, InterruptedException {
3428     return getCompactionState(Bytes.toBytes(tableNameOrRegionName));
3429   }
3430 
3431   /**
3432    * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
3433    * #getCompactionStateForRegion(byte[])} instead.
3434    */
3435   @Deprecated
3436   public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
3437   throws IOException, InterruptedException {
3438     try {
3439       return getCompactionStateForRegion(tableNameOrRegionName);
3440     } catch (IllegalArgumentException e) {
3441       // Invalid region, try table
3442       return getCompactionState(TableName.valueOf(tableNameOrRegionName));
3443     }
3444   }
3445 
3446   /**
3447    * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
3448    * taken. If the table is disabled, an offline snapshot is taken.
3449    * <p>
3450    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3451    * snapshot with the same name (even a different type or with different parameters) will fail with
3452    * a {@link SnapshotCreationException} indicating the duplicate naming.
3453    * <p>
3454    * Snapshot names follow the same naming constraints as tables in HBase. See
3455    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
3456    * @param snapshotName name of the snapshot to be created
3457    * @param tableName name of the table for which snapshot is created
3458    * @throws IOException if a remote or network exception occurs
3459    * @throws SnapshotCreationException if snapshot creation failed
3460    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3461    */
3462   @Override
3463   public void snapshot(final String snapshotName,
3464                        final TableName tableName) throws IOException,
3465       SnapshotCreationException, IllegalArgumentException {
3466     snapshot(snapshotName, tableName, SnapshotDescription.Type.FLUSH);
3467   }
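  // A minimal sketch of taking a flush snapshot of an online table and listing the existing
  // snapshots afterwards; the snapshot and table names are hypothetical.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
  //        Admin admin = conn.getAdmin()) {
  //     admin.snapshot("t1-backup-20150101", TableName.valueOf("t1"));
  //     for (SnapshotDescription sd : admin.listSnapshots()) {
  //       System.out.println(sd.getName() + " -> " + sd.getTable());
  //     }
  //   }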
3468 
3469   public void snapshot(final String snapshotName,
3470                        final String tableName) throws IOException,
3471       SnapshotCreationException, IllegalArgumentException {
3472     snapshot(snapshotName, TableName.valueOf(tableName),
3473         SnapshotDescription.Type.FLUSH);
3474   }
3475 
3476   /**
3477    * Create snapshot for the given table of given flush type.
3478    * <p>
3479    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3480    * snapshot with the same name (even a different type or with different parameters) will fail with
3481    * a {@link SnapshotCreationException} indicating the duplicate naming.
3482    * <p>
3483    * Snapshot names follow the same naming constraints as tables in HBase.
3484    * @param snapshotName name of the snapshot to be created
3485    * @param tableName name of the table for which snapshot is created
3486    * @param flushType the type of snapshot to take, which determines whether the memstore is flushed first
3487    * @throws IOException if a remote or network exception occurs
3488    * @throws SnapshotCreationException if snapshot creation failed
3489    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3490    */
3491    public void snapshot(final byte[] snapshotName, final byte[] tableName,
3492                        final SnapshotDescription.Type flushType) throws
3493       IOException, SnapshotCreationException, IllegalArgumentException {
3494       snapshot(Bytes.toString(snapshotName), Bytes.toString(tableName), flushType);
3495   }
3496   /**
3497    * Create a timestamp consistent snapshot for the given table.
3498    * <p>
3499    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3500    * snapshot with the same name (even a different type or with different parameters) will fail with
3501    * a {@link SnapshotCreationException} indicating the duplicate naming.
3502    * <p>
3503    * Snapshot names follow the same naming constraints as tables in HBase.
3504    * @param snapshotName name of the snapshot to be created
3505    * @param tableName name of the table for which snapshot is created
3506    * @throws IOException if a remote or network exception occurs
3507    * @throws SnapshotCreationException if snapshot creation failed
3508    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3509    */
3512   @Override
3513   public void snapshot(final byte[] snapshotName,
3514                        final TableName tableName) throws IOException,
3515       SnapshotCreationException, IllegalArgumentException {
3516     snapshot(Bytes.toString(snapshotName), tableName, SnapshotDescription.Type.FLUSH);
3517   }
3518 
3519   public void snapshot(final byte[] snapshotName,
3520                        final byte[] tableName) throws IOException,
3521       SnapshotCreationException, IllegalArgumentException {
3522     snapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName),
3523       SnapshotDescription.Type.FLUSH);
3524   }
3525 
3526   /**
3527    * Create typed snapshot of the table.
3528    * <p>
3529    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3530    * snapshot with the same name (even a different type or with different parameters) will fail with
3531    * a {@link SnapshotCreationException} indicating the duplicate naming.
3532    * <p>
3533    * Snapshot names follow the same naming constraints as tables in HBase. See
3534    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
3535    * <p>
3536    * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
3537    *          snapshots stored on the cluster
3538    * @param tableName name of the table to snapshot
3539    * @param type type of snapshot to take
3540    * @throws IOException if we fail to reach the master
3541    * @throws SnapshotCreationException if snapshot creation failed
3542    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3543    */
3544   @Override
3545   public void snapshot(final String snapshotName,
3546                        final TableName tableName,
3547                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
3548       IllegalArgumentException {
3549     SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
3550     builder.setTable(tableName.getNameAsString());
3551     builder.setName(snapshotName);
3552     builder.setType(type);
3553     snapshot(builder.build());
3554   }
3555 
3556   public void snapshot(final String snapshotName,
3557                        final String tableName,
3558                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
3559       IllegalArgumentException {
3560     snapshot(snapshotName, TableName.valueOf(tableName), type);
3561   }
3562 
3563   public void snapshot(final String snapshotName,
3564                        final byte[] tableName,
3565                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
3566       IllegalArgumentException {
3567     snapshot(snapshotName, TableName.valueOf(tableName), type);
3568   }
3569 
3570   /**
3571    * Take a snapshot and wait for the server to complete that snapshot (blocking).
3572    * <p>
3573    * Only a single snapshot should be taken at a time for an instance of HBase, or results may be
3574    * undefined (you can tell multiple HBase clusters to snapshot at the same time, but only one at a
3575    * time for a single cluster).
3576    * <p>
3577    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3578    * snapshot with the same name (even a different type or with different parameters) will fail with
3579    * a {@link SnapshotCreationException} indicating the duplicate naming.
3580    * <p>
3581    * Snapshot names follow the same naming constraints as tables in HBase. See
3582    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
3583    * <p>
3584    * You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])}
3585    * unless you are sure about the type of snapshot that you want to take.
3586    * @param snapshot snapshot to take
3587    * @throws IOException if a remote or network exception occurs or we lose contact with the master
3588    * @throws SnapshotCreationException if snapshot failed to be taken
3589    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3590    */
3591   @Override
3592   public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException,
3593       IllegalArgumentException {
3594     // actually take the snapshot
3595     SnapshotResponse response = takeSnapshotAsync(snapshot);
3596     final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot)
3597         .build();
3598     IsSnapshotDoneResponse done = null;
3599     long start = EnvironmentEdgeManager.currentTime();
3600     long max = response.getExpectedTimeout();
3601     long maxPauseTime = max / this.numRetries;
3602     int tries = 0;
3603     LOG.debug("Waiting a max of " + max + " ms for snapshot '" +
3604         ClientSnapshotDescriptionUtils.toString(snapshot) + "' to complete. (max " +
3605         maxPauseTime + " ms per retry)");
3606     while (tries == 0
3607         || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) {
3608       try {
3609         // sleep a backoff <= pauseTime amount
3610         long sleep = getPauseTime(tries++);
3611         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
3612         LOG.debug("(#" + tries + ") Sleeping: " + sleep +
3613           "ms while waiting for snapshot completion.");
3614         Thread.sleep(sleep);
3615       } catch (InterruptedException e) {
3616         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
3617       }
3618       LOG.debug("Getting current status of snapshot from master...");
3619       done = executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
3620         @Override
3621         public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3622           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3623           controller.setCallTimeout(callTimeout);
3624           return master.isSnapshotDone(controller, request);
3625         }
3626       });
3627     }
3628     if (!done.getDone()) {
3629       throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
3630           + "' wasn't completed in expectedTime:" + max + " ms", snapshot);
3631     }
3632   }
3633 
3634   /**
3635    * Take a snapshot without waiting for the server to complete that snapshot (asynchronous)
3636    * <p>
3637    * Only a single snapshot should be taken at a time, or results may be undefined.
3638    * @param snapshot snapshot to take
3639    * @return response from the server indicating the max time to wait for the snapshot
3640    * @throws IOException if the snapshot did not succeed or we lose contact with the master.
3641    * @throws SnapshotCreationException if snapshot creation failed
3642    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3643    */
3644   @Override
3645   public SnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot) throws IOException,
3646       SnapshotCreationException {
3647     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
3648     final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot)
3649         .build();
3650     // run the snapshot on the master
3651     return executeCallable(new MasterCallable<SnapshotResponse>(getConnection()) {
3652       @Override
3653       public SnapshotResponse call(int callTimeout) throws ServiceException {
3654         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3655         controller.setCallTimeout(callTimeout);
3656         return master.snapshot(controller, request);
3657       }
3658     });
3659   }
3660 
3661   /**
3662    * Check the current state of the passed snapshot.
3663    * <p>
3664    * There are three possible states:
3665    * <ol>
3666    * <li>running - returns <tt>false</tt></li>
3667    * <li>finished - returns <tt>true</tt></li>
3668    * <li>finished with error - throws the exception that caused the snapshot to fail</li>
3669    * </ol>
3670    * <p>
3671    * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
3672    * run/started since the snapshot you are checking, you will receive an
3673    * {@link UnknownSnapshotException}.
3674    * @param snapshot description of the snapshot to check
3675    * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
3676    *         running
3677    * @throws IOException if we have a network issue
3678    * @throws HBaseSnapshotException if the snapshot failed
3679    * @throws UnknownSnapshotException if the requested snapshot is unknown
3680    */
3681   @Override
3682   public boolean isSnapshotFinished(final SnapshotDescription snapshot)
3683       throws IOException, HBaseSnapshotException, UnknownSnapshotException {
3684 
3685     return executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
3686       @Override
3687       public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3688         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3689         controller.setCallTimeout(callTimeout);
3690         return master.isSnapshotDone(controller,
3691           IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build());
3692       }
3693     }).getDone();
3694   }
3695 
3696   /**
3697    * Restore the specified snapshot on the original table. (The table must be disabled)
3698    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3699    * is set to true, a snapshot of the current table is taken
3700    * before executing the restore operation.
3701    * In case of restore failure, the failsafe snapshot will be restored.
3702    * If the restore completes without problem the failsafe snapshot is deleted.
3703    *
3704    * @param snapshotName name of the snapshot to restore
3705    * @throws IOException if a remote or network exception occurs
3706    * @throws RestoreSnapshotException if snapshot failed to be restored
3707    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3708    */
3709   @Override
3710   public void restoreSnapshot(final byte[] snapshotName)
3711       throws IOException, RestoreSnapshotException {
3712     restoreSnapshot(Bytes.toString(snapshotName));
3713   }
3714 
3715   /**
3716    * Restore the specified snapshot on the original table. (The table must be disabled)
3717    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3718    * is set to true, a snapshot of the current table is taken
3719    * before executing the restore operation.
3720    * In case of restore failure, the failsafe snapshot will be restored.
3721    * If the restore completes without problem the failsafe snapshot is deleted.
3722    *
3723    * @param snapshotName name of the snapshot to restore
3724    * @throws IOException if a remote or network exception occurs
3725    * @throws RestoreSnapshotException if snapshot failed to be restored
3726    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3727    */
3728   @Override
3729   public void restoreSnapshot(final String snapshotName)
3730       throws IOException, RestoreSnapshotException {
3731     boolean takeFailSafeSnapshot =
3732       conf.getBoolean("hbase.snapshot.restore.take.failsafe.snapshot", false);
3733     restoreSnapshot(snapshotName, takeFailSafeSnapshot);
3734   }
3735 
3736   /**
3737    * Restore the specified snapshot on the original table. (The table must be disabled)
3738    * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
3739    * before executing the restore operation.
3740    * In case of restore failure, the failsafe snapshot will be restored.
3741    * If the restore completes without problem the failsafe snapshot is deleted.
3742    *
3743    * The failsafe snapshot name is configurable by using the property
3744    * "hbase.snapshot.restore.failsafe.name".
3745    *
3746    * @param snapshotName name of the snapshot to restore
3747    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
3748    * @throws IOException if a remote or network exception occurs
3749    * @throws RestoreSnapshotException if snapshot failed to be restored
3750    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3751    */
3752   @Override
3753   public void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot)
3754       throws IOException, RestoreSnapshotException {
3755     restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
3756   }
3757 
3758   /**
3759    * Restore the specified snapshot on the original table. (The table must be disabled)
3760    * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
3761    * before executing the restore operation.
3762    * In case of restore failure, the failsafe snapshot will be restored.
3763    * If the restore completes without problem the failsafe snapshot is deleted.
3764    *
3765    * The failsafe snapshot name is configurable by using the property
3766    * "hbase.snapshot.restore.failsafe.name".
3767    *
3768    * @param snapshotName name of the snapshot to restore
3769    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
3770    * @throws IOException if a remote or network exception occurs
3771    * @throws RestoreSnapshotException if snapshot failed to be restored
3772    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3773    */
3774   @Override
3775   public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
3776       throws IOException, RestoreSnapshotException {
3777     TableName tableName = null;
3778     for (SnapshotDescription snapshotInfo: listSnapshots()) {
3779       if (snapshotInfo.getName().equals(snapshotName)) {
3780         tableName = TableName.valueOf(snapshotInfo.getTable());
3781         break;
3782       }
3783     }
3784 
3785     if (tableName == null) {
3786       throw new RestoreSnapshotException(
3787         "Unable to find the table name for snapshot=" + snapshotName);
3788     }
3789 
3790     // The table does not exist, switch to clone.
3791     if (!tableExists(tableName)) {
3792       cloneSnapshot(snapshotName, tableName);
3793       return;
3794     }
3795 
3796     // Check if the table is disabled
3797     if (!isTableDisabled(tableName)) {
3798       throw new TableNotDisabledException(tableName);
3799     }
3800 
3801     // Take a snapshot of the current state
3802     String failSafeSnapshotSnapshotName = null;
3803     if (takeFailSafeSnapshot) {
3804       failSafeSnapshotSnapshotName = conf.get("hbase.snapshot.restore.failsafe.name",
3805         "hbase-failsafe-{snapshot.name}-{restore.timestamp}");
3806       failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotName
3807         .replace("{snapshot.name}", snapshotName)
3808         .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
3809         .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime()));
3810       LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
3811       snapshot(failSafeSnapshotSnapshotName, tableName);
3812     }
3813 
3814     try {
3815       // Restore snapshot
3816       internalRestoreSnapshot(snapshotName, tableName);
3817     } catch (IOException e) {
3818       // Something went wrong during the restore...
3819       // if the pre-restore snapshot is available try to rollback
3820       if (takeFailSafeSnapshot) {
3821         try {
3822           internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName);
3823           String msg = "Restore snapshot=" + snapshotName +
3824             " failed. Rollback to snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
3825           LOG.error(msg, e);
3826           throw new RestoreSnapshotException(msg, e);
3827         } catch (IOException ex) {
3828           String msg = "Failed to restore and rollback to snapshot=" + failSafeSnapshotSnapshotName;
3829           LOG.error(msg, ex);
3830           throw new RestoreSnapshotException(msg, e);
3831         }
3832       } else {
3833         throw new RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e);
3834       }
3835     }
3836 
3837     // If the restore succeeded, delete the pre-restore snapshot
3838     if (takeFailSafeSnapshot) {
3839       try {
3840         LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
3841         deleteSnapshot(failSafeSnapshotSnapshotName);
3842       } catch (IOException e) {
3843         LOG.error("Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName, e);
3844       }
3845     }
3846   }
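       // Editor's note (not part of the original source): a minimal usage sketch of the
       // failsafe restore path above, assuming an existing admin handle named "admin", a
       // snapshot named "mySnapshot", and a table named "myTable". Restore requires the
       // table to be disabled first.
       //
       //   TableName table = TableName.valueOf("myTable");
       //   admin.disableTable(table);
       //   admin.restoreSnapshot("mySnapshot", true);  // true => take a failsafe snapshot first
       //   admin.enableTable(table);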
3847 
3848   /**
3849    * Create a new table by cloning the snapshot content.
3850    *
3851    * @param snapshotName name of the snapshot to be cloned
3852    * @param tableName name of the table where the snapshot will be restored
3853    * @throws IOException if a remote or network exception occurs
3854    * @throws TableExistsException if table to be created already exists
3855    * @throws RestoreSnapshotException if snapshot failed to be cloned
3856    * @throws IllegalArgumentException if the specified table does not have a valid name
3857    */
3858   public void cloneSnapshot(final byte[] snapshotName, final byte[] tableName)
3859       throws IOException, TableExistsException, RestoreSnapshotException {
3860     cloneSnapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName));
3861   }
3862 
3863   /**
3864    * Create a new table by cloning the snapshot content.
3865    *
3866    * @param snapshotName name of the snapshot to be cloned
3867    * @param tableName name of the table where the snapshot will be restored
3868    * @throws IOException if a remote or network exception occurs
3869    * @throws TableExistsException if table to be created already exists
3870    * @throws RestoreSnapshotException if snapshot failed to be cloned
3871    * @throws IllegalArgumentException if the specified table does not have a valid name
3872    */
3873   @Override
3874   public void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
3875       throws IOException, TableExistsException, RestoreSnapshotException {
3876     cloneSnapshot(Bytes.toString(snapshotName), tableName);
3877   }
3878 
3879 
3880 
3881   /**
3882    * Create a new table by cloning the snapshot content.
3883    *
3884    * @param snapshotName name of the snapshot to be cloned
3885    * @param tableName name of the table where the snapshot will be restored
3886    * @throws IOException if a remote or network exception occurs
3887    * @throws TableExistsException if table to be created already exists
3888    * @throws RestoreSnapshotException if snapshot failed to be cloned
3889    * @throws IllegalArgumentException if the specified table does not have a valid name
3890    */
3891   public void cloneSnapshot(final String snapshotName, final String tableName)
3892       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
3893     cloneSnapshot(snapshotName, TableName.valueOf(tableName));
3894   }
3895 
3896   /**
3897    * Create a new table by cloning the snapshot content.
3898    *
3899    * @param snapshotName name of the snapshot to be cloned
3900    * @param tableName name of the table where the snapshot will be restored
3901    * @throws IOException if a remote or network exception occurs
3902    * @throws TableExistsException if table to be created already exists
3903    * @throws RestoreSnapshotException if snapshot failed to be cloned
3904    * @throws IllegalArgumentException if the specified table does not have a valid name
3905    */
3906   @Override
3907   public void cloneSnapshot(final String snapshotName, final TableName tableName)
3908       throws IOException, TableExistsException, RestoreSnapshotException {
3909     if (tableExists(tableName)) {
3910       throw new TableExistsException(tableName);
3911     }
3912     internalRestoreSnapshot(snapshotName, tableName);
3913     waitUntilTableIsEnabled(tableName);
3914   }
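       // Editor's note (not part of the original source): a hedged sketch of the clone path
       // above. Unlike restore, clone requires that the destination table does NOT already
       // exist, and the call blocks until the new table is enabled. The names "admin",
       // "mySnapshot" and "clonedTable" are illustrative.
       //
       //   admin.cloneSnapshot("mySnapshot", TableName.valueOf("clonedTable"));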
3915 
3916   /**
3917    * Execute a distributed procedure on a cluster synchronously with return data
3918    *
3919    * @param signature A distributed procedure is uniquely identified
3920    * by its signature (by default, the root ZK node name of the procedure).
3921    * @param instance The instance name of the procedure. For some procedures, this parameter is
3922    * optional.
3923    * @param props Property/Value pairs of properties passed to the procedure
3924    * @return data returned after procedure execution, or null if there is no return data
3925    * @throws IOException if a remote or network exception occurs
3926    */
3927   @Override
3928   public byte[] execProcedureWithRet(String signature, String instance,
3929       Map<String, String> props) throws IOException {
3930     ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3931     builder.setSignature(signature).setInstance(instance);
3932     for (Entry<String, String> entry : props.entrySet()) {
3933       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3934           .setValue(entry.getValue()).build();
3935       builder.addConfiguration(pair);
3936     }
3937 
3938     final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
3939         .setProcedure(builder.build()).build();
3940     // run the procedure on the master
3941     ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
3942         getConnection()) {
3943       @Override
3944       public ExecProcedureResponse call(int callTimeout) throws ServiceException {
3945         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3946         controller.setCallTimeout(callTimeout);
3947         return master.execProcedureWithRet(controller, request);
3948       }
3949     });
3950 
3951     return response.hasReturnData() ? response.getReturnData().toByteArray() : null;
3952   }
3953   /**
3954    * Execute a distributed procedure on a cluster.
3955    *
3956    * @param signature A distributed procedure is uniquely identified
3957    * by its signature (by default, the root ZK node name of the procedure).
3958    * @param instance The instance name of the procedure. For some procedures, this parameter is
3959    * optional.
3960    * @param props Property/Value pairs of properties passed to the procedure
3961    * @throws IOException if a remote or network exception occurs
3962    */
3963   @Override
3964   public void execProcedure(String signature, String instance,
3965       Map<String, String> props) throws IOException {
3966     ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3967     builder.setSignature(signature).setInstance(instance);
3968     for (Entry<String, String> entry : props.entrySet()) {
3969       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3970           .setValue(entry.getValue()).build();
3971       builder.addConfiguration(pair);
3972     }
3973 
3974     final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
3975         .setProcedure(builder.build()).build();
3976     // run the procedure on the master
3977     ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
3978         getConnection()) {
3979       @Override
3980       public ExecProcedureResponse call(int callTimeout) throws ServiceException {
3981         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3982         controller.setCallTimeout(callTimeout);
3983         return master.execProcedure(controller, request);
3984       }
3985     });
3986 
3987     long start = EnvironmentEdgeManager.currentTime();
3988     long max = response.getExpectedTimeout();
3989     long maxPauseTime = max / this.numRetries;
3990     int tries = 0;
3991     LOG.debug("Waiting a max of " + max + " ms for procedure '" +
3992         signature + " : " + instance + "' to complete. (max " + maxPauseTime + " ms per retry)");
3993     boolean done = false;
3994     while (tries == 0
3995         || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
3996       try {
3997         // sleep a backoff <= pauseTime amount
3998         long sleep = getPauseTime(tries++);
3999         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
4000         LOG.debug("(#" + tries + ") Sleeping: " + sleep +
4001           "ms while waiting for procedure completion.");
4002         Thread.sleep(sleep);
4003       } catch (InterruptedException e) {
4004         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
4005       }
4006       LOG.debug("Getting current status of procedure from master...");
4007       done = isProcedureFinished(signature, instance, props);
4008     }
4009     if (!done) {
4010       throw new IOException("Procedure '" + signature + " : " + instance
4011           + "' wasn't completed within the expected time of " + max + " ms");
4012     }
4013   }
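       // Editor's note (not part of the original source): a sketch of driving a distributed
       // procedure through execProcedure(). The "flush-table-proc" signature is assumed to be
       // the flush procedure registered on the master; the table name is passed as the
       // instance, and no extra properties are needed.
       //
       //   Map<String, String> props = new HashMap<String, String>();
       //   admin.execProcedure("flush-table-proc", "myTable", props);  // blocks until done or timeout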
4014 
4015   /**
4016    * Check the current state of the specified procedure.
4017    * <p>
4018    * There are three possible states:
4019    * <ol>
4020    * <li>running - returns <tt>false</tt></li>
4021    * <li>finished - returns <tt>true</tt></li>
4022    * <li>finished with error - throws the exception that caused the procedure to fail</li>
4023    * </ol>
4024    * <p>
4025    *
4026    * @param signature The signature that uniquely identifies a procedure
4027    * @param instance The instance name of the procedure
4028    * @param props Property/Value pairs of properties passed to the procedure
4029    * @return true if the specified procedure is finished successfully, false if it is still running
4030    * @throws IOException if the specified procedure finished with error
4031    */
4032   @Override
4033   public boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
4034       throws IOException {
4035     final ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
4036     builder.setSignature(signature).setInstance(instance);
4037     for (Entry<String, String> entry : props.entrySet()) {
4038       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
4039           .setValue(entry.getValue()).build();
4040       builder.addConfiguration(pair);
4041     }
4042     final ProcedureDescription desc = builder.build();
4043     return executeCallable(
4044         new MasterCallable<IsProcedureDoneResponse>(getConnection()) {
4045           @Override
4046           public IsProcedureDoneResponse call(int callTimeout) throws ServiceException {
4047             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4048             controller.setCallTimeout(callTimeout);
4049             return master.isProcedureDone(controller, IsProcedureDoneRequest
4050                 .newBuilder().setProcedure(desc).build());
4051           }
4052         }).getDone();
4053   }
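       // Editor's note (not part of the original source): callers that started a procedure
       // through a non-blocking path can poll isProcedureFinished(). A hedged sketch, reusing
       // the illustrative names from the example above; a finished-with-error state surfaces
       // as an IOException.
       //
       //   while (!admin.isProcedureFinished("flush-table-proc", "myTable", props)) {
       //     Thread.sleep(1000);
       //   }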
4054 
4055   /**
4056    * Execute Restore/Clone snapshot and wait for the server to complete (blocking).
4057    * To check if the cloned table exists, use {@link #isTableAvailable} -- it is not safe to
4058    * create an HTable instance for this table before it is available.
4059    * @param snapshotName snapshot to restore
4060    * @param tableName table name to restore the snapshot on
4061    * @throws IOException if a remote or network exception occurs
4062    * @throws RestoreSnapshotException if snapshot failed to be restored
4063    * @throws IllegalArgumentException if the restore request is formatted incorrectly
4064    */
4065   private void internalRestoreSnapshot(final String snapshotName,
4066       final TableName tableName)
4067       throws IOException, RestoreSnapshotException {
4068     SnapshotDescription snapshot = SnapshotDescription.newBuilder()
4069         .setName(snapshotName).setTable(tableName.getNameAsString()).build();
4070 
4071     // actually restore the snapshot
4072     internalRestoreSnapshotAsync(snapshot);
4073 
4074     final IsRestoreSnapshotDoneRequest request = IsRestoreSnapshotDoneRequest.newBuilder()
4075         .setSnapshot(snapshot).build();
4076     IsRestoreSnapshotDoneResponse done = IsRestoreSnapshotDoneResponse.newBuilder()
4077         .setDone(false).buildPartial();
4078     final long maxPauseTime = 5000;
4079     int tries = 0;
4080     while (!done.getDone()) {
4081       try {
4082         // sleep a backoff <= pauseTime amount
4083         long sleep = getPauseTime(tries++);
4084         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
4085         LOG.debug(tries + ") Sleeping: " + sleep + " ms while we wait for snapshot restore to complete.");
4086         Thread.sleep(sleep);
4087       } catch (InterruptedException e) {
4088         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
4089       }
4090       LOG.debug("Getting current status of snapshot restore from master...");
4091       done = executeCallable(new MasterCallable<IsRestoreSnapshotDoneResponse>(
4092           getConnection()) {
4093         @Override
4094         public IsRestoreSnapshotDoneResponse call(int callTimeout) throws ServiceException {
4095           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4096           controller.setCallTimeout(callTimeout);
4097           return master.isRestoreSnapshotDone(controller, request);
4098         }
4099       });
4100     }
4101     if (!done.getDone()) {
4102       throw new RestoreSnapshotException("Snapshot '" + snapshot.getName() + "' wasn't restored.");
4103     }
4104   }
4105 
4106   /**
4107    * Execute Restore/Clone snapshot without waiting for the server to complete (asynchronous).
4108    * <p>
4109    * Only a single snapshot should be restored at a time, or results may be undefined.
4110    * @param snapshot snapshot to restore
4111    * @return response from the server indicating the max time to wait for the snapshot
4112    * @throws IOException if a remote or network exception occurs
4113    * @throws RestoreSnapshotException if snapshot failed to be restored
4114    * @throws IllegalArgumentException if the restore request is formatted incorrectly
4115    */
4116   private RestoreSnapshotResponse internalRestoreSnapshotAsync(final SnapshotDescription snapshot)
4117       throws IOException, RestoreSnapshotException {
4118     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
4119 
4120     final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot)
4121         .build();
4122 
4123     // run the snapshot restore on the master
4124     return executeCallable(new MasterCallable<RestoreSnapshotResponse>(getConnection()) {
4125       @Override
4126       public RestoreSnapshotResponse call(int callTimeout) throws ServiceException {
4127         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4128         controller.setCallTimeout(callTimeout);
4129         return master.restoreSnapshot(controller, request);
4130       }
4131     });
4132   }
4133 
4134   /**
4135    * List completed snapshots.
4136    * @return a list of snapshot descriptors for completed snapshots
4137    * @throws IOException if a network error occurs
4138    */
4139   @Override
4140   public List<SnapshotDescription> listSnapshots() throws IOException {
4141     return executeCallable(new MasterCallable<List<SnapshotDescription>>(getConnection()) {
4142       @Override
4143       public List<SnapshotDescription> call(int callTimeout) throws ServiceException {
4144         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4145         controller.setCallTimeout(callTimeout);
4146         return master.getCompletedSnapshots(controller,
4147           GetCompletedSnapshotsRequest.newBuilder().build()).getSnapshotsList();
4148       }
4149     });
4150   }
4151 
4152   /**
4153    * List all the completed snapshots matching the given regular expression.
4154    *
4155    * @param regex The regular expression to match against
4156    * @return a list of SnapshotDescription
4157    * @throws IOException if a remote or network exception occurs
4158    */
4159   @Override
4160   public List<SnapshotDescription> listSnapshots(String regex) throws IOException {
4161     return listSnapshots(Pattern.compile(regex));
4162   }
4163 
4164   /**
4165    * List all the completed snapshots matching the given pattern.
4166    *
4167    * @param pattern The compiled regular expression to match against
4168    * @return a list of SnapshotDescription
4169    * @throws IOException if a remote or network exception occurs
4170    */
4171   @Override
4172   public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException {
4173     List<SnapshotDescription> matched = new LinkedList<SnapshotDescription>();
4174     List<SnapshotDescription> snapshots = listSnapshots();
4175     for (SnapshotDescription snapshot : snapshots) {
4176       if (pattern.matcher(snapshot.getName()).matches()) {
4177         matched.add(snapshot);
4178       }
4179     }
4180     return matched;
4181   }
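       // Editor's note (not part of the original source): a small sketch of the listing API
       // above, printing the name and source table of every completed snapshot whose name
       // starts with the illustrative prefix "backup-":
       //
       //   for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile("backup-.*"))) {
       //     System.out.println(sd.getName() + " (table=" + sd.getTable() + ")");
       //   }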
4182 
4183   /**
4184    * Delete an existing snapshot.
4185    * @param snapshotName name of the snapshot
4186    * @throws IOException if a remote or network exception occurs
4187    */
4188   @Override
4189   public void deleteSnapshot(final byte[] snapshotName) throws IOException {
4190     deleteSnapshot(Bytes.toString(snapshotName));
4191   }
4192 
4193   /**
4194    * Delete an existing snapshot.
4195    * @param snapshotName name of the snapshot
4196    * @throws IOException if a remote or network exception occurs
4197    */
4198   @Override
4199   public void deleteSnapshot(final String snapshotName) throws IOException {
4200     // sanity check: a snapshot name must be a legal fully qualified table name
4201     TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName));
4202     // do the delete
4203     executeCallable(new MasterCallable<Void>(getConnection()) {
4204       @Override
4205       public Void call(int callTimeout) throws ServiceException {
4206         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4207         controller.setCallTimeout(callTimeout);
4208         master.deleteSnapshot(controller,
4209           DeleteSnapshotRequest.newBuilder().
4210             setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build()
4211         );
4212         return null;
4213       }
4214     });
4215   }
4216 
4217   /**
4218    * Delete existing snapshots whose names match the pattern passed.
4219    * @param regex The regular expression to match against
4220    * @throws IOException if a remote or network exception occurs
4221    */
4222   @Override
4223   public void deleteSnapshots(final String regex) throws IOException {
4224     deleteSnapshots(Pattern.compile(regex));
4225   }
4226 
4227   /**
4228    * Delete existing snapshots whose names match the pattern passed.
4229    * @param pattern pattern for names of the snapshot to match
4230    * @throws IOException if a remote or network exception occurs
4231    */
4232   @Override
4233   public void deleteSnapshots(final Pattern pattern) throws IOException {
4234     List<SnapshotDescription> snapshots = listSnapshots(pattern);
4235     for (final SnapshotDescription snapshot : snapshots) {
4236       try {
4237         internalDeleteSnapshot(snapshot);
4238       } catch (IOException ex) {
4239         LOG.info(
4240           "Failed to delete snapshot " + snapshot.getName() + " for table " + snapshot.getTable(),
4241           ex);
4242       }
4243     }
4244   }
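       // Editor's note (not part of the original source): deleteSnapshots(Pattern) above is
       // best-effort -- individual delete failures are only logged. A caller that needs to
       // verify the outcome can re-list with the same pattern afterwards:
       //
       //   admin.deleteSnapshots(Pattern.compile("backup-.*"));
       //   boolean allGone = admin.listSnapshots(Pattern.compile("backup-.*")).isEmpty();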
4245 
4246   private void internalDeleteSnapshot(final SnapshotDescription snapshot) throws IOException {
4247     executeCallable(new MasterCallable<Void>(getConnection()) {
4248       @Override
4249       public Void call(int callTimeout) throws ServiceException {
4250         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4251         controller.setCallTimeout(callTimeout);
4252         this.master.deleteSnapshot(controller, DeleteSnapshotRequest.newBuilder()
4253           .setSnapshot(snapshot).build());
4254         return null;
4255       }
4256     });
4257   }
4258 
4259   /**
4260    * Apply the new quota settings.
4261    * @param quota the quota settings
4262    * @throws IOException if a remote or network exception occurs
4263    */
4264   @Override
4265   public void setQuota(final QuotaSettings quota) throws IOException {
4266     executeCallable(new MasterCallable<Void>(getConnection()) {
4267       @Override
4268       public Void call(int callTimeout) throws ServiceException {
4269         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4270         controller.setCallTimeout(callTimeout);
4271         this.master.setQuota(controller, QuotaSettings.buildSetQuotaRequestProto(quota));
4272         return null;
4273       }
4274     });
4275   }
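       // Editor's note (not part of the original source): a hedged sketch of building quota
       // settings for setQuota(), assuming the QuotaSettingsFactory and ThrottleType API from
       // the quotas package is available in this client version. It throttles the illustrative
       // table to 1000 requests per second:
       //
       //   admin.setQuota(QuotaSettingsFactory.throttleTable(
       //       TableName.valueOf("myTable"), ThrottleType.REQUEST_NUMBER, 1000, TimeUnit.SECONDS));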
4276 
4277   /**
4278    * Return a Quota Scanner to list the quotas based on the filter.
4279    * @param filter the quota settings filter
4280    * @return the quota scanner
4281    * @throws IOException if a remote or network exception occurs
4282    */
4283   @Override
4284   public QuotaRetriever getQuotaRetriever(final QuotaFilter filter) throws IOException {
4285     return QuotaRetriever.open(conf, filter);
4286   }
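       // Editor's note (not part of the original source): the returned QuotaRetriever is
       // iterable and must be closed when done. A minimal sketch; the setTableFilter() call on
       // QuotaFilter is an assumption about this client version's filter API.
       //
       //   QuotaRetriever scanner = admin.getQuotaRetriever(new QuotaFilter().setTableFilter("myTable"));
       //   try {
       //     for (QuotaSettings settings : scanner) {
       //       System.out.println(settings);
       //     }
       //   } finally {
       //     scanner.close();
       //   }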
4287 
4288   private <V> V executeCallable(MasterCallable<V> callable) throws IOException {
4289     return executeCallable(callable, rpcCallerFactory, operationTimeout, rpcTimeout);
4290   }
4291 
4292   private static <V> V executeCallable(MasterCallable<V> callable,
4293              RpcRetryingCallerFactory rpcCallerFactory, int operationTimeout, int rpcTimeout)
4294       throws IOException {
4295     RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller(rpcTimeout);
4296     try {
4297       return caller.callWithRetries(callable, operationTimeout);
4298     } finally {
4299       callable.close();
4300     }
4301   }
4302 
4303   /**
4304    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
4305    * connected to the active master.
4306    *
4307    * <p>
4308    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
4309    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
4310    * </p>
4311    *
4312    * <div style="background-color: #cccccc; padding: 2px">
4313    * <blockquote><pre>
4314    * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
4315    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
4316    * MyCallRequest request = MyCallRequest.newBuilder()
4317    *     ...
4318    *     .build();
4319    * MyCallResponse response = service.myCall(null, request);
4320    * </pre></blockquote></div>
4321    *
4322    * @return A MasterCoprocessorRpcChannel instance
4323    */
4324   @Override
4325   public CoprocessorRpcChannel coprocessorService() {
4326     return new MasterCoprocessorRpcChannel(connection);
4327   }
4328 
4329   /**
4330    * Simple {@link Abortable}, throwing RuntimeException on abort.
4331    */
4332   private static class ThrowableAbortable implements Abortable {
4333 
4334     @Override
4335     public void abort(String why, Throwable e) {
4336       throw new RuntimeException(why, e);
4337     }
4338 
4339     @Override
4340     public boolean isAborted() {
4341       return true;
4342     }
4343   }
4344 
4345   /**
4346    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
4347    * connected to the passed region server.
4348    *
4349    * <p>
4350    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
4351    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
4352    * </p>
4353    *
4354    * <div style="background-color: #cccccc; padding: 2px">
4355    * <blockquote><pre>
4356    * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
4357    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
4358    * MyCallRequest request = MyCallRequest.newBuilder()
4359    *     ...
4360    *     .build();
4361    * MyCallResponse response = service.myCall(null, request);
4362    * </pre></blockquote></div>
4363    *
4364    * @param sn the server name to which the endpoint call is made
4365    * @return A RegionServerCoprocessorRpcChannel instance
4366    */
4367   @Override
4368   public CoprocessorRpcChannel coprocessorService(ServerName sn) {
4369     return new RegionServerCoprocessorRpcChannel(connection, sn);
4370   }
4371 
4372   @Override
4373   public void updateConfiguration(ServerName server) throws IOException {
4374     try {
4375       this.connection.getAdmin(server).updateConfiguration(null,
4376         UpdateConfigurationRequest.getDefaultInstance());
4377     } catch (ServiceException e) {
4378       throw ProtobufUtil.getRemoteException(e);
4379     }
4380   }
4381 
4382   @Override
4383   public void updateConfiguration() throws IOException {
4384     for (ServerName server : this.getClusterStatus().getServers()) {
4385       updateConfiguration(server);
4386     }
4387   }
4388 
4389   @Override
4390   public int getMasterInfoPort() throws IOException {
4391     // TODO: Fix!  Reaching into internal implementation!!!!
4392     ConnectionManager.HConnectionImplementation connection =
4393         (ConnectionManager.HConnectionImplementation)this.connection;
4394     ZooKeeperKeepAliveConnection zkw = connection.getKeepAliveZooKeeperWatcher();
4395     try {
4396       return MasterAddressTracker.getMasterInfoPort(zkw);
4397     } catch (KeeperException e) {
4398       throw new IOException("Failed to get master info port from MasterAddressTracker", e);
4399     }
4400   }
4401 
4402   @Override
4403   public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException {
4404     return executeCallable(new MasterCallable<Long>(getConnection()) {
4405       @Override
4406       public Long call(int callTimeout) throws ServiceException {
4407         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4408         controller.setCallTimeout(callTimeout);
4409         MajorCompactionTimestampRequest req =
4410             MajorCompactionTimestampRequest.newBuilder()
4411                 .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
4412         return master.getLastMajorCompactionTimestamp(controller, req).getCompactionTimestamp();
4413       }
4414     });
4415   }
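       // Editor's note (not part of the original source): the timestamp returned above is
       // expected to be 0 when the master has no record of a completed major compaction for
       // the table, so callers typically guard on that:
       //
       //   long ts = admin.getLastMajorCompactionTimestamp(TableName.valueOf("myTable"));
       //   if (ts > 0) {
       //     System.out.println("Last major compaction finished at " + ts);
       //   }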
4416 
4417   @Override
4418   public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException {
4419     return executeCallable(new MasterCallable<Long>(getConnection()) {
4420       @Override
4421       public Long call(int callTimeout) throws ServiceException {
4422         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4423         controller.setCallTimeout(callTimeout);
4424         MajorCompactionTimestampForRegionRequest req =
4425             MajorCompactionTimestampForRegionRequest
4426                 .newBuilder()
4427                 .setRegion(
4428                   RequestConverter
4429                       .buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)).build();
4430         return master.getLastMajorCompactionTimestampForRegion(controller, req)
4431             .getCompactionTimestamp();
4432       }
4433     });
4434   }
4435 
4436   /**
4437    * Future that waits on a procedure result.
4438    * Returned by the async version of the Admin calls,
4439    * and used internally by the sync calls to wait on the result of the procedure.
4440    */
4441   @InterfaceAudience.Private
4442   @InterfaceStability.Evolving
4443   protected static class ProcedureFuture<V> implements Future<V> {
4444     private ExecutionException exception = null;
4445     private boolean procResultFound = false;
4446     private boolean done = false;
4447     private boolean cancelled = false;
4448     private boolean waitForOpResult = false;
4449     private V result = null;
4450 
4451     private final HBaseAdmin admin;
4452     private final Long procId;
4453 
4454     public ProcedureFuture(final HBaseAdmin admin, final Long procId) {
4455       this.admin = admin;
4456       this.procId = procId;
4457     }
4458 
4459     public ProcedureFuture(final HBaseAdmin admin, final Long procId,
4460         final boolean waitForOpResult) {
4461       this.admin = admin;
4462       this.procId = procId;
4463       this.waitForOpResult = waitForOpResult;
4464     }
4465 
4466     @Override
4467     public boolean cancel(boolean mayInterruptIfRunning) {
4468       AbortProcedureRequest abortProcRequest = AbortProcedureRequest.newBuilder()
4469           .setProcId(procId).setMayInterruptIfRunning(mayInterruptIfRunning).build();
4470       try {
4471         cancelled = abortProcedureResult(abortProcRequest).getIsProcedureAborted();
4472         if (cancelled) {
4473           done = true;
4474         }
4475       } catch (IOException e) {
4476         // Cancel threw an exception for some reason. At this point we are not sure whether
4477         // the cancel succeeded or failed. We assume it failed, but print out a warning
4478         // for debugging purposes.
4479         LOG.warn(
4480           "Cancelling the procedure with procId=" + procId + " threw exception " + e.getMessage(),
4481           e);
4482         cancelled = false;
4483       }
4484       return cancelled;
4485     }
4486 
4487     @Override
4488     public boolean isCancelled() {
4489       return cancelled;
4490     }
4491 
4492     protected AbortProcedureResponse abortProcedureResult(
4493         final AbortProcedureRequest request) throws IOException {
4494       return admin.executeCallable(new MasterCallable<AbortProcedureResponse>(
4495           admin.getConnection()) {
4496         @Override
4497         public AbortProcedureResponse call(int callTimeout) throws ServiceException {
4498           PayloadCarryingRpcController controller = admin.getRpcControllerFactory().newController();
4499           controller.setCallTimeout(callTimeout);
4500           return master.abortProcedure(controller, request);
4501         }
4502       });
4503     }
4504 
4505     @Override
4506     public V get() throws InterruptedException, ExecutionException {
4507       // TODO: should we ever spin forever?
4508       throw new UnsupportedOperationException();
4509     }
4510 
4511     @Override
4512     public V get(long timeout, TimeUnit unit)
4513         throws InterruptedException, ExecutionException, TimeoutException {
4514       if (!done) {
4515         long deadlineTs = EnvironmentEdgeManager.currentTime() + unit.toMillis(timeout);
4516         try {
4517           try {
4518             // if the master supports procedures, try to wait for the result
4519             if (procId != null) {
4520               result = waitProcedureResult(procId, deadlineTs);
4521             }
4522             // if we don't have a proc result, try the compatibility wait
4523             if (!procResultFound || waitForOpResult) {
4524               result = waitOperationResult(deadlineTs);
4525             }
4526             result = postOperationResult(result, deadlineTs);
4527             done = true;
4528           } catch (IOException e) {
4529             result = postOpeartionFailure(e, deadlineTs);
4530             done = true;
4531           }
4532         } catch (IOException e) {
4533           exception = new ExecutionException(e);
4534           done = true;
4535         }
4536       }
4537       if (exception != null) {
4538         throw exception;
4539       }
4540       return result;
4541     }
4542 
4543     @Override
4544     public boolean isDone() {
4545       return done;
4546     }
4547 
4548     protected HBaseAdmin getAdmin() {
4549       return admin;
4550     }
4551 
4552     private V waitProcedureResult(long procId, long deadlineTs)
4553         throws IOException, TimeoutException, InterruptedException {
4554       GetProcedureResultRequest request = GetProcedureResultRequest.newBuilder()
4555           .setProcId(procId)
4556           .build();
4557 
4558       int tries = 0;
4559       IOException serviceEx = null;
4560       while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
4561         GetProcedureResultResponse response = null;
4562         try {
4563           // Try to fetch the result
4564           response = getProcedureResult(request);
4565         } catch (IOException e) {
4566           serviceEx = unwrapException(e);
4567 
4568           // the master may be down
4569           LOG.warn("Failed to get the procedure result, procId=" + procId, serviceEx);
4570 
4571           // Not much to do if we have a DoNotRetryIOException
4572           if (serviceEx instanceof DoNotRetryIOException ||
4573               serviceEx instanceof NeedUnmanagedConnectionException) {
4574             // TODO: looks like there is no way to unwrap this exception and get the proper
4575             // UnsupportedOperationException aside from looking at the message.
4576             // Anyway, if we fail here we just fail over to the compatibility side,
4577             // and that is always a valid solution.
4578             LOG.warn("Proc-v2 is unsupported on this master: " + serviceEx.getMessage(), serviceEx);
4579             procResultFound = false;
4580             waitForOpResult = false;
4581             return null;
4582           }
4583         }
4584 
4585         // If the procedure is no longer running, we should have a result
4586         if (response != null && response.getState() != GetProcedureResultResponse.State.RUNNING) {
4587           procResultFound = response.getState() != GetProcedureResultResponse.State.NOT_FOUND;
4588           return convertResult(response);
4589         }
4590 
4591         try {
4592           Thread.sleep(getAdmin().getPauseTime(tries++));
4593         } catch (InterruptedException e) {
4594           throw new InterruptedException(
4595             "Interrupted while waiting for the result of proc " + procId);
4596         }
4597       }
4598       if (serviceEx != null) {
4599         throw serviceEx;
4600       } else {
4601         throw new TimeoutException("The procedure " + procId + " is still running");
4602       }
4603     }
4604 
4605     private static IOException unwrapException(IOException e) {
4606       if (e instanceof RemoteException) {
4607         return ((RemoteException)e).unwrapRemoteException();
4608       }
4609       return e;
4610     }
4611 
4612     protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request)
4613         throws IOException {
4614       return admin.executeCallable(new MasterCallable<GetProcedureResultResponse>(
4615           admin.getConnection()) {
4616         @Override
4617         public GetProcedureResultResponse call(int callTimeout) throws ServiceException {
4618           return master.getProcedureResult(null, request);
4619         }
4620       });
4621     }
4622 
4623     /**
4624      * Convert the procedure result response to a specified type.
4625      * @param response the procedure result object to parse
4626      * @return the result data of the procedure.
4627      */
4628     protected V convertResult(final GetProcedureResultResponse response) throws IOException {
4629       if (response.hasException()) {
4630         throw ForeignExceptionUtil.toIOException(response.getException());
4631       }
4632       return null;
4633     }
4634 
4635     /**
4636      * Fallback implementation in case the procedure is not supported by the server.
4637      * It should try to wait until the operation is completed.
4638      * @param deadlineTs the timestamp after which this method should throw a TimeoutException
4639      * @return the result data of the operation
4640      */
4641     protected V waitOperationResult(final long deadlineTs)
4642         throws IOException, TimeoutException {
4643       return null;
4644     }
4645 
4646     /**
4647      * Called after the operation is completed and the result fetched.
4648      * This allows extra steps to be performed after the procedure completes,
4649      * and transformations to be applied to the result that will be returned by get().
4650      * @param result the result of the procedure
4651      * @param deadlineTs the timestamp after which this method should throw a TimeoutException
4652      * @return the result of the procedure, which may be the same as the passed one
4653      */
4654     protected V postOperationResult(final V result, final long deadlineTs)
4655         throws IOException, TimeoutException {
4656       return result;
4657     }
4658 
4659     /**
4660      * Called after the operation is terminated with a failure.
4661      * This allows extra steps to be performed after the procedure terminates,
4662      * and transformations to be applied to the result that will be returned by get().
4663      * The default implementation rethrows the exception.
4664      * @param exception the exception received while fetching the result
4665      * @param deadlineTs the timestamp after which this method should throw a TimeoutException
4666      * @return the result of the procedure, which may be the same as the passed one
4667      */
4668     protected V postOpeartionFailure(final IOException exception, final long deadlineTs)
4669         throws IOException, TimeoutException {
4670       throw exception;
4671     }
4672 
4673     protected interface WaitForStateCallable {
4674       boolean checkState(int tries) throws IOException;
4675       void throwInterruptedException() throws InterruptedIOException;
4676       void throwTimeoutException(long elapsed) throws TimeoutException;
4677     }
4678 
4679     protected void waitForState(final long deadlineTs, final WaitForStateCallable callable)
4680         throws IOException, TimeoutException {
4681       int tries = 0;
4682       IOException serverEx = null;
4683       long startTime = EnvironmentEdgeManager.currentTime();
4684       while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
4685         serverEx = null;
4686         try {
4687           if (callable.checkState(tries)) {
4688             return;
4689           }
4690         } catch (IOException e) {
4691           serverEx = e;
4692         }
4693         try {
4694           Thread.sleep(getAdmin().getPauseTime(tries++));
4695         } catch (InterruptedException e) {
4696           callable.throwInterruptedException();
4697         }
4698       }
4699       if (serverEx != null) {
4700         throw unwrapException(serverEx);
4701       } else {
4702         callable.throwTimeoutException(EnvironmentEdgeManager.currentTime() - startTime);
4703       }
4704     }
4705   }
4706 
4707   @Override
4708   public List<SecurityCapability> getSecurityCapabilities() throws IOException {
4709     try {
4710       return executeCallable(new MasterCallable<List<SecurityCapability>>(getConnection()) {
4711         @Override
4712         public List<SecurityCapability> call(int callTimeout) throws ServiceException {
4713           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4714           controller.setCallTimeout(callTimeout);
4715           SecurityCapabilitiesRequest req = SecurityCapabilitiesRequest.newBuilder().build();
4716           return ProtobufUtil.toSecurityCapabilityList(
4717             master.getSecurityCapabilities(controller, req).getCapabilitiesList());
4718         }
4719       });
4720     } catch (IOException e) {
4721       if (e instanceof RemoteException) {
4722         e = ((RemoteException)e).unwrapRemoteException();
4723       }
4724       throw e;
4725     }
4726   }
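       // Editor's note (not part of the original source): a short sketch of consuming the
       // capability list above, checking whether the cluster advertises cell-level
       // authorization:
       //
       //   List<SecurityCapability> caps = admin.getSecurityCapabilities();
       //   boolean cellAcls = caps.contains(SecurityCapability.CELL_AUTHORIZATION);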
4727 
4728   private RpcControllerFactory getRpcControllerFactory() {
4729     return rpcControllerFactory;
4730   }
4731 }