View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.client;
20  
21  import java.io.Closeable;
22  import java.io.IOException;
23  import java.io.InterruptedIOException;
24  import java.net.SocketTimeoutException;
25  import java.util.ArrayList;
26  import java.util.Arrays;
27  import java.util.HashMap;
28  import java.util.LinkedList;
29  import java.util.List;
30  import java.util.Map;
31  import java.util.Map.Entry;
32  import java.util.concurrent.atomic.AtomicInteger;
33  import java.util.concurrent.atomic.AtomicReference;
34  import java.util.regex.Pattern;
35  
36  import org.apache.commons.logging.Log;
37  import org.apache.commons.logging.LogFactory;
38  import org.apache.hadoop.conf.Configuration;
39  import org.apache.hadoop.hbase.Abortable;
40  import org.apache.hadoop.hbase.ClusterStatus;
41  import org.apache.hadoop.hbase.DoNotRetryIOException;
42  import org.apache.hadoop.hbase.HBaseConfiguration;
43  import org.apache.hadoop.hbase.HColumnDescriptor;
44  import org.apache.hadoop.hbase.HConstants;
45  import org.apache.hadoop.hbase.HRegionInfo;
46  import org.apache.hadoop.hbase.HRegionLocation;
47  import org.apache.hadoop.hbase.HTableDescriptor;
48  import org.apache.hadoop.hbase.MasterNotRunningException;
49  import org.apache.hadoop.hbase.MetaTableAccessor;
50  import org.apache.hadoop.hbase.NamespaceDescriptor;
51  import org.apache.hadoop.hbase.NotServingRegionException;
52  import org.apache.hadoop.hbase.RegionException;
53  import org.apache.hadoop.hbase.RegionLocations;
54  import org.apache.hadoop.hbase.ServerName;
55  import org.apache.hadoop.hbase.TableExistsException;
56  import org.apache.hadoop.hbase.TableName;
57  import org.apache.hadoop.hbase.TableNotDisabledException;
58  import org.apache.hadoop.hbase.TableNotEnabledException;
59  import org.apache.hadoop.hbase.TableNotFoundException;
60  import org.apache.hadoop.hbase.UnknownRegionException;
61  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
62  import org.apache.hadoop.hbase.classification.InterfaceAudience;
63  import org.apache.hadoop.hbase.classification.InterfaceStability;
64  import org.apache.hadoop.hbase.exceptions.DeserializationException;
65  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
66  import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel;
67  import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel;
68  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
69  import org.apache.hadoop.hbase.protobuf.RequestConverter;
70  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
71  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
72  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
73  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
74  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
75  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
76  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
77  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
78  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
79  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
80  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
81  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
82  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
83  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
84  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
85  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
86  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
87  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
88  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
89  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
90  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
91  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
92  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
93  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
94  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
95  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
96  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
97  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
98  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
99  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
100 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
101 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
102 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
103 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
104 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
105 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
106 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
107 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
108 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
109 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
110 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
111 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
112 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
113 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
114 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
115 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
116 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
117 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
118 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
119 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
120 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
121 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
122 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
123 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
124 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
125 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
126 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
127 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
128 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
129 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
130 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
131 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
132 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
133 import org.apache.hadoop.hbase.quotas.QuotaFilter;
134 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
135 import org.apache.hadoop.hbase.quotas.QuotaSettings;
136 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
137 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
138 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
139 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
140 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
141 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
142 import org.apache.hadoop.hbase.util.Addressing;
143 import org.apache.hadoop.hbase.util.Bytes;
144 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
145 import org.apache.hadoop.hbase.util.Pair;
146 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
147 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
148 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
149 import org.apache.hadoop.ipc.RemoteException;
150 import org.apache.hadoop.util.StringUtils;
151 import org.apache.zookeeper.KeeperException;
152 
153 import com.google.common.annotations.VisibleForTesting;
154 import com.google.protobuf.ByteString;
155 import com.google.protobuf.ServiceException;
156 
157 /**
158  * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
159  * this is an HBase-internal class as defined in
160  * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
161  * There are no guarantees for backwards source / binary compatibility and methods or class can
162  * change or go away without deprecation.
163  * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing
164  * an HBaseAdmin directly.
165  *
166  * <p>Connection should be an <i>unmanaged</i> connection obtained via
167  * {@link ConnectionFactory#createConnection(Configuration)}
168  *
169  * @see ConnectionFactory
170  * @see Connection
171  * @see Admin
172  */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HBaseAdmin implements Admin {
  private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);

  // Prefix for the identifier this admin registers under in ZooKeeper.
  private static final String ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";

  // Connection all operations go through; may be owned by this object
  // (see cleanupConnectionOnClose) or externally managed.
  private ClusterConnection connection;

  private volatile Configuration conf;
  // Base sleep (ms) between retries; scaled per attempt in getPauseTime().
  private final long pause;
  private final int numRetries;
  // Some operations can take a long time such as disable of big table.
  // numRetries is for 'normal' stuff... Multiply by this factor when
  // want to wait a long time.
  private final int retryLongerMultiplier;
  private boolean aborted;
  private boolean cleanupConnectionOnClose = false; // close the connection in close()
  private boolean closed = false;
  private int operationTimeout;

  private RpcRetryingCallerFactory rpcCallerFactory;
195 
196   /**
197    * Constructor.
198    * See {@link #HBaseAdmin(Connection connection)}
199    *
200    * @param c Configuration object. Copied internally.
201    * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
202    * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
203    */
204   @Deprecated
205   public HBaseAdmin(Configuration c)
206   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
207     // Will not leak connections, as the new implementation of the constructor
208     // does not throw exceptions anymore.
209     this(ConnectionManager.getConnectionInternal(new Configuration(c)));
210     this.cleanupConnectionOnClose = true;
211   }
212 
  /** @return the per-operation timeout (ms) read from the configuration at construction. */
  @Override
  public int getOperationTimeout() {
    return operationTimeout;
  }
217 
218 
219   /**
220    * Constructor for externally managed Connections.
221    * The connection to master will be created when required by admin functions.
222    *
223    * @param connection The Connection instance to use
224    * @throws MasterNotRunningException, ZooKeeperConnectionException are not
225    *  thrown anymore but kept into the interface for backward api compatibility
226    * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
227    * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
228    */
229   @Deprecated
230   public HBaseAdmin(Connection connection)
231       throws MasterNotRunningException, ZooKeeperConnectionException {
232     this((ClusterConnection)connection);
233   }
234 
235   HBaseAdmin(ClusterConnection connection) {
236     this.conf = connection.getConfiguration();
237     this.connection = connection;
238 
239     this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
240         HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
241     this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
242         HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
243     this.retryLongerMultiplier = this.conf.getInt(
244         "hbase.client.retries.longer.multiplier", 10);
245     this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
246         HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
247 
248     this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf);
249   }
250 
  /**
   * Marks this admin as aborted and rethrows the cause. Note this method
   * never returns normally — it always throws.
   */
  @Override
  public void abort(String why, Throwable e) {
    // Currently does nothing but throw the passed message and exception
    this.aborted = true;
    throw new RuntimeException(why, e);
  }

  /** @return true once {@link #abort(String, Throwable)} has been called. */
  @Override
  public boolean isAborted(){
    return this.aborted;
  }
262 
  /** @return HConnection used by this object. */
  @Override
  public HConnection getConnection() {
    return connection;
  }

  /** @return - true if the master server is running. Throws an exception
   *  otherwise.
   * @throws ZooKeeperConnectionException
   * @throws MasterNotRunningException
   * @deprecated this has been deprecated without a replacement
   */
  @Deprecated
  public boolean isMasterRunning()
  throws MasterNotRunningException, ZooKeeperConnectionException {
    // Delegates entirely to the underlying connection.
    return connection.isMasterRunning();
  }
280 
281   /**
282    * @param tableName Table to check.
283    * @return True if table exists already.
284    * @throws IOException
285    */
286   @Override
287   public boolean tableExists(final TableName tableName) throws IOException {
288     return executeCallable(new ConnectionCallable<Boolean>(getConnection()) {
289       @Override
290       public Boolean call(int callTimeout) throws ServiceException, IOException {
291         return MetaTableAccessor.tableExists(connection, tableName);
292       }
293     });
294   }
295 
  /**
   * @param tableName table name as a byte array
   * @return True if table exists already.
   * @throws IOException if a remote or network exception occurs
   */
  public boolean tableExists(final byte[] tableName)
  throws IOException {
    return tableExists(TableName.valueOf(tableName));
  }

  /**
   * @param tableName table name as a String
   * @return True if table exists already.
   * @throws IOException if a remote or network exception occurs
   */
  public boolean tableExists(final String tableName)
  throws IOException {
    return tableExists(TableName.valueOf(tableName));
  }
305 
  /** Lists all userspace tables; system tables are excluded. */
  @Override
  public HTableDescriptor[] listTables() throws IOException {
    return listTables((Pattern)null, false);
  }

  /** Lists userspace tables whose names match {@code pattern}. */
  @Override
  public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
    return listTables(pattern, false);
  }

  /** Lists userspace tables whose names match the regular expression {@code regex}. */
  @Override
  public HTableDescriptor[] listTables(String regex) throws IOException {
    return listTables(Pattern.compile(regex), false);
  }
320 
321   @Override
322   public HTableDescriptor[] listTables(final Pattern pattern, final boolean includeSysTables)
323       throws IOException {
324     return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
325       @Override
326       public HTableDescriptor[] call(int callTimeout) throws ServiceException {
327         GetTableDescriptorsRequest req =
328             RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
329         return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req));
330       }
331     });
332   }
333 
  /** Regex-string convenience overload of {@link #listTables(Pattern, boolean)}. */
  @Override
  public HTableDescriptor[] listTables(String regex, boolean includeSysTables)
      throws IOException {
    return listTables(Pattern.compile(regex), includeSysTables);
  }
339 
340   /**
341    * List all of the names of userspace tables.
342    * @return String[] table names
343    * @throws IOException if a remote or network exception occurs
344    * @deprecated Use {@link Admin#listTableNames()} instead
345    */
346   @Deprecated
347   public String[] getTableNames() throws IOException {
348     TableName[] tableNames = listTableNames();
349     String[] result = new String[tableNames.length];
350     for (int i = 0; i < tableNames.length; i++) {
351       result[i] = tableNames[i].getNameAsString();
352     }
353     return result;
354   }
355 
356   /**
357    * List all of the names of userspace tables matching the given regular expression.
358    * @param pattern The regular expression to match against
359    * @return String[] table names
360    * @throws IOException if a remote or network exception occurs
361    * @deprecated Use {@link Admin#listTableNames(Pattern)} instead.
362    */
363   @Deprecated
364   public String[] getTableNames(Pattern pattern) throws IOException {
365     TableName[] tableNames = listTableNames(pattern);
366     String[] result = new String[tableNames.length];
367     for (int i = 0; i < tableNames.length; i++) {
368       result[i] = tableNames[i].getNameAsString();
369     }
370     return result;
371   }
372 
373   /**
374    * List all of the names of userspace tables matching the given regular expression.
375    * @param regex The regular expression to match against
376    * @return String[] table names
377    * @throws IOException if a remote or network exception occurs
378    * @deprecated Use {@link Admin#listTableNames(Pattern)} instead.
379    */
380   @Deprecated
381   public String[] getTableNames(String regex) throws IOException {
382     return getTableNames(Pattern.compile(regex));
383   }
384 
  /** Lists the names of all userspace tables; system tables are excluded. */
  @Override
  public TableName[] listTableNames() throws IOException {
    return listTableNames((Pattern)null, false);
  }

  /** Lists the names of userspace tables matching {@code pattern}. */
  @Override
  public TableName[] listTableNames(Pattern pattern) throws IOException {
    return listTableNames(pattern, false);
  }

  /** Lists the names of userspace tables matching the regular expression {@code regex}. */
  @Override
  public TableName[] listTableNames(String regex) throws IOException {
    return listTableNames(Pattern.compile(regex), false);
  }
399 
400   @Override
401   public TableName[] listTableNames(final Pattern pattern, final boolean includeSysTables)
402       throws IOException {
403     return executeCallable(new MasterCallable<TableName[]>(getConnection()) {
404       @Override
405       public TableName[] call(int callTimeout) throws ServiceException {
406         GetTableNamesRequest req =
407             RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables);
408         return ProtobufUtil.getTableNameArray(master.getTableNames(null, req)
409             .getTableNamesList());
410       }
411     });
412   }
413 
  /** Regex-string convenience overload of {@link #listTableNames(Pattern, boolean)}. */
  @Override
  public TableName[] listTableNames(final String regex, final boolean includeSysTables)
      throws IOException {
    return listTableNames(Pattern.compile(regex), includeSysTables);
  }
419 
420   /**
421    * Method for getting the tableDescriptor
422    * @param tableName as a byte []
423    * @return the tableDescriptor
424    * @throws TableNotFoundException
425    * @throws IOException if a remote or network exception occurs
426    */
427   @Override
428   public HTableDescriptor getTableDescriptor(final TableName tableName)
429   throws TableNotFoundException, IOException {
430     if (tableName == null) return null;
431     HTableDescriptor htd = executeCallable(new MasterCallable<HTableDescriptor>(getConnection()) {
432       @Override
433       public HTableDescriptor call(int callTimeout) throws ServiceException {
434         GetTableDescriptorsResponse htds;
435         GetTableDescriptorsRequest req =
436             RequestConverter.buildGetTableDescriptorsRequest(tableName);
437         htds = master.getTableDescriptors(null, req);
438 
439         if (!htds.getTableSchemaList().isEmpty()) {
440           return HTableDescriptor.convert(htds.getTableSchemaList().get(0));
441         }
442         return null;
443       }
444     });
445     if (htd != null) {
446       return htd;
447     }
448     throw new TableNotFoundException(tableName.getNameAsString());
449   }
450 
  /**
   * Byte-array convenience overload of {@link #getTableDescriptor(TableName)}.
   * @param tableName table name as a byte array
   * @return the tableDescriptor
   * @throws TableNotFoundException if no such table exists
   * @throws IOException if a remote or network exception occurs
   */
  public HTableDescriptor getTableDescriptor(final byte[] tableName)
  throws TableNotFoundException, IOException {
    return getTableDescriptor(TableName.valueOf(tableName));
  }
455 
456   private long getPauseTime(int tries) {
457     int triesCount = tries;
458     if (triesCount >= HConstants.RETRY_BACKOFF.length) {
459       triesCount = HConstants.RETRY_BACKOFF.length - 1;
460     }
461     return this.pause * HConstants.RETRY_BACKOFF[triesCount];
462   }
463 
464   /**
465    * Creates a new table.
466    * Synchronous operation.
467    *
468    * @param desc table descriptor for table
469    *
470    * @throws IllegalArgumentException if the table name is reserved
471    * @throws MasterNotRunningException if master is not running
472    * @throws TableExistsException if table already exists (If concurrent
473    * threads, the table may have been created between test-for-existence
474    * and attempt-at-creation).
475    * @throws IOException if a remote or network exception occurs
476    */
477   @Override
478   public void createTable(HTableDescriptor desc)
479   throws IOException {
480     createTable(desc, null);
481   }
482 
483   /**
484    * Creates a new table with the specified number of regions.  The start key
485    * specified will become the end key of the first region of the table, and
486    * the end key specified will become the start key of the last region of the
487    * table (the first region has a null start key and the last region has a
488    * null end key).
489    *
490    * BigInteger math will be used to divide the key range specified into
491    * enough segments to make the required number of total regions.
492    *
493    * Synchronous operation.
494    *
495    * @param desc table descriptor for table
496    * @param startKey beginning of key range
497    * @param endKey end of key range
498    * @param numRegions the total number of regions to create
499    *
500    * @throws IllegalArgumentException if the table name is reserved
501    * @throws MasterNotRunningException if master is not running
502    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
503    * threads, the table may have been created between test-for-existence
504    * and attempt-at-creation).
505    * @throws IOException
506    */
507   @Override
508   public void createTable(HTableDescriptor desc, byte [] startKey,
509       byte [] endKey, int numRegions)
510   throws IOException {
511     if(numRegions < 3) {
512       throw new IllegalArgumentException("Must create at least three regions");
513     } else if(Bytes.compareTo(startKey, endKey) >= 0) {
514       throw new IllegalArgumentException("Start key must be smaller than end key");
515     }
516     if (numRegions == 3) {
517       createTable(desc, new byte[][]{startKey, endKey});
518       return;
519     }
520     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
521     if(splitKeys == null || splitKeys.length != numRegions - 1) {
522       throw new IllegalArgumentException("Unable to split key range into enough regions");
523     }
524     createTable(desc, splitKeys);
525   }
526 
527   /**
528    * Creates a new table with an initial set of empty regions defined by the
529    * specified split keys.  The total number of regions created will be the
530    * number of split keys plus one. Synchronous operation.
531    * Note : Avoid passing empty split key.
532    *
533    * @param desc table descriptor for table
534    * @param splitKeys array of split keys for the initial regions of the table
535    *
536    * @throws IllegalArgumentException if the table name is reserved, if the split keys
537    * are repeated and if the split key has empty byte array.
538    * @throws MasterNotRunningException if master is not running
539    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
540    * threads, the table may have been created between test-for-existence
541    * and attempt-at-creation).
542    * @throws IOException
543    */
544   @Override
545   public void createTable(final HTableDescriptor desc, byte [][] splitKeys)
546   throws IOException {
547     try {
548       createTableAsync(desc, splitKeys);
549     } catch (SocketTimeoutException ste) {
550       LOG.warn("Creating " + desc.getTableName() + " took too long", ste);
551     }
552     int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication();
553     int prevRegCount = 0;
554     boolean tableWasEnabled = false;
555     for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier;
556       ++tries) {
557       if (tableWasEnabled) {
558         // Wait all table regions comes online
559         final AtomicInteger actualRegCount = new AtomicInteger(0);
560         MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
561           @Override
562           public boolean visit(Result rowResult) throws IOException {
563             RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
564             if (list == null) {
565               LOG.warn("No serialized HRegionInfo in " + rowResult);
566               return true;
567             }
568             HRegionLocation l = list.getRegionLocation();
569             if (l == null) {
570               return true;
571             }
572             if (!l.getRegionInfo().getTable().equals(desc.getTableName())) {
573               return false;
574             }
575             if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true;
576             HRegionLocation[] locations = list.getRegionLocations();
577             for (HRegionLocation location : locations) {
578               if (location == null) continue;
579               ServerName serverName = location.getServerName();
580               // Make sure that regions are assigned to server
581               if (serverName != null && serverName.getHostAndPort() != null) {
582                 actualRegCount.incrementAndGet();
583               }
584             }
585             return true;
586           }
587         };
588         MetaTableAccessor.scanMetaForTableRegions(connection, visitor, desc.getTableName());
589         if (actualRegCount.get() < numRegs) {
590           if (tries == this.numRetries * this.retryLongerMultiplier - 1) {
591             throw new RegionOfflineException("Only " + actualRegCount.get() +
592               " of " + numRegs + " regions are online; retries exhausted.");
593           }
594           try { // Sleep
595             Thread.sleep(getPauseTime(tries));
596           } catch (InterruptedException e) {
597             throw new InterruptedIOException("Interrupted when opening" +
598               " regions; " + actualRegCount.get() + " of " + numRegs +
599               " regions processed so far");
600           }
601           if (actualRegCount.get() > prevRegCount) { // Making progress
602             prevRegCount = actualRegCount.get();
603             tries = -1;
604           }
605         } else {
606           return;
607         }
608       } else {
609         try {
610           tableWasEnabled = isTableAvailable(desc.getTableName());
611         } catch (TableNotFoundException tnfe) {
612           LOG.debug(
613               "Table " + desc.getTableName() + " was not enabled, sleeping, still " + numRetries
614                   + " retries left");
615         }
616         if (tableWasEnabled) {
617           // no we will scan meta to ensure all regions are online
618           tries = -1;
619         } else {
620           try { // Sleep
621             Thread.sleep(getPauseTime(tries));
622           } catch (InterruptedException e) {
623             throw new InterruptedIOException("Interrupted when waiting" +
624                 " for table to be enabled; meta scan was done");
625           }
626         }
627       }
628     }
629     throw new TableNotEnabledException(
630       "Retries exhausted while still waiting for table: "
631       + desc.getTableName() + " to be enabled");
632   }
633 
634   /**
635    * Creates a new table but does not block and wait for it to come online.
636    * Asynchronous operation.  To check if the table exists, use
637    * {@link #isTableAvailable} -- it is not safe to create an HTable
638    * instance to this table before it is available.
639    * Note : Avoid passing empty split key.
640    * @param desc table descriptor for table
641    *
642    * @throws IllegalArgumentException Bad table name, if the split keys
643    * are repeated and if the split key has empty byte array.
644    * @throws MasterNotRunningException if master is not running
645    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
646    * threads, the table may have been created between test-for-existence
647    * and attempt-at-creation).
648    * @throws IOException
649    */
650   @Override
651   public void createTableAsync(
652     final HTableDescriptor desc, final byte [][] splitKeys)
653   throws IOException {
654     if(desc.getTableName() == null) {
655       throw new IllegalArgumentException("TableName cannot be null");
656     }
657     if(splitKeys != null && splitKeys.length > 0) {
658       Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
659       // Verify there are no duplicate split keys
660       byte [] lastKey = null;
661       for(byte [] splitKey : splitKeys) {
662         if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
663           throw new IllegalArgumentException(
664               "Empty split key must not be passed in the split keys.");
665         }
666         if(lastKey != null && Bytes.equals(splitKey, lastKey)) {
667           throw new IllegalArgumentException("All split keys must be unique, " +
668             "found duplicate: " + Bytes.toStringBinary(splitKey) +
669             ", " + Bytes.toStringBinary(lastKey));
670         }
671         lastKey = splitKey;
672       }
673     }
674 
675     executeCallable(new MasterCallable<Void>(getConnection()) {
676       @Override
677       public Void call(int callTimeout) throws ServiceException {
678         CreateTableRequest request = RequestConverter.buildCreateTableRequest(desc, splitKeys);
679         master.createTable(null, request);
680         return null;
681       }
682     });
683   }
684 
685   public void deleteTable(final String tableName) throws IOException {
686     deleteTable(TableName.valueOf(tableName));
687   }
688 
689   public void deleteTable(final byte[] tableName) throws IOException {
690     deleteTable(TableName.valueOf(tableName));
691   }
692 
693   /**
694    * Deletes a table.
695    * Synchronous operation.
696    *
697    * @param tableName name of table to delete
698    * @throws IOException if a remote or network exception occurs
699    */
700   @Override
701   public void deleteTable(final TableName tableName) throws IOException {
702     boolean tableExists = true;
703 
704     executeCallable(new MasterCallable<Void>(getConnection()) {
705       @Override
706       public Void call(int callTimeout) throws ServiceException {
707         DeleteTableRequest req = RequestConverter.buildDeleteTableRequest(tableName);
708         master.deleteTable(null,req);
709         return null;
710       }
711     });
712 
713     int failures = 0;
714     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
715       try {
716         tableExists = tableExists(tableName);
717         if (!tableExists)
718           break;
719       } catch (IOException ex) {
720         failures++;
721         if(failures >= numRetries - 1) {           // no more tries left
722           if (ex instanceof RemoteException) {
723             throw ((RemoteException) ex).unwrapRemoteException();
724           } else {
725             throw ex;
726           }
727         }
728       }
729       try {
730         Thread.sleep(getPauseTime(tries));
731       } catch (InterruptedException e) {
732         throw new InterruptedIOException("Interrupted when waiting" +
733             " for table to be deleted");
734       }
735     }
736 
737     if (tableExists) {
738       throw new IOException("Retries exhausted, it took too long to wait"+
739         " for the table " + tableName + " to be deleted.");
740     }
741     // Delete cached information to prevent clients from using old locations
742     this.connection.clearRegionCache(tableName);
743     LOG.info("Deleted " + tableName);
744   }
745 
746   /**
747    * Deletes tables matching the passed in pattern and wait on completion.
748    *
749    * Warning: Use this method carefully, there is no prompting and the effect is
750    * immediate. Consider using {@link #listTables(java.lang.String)} and
751    * {@link #deleteTable(byte[])}
752    *
753    * @param regex The regular expression to match table names against
754    * @return Table descriptors for tables that couldn't be deleted
755    * @throws IOException
756    * @see #deleteTables(java.util.regex.Pattern)
757    * @see #deleteTable(java.lang.String)
758    */
759   @Override
760   public HTableDescriptor[] deleteTables(String regex) throws IOException {
761     return deleteTables(Pattern.compile(regex));
762   }
763 
764   /**
765    * Delete tables matching the passed in pattern and wait on completion.
766    *
767    * Warning: Use this method carefully, there is no prompting and the effect is
768    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
769    * {@link #deleteTable(byte[])}
770    *
771    * @param pattern The pattern to match table names against
772    * @return Table descriptors for tables that couldn't be deleted
773    * @throws IOException
774    */
775   @Override
776   public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException {
777     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
778     for (HTableDescriptor table : listTables(pattern)) {
779       try {
780         deleteTable(table.getTableName());
781       } catch (IOException ex) {
782         LOG.info("Failed to delete table " + table.getTableName(), ex);
783         failed.add(table);
784       }
785     }
786     return failed.toArray(new HTableDescriptor[failed.size()]);
787   }
788 
789   /**
790    * Truncate a table.
791    * Synchronous operation.
792    *
793    * @param tableName name of table to truncate
794    * @param preserveSplits True if the splits should be preserved
795    * @throws IOException if a remote or network exception occurs
796    */
797   @Override
798   public void truncateTable(final TableName tableName, final boolean preserveSplits)
799       throws IOException {
800     executeCallable(new MasterCallable<Void>(getConnection()) {
801       @Override
802       public Void call(int callTimeout) throws ServiceException {
803         TruncateTableRequest req = RequestConverter.buildTruncateTableRequest(
804           tableName, preserveSplits);
805         master.truncateTable(null, req);
806         return null;
807       }
808     });
809   }
810 
811   /**
812    * Enable a table.  May timeout.  Use {@link #enableTableAsync(byte[])}
813    * and {@link #isTableEnabled(byte[])} instead.
814    * The table has to be in disabled state for it to be enabled.
815    * @param tableName name of the table
816    * @throws IOException if a remote or network exception occurs
817    * There could be couple types of IOException
818    * TableNotFoundException means the table doesn't exist.
819    * TableNotDisabledException means the table isn't in disabled state.
820    * @see #isTableEnabled(byte[])
821    * @see #disableTable(byte[])
822    * @see #enableTableAsync(byte[])
823    */
824   @Override
825   public void enableTable(final TableName tableName)
826   throws IOException {
827     enableTableAsync(tableName);
828 
829     // Wait until all regions are enabled
830     waitUntilTableIsEnabled(tableName);
831 
832     LOG.info("Enabled table " + tableName);
833   }
834 
835   public void enableTable(final byte[] tableName)
836   throws IOException {
837     enableTable(TableName.valueOf(tableName));
838   }
839 
840   public void enableTable(final String tableName)
841   throws IOException {
842     enableTable(TableName.valueOf(tableName));
843   }
844 
845   /**
846    * Wait for the table to be enabled and available
847    * If enabling the table exceeds the retry period, an exception is thrown.
848    * @param tableName name of the table
849    * @throws IOException if a remote or network exception occurs or
850    *    table is not enabled after the retries period.
851    */
852   private void waitUntilTableIsEnabled(final TableName tableName) throws IOException {
853     boolean enabled = false;
854     long start = EnvironmentEdgeManager.currentTime();
855     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
856       try {
857         enabled = isTableEnabled(tableName);
858       } catch (TableNotFoundException tnfe) {
859         // wait for table to be created
860         enabled = false;
861       }
862       enabled = enabled && isTableAvailable(tableName);
863       if (enabled) {
864         break;
865       }
866       long sleep = getPauseTime(tries);
867       if (LOG.isDebugEnabled()) {
868         LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
869           "enabled in " + tableName);
870       }
871       try {
872         Thread.sleep(sleep);
873       } catch (InterruptedException e) {
874         // Do this conversion rather than let it out because do not want to
875         // change the method signature.
876         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
877       }
878     }
879     if (!enabled) {
880       long msec = EnvironmentEdgeManager.currentTime() - start;
881       throw new IOException("Table '" + tableName +
882         "' not yet enabled, after " + msec + "ms.");
883     }
884   }
885 
886   /**
887    * Brings a table on-line (enables it).  Method returns immediately though
888    * enable of table may take some time to complete, especially if the table
889    * is large (All regions are opened as part of enabling process).  Check
890    * {@link #isTableEnabled(byte[])} to learn when table is fully online.  If
891    * table is taking too long to online, check server logs.
892    * @param tableName
893    * @throws IOException
894    * @since 0.90.0
895    */
896   @Override
897   public void enableTableAsync(final TableName tableName)
898   throws IOException {
899     TableName.isLegalFullyQualifiedTableName(tableName.getName());
900     executeCallable(new MasterCallable<Void>(getConnection()) {
901       @Override
902       public Void call(int callTimeout) throws ServiceException {
903         LOG.info("Started enable of " + tableName);
904         EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName);
905         master.enableTable(null,req);
906         return null;
907       }
908     });
909   }
910 
911   public void enableTableAsync(final byte[] tableName)
912   throws IOException {
913     enableTable(TableName.valueOf(tableName));
914   }
915 
916   public void enableTableAsync(final String tableName)
917   throws IOException {
918     enableTableAsync(TableName.valueOf(tableName));
919   }
920 
921   /**
922    * Enable tables matching the passed in pattern and wait on completion.
923    *
924    * Warning: Use this method carefully, there is no prompting and the effect is
925    * immediate. Consider using {@link #listTables(java.lang.String)} and
926    * {@link #enableTable(byte[])}
927    *
928    * @param regex The regular expression to match table names against
929    * @throws IOException
930    * @see #enableTables(java.util.regex.Pattern)
931    * @see #enableTable(java.lang.String)
932    */
933   @Override
934   public HTableDescriptor[] enableTables(String regex) throws IOException {
935     return enableTables(Pattern.compile(regex));
936   }
937 
938   /**
939    * Enable tables matching the passed in pattern and wait on completion.
940    *
941    * Warning: Use this method carefully, there is no prompting and the effect is
942    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
943    * {@link #enableTable(byte[])}
944    *
945    * @param pattern The pattern to match table names against
946    * @throws IOException
947    */
948   @Override
949   public HTableDescriptor[] enableTables(Pattern pattern) throws IOException {
950     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
951     for (HTableDescriptor table : listTables(pattern)) {
952       if (isTableDisabled(table.getTableName())) {
953         try {
954           enableTable(table.getTableName());
955         } catch (IOException ex) {
956           LOG.info("Failed to enable table " + table.getTableName(), ex);
957           failed.add(table);
958         }
959       }
960     }
961     return failed.toArray(new HTableDescriptor[failed.size()]);
962   }
963 
964   /**
965    * Starts the disable of a table.  If it is being served, the master
966    * will tell the servers to stop serving it.  This method returns immediately.
967    * The disable of a table can take some time if the table is large (all
968    * regions are closed as part of table disable operation).
969    * Call {@link #isTableDisabled(byte[])} to check for when disable completes.
970    * If table is taking too long to online, check server logs.
971    * @param tableName name of table
972    * @throws IOException if a remote or network exception occurs
973    * @see #isTableDisabled(byte[])
974    * @see #isTableEnabled(byte[])
975    * @since 0.90.0
976    */
977   @Override
978   public void disableTableAsync(final TableName tableName) throws IOException {
979     TableName.isLegalFullyQualifiedTableName(tableName.getName());
980     executeCallable(new MasterCallable<Void>(getConnection()) {
981       @Override
982       public Void call(int callTimeout) throws ServiceException {
983         LOG.info("Started disable of " + tableName);
984         DisableTableRequest req = RequestConverter.buildDisableTableRequest(tableName);
985         master.disableTable(null,req);
986         return null;
987       }
988     });
989   }
990 
991   public void disableTableAsync(final byte[] tableName) throws IOException {
992     disableTableAsync(TableName.valueOf(tableName));
993   }
994 
995   public void disableTableAsync(final String tableName) throws IOException {
996     disableTableAsync(TableName.valueOf(tableName));
997   }
998 
999   /**
1000    * Disable table and wait on completion.  May timeout eventually.  Use
1001    * {@link #disableTableAsync(byte[])} and {@link #isTableDisabled(String)}
1002    * instead.
1003    * The table has to be in enabled state for it to be disabled.
1004    * @param tableName
1005    * @throws IOException
1006    * There could be couple types of IOException
1007    * TableNotFoundException means the table doesn't exist.
1008    * TableNotEnabledException means the table isn't in enabled state.
1009    */
1010   @Override
1011   public void disableTable(final TableName tableName)
1012   throws IOException {
1013     disableTableAsync(tableName);
1014     // Wait until table is disabled
1015     boolean disabled = false;
1016     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
1017       disabled = isTableDisabled(tableName);
1018       if (disabled) {
1019         break;
1020       }
1021       long sleep = getPauseTime(tries);
1022       if (LOG.isDebugEnabled()) {
1023         LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
1024           "disabled in " + tableName);
1025       }
1026       try {
1027         Thread.sleep(sleep);
1028       } catch (InterruptedException e) {
1029         // Do this conversion rather than let it out because do not want to
1030         // change the method signature.
1031         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
1032       }
1033     }
1034     if (!disabled) {
1035       throw new RegionException("Retries exhausted, it took too long to wait"+
1036         " for the table " + tableName + " to be disabled.");
1037     }
1038     LOG.info("Disabled " + tableName);
1039   }
1040 
1041   public void disableTable(final byte[] tableName)
1042   throws IOException {
1043     disableTable(TableName.valueOf(tableName));
1044   }
1045 
1046   public void disableTable(final String tableName)
1047   throws IOException {
1048     disableTable(TableName.valueOf(tableName));
1049   }
1050 
1051   /**
1052    * Disable tables matching the passed in pattern and wait on completion.
1053    *
1054    * Warning: Use this method carefully, there is no prompting and the effect is
1055    * immediate. Consider using {@link #listTables(java.lang.String)} and
1056    * {@link #disableTable(byte[])}
1057    *
1058    * @param regex The regular expression to match table names against
1059    * @return Table descriptors for tables that couldn't be disabled
1060    * @throws IOException
1061    * @see #disableTables(java.util.regex.Pattern)
1062    * @see #disableTable(java.lang.String)
1063    */
1064   @Override
1065   public HTableDescriptor[] disableTables(String regex) throws IOException {
1066     return disableTables(Pattern.compile(regex));
1067   }
1068 
1069   /**
1070    * Disable tables matching the passed in pattern and wait on completion.
1071    *
1072    * Warning: Use this method carefully, there is no prompting and the effect is
1073    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
1074    * {@link #disableTable(byte[])}
1075    *
1076    * @param pattern The pattern to match table names against
1077    * @return Table descriptors for tables that couldn't be disabled
1078    * @throws IOException
1079    */
1080   @Override
1081   public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
1082     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1083     for (HTableDescriptor table : listTables(pattern)) {
1084       if (isTableEnabled(table.getTableName())) {
1085         try {
1086           disableTable(table.getTableName());
1087         } catch (IOException ex) {
1088           LOG.info("Failed to disable table " + table.getTableName(), ex);
1089           failed.add(table);
1090         }
1091       }
1092     }
1093     return failed.toArray(new HTableDescriptor[failed.size()]);
1094   }
1095 
1096   /*
1097    * Checks whether table exists. If not, throws TableNotFoundException
1098    * @param tableName
1099    */
1100   private void checkTableExistence(TableName tableName) throws IOException {
1101     if (!tableExists(tableName)) {
1102       throw new TableNotFoundException(tableName);
1103     }
1104   }
1105 
1106   /**
1107    * @param tableName name of table to check
1108    * @return true if table is on-line
1109    * @throws IOException if a remote or network exception occurs
1110    */
1111   @Override
1112   public boolean isTableEnabled(final TableName tableName) throws IOException {
1113     checkTableExistence(tableName);
1114     return executeCallable(new ConnectionCallable<Boolean>(getConnection()) {
1115       @Override
1116       public Boolean call(int callTimeout) throws ServiceException, IOException {
1117         TableState tableState = MetaTableAccessor.getTableState(connection, tableName);
1118         if (tableState == null)
1119           throw new TableNotFoundException(tableName);
1120         return tableState.inStates(TableState.State.ENABLED);
1121       }
1122     });
1123   }
1124 
1125   public boolean isTableEnabled(byte[] tableName) throws IOException {
1126     return isTableEnabled(TableName.valueOf(tableName));
1127   }
1128 
1129   public boolean isTableEnabled(String tableName) throws IOException {
1130     return isTableEnabled(TableName.valueOf(tableName));
1131   }
1132 
1133 
1134 
1135   /**
1136    * @param tableName name of table to check
1137    * @return true if table is off-line
1138    * @throws IOException if a remote or network exception occurs
1139    */
1140   @Override
1141   public boolean isTableDisabled(TableName tableName) throws IOException {
1142     checkTableExistence(tableName);
1143     return connection.isTableDisabled(tableName);
1144   }
1145 
1146   public boolean isTableDisabled(byte[] tableName) throws IOException {
1147     return isTableDisabled(TableName.valueOf(tableName));
1148   }
1149 
1150   public boolean isTableDisabled(String tableName) throws IOException {
1151     return isTableDisabled(TableName.valueOf(tableName));
1152   }
1153 
1154   /**
1155    * @param tableName name of table to check
1156    * @return true if all regions of the table are available
1157    * @throws IOException if a remote or network exception occurs
1158    */
1159   @Override
1160   public boolean isTableAvailable(TableName tableName) throws IOException {
1161     return connection.isTableAvailable(tableName);
1162   }
1163 
1164   public boolean isTableAvailable(byte[] tableName) throws IOException {
1165     return isTableAvailable(TableName.valueOf(tableName));
1166   }
1167 
1168   public boolean isTableAvailable(String tableName) throws IOException {
1169     return isTableAvailable(TableName.valueOf(tableName));
1170   }
1171 
1172   /**
1173    * Use this api to check if the table has been created with the specified number of
1174    * splitkeys which was used while creating the given table.
1175    * Note : If this api is used after a table's region gets splitted, the api may return
1176    * false.
1177    * @param tableName
1178    *          name of table to check
1179    * @param splitKeys
1180    *          keys to check if the table has been created with all split keys
1181    * @throws IOException
1182    *           if a remote or network excpetion occurs
1183    */
1184   @Override
1185   public boolean isTableAvailable(TableName tableName,
1186                                   byte[][] splitKeys) throws IOException {
1187     return connection.isTableAvailable(tableName, splitKeys);
1188   }
1189 
1190   public boolean isTableAvailable(byte[] tableName,
1191                                   byte[][] splitKeys) throws IOException {
1192     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1193   }
1194 
1195   public boolean isTableAvailable(String tableName,
1196                                   byte[][] splitKeys) throws IOException {
1197     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1198   }
1199 
1200   /**
1201    * Get the status of alter command - indicates how many regions have received
1202    * the updated schema Asynchronous operation.
1203    *
1204    * @param tableName TableName instance
1205    * @return Pair indicating the number of regions updated Pair.getFirst() is the
1206    *         regions that are yet to be updated Pair.getSecond() is the total number
1207    *         of regions of the table
1208    * @throws IOException
1209    *           if a remote or network exception occurs
1210    */
1211   @Override
1212   public Pair<Integer, Integer> getAlterStatus(final TableName tableName)
1213   throws IOException {
1214     return executeCallable(new MasterCallable<Pair<Integer, Integer>>(getConnection()) {
1215       @Override
1216       public Pair<Integer, Integer> call(int callTimeout) throws ServiceException {
1217         GetSchemaAlterStatusRequest req = RequestConverter
1218             .buildGetSchemaAlterStatusRequest(tableName);
1219         GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(null, req);
1220         Pair<Integer, Integer> pair = new Pair<Integer, Integer>(Integer.valueOf(ret
1221             .getYetToUpdateRegions()), Integer.valueOf(ret.getTotalRegions()));
1222         return pair;
1223       }
1224     });
1225   }
1226 
1227   /**
1228    * Get the status of alter command - indicates how many regions have received
1229    * the updated schema Asynchronous operation.
1230    *
1231    * @param tableName
1232    *          name of the table to get the status of
1233    * @return Pair indicating the number of regions updated Pair.getFirst() is the
1234    *         regions that are yet to be updated Pair.getSecond() is the total number
1235    *         of regions of the table
1236    * @throws IOException
1237    *           if a remote or network exception occurs
1238    */
1239   @Override
1240   public Pair<Integer, Integer> getAlterStatus(final byte[] tableName)
1241    throws IOException {
1242     return getAlterStatus(TableName.valueOf(tableName));
1243   }
1244 
1245   /**
1246    * Add a column to an existing table.
1247    * Asynchronous operation.
1248    *
1249    * @param tableName name of the table to add column to
1250    * @param column column descriptor of column to be added
1251    * @throws IOException if a remote or network exception occurs
1252    */
1253   public void addColumn(final byte[] tableName, HColumnDescriptor column)
1254   throws IOException {
1255     addColumn(TableName.valueOf(tableName), column);
1256   }
1257 
1258 
1259   /**
1260    * Add a column to an existing table.
1261    * Asynchronous operation.
1262    *
1263    * @param tableName name of the table to add column to
1264    * @param column column descriptor of column to be added
1265    * @throws IOException if a remote or network exception occurs
1266    */
1267   public void addColumn(final String tableName, HColumnDescriptor column)
1268   throws IOException {
1269     addColumn(TableName.valueOf(tableName), column);
1270   }
1271 
1272   /**
1273    * Add a column to an existing table.
1274    * Asynchronous operation.
1275    *
1276    * @param tableName name of the table to add column to
1277    * @param column column descriptor of column to be added
1278    * @throws IOException if a remote or network exception occurs
1279    */
1280   @Override
1281   public void addColumn(final TableName tableName, final HColumnDescriptor column)
1282   throws IOException {
1283     executeCallable(new MasterCallable<Void>(getConnection()) {
1284       @Override
1285       public Void call(int callTimeout) throws ServiceException {
1286         AddColumnRequest req = RequestConverter.buildAddColumnRequest(tableName, column);
1287         master.addColumn(null,req);
1288         return null;
1289       }
1290     });
1291   }
1292 
1293   /**
1294    * Delete a column from a table.
1295    * Asynchronous operation.
1296    *
1297    * @param tableName name of table
1298    * @param columnName name of column to be deleted
1299    * @throws IOException if a remote or network exception occurs
1300    */
1301   public void deleteColumn(final byte[] tableName, final String columnName)
1302   throws IOException {
1303     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1304   }
1305 
1306   /**
1307    * Delete a column from a table.
1308    * Asynchronous operation.
1309    *
1310    * @param tableName name of table
1311    * @param columnName name of column to be deleted
1312    * @throws IOException if a remote or network exception occurs
1313    */
1314   public void deleteColumn(final String tableName, final String columnName)
1315   throws IOException {
1316     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1317   }
1318 
1319   /**
1320    * Delete a column from a table.
1321    * Asynchronous operation.
1322    *
1323    * @param tableName name of table
1324    * @param columnName name of column to be deleted
1325    * @throws IOException if a remote or network exception occurs
1326    */
1327   @Override
1328   public void deleteColumn(final TableName tableName, final byte [] columnName)
1329   throws IOException {
1330     executeCallable(new MasterCallable<Void>(getConnection()) {
1331       @Override
1332       public Void call(int callTimeout) throws ServiceException {
1333         DeleteColumnRequest req = RequestConverter.buildDeleteColumnRequest(tableName, columnName);
1334         master.deleteColumn(null,req);
1335         return null;
1336       }
1337     });
1338   }
1339 
1340   /**
1341    * Modify an existing column family on a table.
1342    * Asynchronous operation.
1343    *
1344    * @param tableName name of table
1345    * @param descriptor new column descriptor to use
1346    * @throws IOException if a remote or network exception occurs
1347    */
1348   public void modifyColumn(final String tableName, HColumnDescriptor descriptor)
1349   throws IOException {
1350     modifyColumn(TableName.valueOf(tableName), descriptor);
1351   }
1352 
1353   /**
1354    * Modify an existing column family on a table.
1355    * Asynchronous operation.
1356    *
1357    * @param tableName name of table
1358    * @param descriptor new column descriptor to use
1359    * @throws IOException if a remote or network exception occurs
1360    */
1361   public void modifyColumn(final byte[] tableName, HColumnDescriptor descriptor)
1362   throws IOException {
1363     modifyColumn(TableName.valueOf(tableName), descriptor);
1364   }
1365 
1366 
1367 
1368   /**
1369    * Modify an existing column family on a table.
1370    * Asynchronous operation.
1371    *
1372    * @param tableName name of table
1373    * @param descriptor new column descriptor to use
1374    * @throws IOException if a remote or network exception occurs
1375    */
1376   @Override
1377   public void modifyColumn(final TableName tableName, final HColumnDescriptor descriptor)
1378   throws IOException {
1379     executeCallable(new MasterCallable<Void>(getConnection()) {
1380       @Override
1381       public Void call(int callTimeout) throws ServiceException {
1382         ModifyColumnRequest req = RequestConverter.buildModifyColumnRequest(tableName, descriptor);
1383         master.modifyColumn(null,req);
1384         return null;
1385       }
1386     });
1387   }
1388 
1389   /**
1390    * Close a region. For expert-admins.  Runs close on the regionserver.  The
1391    * master will not be informed of the close.
1392    * @param regionname region name to close
1393    * @param serverName If supplied, we'll use this location rather than
1394    * the one currently in <code>hbase:meta</code>
1395    * @throws IOException if a remote or network exception occurs
1396    */
1397   @Override
1398   public void closeRegion(final String regionname, final String serverName)
1399   throws IOException {
1400     closeRegion(Bytes.toBytes(regionname), serverName);
1401   }
1402 
1403   /**
1404    * Close a region.  For expert-admins  Runs close on the regionserver.  The
1405    * master will not be informed of the close.
1406    * @param regionname region name to close
1407    * @param serverName The servername of the regionserver.  If passed null we
1408    * will use servername found in the hbase:meta table. A server name
1409    * is made of host, port and startcode.  Here is an example:
1410    * <code> host187.example.com,60020,1289493121758</code>
1411    * @throws IOException if a remote or network exception occurs
1412    */
1413   @Override
1414   public void closeRegion(final byte [] regionname, final String serverName)
1415       throws IOException {
1416     if (serverName != null) {
1417       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1418       if (pair == null || pair.getFirst() == null) {
1419         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1420       } else {
1421         closeRegion(ServerName.valueOf(serverName), pair.getFirst());
1422       }
1423     } else {
1424       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1425       if (pair == null) {
1426         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1427       } else if (pair.getSecond() == null) {
1428         throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
1429       } else {
1430         closeRegion(pair.getSecond(), pair.getFirst());
1431       }
1432     }
1433   }
1434 
1435   /**
1436    * For expert-admins. Runs close on the regionserver. Closes a region based on
1437    * the encoded region name. The region server name is mandatory. If the
1438    * servername is provided then based on the online regions in the specified
1439    * regionserver the specified region will be closed. The master will not be
1440    * informed of the close. Note that the regionname is the encoded regionname.
1441    *
1442    * @param encodedRegionName
1443    *          The encoded region name; i.e. the hash that makes up the region
1444    *          name suffix: e.g. if regionname is
1445    *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>
1446    *          , then the encoded region name is:
1447    *          <code>527db22f95c8a9e0116f0cc13c680396</code>.
1448    * @param serverName
1449    *          The servername of the regionserver. A server name is made of host,
1450    *          port and startcode. This is mandatory. Here is an example:
1451    *          <code> host187.example.com,60020,1289493121758</code>
1452    * @return true if the region was closed, false if not.
1453    * @throws IOException
1454    *           if a remote or network exception occurs
1455    */
1456   @Override
1457   public boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
1458       final String serverName) throws IOException {
1459     if (null == serverName || ("").equals(serverName.trim())) {
1460       throw new IllegalArgumentException(
1461           "The servername cannot be null or empty.");
1462     }
1463     ServerName sn = ServerName.valueOf(serverName);
1464     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1465     // Close the region without updating zk state.
1466     CloseRegionRequest request =
1467       RequestConverter.buildCloseRegionRequest(sn, encodedRegionName);
1468     try {
1469       CloseRegionResponse response = admin.closeRegion(null, request);
1470       boolean isRegionClosed = response.getClosed();
1471       if (false == isRegionClosed) {
1472         LOG.error("Not able to close the region " + encodedRegionName + ".");
1473       }
1474       return isRegionClosed;
1475     } catch (ServiceException se) {
1476       throw ProtobufUtil.getRemoteException(se);
1477     }
1478   }
1479 
1480   /**
1481    * Close a region.  For expert-admins  Runs close on the regionserver.  The
1482    * master will not be informed of the close.
1483    * @param sn
1484    * @param hri
1485    * @throws IOException
1486    */
1487   @Override
1488   public void closeRegion(final ServerName sn, final HRegionInfo hri)
1489   throws IOException {
1490     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1491     // Close the region without updating zk state.
1492     ProtobufUtil.closeRegion(admin, sn, hri.getRegionName());
1493   }
1494 
1495   /**
1496    * Get all the online regions on a region server.
1497    */
1498   @Override
1499   public List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException {
1500     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1501     return ProtobufUtil.getOnlineRegions(admin);
1502   }
1503 
1504   /**
1505    * {@inheritDoc}
1506    */
1507   @Override
1508   public void flush(final TableName tableName) throws IOException {
1509     checkTableExists(tableName);
1510     if (isTableDisabled(tableName)) {
1511       LOG.info("Table is disabled: " + tableName.getNameAsString());
1512       return;
1513     }
1514     execProcedure("flush-table-proc", tableName.getNameAsString(),
1515       new HashMap<String, String>());
1516   }
1517 
1518   /**
1519    * {@inheritDoc}
1520    */
1521   @Override
1522   public void flushRegion(final byte[] regionName) throws IOException {
1523     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
1524     if (regionServerPair == null) {
1525       throw new IllegalArgumentException("Unknown regionname: " + Bytes.toStringBinary(regionName));
1526     }
1527     if (regionServerPair.getSecond() == null) {
1528       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1529     }
1530     flush(regionServerPair.getSecond(), regionServerPair.getFirst());
1531   }
1532 
1533   /**
1534    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or {@link #flushRegion
1535    * (byte[])} instead.
1536    */
1537   @Deprecated
1538   public void flush(final String tableNameOrRegionName)
1539   throws IOException, InterruptedException {
1540     flush(Bytes.toBytes(tableNameOrRegionName));
1541   }
1542 
1543   /**
1544    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or {@link #flushRegion
1545    * (byte[])} instead.
1546    */
1547   @Deprecated
1548   public void flush(final byte[] tableNameOrRegionName)
1549   throws IOException, InterruptedException {
1550     try {
1551       flushRegion(tableNameOrRegionName);
1552     } catch (IllegalArgumentException e) {
1553       // Unknown region.  Try table.
1554       flush(TableName.valueOf(tableNameOrRegionName));
1555     }
1556   }
1557 
1558   private void flush(final ServerName sn, final HRegionInfo hri)
1559   throws IOException {
1560     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1561     FlushRegionRequest request =
1562       RequestConverter.buildFlushRegionRequest(hri.getRegionName());
1563     try {
1564       admin.flushRegion(null, request);
1565     } catch (ServiceException se) {
1566       throw ProtobufUtil.getRemoteException(se);
1567     }
1568   }
1569 
1570   /**
1571    * {@inheritDoc}
1572    */
1573   @Override
1574   public void compact(final TableName tableName)
1575     throws IOException {
1576     compact(tableName, null, false);
1577   }
1578 
1579   /**
1580    * {@inheritDoc}
1581    */
1582   @Override
1583   public void compactRegion(final byte[] regionName)
1584     throws IOException {
1585     compactRegion(regionName, null, false);
1586   }
1587 
1588   /**
1589    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1590    * (byte[])} instead.
1591    */
1592   @Deprecated
1593   public void compact(final String tableNameOrRegionName)
1594   throws IOException {
1595     compact(Bytes.toBytes(tableNameOrRegionName));
1596   }
1597 
1598   /**
1599    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1600    * (byte[])} instead.
1601    */
1602   @Deprecated
1603   public void compact(final byte[] tableNameOrRegionName)
1604   throws IOException {
1605     try {
1606       compactRegion(tableNameOrRegionName, null, false);
1607     } catch (IllegalArgumentException e) {
1608       compact(TableName.valueOf(tableNameOrRegionName), null, false);
1609     }
1610   }
1611 
1612   /**
1613    * {@inheritDoc}
1614    */
1615   @Override
1616   public void compact(final TableName tableName, final byte[] columnFamily)
1617     throws IOException {
1618     compact(tableName, columnFamily, false);
1619   }
1620 
1621   /**
1622    * {@inheritDoc}
1623    */
1624   @Override
1625   public void compactRegion(final byte[] regionName, final byte[] columnFamily)
1626     throws IOException {
1627     compactRegion(regionName, columnFamily, false);
1628   }
1629 
1630   /**
1631    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1632    * (byte[], byte[])} instead.
1633    */
1634   @Deprecated
1635   public void compact(String tableOrRegionName, String columnFamily)
1636     throws IOException {
1637     compact(Bytes.toBytes(tableOrRegionName), Bytes.toBytes(columnFamily));
1638   }
1639 
1640   /**
1641    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1642    * (byte[], byte[])} instead.
1643    */
1644   @Deprecated
1645   public void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
1646   throws IOException {
1647     try {
1648       compactRegion(tableNameOrRegionName, columnFamily, false);
1649     } catch (IllegalArgumentException e) {
1650       // Bad region, try table
1651       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, false);
1652     }
1653   }
1654 
1655   /**
1656    * {@inheritDoc}
1657    */
1658   @Override
1659   public void compactRegionServer(final ServerName sn, boolean major)
1660   throws IOException, InterruptedException {
1661     for (HRegionInfo region : getOnlineRegions(sn)) {
1662       compact(sn, region, major, null);
1663     }
1664   }
1665 
1666   /**
1667    * {@inheritDoc}
1668    */
1669   @Override
1670   public void majorCompact(final TableName tableName)
1671   throws IOException {
1672     compact(tableName, null, true);
1673   }
1674 
1675   /**
1676    * {@inheritDoc}
1677    */
1678   @Override
1679   public void majorCompactRegion(final byte[] regionName)
1680   throws IOException {
1681     compactRegion(regionName, null, true);
1682   }
1683 
1684   /**
1685    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
1686    * #majorCompactRegion(byte[])} instead.
1687    */
1688   @Deprecated
1689   public void majorCompact(final String tableNameOrRegionName)
1690   throws IOException {
1691     majorCompact(Bytes.toBytes(tableNameOrRegionName));
1692   }
1693 
1694   /**
1695    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
1696    * #majorCompactRegion(byte[])} instead.
1697    */
1698   @Deprecated
1699   public void majorCompact(final byte[] tableNameOrRegionName)
1700   throws IOException {
1701     try {
1702       compactRegion(tableNameOrRegionName, null, true);
1703     } catch (IllegalArgumentException e) {
1704       // Invalid region, try table
1705       compact(TableName.valueOf(tableNameOrRegionName), null, true);
1706     }
1707   }
1708 
1709   /**
1710    * {@inheritDoc}
1711    */
1712   @Override
1713   public void majorCompact(final TableName tableName, final byte[] columnFamily)
1714   throws IOException {
1715     compact(tableName, columnFamily, true);
1716   }
1717 
1718   /**
1719    * {@inheritDoc}
1720    */
1721   @Override
1722   public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily)
1723   throws IOException {
1724     compactRegion(regionName, columnFamily, true);
1725   }
1726 
1727   /**
1728    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
1729    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
1730    */
1731   @Deprecated
1732   public void majorCompact(final String tableNameOrRegionName, final String columnFamily)
1733   throws IOException {
1734     majorCompact(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(columnFamily));
1735   }
1736 
1737   /**
1738    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
1739    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
1740    */
1741   @Deprecated
1742   public void majorCompact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
1743   throws IOException {
1744     try {
1745       compactRegion(tableNameOrRegionName, columnFamily, true);
1746     } catch (IllegalArgumentException e) {
1747       // Invalid region, try table
1748       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, true);
1749     }
1750   }
1751 
1752   /**
1753    * Compact a table.
1754    * Asynchronous operation.
1755    *
1756    * @param tableName table or region to compact
1757    * @param columnFamily column family within a table or region
1758    * @param major True if we are to do a major compaction.
1759    * @throws IOException if a remote or network exception occurs
1760    * @throws InterruptedException
1761    */
1762   private void compact(final TableName tableName, final byte[] columnFamily,final boolean major)
1763   throws IOException {
1764     ZooKeeperWatcher zookeeper = null;
1765     try {
1766       checkTableExists(tableName);
1767       zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
1768           new ThrowableAbortable());
1769       List<Pair<HRegionInfo, ServerName>> pairs;
1770       if (TableName.META_TABLE_NAME.equals(tableName)) {
1771         pairs = new MetaTableLocator().getMetaRegionsAndLocations(zookeeper);
1772       } else {
1773         pairs = MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
1774       }
1775       for (Pair<HRegionInfo, ServerName> pair: pairs) {
1776         if (pair.getFirst().isOffline()) continue;
1777         if (pair.getSecond() == null) continue;
1778         try {
1779           compact(pair.getSecond(), pair.getFirst(), major, columnFamily);
1780         } catch (NotServingRegionException e) {
1781           if (LOG.isDebugEnabled()) {
1782             LOG.debug("Trying to" + (major ? " major" : "") + " compact " +
1783               pair.getFirst() + ": " +
1784               StringUtils.stringifyException(e));
1785           }
1786         }
1787       }
1788     } finally {
1789       if (zookeeper != null) {
1790         zookeeper.close();
1791       }
1792     }
1793   }
1794 
1795   /**
1796    * Compact an individual region.
1797    * Asynchronous operation.
1798    *
1799    * @param regionName region to compact
1800    * @param columnFamily column family within a table or region
1801    * @param major True if we are to do a major compaction.
1802    * @throws IOException if a remote or network exception occurs
1803    * @throws InterruptedException
1804    */
1805   private void compactRegion(final byte[] regionName, final byte[] columnFamily,final boolean major)
1806   throws IOException {
1807     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
1808     if (regionServerPair == null) {
1809       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
1810     }
1811     if (regionServerPair.getSecond() == null) {
1812       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1813     }
1814     compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
1815   }
1816 
1817   private void compact(final ServerName sn, final HRegionInfo hri,
1818       final boolean major, final byte [] family)
1819   throws IOException {
1820     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1821     CompactRegionRequest request =
1822       RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family);
1823     try {
1824       admin.compactRegion(null, request);
1825     } catch (ServiceException se) {
1826       throw ProtobufUtil.getRemoteException(se);
1827     }
1828   }
1829 
1830   /**
1831    * Move the region <code>r</code> to <code>dest</code>.
1832    * @param encodedRegionName The encoded region name; i.e. the hash that makes
1833    * up the region name suffix: e.g. if regionname is
1834    * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
1835    * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
1836    * @param destServerName The servername of the destination regionserver.  If
1837    * passed the empty byte array we'll assign to a random server.  A server name
1838    * is made of host, port and startcode.  Here is an example:
1839    * <code> host187.example.com,60020,1289493121758</code>
1840    * @throws UnknownRegionException Thrown if we can't find a region named
1841    * <code>encodedRegionName</code>
1842    */
1843   @Override
1844   public void move(final byte [] encodedRegionName, final byte [] destServerName)
1845       throws IOException {
1846 
1847     executeCallable(new MasterCallable<Void>(getConnection()) {
1848       @Override
1849       public Void call(int callTimeout) throws ServiceException {
1850         try {
1851           MoveRegionRequest request =
1852               RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName);
1853             master.moveRegion(null, request);
1854         } catch (DeserializationException de) {
1855           LOG.error("Could not parse destination server name: " + de);
1856           throw new ServiceException(new DoNotRetryIOException(de));
1857         }
1858         return null;
1859       }
1860     });
1861   }
1862 
1863   /**
1864    * @param regionName
1865    *          Region name to assign.
1866    * @throws MasterNotRunningException
1867    * @throws ZooKeeperConnectionException
1868    * @throws IOException
1869    */
1870   @Override
1871   public void assign(final byte[] regionName) throws MasterNotRunningException,
1872       ZooKeeperConnectionException, IOException {
1873     final byte[] toBeAssigned = getRegionName(regionName);
1874     executeCallable(new MasterCallable<Void>(getConnection()) {
1875       @Override
1876       public Void call(int callTimeout) throws ServiceException {
1877         AssignRegionRequest request =
1878           RequestConverter.buildAssignRegionRequest(toBeAssigned);
1879         master.assignRegion(null,request);
1880         return null;
1881       }
1882     });
1883   }
1884 
1885   /**
1886    * Unassign a region from current hosting regionserver.  Region will then be
1887    * assigned to a regionserver chosen at random.  Region could be reassigned
1888    * back to the same server.  Use {@link #move(byte[], byte[])} if you want
1889    * to control the region movement.
1890    * @param regionName Region to unassign. Will clear any existing RegionPlan
1891    * if one found.
1892    * @param force If true, force unassign (Will remove region from
1893    * regions-in-transition too if present. If results in double assignment
1894    * use hbck -fix to resolve. To be used by experts).
1895    * @throws MasterNotRunningException
1896    * @throws ZooKeeperConnectionException
1897    * @throws IOException
1898    */
1899   @Override
1900   public void unassign(final byte [] regionName, final boolean force)
1901   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
1902     final byte[] toBeUnassigned = getRegionName(regionName);
1903     executeCallable(new MasterCallable<Void>(getConnection()) {
1904       @Override
1905       public Void call(int callTimeout) throws ServiceException {
1906         UnassignRegionRequest request =
1907           RequestConverter.buildUnassignRegionRequest(toBeUnassigned, force);
1908         master.unassignRegion(null, request);
1909         return null;
1910       }
1911     });
1912   }
1913 
1914   /**
1915    * Offline specified region from master's in-memory state. It will not attempt to reassign the
1916    * region as in unassign. This API can be used when a region not served by any region server and
1917    * still online as per Master's in memory state. If this API is incorrectly used on active region
1918    * then master will loose track of that region.
1919    *
1920    * This is a special method that should be used by experts or hbck.
1921    *
1922    * @param regionName
1923    *          Region to offline.
1924    * @throws IOException
1925    */
1926   @Override
1927   public void offline(final byte [] regionName)
1928   throws IOException {
1929     executeCallable(new MasterCallable<Void>(getConnection()) {
1930       @Override
1931       public Void call(int callTimeout) throws ServiceException {
1932         master.offlineRegion(null,RequestConverter.buildOfflineRegionRequest(regionName));
1933         return null;
1934       }
1935     });
1936   }
1937 
1938   /**
1939    * Turn the load balancer on or off.
1940    * @param on If true, enable balancer. If false, disable balancer.
1941    * @param synchronous If true, it waits until current balance() call, if outstanding, to return.
1942    * @return Previous balancer value
1943    */
1944   @Override
1945   public boolean setBalancerRunning(final boolean on, final boolean synchronous)
1946   throws IOException {
1947     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
1948       @Override
1949       public Boolean call(int callTimeout) throws ServiceException {
1950         SetBalancerRunningRequest req =
1951             RequestConverter.buildSetBalancerRunningRequest(on, synchronous);
1952         return master.setBalancerRunning(null, req).getPrevBalanceValue();
1953       }
1954     });
1955   }
1956 
1957   /**
1958    * Invoke the balancer.  Will run the balancer and if regions to move, it will
1959    * go ahead and do the reassignments.  Can NOT run for various reasons.  Check
1960    * logs.
1961    * @return True if balancer ran, false otherwise.
1962    */
1963   @Override
1964   public boolean balancer() throws IOException {
1965     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
1966       @Override
1967       public Boolean call(int callTimeout) throws ServiceException {
1968         return master.balance(null, RequestConverter.buildBalanceRequest()).getBalancerRan();
1969       }
1970     });
1971   }
1972 
1973   /**
1974    * Enable/Disable the catalog janitor
1975    * @param enable if true enables the catalog janitor
1976    * @return the previous state
1977    * @throws MasterNotRunningException
1978    */
1979   @Override
1980   public boolean enableCatalogJanitor(final boolean enable)
1981       throws IOException {
1982     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
1983       @Override
1984       public Boolean call(int callTimeout) throws ServiceException {
1985         return master.enableCatalogJanitor(null,
1986           RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
1987       }
1988     });
1989   }
1990 
1991   /**
1992    * Ask for a scan of the catalog table
1993    * @return the number of entries cleaned
1994    * @throws MasterNotRunningException
1995    */
1996   @Override
1997   public int runCatalogScan() throws IOException {
1998     return executeCallable(new MasterCallable<Integer>(getConnection()) {
1999       @Override
2000       public Integer call(int callTimeout) throws ServiceException {
2001         return master.runCatalogScan(null,
2002           RequestConverter.buildCatalogScanRequest()).getScanResult();
2003       }
2004     });
2005   }
2006 
2007   /**
2008    * Query on the catalog janitor state (Enabled/Disabled?)
2009    * @throws org.apache.hadoop.hbase.MasterNotRunningException
2010    */
2011   @Override
2012   public boolean isCatalogJanitorEnabled() throws IOException {
2013     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2014       @Override
2015       public Boolean call(int callTimeout) throws ServiceException {
2016         return master.isCatalogJanitorEnabled(null,
2017           RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
2018       }
2019     });
2020   }
2021 
2022   /**
2023    * Merge two regions. Asynchronous operation.
2024    * @param encodedNameOfRegionA encoded name of region a
2025    * @param encodedNameOfRegionB encoded name of region b
2026    * @param forcible true if do a compulsory merge, otherwise we will only merge
2027    *          two adjacent regions
2028    * @throws IOException
2029    */
2030   @Override
2031   public void mergeRegions(final byte[] encodedNameOfRegionA,
2032       final byte[] encodedNameOfRegionB, final boolean forcible)
2033       throws IOException {
2034     Pair<HRegionInfo, ServerName> pair = getRegion(encodedNameOfRegionA);
2035     if (pair != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID)
2036       throw new IllegalArgumentException("Can't invoke merge on non-default regions directly");
2037     pair = getRegion(encodedNameOfRegionB);
2038     if (pair != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID)
2039       throw new IllegalArgumentException("Can't invoke merge on non-default regions directly");
2040     executeCallable(new MasterCallable<Void>(getConnection()) {
2041       @Override
2042       public Void call(int callTimeout) throws ServiceException {
2043         try {
2044           DispatchMergingRegionsRequest request = RequestConverter
2045               .buildDispatchMergingRegionsRequest(encodedNameOfRegionA,
2046                 encodedNameOfRegionB, forcible);
2047           master.dispatchMergingRegions(null, request);
2048         } catch (DeserializationException de) {
2049           LOG.error("Could not parse destination server name: " + de);
2050         }
2051         return null;
2052       }
2053     });
2054   }
2055 
2056   /**
2057    * {@inheritDoc}
2058    */
2059   @Override
2060   public void split(final TableName tableName)
2061     throws IOException {
2062     split(tableName, null);
2063   }
2064 
2065   /**
2066    * {@inheritDoc}
2067    */
2068   @Override
2069   public void splitRegion(final byte[] regionName)
2070     throws IOException {
2071     splitRegion(regionName, null);
2072   }
2073 
2074   /**
2075    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
2076    * (byte[])} instead.
2077    */
2078   @Deprecated
2079   public void split(final String tableNameOrRegionName)
2080   throws IOException, InterruptedException {
2081     split(Bytes.toBytes(tableNameOrRegionName));
2082   }
2083 
2084   /**
2085    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
2086    * (byte[])} instead.
2087    */
2088   @Deprecated
2089   public void split(final byte[] tableNameOrRegionName)
2090   throws IOException, InterruptedException {
2091     split(tableNameOrRegionName, null);
2092   }
2093 
2094   /**
2095    * {@inheritDoc}
2096    */
2097   @Override
2098   public void split(final TableName tableName, final byte [] splitPoint)
2099   throws IOException {
2100     ZooKeeperWatcher zookeeper = null;
2101     try {
2102       checkTableExists(tableName);
2103       zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2104         new ThrowableAbortable());
2105       List<Pair<HRegionInfo, ServerName>> pairs;
2106       if (TableName.META_TABLE_NAME.equals(tableName)) {
2107         pairs = new MetaTableLocator().getMetaRegionsAndLocations(zookeeper);
2108       } else {
2109         pairs = MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
2110       }
2111       for (Pair<HRegionInfo, ServerName> pair: pairs) {
2112         // May not be a server for a particular row
2113         if (pair.getSecond() == null) continue;
2114         HRegionInfo r = pair.getFirst();
2115         // check for parents
2116         if (r.isSplitParent()) continue;
2117         // if a split point given, only split that particular region
2118         if (r.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID ||
2119            (splitPoint != null && !r.containsRow(splitPoint))) continue;
2120         // call out to region server to do split now
2121         split(pair.getSecond(), pair.getFirst(), splitPoint);
2122       }
2123     } finally {
2124       if (zookeeper != null) {
2125         zookeeper.close();
2126       }
2127     }
2128   }
2129 
2130   /**
2131    * {@inheritDoc}
2132    */
2133   @Override
2134   public void splitRegion(final byte[] regionName, final byte [] splitPoint)
2135   throws IOException {
2136     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2137     if (regionServerPair == null) {
2138       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2139     }
2140     if (regionServerPair.getFirst() != null &&
2141         regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
2142       throw new IllegalArgumentException("Can't split replicas directly. "
2143           + "Replicas are auto-split when their primary is split.");
2144     }
2145     if (regionServerPair.getSecond() == null) {
2146       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2147     }
2148     split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
2149   }
2150 
2151   /**
2152    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2153    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2154    */
2155   @Deprecated
2156   public void split(final String tableNameOrRegionName,
2157     final String splitPoint) throws IOException {
2158     split(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(splitPoint));
2159   }
2160 
2161   /**
2162    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2163    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2164    */
2165   @Deprecated
2166   public void split(final byte[] tableNameOrRegionName,
2167       final byte [] splitPoint) throws IOException {
2168     try {
2169       splitRegion(tableNameOrRegionName, splitPoint);
2170     } catch (IllegalArgumentException e) {
2171       // Bad region, try table
2172       split(TableName.valueOf(tableNameOrRegionName), splitPoint);
2173     }
2174   }
2175 
2176   @VisibleForTesting
2177   public void split(final ServerName sn, final HRegionInfo hri,
2178       byte[] splitPoint) throws IOException {
2179     if (hri.getStartKey() != null && splitPoint != null &&
2180          Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
2181        throw new IOException("should not give a splitkey which equals to startkey!");
2182     }
2183     // TODO: This is not executed via retries
2184     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2185     ProtobufUtil.split(admin, hri, splitPoint);
2186   }
2187 
2188   /**
2189    * Modify an existing table, more IRB friendly version.
2190    * Asynchronous operation.  This means that it may be a while before your
2191    * schema change is updated across all of the table.
2192    *
2193    * @param tableName name of table.
2194    * @param htd modified description of the table
2195    * @throws IOException if a remote or network exception occurs
2196    */
2197   @Override
2198   public void modifyTable(final TableName tableName, final HTableDescriptor htd)
2199   throws IOException {
2200     if (!tableName.equals(htd.getTableName())) {
2201       throw new IllegalArgumentException("the specified table name '" + tableName +
2202         "' doesn't match with the HTD one: " + htd.getTableName());
2203     }
2204 
2205     executeCallable(new MasterCallable<Void>(getConnection()) {
2206       @Override
2207       public Void call(int callTimeout) throws ServiceException {
2208         ModifyTableRequest request = RequestConverter.buildModifyTableRequest(tableName, htd);
2209         master.modifyTable(null, request);
2210         return null;
2211       }
2212     });
2213   }
2214 
  /**
   * Modify an existing table; byte[] table-name convenience overload.
   * @param tableName name of table
   * @param htd modified description of the table
   * @throws IOException if a remote or network exception occurs
   */
  public void modifyTable(final byte[] tableName, final HTableDescriptor htd)
  throws IOException {
    modifyTable(TableName.valueOf(tableName), htd);
  }
2219 
  /**
   * Modify an existing table; String table-name convenience overload.
   * @param tableName name of table
   * @param htd modified description of the table
   * @throws IOException if a remote or network exception occurs
   */
  public void modifyTable(final String tableName, final HTableDescriptor htd)
  throws IOException {
    modifyTable(TableName.valueOf(tableName), htd);
  }
2224 
2225   /**
2226    * @param regionName Name of a region.
2227    * @return a pair of HRegionInfo and ServerName if <code>regionName</code> is
2228    *  a verified region name (we call {@link
2229    *  MetaTableAccessor#getRegion(HConnection, byte[])}
2230    *  else null.
2231    * Throw IllegalArgumentException if <code>regionName</code> is null.
2232    * @throws IOException
2233    */
2234   Pair<HRegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException {
2235     if (regionName == null) {
2236       throw new IllegalArgumentException("Pass a table name or region name");
2237     }
2238     Pair<HRegionInfo, ServerName> pair =
2239       MetaTableAccessor.getRegion(connection, regionName);
2240     if (pair == null) {
2241       final AtomicReference<Pair<HRegionInfo, ServerName>> result =
2242         new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
2243       final String encodedName = Bytes.toString(regionName);
2244       MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
2245         @Override
2246         public boolean visit(Result data) throws IOException {
2247           HRegionInfo info = HRegionInfo.getHRegionInfo(data);
2248           if (info == null) {
2249             LOG.warn("No serialized HRegionInfo in " + data);
2250             return true;
2251           }
2252           RegionLocations rl = MetaTableAccessor.getRegionLocations(data);
2253           boolean matched = false;
2254           ServerName sn = null;
2255           if (rl != null) {
2256             for (HRegionLocation h : rl.getRegionLocations()) {
2257               if (h != null && encodedName.equals(h.getRegionInfo().getEncodedName())) {
2258                 sn = h.getServerName();
2259                 info = h.getRegionInfo();
2260                 matched = true;
2261               }
2262             }
2263           }
2264           if (!matched) return true;
2265           result.set(new Pair<HRegionInfo, ServerName>(info, sn));
2266           return false; // found the region, stop
2267         }
2268       };
2269 
2270       MetaTableAccessor.fullScanRegions(connection, visitor);
2271       pair = result.get();
2272     }
2273     return pair;
2274   }
2275 
2276   /**
2277    * If the input is a region name, it is returned as is. If it's an
2278    * encoded region name, the corresponding region is found from meta
2279    * and its region name is returned. If we can't find any region in
2280    * meta matching the input as either region name or encoded region
2281    * name, the input is returned as is. We don't throw unknown
2282    * region exception.
2283    */
2284   private byte[] getRegionName(
2285       final byte[] regionNameOrEncodedRegionName) throws IOException {
2286     if (Bytes.equals(regionNameOrEncodedRegionName,
2287         HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
2288           || Bytes.equals(regionNameOrEncodedRegionName,
2289             HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) {
2290       return HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
2291     }
2292     byte[] tmp = regionNameOrEncodedRegionName;
2293     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName);
2294     if (regionServerPair != null && regionServerPair.getFirst() != null) {
2295       tmp = regionServerPair.getFirst().getRegionName();
2296     }
2297     return tmp;
2298   }
2299 
2300   /**
2301    * Check if table exists or not
2302    * @param tableName Name of a table.
2303    * @return tableName instance
2304    * @throws IOException if a remote or network exception occurs.
2305    * @throws TableNotFoundException if table does not exist.
2306    */
2307   private TableName checkTableExists(final TableName tableName)
2308       throws IOException {
2309     return executeCallable(new ConnectionCallable<TableName>(getConnection()) {
2310       @Override
2311       public TableName call(int callTimeout) throws ServiceException, IOException {
2312         if (!MetaTableAccessor.tableExists(connection, tableName)) {
2313           throw new TableNotFoundException(tableName);
2314         }
2315         return tableName;
2316       }
2317     });
2318   }
2319 
2320   /**
2321    * Shuts down the HBase cluster
2322    * @throws IOException if a remote or network exception occurs
2323    */
2324   @Override
2325   public synchronized void shutdown() throws IOException {
2326     executeCallable(new MasterCallable<Void>(getConnection()) {
2327       @Override
2328       public Void call(int callTimeout) throws ServiceException {
2329         master.shutdown(null,ShutdownRequest.newBuilder().build());
2330         return null;
2331       }
2332     });
2333   }
2334 
2335   /**
2336    * Shuts down the current HBase master only.
2337    * Does not shutdown the cluster.
2338    * @see #shutdown()
2339    * @throws IOException if a remote or network exception occurs
2340    */
2341   @Override
2342   public synchronized void stopMaster() throws IOException {
2343     executeCallable(new MasterCallable<Void>(getConnection()) {
2344       @Override
2345       public Void call(int callTimeout) throws ServiceException {
2346         master.stopMaster(null, StopMasterRequest.newBuilder().build());
2347         return null;
2348       }
2349     });
2350   }
2351 
2352   /**
2353    * Stop the designated regionserver
2354    * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
2355    * <code>example.org:1234</code>
2356    * @throws IOException if a remote or network exception occurs
2357    */
2358   @Override
2359   public synchronized void stopRegionServer(final String hostnamePort)
2360   throws IOException {
2361     String hostname = Addressing.parseHostname(hostnamePort);
2362     int port = Addressing.parsePort(hostnamePort);
2363     AdminService.BlockingInterface admin =
2364       this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
2365     StopServerRequest request = RequestConverter.buildStopServerRequest(
2366       "Called by admin client " + this.connection.toString());
2367     try {
2368       admin.stopServer(null, request);
2369     } catch (ServiceException se) {
2370       throw ProtobufUtil.getRemoteException(se);
2371     }
2372   }
2373 
2374 
2375   /**
2376    * @return cluster status
2377    * @throws IOException if a remote or network exception occurs
2378    */
2379   @Override
2380   public ClusterStatus getClusterStatus() throws IOException {
2381     return executeCallable(new MasterCallable<ClusterStatus>(getConnection()) {
2382       @Override
2383       public ClusterStatus call(int callTimeout) throws ServiceException {
2384         GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest();
2385         return ClusterStatus.convert(master.getClusterStatus(null, req).getClusterStatus());
2386       }
2387     });
2388   }
2389 
2390   /**
2391    * @return Configuration used by the instance.
2392    */
2393   @Override
2394   public Configuration getConfiguration() {
2395     return this.conf;
2396   }
2397 
2398   /**
2399    * Create a new namespace
2400    * @param descriptor descriptor which describes the new namespace
2401    * @throws IOException
2402    */
2403   @Override
2404   public void createNamespace(final NamespaceDescriptor descriptor) throws IOException {
2405     executeCallable(new MasterCallable<Void>(getConnection()) {
2406       @Override
2407       public Void call(int callTimeout) throws Exception {
2408         master.createNamespace(null,
2409           CreateNamespaceRequest.newBuilder()
2410             .setNamespaceDescriptor(ProtobufUtil
2411               .toProtoNamespaceDescriptor(descriptor)).build()
2412         );
2413         return null;
2414       }
2415     });
2416   }
2417 
2418   /**
2419    * Modify an existing namespace
2420    * @param descriptor descriptor which describes the new namespace
2421    * @throws IOException
2422    */
2423   @Override
2424   public void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException {
2425     executeCallable(new MasterCallable<Void>(getConnection()) {
2426       @Override
2427       public Void call(int callTimeout) throws Exception {
2428         master.modifyNamespace(null, ModifyNamespaceRequest.newBuilder().
2429           setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build());
2430         return null;
2431       }
2432     });
2433   }
2434 
2435   /**
2436    * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
2437    * @param name namespace name
2438    * @throws IOException
2439    */
2440   @Override
2441   public void deleteNamespace(final String name) throws IOException {
2442     executeCallable(new MasterCallable<Void>(getConnection()) {
2443       @Override
2444       public Void call(int callTimeout) throws Exception {
2445         master.deleteNamespace(null, DeleteNamespaceRequest.newBuilder().
2446           setNamespaceName(name).build());
2447         return null;
2448       }
2449     });
2450   }
2451 
2452   /**
2453    * Get a namespace descriptor by name
2454    * @param name name of namespace descriptor
2455    * @return A descriptor
2456    * @throws IOException
2457    */
2458   @Override
2459   public NamespaceDescriptor getNamespaceDescriptor(final String name) throws IOException {
2460     return
2461         executeCallable(new MasterCallable<NamespaceDescriptor>(getConnection()) {
2462           @Override
2463           public NamespaceDescriptor call(int callTimeout) throws Exception {
2464             return ProtobufUtil.toNamespaceDescriptor(
2465               master.getNamespaceDescriptor(null, GetNamespaceDescriptorRequest.newBuilder().
2466                 setNamespaceName(name).build()).getNamespaceDescriptor());
2467           }
2468         });
2469   }
2470 
2471   /**
2472    * List available namespace descriptors
2473    * @return List of descriptors
2474    * @throws IOException
2475    */
2476   @Override
2477   public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
2478     return
2479         executeCallable(new MasterCallable<NamespaceDescriptor[]>(getConnection()) {
2480           @Override
2481           public NamespaceDescriptor[] call(int callTimeout) throws Exception {
2482             List<HBaseProtos.NamespaceDescriptor> list =
2483               master.listNamespaceDescriptors(null, ListNamespaceDescriptorsRequest.newBuilder().
2484                 build()).getNamespaceDescriptorList();
2485             NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()];
2486             for(int i = 0; i < list.size(); i++) {
2487               res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i));
2488             }
2489             return res;
2490           }
2491         });
2492   }
2493 
2494   /**
2495    * Get list of table descriptors by namespace
2496    * @param name namespace name
2497    * @return A descriptor
2498    * @throws IOException
2499    */
2500   @Override
2501   public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException {
2502     return
2503         executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
2504           @Override
2505           public HTableDescriptor[] call(int callTimeout) throws Exception {
2506             List<TableSchema> list =
2507               master.listTableDescriptorsByNamespace(null, ListTableDescriptorsByNamespaceRequest.
2508                 newBuilder().setNamespaceName(name).build()).getTableSchemaList();
2509             HTableDescriptor[] res = new HTableDescriptor[list.size()];
2510             for(int i=0; i < list.size(); i++) {
2511 
2512               res[i] = HTableDescriptor.convert(list.get(i));
2513             }
2514             return res;
2515           }
2516         });
2517   }
2518 
2519   /**
2520    * Get list of table names by namespace
2521    * @param name namespace name
2522    * @return The list of table names in the namespace
2523    * @throws IOException
2524    */
2525   @Override
2526   public TableName[] listTableNamesByNamespace(final String name) throws IOException {
2527     return
2528         executeCallable(new MasterCallable<TableName[]>(getConnection()) {
2529           @Override
2530           public TableName[] call(int callTimeout) throws Exception {
2531             List<HBaseProtos.TableName> tableNames =
2532               master.listTableNamesByNamespace(null, ListTableNamesByNamespaceRequest.
2533                 newBuilder().setNamespaceName(name).build())
2534                 .getTableNameList();
2535             TableName[] result = new TableName[tableNames.size()];
2536             for (int i = 0; i < tableNames.size(); i++) {
2537               result[i] = ProtobufUtil.toTableName(tableNames.get(i));
2538             }
2539             return result;
2540           }
2541         });
2542   }
2543 
2544   /**
2545    * Check to see if HBase is running. Throw an exception if not.
2546    * @param conf system configuration
2547    * @throws MasterNotRunningException if the master is not running
2548    * @throws ZooKeeperConnectionException if unable to connect to zookeeper
2549    */
2550   // Used by tests and by the Merge tool. Merge tool uses it to figure if HBase is up or not.
2551   public static void checkHBaseAvailable(Configuration conf)
2552   throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException {
2553     Configuration copyOfConf = HBaseConfiguration.create(conf);
2554     // We set it to make it fail as soon as possible if HBase is not available
2555     copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
2556     copyOfConf.setInt("zookeeper.recovery.retry", 0);
2557     try (ClusterConnection connection =
2558         (ClusterConnection)ConnectionFactory.createConnection(copyOfConf)) {
2559         // Check ZK first.
2560         // If the connection exists, we may have a connection to ZK that does not work anymore
2561         ZooKeeperKeepAliveConnection zkw = null;
2562         try {
2563           // This is NASTY. FIX!!!! Dependent on internal implementation! TODO
2564           zkw = ((ConnectionManager.HConnectionImplementation)connection).
2565             getKeepAliveZooKeeperWatcher();
2566           zkw.getRecoverableZooKeeper().getZooKeeper().exists(zkw.baseZNode, false);
2567         } catch (IOException e) {
2568           throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
2569         } catch (InterruptedException e) {
2570           throw (InterruptedIOException)
2571             new InterruptedIOException("Can't connect to ZooKeeper").initCause(e);
2572         } catch (KeeperException e) {
2573           throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
2574         } finally {
2575           if (zkw != null) {
2576             zkw.close();
2577           }
2578         }
2579       connection.isMasterRunning();
2580     }
2581   }
2582 
2583   /**
2584    * get the regions of a given table.
2585    *
2586    * @param tableName the name of the table
2587    * @return Ordered list of {@link HRegionInfo}.
2588    * @throws IOException
2589    */
2590   @Override
2591   public List<HRegionInfo> getTableRegions(final TableName tableName)
2592   throws IOException {
2593     ZooKeeperWatcher zookeeper =
2594       new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2595         new ThrowableAbortable());
2596     List<HRegionInfo> regions = null;
2597     try {
2598       if (TableName.META_TABLE_NAME.equals(tableName)) {
2599         regions = new MetaTableLocator().getMetaRegions(zookeeper);
2600       } else {
2601         regions = MetaTableAccessor.getTableRegions(connection, tableName, true);
2602       }
2603     } finally {
2604       zookeeper.close();
2605     }
2606     return regions;
2607   }
2608 
  /**
   * Get the regions of a given table.
   * @param tableName table name as bytes; converted to a {@link TableName}
   * @return Ordered list of {@link HRegionInfo}.
   * @throws IOException if a remote or network exception occurs
   */
  public List<HRegionInfo> getTableRegions(final byte[] tableName)
  throws IOException {
    return getTableRegions(TableName.valueOf(tableName));
  }
2613 
2614   @Override
2615   public synchronized void close() throws IOException {
2616     if (cleanupConnectionOnClose && this.connection != null && !this.closed) {
2617       this.connection.close();
2618       this.closed = true;
2619     }
2620   }
2621 
2622   /**
2623    * Get tableDescriptors
2624    * @param tableNames List of table names
2625    * @return HTD[] the tableDescriptor
2626    * @throws IOException if a remote or network exception occurs
2627    */
2628   @Override
2629   public HTableDescriptor[] getTableDescriptorsByTableName(final List<TableName> tableNames)
2630   throws IOException {
2631     return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
2632       @Override
2633       public HTableDescriptor[] call(int callTimeout) throws Exception {
2634         GetTableDescriptorsRequest req =
2635             RequestConverter.buildGetTableDescriptorsRequest(tableNames);
2636           return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req));
2637       }
2638     });
2639   }
2640 
2641   /**
2642    * Get tableDescriptor
2643    * @param tableName one table name
2644    * @return HTD the HTableDescriptor or null if the table not exists
2645    * @throws IOException if a remote or network exception occurs
2646    */
2647   private HTableDescriptor getTableDescriptorByTableName(TableName tableName)
2648       throws IOException {
2649     List<TableName> tableNames = new ArrayList<TableName>(1);
2650     tableNames.add(tableName);
2651 
2652     HTableDescriptor[] htdl = getTableDescriptorsByTableName(tableNames);
2653 
2654     if (htdl == null || htdl.length == 0) {
2655       return null;
2656     }
2657     else {
2658       return htdl[0];
2659     }
2660   }
2661 
2662   /**
2663    * Get tableDescriptors
2664    * @param names List of table names
2665    * @return HTD[] the tableDescriptor
2666    * @throws IOException if a remote or network exception occurs
2667    */
2668   @Override
2669   public HTableDescriptor[] getTableDescriptors(List<String> names)
2670   throws IOException {
2671     List<TableName> tableNames = new ArrayList<TableName>(names.size());
2672     for(String name : names) {
2673       tableNames.add(TableName.valueOf(name));
2674     }
2675     return getTableDescriptorsByTableName(tableNames);
2676   }
2677 
2678   private RollWALWriterResponse rollWALWriterImpl(final ServerName sn) throws IOException,
2679       FailedLogCloseException {
2680     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2681     RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest();
2682     try {
2683       return admin.rollWALWriter(null, request);
2684     } catch (ServiceException se) {
2685       throw ProtobufUtil.getRemoteException(se);
2686     }
2687   }
2688 
2689   /**
2690    * Roll the log writer. I.e. when using a file system based write ahead log,
2691    * start writing log messages to a new file.
2692    *
2693    * Note that when talking to a version 1.0+ HBase deployment, the rolling is asynchronous.
2694    * This method will return as soon as the roll is requested and the return value will
2695    * always be null. Additionally, the named region server may schedule store flushes at the
2696    * request of the wal handling the roll request.
2697    *
2698    * When talking to a 0.98 or older HBase deployment, the rolling is synchronous and the
2699    * return value may be either null or a list of encoded region names.
2700    *
2701    * @param serverName
2702    *          The servername of the regionserver. A server name is made of host,
2703    *          port and startcode. This is mandatory. Here is an example:
2704    *          <code> host187.example.com,60020,1289493121758</code>
2705    * @return a set of {@link HRegionInfo#getEncodedName()} that would allow the wal to
2706    *         clean up some underlying files. null if there's nothing to flush.
2707    * @throws IOException if a remote or network exception occurs
2708    * @throws FailedLogCloseException
2709    * @deprecated use {@link #rollWALWriter(ServerName)}
2710    */
2711   @Deprecated
2712   public synchronized byte[][] rollHLogWriter(String serverName)
2713       throws IOException, FailedLogCloseException {
2714     ServerName sn = ServerName.valueOf(serverName);
2715     final RollWALWriterResponse response = rollWALWriterImpl(sn);
2716     int regionCount = response.getRegionToFlushCount();
2717     if (0 == regionCount) {
2718       return null;
2719     }
2720     byte[][] regionsToFlush = new byte[regionCount][];
2721     for (int i = 0; i < regionCount; i++) {
2722       ByteString region = response.getRegionToFlush(i);
2723       regionsToFlush[i] = region.toByteArray();
2724     }
2725     return regionsToFlush;
2726   }
2727 
  /**
   * Roll the WAL writer of the given region server, discarding the response.
   * See {@link #rollHLogWriter(String)} for the sync/async behavior notes
   * across server versions.
   * @param serverName the region server whose WAL should be rolled
   * @throws IOException if a remote or network exception occurs
   * @throws FailedLogCloseException
   */
  @Override
  public synchronized void rollWALWriter(ServerName serverName)
      throws IOException, FailedLogCloseException {
    rollWALWriterImpl(serverName);
  }
2733 
2734   @Override
2735   public String[] getMasterCoprocessors() {
2736     try {
2737       return getClusterStatus().getMasterCoprocessors();
2738     } catch (IOException e) {
2739       LOG.error("Could not getClusterStatus()",e);
2740       return null;
2741     }
2742   }
2743 
2744   /**
2745    * {@inheritDoc}
2746    */
2747   @Override
2748   public CompactionState getCompactionState(final TableName tableName)
2749   throws IOException {
2750     CompactionState state = CompactionState.NONE;
2751     ZooKeeperWatcher zookeeper =
2752       new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2753         new ThrowableAbortable());
2754     try {
2755       checkTableExists(tableName);
2756       List<Pair<HRegionInfo, ServerName>> pairs;
2757       if (TableName.META_TABLE_NAME.equals(tableName)) {
2758         pairs = new MetaTableLocator().getMetaRegionsAndLocations(zookeeper);
2759       } else {
2760         pairs = MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
2761       }
2762       for (Pair<HRegionInfo, ServerName> pair: pairs) {
2763         if (pair.getFirst().isOffline()) continue;
2764         if (pair.getSecond() == null) continue;
2765         try {
2766           ServerName sn = pair.getSecond();
2767           AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2768           GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
2769             pair.getFirst().getRegionName(), true);
2770           GetRegionInfoResponse response = admin.getRegionInfo(null, request);
2771           switch (response.getCompactionState()) {
2772           case MAJOR_AND_MINOR:
2773             return CompactionState.MAJOR_AND_MINOR;
2774           case MAJOR:
2775             if (state == CompactionState.MINOR) {
2776               return CompactionState.MAJOR_AND_MINOR;
2777             }
2778             state = CompactionState.MAJOR;
2779             break;
2780           case MINOR:
2781             if (state == CompactionState.MAJOR) {
2782               return CompactionState.MAJOR_AND_MINOR;
2783             }
2784             state = CompactionState.MINOR;
2785             break;
2786           case NONE:
2787           default: // nothing, continue
2788           }
2789         } catch (NotServingRegionException e) {
2790           if (LOG.isDebugEnabled()) {
2791             LOG.debug("Trying to get compaction state of " +
2792               pair.getFirst() + ": " +
2793               StringUtils.stringifyException(e));
2794           }
2795         } catch (RemoteException e) {
2796           if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) {
2797             if (LOG.isDebugEnabled()) {
2798               LOG.debug("Trying to get compaction state of " + pair.getFirst() + ": "
2799                 + StringUtils.stringifyException(e));
2800             }
2801           } else {
2802             throw e;
2803           }
2804         }
2805       }
2806     } catch (ServiceException se) {
2807       throw ProtobufUtil.getRemoteException(se);
2808     } finally {
2809       zookeeper.close();
2810     }
2811     return state;
2812   }
2813 
2814   /**
2815    * {@inheritDoc}
2816    */
2817   @Override
2818   public CompactionState getCompactionStateForRegion(final byte[] regionName)
2819   throws IOException {
2820     try {
2821       Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2822       if (regionServerPair == null) {
2823         throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2824       }
2825       if (regionServerPair.getSecond() == null) {
2826         throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2827       }
2828       ServerName sn = regionServerPair.getSecond();
2829       AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2830       GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
2831         regionServerPair.getFirst().getRegionName(), true);
2832       GetRegionInfoResponse response = admin.getRegionInfo(null, request);
2833       return response.getCompactionState();
2834     } catch (ServiceException se) {
2835       throw ProtobufUtil.getRemoteException(se);
2836     }
2837   }
2838 
2839   /**
2840    * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
2841    * #getCompactionStateForRegion(byte[])} instead.
2842    */
2843   @Deprecated
2844   public CompactionState getCompactionState(final String tableNameOrRegionName)
2845   throws IOException, InterruptedException {
2846     return getCompactionState(Bytes.toBytes(tableNameOrRegionName));
2847   }
2848 
2849   /**
2850    * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
2851    * #getCompactionStateForRegion(byte[])} instead.
2852    */
2853   @Deprecated
2854   public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
2855   throws IOException, InterruptedException {
2856     try {
2857       return getCompactionStateForRegion(tableNameOrRegionName);
2858     } catch (IllegalArgumentException e) {
2859       // Invalid region, try table
2860       return getCompactionState(TableName.valueOf(tableNameOrRegionName));
2861     }
2862   }
2863 
2864   /**
2865    * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
2866    * taken. If the table is disabled, an offline snapshot is taken.
2867    * <p>
2868    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2869    * snapshot with the same name (even a different type or with different parameters) will fail with
2870    * a {@link SnapshotCreationException} indicating the duplicate naming.
2871    * <p>
2872    * Snapshot names follow the same naming constraints as tables in HBase. See
2873    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
2874    * @param snapshotName name of the snapshot to be created
2875    * @param tableName name of the table for which snapshot is created
2876    * @throws IOException if a remote or network exception occurs
2877    * @throws SnapshotCreationException if snapshot creation failed
2878    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2879    */
2880   @Override
2881   public void snapshot(final String snapshotName,
2882                        final TableName tableName) throws IOException,
2883       SnapshotCreationException, IllegalArgumentException {
2884     snapshot(snapshotName, tableName, SnapshotDescription.Type.FLUSH);
2885   }
2886 
  /**
   * Take a FLUSH-type snapshot for the given table; string-name convenience
   * overload of {@link #snapshot(String, TableName)}.
   * @param snapshotName name of the snapshot to be created
   * @param tableName name of the table for which snapshot is created
   * @throws IOException if a remote or network exception occurs
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  public void snapshot(final String snapshotName,
                       final String tableName) throws IOException,
      SnapshotCreationException, IllegalArgumentException {
    snapshot(snapshotName, TableName.valueOf(tableName),
        SnapshotDescription.Type.FLUSH);
  }
2893 
2894   /**
2895    * Create snapshot for the given table of given flush type.
2896    * <p>
2897    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2898    * snapshot with the same name (even a different type or with different parameters) will fail with
2899    * a {@link SnapshotCreationException} indicating the duplicate naming.
2900    * <p>
2901    * Snapshot names follow the same naming constraints as tables in HBase.
2902    * @param snapshotName name of the snapshot to be created
2903    * @param tableName name of the table for which snapshot is created
2904    * @param flushType if the snapshot should be taken without flush memstore first
2905    * @throws IOException if a remote or network exception occurs
2906    * @throws SnapshotCreationException if snapshot creation failed
2907    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2908    */
2909    public void snapshot(final byte[] snapshotName, final byte[] tableName,
2910                        final SnapshotDescription.Type flushType) throws
2911       IOException, SnapshotCreationException, IllegalArgumentException {
2912       snapshot(Bytes.toString(snapshotName), Bytes.toString(tableName), flushType);
2913   }
2914   /**
2915    public void snapshot(final String snapshotName,
2916     * Create a timestamp consistent snapshot for the given table.
2917                         final byte[] tableName) throws IOException,
2918     * <p>
2919     * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2920     * snapshot with the same name (even a different type or with different parameters) will fail
2921     * with a {@link SnapshotCreationException} indicating the duplicate naming.
2922     * <p>
2923     * Snapshot names follow the same naming constraints as tables in HBase.
2924     * @param snapshotName name of the snapshot to be created
2925     * @param tableName name of the table for which snapshot is created
2926     * @throws IOException if a remote or network exception occurs
2927     * @throws SnapshotCreationException if snapshot creation failed
2928     * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2929     */
  @Override
  public void snapshot(final byte[] snapshotName,
                       final TableName tableName) throws IOException,
      SnapshotCreationException, IllegalArgumentException {
    // Byte-array snapshot name convenience overload; defaults to FLUSH type.
    snapshot(Bytes.toString(snapshotName), tableName, SnapshotDescription.Type.FLUSH);
  }
2936 
  /**
   * Take a FLUSH-type snapshot for the given table; byte-array convenience
   * overload of {@link #snapshot(byte[], TableName)}.
   * @param snapshotName name of the snapshot to be created
   * @param tableName name of the table for which snapshot is created
   * @throws IOException if a remote or network exception occurs
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  public void snapshot(final byte[] snapshotName,
                       final byte[] tableName) throws IOException,
      SnapshotCreationException, IllegalArgumentException {
    snapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName),
      SnapshotDescription.Type.FLUSH);
  }
2943 
2944   /**
2945    * Create typed snapshot of the table.
2946    * <p>
2947    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2948    * snapshot with the same name (even a different type or with different parameters) will fail with
2949    * a {@link SnapshotCreationException} indicating the duplicate naming.
2950    * <p>
2951    * Snapshot names follow the same naming constraints as tables in HBase. See
2952    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
2953    * <p>
2954    * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
2955    *          snapshots stored on the cluster
2956    * @param tableName name of the table to snapshot
2957    * @param type type of snapshot to take
2958    * @throws IOException we fail to reach the master
2959    * @throws SnapshotCreationException if snapshot creation failed
2960    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2961    */
2962   @Override
2963   public void snapshot(final String snapshotName,
2964                        final TableName tableName,
2965                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
2966       IllegalArgumentException {
2967     SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
2968     builder.setTable(tableName.getNameAsString());
2969     builder.setName(snapshotName);
2970     builder.setType(type);
2971     snapshot(builder.build());
2972   }
2973 
  /**
   * Create typed snapshot of the table; string-name convenience overload of
   * {@link #snapshot(String, TableName, SnapshotDescription.Type)}.
   * @param snapshotName name to give the snapshot on the filesystem
   * @param tableName name of the table to snapshot
   * @param type type of snapshot to take
   * @throws IOException we fail to reach the master
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  public void snapshot(final String snapshotName,
                       final String tableName,
                      SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
      IllegalArgumentException {
    snapshot(snapshotName, TableName.valueOf(tableName), type);
  }
2980 
  /**
   * Create typed snapshot of the table; byte-array table-name convenience
   * overload of {@link #snapshot(String, TableName, SnapshotDescription.Type)}.
   * @param snapshotName name to give the snapshot on the filesystem
   * @param tableName name of the table to snapshot
   * @param type type of snapshot to take
   * @throws IOException we fail to reach the master
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  public void snapshot(final String snapshotName,
                       final byte[] tableName,
                      SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
      IllegalArgumentException {
    snapshot(snapshotName, TableName.valueOf(tableName), type);
  }
2987 
2988   /**
2989    * Take a snapshot and wait for the server to complete that snapshot (blocking).
2990    * <p>
2991    * Only a single snapshot should be taken at a time for an instance of HBase, or results may be
2992    * undefined (you can tell multiple HBase clusters to snapshot at the same time, but only one at a
2993    * time for a single cluster).
2994    * <p>
2995    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2996    * snapshot with the same name (even a different type or with different parameters) will fail with
2997    * a {@link SnapshotCreationException} indicating the duplicate naming.
2998    * <p>
2999    * Snapshot names follow the same naming constraints as tables in HBase. See
3000    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
3001    * <p>
3002    * You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])}
3003    * unless you are sure about the type of snapshot that you want to take.
3004    * @param snapshot snapshot to take
3005    * @throws IOException or we lose contact with the master.
3006    * @throws SnapshotCreationException if snapshot failed to be taken
3007    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3008    */
3009   @Override
3010   public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException,
3011       IllegalArgumentException {
3012     // actually take the snapshot
3013     SnapshotResponse response = takeSnapshotAsync(snapshot);
3014     final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot)
3015         .build();
3016     IsSnapshotDoneResponse done = null;
3017     long start = EnvironmentEdgeManager.currentTime();
3018     long max = response.getExpectedTimeout();
3019     long maxPauseTime = max / this.numRetries;
3020     int tries = 0;
3021     LOG.debug("Waiting a max of " + max + " ms for snapshot '" +
3022         ClientSnapshotDescriptionUtils.toString(snapshot) + "'' to complete. (max " +
3023         maxPauseTime + " ms per retry)");
3024     while (tries == 0
3025         || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) {
3026       try {
3027         // sleep a backoff <= pauseTime amount
3028         long sleep = getPauseTime(tries++);
3029         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
3030         LOG.debug("(#" + tries + ") Sleeping: " + sleep +
3031           "ms while waiting for snapshot completion.");
3032         Thread.sleep(sleep);
3033       } catch (InterruptedException e) {
3034         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
3035       }
3036       LOG.debug("Getting current status of snapshot from master...");
3037       done = executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
3038         @Override
3039         public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3040           return master.isSnapshotDone(null, request);
3041         }
3042       });
3043     }
3044     if (!done.getDone()) {
3045       throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
3046           + "' wasn't completed in expectedTime:" + max + " ms", snapshot);
3047     }
3048   }
3049 
3050   /**
3051    * Take a snapshot without waiting for the server to complete that snapshot (asynchronous)
3052    * <p>
3053    * Only a single snapshot should be taken at a time, or results may be undefined.
3054    * @param snapshot snapshot to take
3055    * @return response from the server indicating the max time to wait for the snapshot
3056    * @throws IOException if the snapshot did not succeed or we lose contact with the master.
3057    * @throws SnapshotCreationException if snapshot creation failed
3058    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3059    */
3060   @Override
3061   public SnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot) throws IOException,
3062       SnapshotCreationException {
3063     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
3064     final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot)
3065         .build();
3066     // run the snapshot on the master
3067     return executeCallable(new MasterCallable<SnapshotResponse>(getConnection()) {
3068       @Override
3069       public SnapshotResponse call(int callTimeout) throws ServiceException {
3070         return master.snapshot(null, request);
3071       }
3072     });
3073   }
3074 
3075   /**
3076    * Check the current state of the passed snapshot.
3077    * <p>
3078    * There are three possible states:
3079    * <ol>
3080    * <li>running - returns <tt>false</tt></li>
3081    * <li>finished - returns <tt>true</tt></li>
3082    * <li>finished with error - throws the exception that caused the snapshot to fail</li>
3083    * </ol>
3084    * <p>
3085    * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
3086    * run/started since the snapshot your are checking, you will recieve an
3087    * {@link UnknownSnapshotException}.
3088    * @param snapshot description of the snapshot to check
3089    * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
3090    *         running
3091    * @throws IOException if we have a network issue
3092    * @throws HBaseSnapshotException if the snapshot failed
3093    * @throws UnknownSnapshotException if the requested snapshot is unknown
3094    */
3095   @Override
3096   public boolean isSnapshotFinished(final SnapshotDescription snapshot)
3097       throws IOException, HBaseSnapshotException, UnknownSnapshotException {
3098 
3099     return executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
3100       @Override
3101       public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3102         return master.isSnapshotDone(null,
3103           IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build());
3104       }
3105     }).getDone();
3106   }
3107 
3108   /**
3109    * Restore the specified snapshot on the original table. (The table must be disabled)
3110    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3111    * is set to true, a snapshot of the current table is taken
3112    * before executing the restore operation.
3113    * In case of restore failure, the failsafe snapshot will be restored.
3114    * If the restore completes without problem the failsafe snapshot is deleted.
3115    *
3116    * @param snapshotName name of the snapshot to restore
3117    * @throws IOException if a remote or network exception occurs
3118    * @throws RestoreSnapshotException if snapshot failed to be restored
3119    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3120    */
3121   @Override
3122   public void restoreSnapshot(final byte[] snapshotName)
3123       throws IOException, RestoreSnapshotException {
3124     restoreSnapshot(Bytes.toString(snapshotName));
3125   }
3126 
3127   /**
3128    * Restore the specified snapshot on the original table. (The table must be disabled)
3129    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3130    * is set to true, a snapshot of the current table is taken
3131    * before executing the restore operation.
3132    * In case of restore failure, the failsafe snapshot will be restored.
3133    * If the restore completes without problem the failsafe snapshot is deleted.
3134    *
3135    * @param snapshotName name of the snapshot to restore
3136    * @throws IOException if a remote or network exception occurs
3137    * @throws RestoreSnapshotException if snapshot failed to be restored
3138    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3139    */
3140   @Override
3141   public void restoreSnapshot(final String snapshotName)
3142       throws IOException, RestoreSnapshotException {
3143     boolean takeFailSafeSnapshot =
3144       conf.getBoolean("hbase.snapshot.restore.take.failsafe.snapshot", false);
3145     restoreSnapshot(snapshotName, takeFailSafeSnapshot);
3146   }
3147 
3148   /**
3149    * Restore the specified snapshot on the original table. (The table must be disabled)
3150    * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
3151    * before executing the restore operation.
3152    * In case of restore failure, the failsafe snapshot will be restored.
3153    * If the restore completes without problem the failsafe snapshot is deleted.
3154    *
3155    * The failsafe snapshot name is configurable by using the property
3156    * "hbase.snapshot.restore.failsafe.name".
3157    *
3158    * @param snapshotName name of the snapshot to restore
3159    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
3160    * @throws IOException if a remote or network exception occurs
3161    * @throws RestoreSnapshotException if snapshot failed to be restored
3162    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3163    */
3164   @Override
3165   public void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot)
3166       throws IOException, RestoreSnapshotException {
3167     restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
3168   }
3169 
  /**
   * Restore the specified snapshot on the original table. (The table must be disabled)
   * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
   * before executing the restore operation.
   * In case of restore failure, the failsafe snapshot will be restored.
   * If the restore completes without problem the failsafe snapshot is deleted.
   *
   * The failsafe snapshot name is configurable by using the property
   * "hbase.snapshot.restore.failsafe.name".
   *
   * @param snapshotName name of the snapshot to restore
   * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
   * @throws IOException if a remote or network exception occurs
   * @throws RestoreSnapshotException if snapshot failed to be restored, or if no completed
   *         snapshot with the given name can be found
   * @throws IllegalArgumentException if the restore request is formatted incorrectly
   */
  @Override
  public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
      throws IOException, RestoreSnapshotException {
    // Resolve the source table by scanning the master's list of completed snapshots.
    TableName tableName = null;
    for (SnapshotDescription snapshotInfo: listSnapshots()) {
      if (snapshotInfo.getName().equals(snapshotName)) {
        tableName = TableName.valueOf(snapshotInfo.getTable());
        break;
      }
    }

    if (tableName == null) {
      throw new RestoreSnapshotException(
        "Unable to find the table name for snapshot=" + snapshotName);
    }

    // The table does not exist: switch to clone (which creates the table from the snapshot).
    if (!tableExists(tableName)) {
      cloneSnapshot(snapshotName, tableName);
      return;
    }

    // Restoring in place requires the table to be disabled first.
    if (!isTableDisabled(tableName)) {
      throw new TableNotDisabledException(tableName);
    }

    // Take a snapshot of the current state so we can roll back if the restore fails.
    String failSafeSnapshotSnapshotName = null;
    if (takeFailSafeSnapshot) {
      // The failsafe name template supports {snapshot.name}, {table.name} and
      // {restore.timestamp} placeholders.
      failSafeSnapshotSnapshotName = conf.get("hbase.snapshot.restore.failsafe.name",
        "hbase-failsafe-{snapshot.name}-{restore.timestamp}");
      failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotName
        .replace("{snapshot.name}", snapshotName)
        .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
        .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime()))
      LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
      snapshot(failSafeSnapshotSnapshotName, tableName);
    }

    try {
      // Restore snapshot
      internalRestoreSnapshot(snapshotName, tableName);
    } catch (IOException e) {
      // Something went wrong during the restore...
      // if the pre-restore snapshot is available try to rollback
      if (takeFailSafeSnapshot) {
        try {
          internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName);
          String msg = "Restore snapshot=" + snapshotName +
            " failed. Rollback to snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
          LOG.error(msg, e);
          throw new RestoreSnapshotException(msg, e);
        } catch (IOException ex) {
          String msg = "Failed to restore and rollback to snapshot=" + failSafeSnapshotSnapshotName;
          LOG.error(msg, ex);
          // Surface the original restore failure (e) as the cause, not the rollback failure.
          throw new RestoreSnapshotException(msg, e);
        }
      } else {
        throw new RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e);
      }
    }

    // The restore succeeded: delete the pre-restore (failsafe) snapshot, best effort.
    if (takeFailSafeSnapshot) {
      try {
        LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
        deleteSnapshot(failSafeSnapshotSnapshotName);
      } catch (IOException e) {
        // Leaving the failsafe snapshot behind is not fatal; log and continue.
        LOG.error("Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName, e);
      }
    }
  }
3259 
3260   /**
3261    * Create a new table by cloning the snapshot content.
3262    *
3263    * @param snapshotName name of the snapshot to be cloned
3264    * @param tableName name of the table where the snapshot will be restored
3265    * @throws IOException if a remote or network exception occurs
3266    * @throws TableExistsException if table to be created already exists
3267    * @throws RestoreSnapshotException if snapshot failed to be cloned
3268    * @throws IllegalArgumentException if the specified table has not a valid name
3269    */
3270   public void cloneSnapshot(final byte[] snapshotName, final byte[] tableName)
3271       throws IOException, TableExistsException, RestoreSnapshotException {
3272     cloneSnapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName));
3273   }
3274 
3275   /**
3276    * Create a new table by cloning the snapshot content.
3277    *
3278    * @param snapshotName name of the snapshot to be cloned
3279    * @param tableName name of the table where the snapshot will be restored
3280    * @throws IOException if a remote or network exception occurs
3281    * @throws TableExistsException if table to be created already exists
3282    * @throws RestoreSnapshotException if snapshot failed to be cloned
3283    * @throws IllegalArgumentException if the specified table has not a valid name
3284    */
3285   @Override
3286   public void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
3287       throws IOException, TableExistsException, RestoreSnapshotException {
3288     cloneSnapshot(Bytes.toString(snapshotName), tableName);
3289   }
3290 
3291 
3292 
3293   /**
3294    * Create a new table by cloning the snapshot content.
3295    *
3296    * @param snapshotName name of the snapshot to be cloned
3297    * @param tableName name of the table where the snapshot will be restored
3298    * @throws IOException if a remote or network exception occurs
3299    * @throws TableExistsException if table to be created already exists
3300    * @throws RestoreSnapshotException if snapshot failed to be cloned
3301    * @throws IllegalArgumentException if the specified table has not a valid name
3302    */
3303   public void cloneSnapshot(final String snapshotName, final String tableName)
3304       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
3305     cloneSnapshot(snapshotName, TableName.valueOf(tableName));
3306   }
3307 
3308   /**
3309    * Create a new table by cloning the snapshot content.
3310    *
3311    * @param snapshotName name of the snapshot to be cloned
3312    * @param tableName name of the table where the snapshot will be restored
3313    * @throws IOException if a remote or network exception occurs
3314    * @throws TableExistsException if table to be created already exists
3315    * @throws RestoreSnapshotException if snapshot failed to be cloned
3316    * @throws IllegalArgumentException if the specified table has not a valid name
3317    */
3318   @Override
3319   public void cloneSnapshot(final String snapshotName, final TableName tableName)
3320       throws IOException, TableExistsException, RestoreSnapshotException {
3321     if (tableExists(tableName)) {
3322       throw new TableExistsException(tableName);
3323     }
3324     internalRestoreSnapshot(snapshotName, tableName);
3325     waitUntilTableIsEnabled(tableName);
3326   }
3327 
3328   /**
3329    * Execute a distributed procedure on a cluster synchronously with return data
3330    *
3331    * @param signature A distributed procedure is uniquely identified
3332    * by its signature (default the root ZK node name of the procedure).
3333    * @param instance The instance name of the procedure. For some procedures, this parameter is
3334    * optional.
3335    * @param props Property/Value pairs of properties passing to the procedure
3336    * @return data returned after procedure execution. null if no return data.
3337    * @throws IOException
3338    */
3339   @Override
3340   public byte[] execProcedureWithRet(String signature, String instance,
3341       Map<String, String> props) throws IOException {
3342     ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3343     builder.setSignature(signature).setInstance(instance);
3344     for (Entry<String, String> entry : props.entrySet()) {
3345       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3346           .setValue(entry.getValue()).build();
3347       builder.addConfiguration(pair);
3348     }
3349 
3350     final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
3351         .setProcedure(builder.build()).build();
3352     // run the procedure on the master
3353     ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
3354         getConnection()) {
3355       @Override
3356       public ExecProcedureResponse call(int callTimeout) throws ServiceException {
3357         return master.execProcedureWithRet(null, request);
3358       }
3359     });
3360 
3361     return response.hasReturnData() ? response.getReturnData().toByteArray() : null;
3362   }
3363   /**
3364    * Execute a distributed procedure on a cluster.
3365    *
3366    * @param signature A distributed procedure is uniquely identified
3367    * by its signature (default the root ZK node name of the procedure).
3368    * @param instance The instance name of the procedure. For some procedures, this parameter is
3369    * optional.
3370    * @param props Property/Value pairs of properties passing to the procedure
3371    * @throws IOException
3372    */
3373   @Override
3374   public void execProcedure(String signature, String instance,
3375       Map<String, String> props) throws IOException {
3376     ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3377     builder.setSignature(signature).setInstance(instance);
3378     for (Entry<String, String> entry : props.entrySet()) {
3379       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3380           .setValue(entry.getValue()).build();
3381       builder.addConfiguration(pair);
3382     }
3383 
3384     final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
3385         .setProcedure(builder.build()).build();
3386     // run the procedure on the master
3387     ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
3388         getConnection()) {
3389       @Override
3390       public ExecProcedureResponse call(int callTimeout) throws ServiceException {
3391         return master.execProcedure(null, request);
3392       }
3393     });
3394 
3395     long start = EnvironmentEdgeManager.currentTime();
3396     long max = response.getExpectedTimeout();
3397     long maxPauseTime = max / this.numRetries;
3398     int tries = 0;
3399     LOG.debug("Waiting a max of " + max + " ms for procedure '" +
3400         signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms per retry)");
3401     boolean done = false;
3402     while (tries == 0
3403         || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
3404       try {
3405         // sleep a backoff <= pauseTime amount
3406         long sleep = getPauseTime(tries++);
3407         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
3408         LOG.debug("(#" + tries + ") Sleeping: " + sleep +
3409           "ms while waiting for procedure completion.");
3410         Thread.sleep(sleep);
3411       } catch (InterruptedException e) {
3412         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
3413       }
3414       LOG.debug("Getting current status of procedure from master...");
3415       done = isProcedureFinished(signature, instance, props);
3416     }
3417     if (!done) {
3418       throw new IOException("Procedure '" + signature + " : " + instance
3419           + "' wasn't completed in expectedTime:" + max + " ms");
3420     }
3421   }
3422 
3423   /**
3424    * Check the current state of the specified procedure.
3425    * <p>
3426    * There are three possible states:
3427    * <ol>
3428    * <li>running - returns <tt>false</tt></li>
3429    * <li>finished - returns <tt>true</tt></li>
3430    * <li>finished with error - throws the exception that caused the procedure to fail</li>
3431    * </ol>
3432    * <p>
3433    *
3434    * @param signature The signature that uniquely identifies a procedure
3435    * @param instance The instance name of the procedure
3436    * @param props Property/Value pairs of properties passing to the procedure
3437    * @return true if the specified procedure is finished successfully, false if it is still running
3438    * @throws IOException if the specified procedure finished with error
3439    */
3440   @Override
3441   public boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
3442       throws IOException {
3443     final ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3444     builder.setSignature(signature).setInstance(instance);
3445     for (Entry<String, String> entry : props.entrySet()) {
3446       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3447           .setValue(entry.getValue()).build();
3448       builder.addConfiguration(pair);
3449     }
3450     final ProcedureDescription desc = builder.build();
3451     return executeCallable(
3452         new MasterCallable<IsProcedureDoneResponse>(getConnection()) {
3453           @Override
3454           public IsProcedureDoneResponse call(int callTimeout) throws ServiceException {
3455             return master.isProcedureDone(null, IsProcedureDoneRequest
3456                 .newBuilder().setProcedure(desc).build());
3457           }
3458         }).getDone();
3459   }
3460 
3461   /**
3462    * Execute Restore/Clone snapshot and wait for the server to complete (blocking).
3463    * To check if the cloned table exists, use {@link #isTableAvailable} -- it is not safe to
3464    * create an HTable instance to this table before it is available.
3465    * @param snapshotName snapshot to restore
3466    * @param tableName table name to restore the snapshot on
3467    * @throws IOException if a remote or network exception occurs
3468    * @throws RestoreSnapshotException if snapshot failed to be restored
3469    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3470    */
3471   private void internalRestoreSnapshot(final String snapshotName, final TableName
3472       tableName)
3473       throws IOException, RestoreSnapshotException {
3474     SnapshotDescription snapshot = SnapshotDescription.newBuilder()
3475         .setName(snapshotName).setTable(tableName.getNameAsString()).build();
3476 
3477     // actually restore the snapshot
3478     internalRestoreSnapshotAsync(snapshot);
3479 
3480     final IsRestoreSnapshotDoneRequest request = IsRestoreSnapshotDoneRequest.newBuilder()
3481         .setSnapshot(snapshot).build();
3482     IsRestoreSnapshotDoneResponse done = IsRestoreSnapshotDoneResponse.newBuilder()
3483         .setDone(false).buildPartial();
3484     final long maxPauseTime = 5000;
3485     int tries = 0;
3486     while (!done.getDone()) {
3487       try {
3488         // sleep a backoff <= pauseTime amount
3489         long sleep = getPauseTime(tries++);
3490         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
3491         LOG.debug(tries + ") Sleeping: " + sleep
3492             + " ms while we wait for snapshot restore to complete.");
3493         Thread.sleep(sleep);
3494       } catch (InterruptedException e) {
3495         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
3496       }
3497       LOG.debug("Getting current status of snapshot restore from master...");
3498       done = executeCallable(new MasterCallable<IsRestoreSnapshotDoneResponse>(
3499           getConnection()) {
3500         @Override
3501         public IsRestoreSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3502           return master.isRestoreSnapshotDone(null, request);
3503         }
3504       });
3505     }
3506     if (!done.getDone()) {
3507       throw new RestoreSnapshotException("Snapshot '" + snapshot.getName() + "' wasn't restored.");
3508     }
3509   }
3510 
3511   /**
3512    * Execute Restore/Clone snapshot and wait for the server to complete (asynchronous)
3513    * <p>
3514    * Only a single snapshot should be restored at a time, or results may be undefined.
3515    * @param snapshot snapshot to restore
3516    * @return response from the server indicating the max time to wait for the snapshot
3517    * @throws IOException if a remote or network exception occurs
3518    * @throws RestoreSnapshotException if snapshot failed to be restored
3519    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3520    */
3521   private RestoreSnapshotResponse internalRestoreSnapshotAsync(final SnapshotDescription snapshot)
3522       throws IOException, RestoreSnapshotException {
3523     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
3524 
3525     final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot)
3526         .build();
3527 
3528     // run the snapshot restore on the master
3529     return executeCallable(new MasterCallable<RestoreSnapshotResponse>(getConnection()) {
3530       @Override
3531       public RestoreSnapshotResponse call(int callTimeout) throws ServiceException {
3532         return master.restoreSnapshot(null, request);
3533       }
3534     });
3535   }
3536 
3537   /**
3538    * List completed snapshots.
3539    * @return a list of snapshot descriptors for completed snapshots
3540    * @throws IOException if a network error occurs
3541    */
3542   @Override
3543   public List<SnapshotDescription> listSnapshots() throws IOException {
3544     return executeCallable(new MasterCallable<List<SnapshotDescription>>(getConnection()) {
3545       @Override
3546       public List<SnapshotDescription> call(int callTimeout) throws ServiceException {
3547         return master.getCompletedSnapshots(null, GetCompletedSnapshotsRequest.newBuilder().build())
3548             .getSnapshotsList();
3549       }
3550     });
3551   }
3552 
3553   /**
3554    * List all the completed snapshots matching the given regular expression.
3555    *
3556    * @param regex The regular expression to match against
3557    * @return - returns a List of SnapshotDescription
3558    * @throws IOException if a remote or network exception occurs
3559    */
3560   @Override
3561   public List<SnapshotDescription> listSnapshots(String regex) throws IOException {
3562     return listSnapshots(Pattern.compile(regex));
3563   }
3564 
3565   /**
3566    * List all the completed snapshots matching the given pattern.
3567    *
3568    * @param pattern The compiled regular expression to match against
3569    * @return - returns a List of SnapshotDescription
3570    * @throws IOException if a remote or network exception occurs
3571    */
3572   @Override
3573   public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException {
3574     List<SnapshotDescription> matched = new LinkedList<SnapshotDescription>();
3575     List<SnapshotDescription> snapshots = listSnapshots();
3576     for (SnapshotDescription snapshot : snapshots) {
3577       if (pattern.matcher(snapshot.getName()).matches()) {
3578         matched.add(snapshot);
3579       }
3580     }
3581     return matched;
3582   }
3583 
3584   /**
3585    * Delete an existing snapshot.
3586    * @param snapshotName name of the snapshot
3587    * @throws IOException if a remote or network exception occurs
3588    */
3589   @Override
3590   public void deleteSnapshot(final byte[] snapshotName) throws IOException {
3591     deleteSnapshot(Bytes.toString(snapshotName));
3592   }
3593 
3594   /**
3595    * Delete an existing snapshot.
3596    * @param snapshotName name of the snapshot
3597    * @throws IOException if a remote or network exception occurs
3598    */
3599   @Override
3600   public void deleteSnapshot(final String snapshotName) throws IOException {
3601     // make sure the snapshot is possibly valid
3602     TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName));
3603     // do the delete
3604     executeCallable(new MasterCallable<Void>(getConnection()) {
3605       @Override
3606       public Void call(int callTimeout) throws ServiceException {
3607         master.deleteSnapshot(null,
3608           DeleteSnapshotRequest.newBuilder().
3609             setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build()
3610         );
3611         return null;
3612       }
3613     });
3614   }
3615 
3616   /**
3617    * Delete existing snapshots whose names match the pattern passed.
3618    * @param regex The regular expression to match against
3619    * @throws IOException if a remote or network exception occurs
3620    */
3621   @Override
3622   public void deleteSnapshots(final String regex) throws IOException {
3623     deleteSnapshots(Pattern.compile(regex));
3624   }
3625 
3626   /**
3627    * Delete existing snapshots whose names match the pattern passed.
3628    * @param pattern pattern for names of the snapshot to match
3629    * @throws IOException if a remote or network exception occurs
3630    */
3631   @Override
3632   public void deleteSnapshots(final Pattern pattern) throws IOException {
3633     List<SnapshotDescription> snapshots = listSnapshots(pattern);
3634     for (final SnapshotDescription snapshot : snapshots) {
3635       try {
3636         internalDeleteSnapshot(snapshot);
3637       } catch (IOException ex) {
3638         LOG.info(
3639           "Failed to delete snapshot " + snapshot.getName() + " for table " + snapshot.getTable(),
3640           ex);
3641       }
3642     }
3643   }
3644 
3645   private void internalDeleteSnapshot(final SnapshotDescription snapshot) throws IOException {
3646     executeCallable(new MasterCallable<Void>(getConnection()) {
3647       @Override
3648       public Void call(int callTimeout) throws ServiceException {
3649         this.master.deleteSnapshot(null, DeleteSnapshotRequest.newBuilder().setSnapshot(snapshot)
3650             .build());
3651         return null;
3652       }
3653     });
3654   }
3655 
  /**
   * Apply the new quota settings.
   *
   * @param quota the quota settings
   * @throws IOException if a remote or network exception occurs
   */
  @Override
  public void setQuota(final QuotaSettings quota) throws IOException {
    // Run the RPC through the retrying master callable so the standard
    // retry/timeout policy applies.
    executeCallable(new MasterCallable<Void>(getConnection()) {
      @Override
      public Void call(int callTimeout) throws ServiceException {
        // QuotaSettings knows how to serialize itself to the protobuf request.
        this.master.setQuota(null, QuotaSettings.buildSetQuotaRequestProto(quota));
        return null;
      }
    });
  }
3672 
3673   /**
3674    * Return a Quota Scanner to list the quotas based on the filter.
3675    *
3676    * @param filter the quota settings filter
3677    * @return the quota scanner
3678    * @throws IOException if a remote or network exception occurs
3679    */
3680   @Override
3681   public QuotaRetriever getQuotaRetriever(final QuotaFilter filter) throws IOException {
3682     return QuotaRetriever.open(conf, filter);
3683   }
3684 
3685   private <C extends RetryingCallable<V> & Closeable, V> V executeCallable(C callable)
3686       throws IOException {
3687     RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller();
3688     try {
3689       return caller.callWithRetries(callable, operationTimeout);
3690     } finally {
3691       callable.close();
3692     }
3693   }
3694 
3695   /**
3696    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
3697    * connected to the active master.
3698    *
3699    * <p>
3700    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
3701    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
3702    * </p>
3703    *
3704    * <div style="background-color: #cccccc; padding: 2px">
3705    * <blockquote><pre>
3706    * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
3707    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
3708    * MyCallRequest request = MyCallRequest.newBuilder()
3709    *     ...
3710    *     .build();
3711    * MyCallResponse response = service.myCall(null, request);
3712    * </pre></blockquote></div>
3713    *
3714    * @return A MasterCoprocessorRpcChannel instance
3715    */
3716   @Override
3717   public CoprocessorRpcChannel coprocessorService() {
3718     return new MasterCoprocessorRpcChannel(connection);
3719   }
3720 
  /**
   * Simple {@link Abortable}, throwing RuntimeException on abort.
   */
  private static class ThrowableAbortable implements Abortable {

    /** Wraps the reason and cause in a RuntimeException and throws it. */
    @Override
    public void abort(String why, Throwable e) {
      throw new RuntimeException(why, e);
    }

    /** Always reports aborted, since {@link #abort} never returns normally. */
    @Override
    public boolean isAborted() {
      return true;
    }
  }
3736 
3737   /**
3738    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
3739    * connected to the passed region server.
3740    *
3741    * <p>
3742    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
3743    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
3744    * </p>
3745    *
3746    * <div style="background-color: #cccccc; padding: 2px">
3747    * <blockquote><pre>
3748    * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
3749    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
3750    * MyCallRequest request = MyCallRequest.newBuilder()
3751    *     ...
3752    *     .build();
3753    * MyCallResponse response = service.myCall(null, request);
3754    * </pre></blockquote></div>
3755    *
3756    * @param sn the server name to which the endpoint call is made
3757    * @return A RegionServerCoprocessorRpcChannel instance
3758    */
3759   @Override
3760   public CoprocessorRpcChannel coprocessorService(ServerName sn) {
3761     return new RegionServerCoprocessorRpcChannel(connection, sn);
3762   }
3763 
3764   @Override
3765   public void updateConfiguration(ServerName server) throws IOException {
3766     try {
3767       this.connection.getAdmin(server).updateConfiguration(null,
3768         UpdateConfigurationRequest.getDefaultInstance());
3769     } catch (ServiceException e) {
3770       throw ProtobufUtil.getRemoteException(e);
3771     }
3772   }
3773 
3774   @Override
3775   public void updateConfiguration() throws IOException {
3776     for (ServerName server : this.getClusterStatus().getServers()) {
3777       updateConfiguration(server);
3778     }
3779   }
3780 
3781   @Override
3782   public int getMasterInfoPort() throws IOException {
3783     // TODO: Fix!  Reaching into internal implementation!!!!
3784     ConnectionManager.HConnectionImplementation connection =
3785         (ConnectionManager.HConnectionImplementation)this.connection;
3786     ZooKeeperKeepAliveConnection zkw = connection.getKeepAliveZooKeeperWatcher();
3787     try {
3788       return MasterAddressTracker.getMasterInfoPort(zkw);
3789     } catch (KeeperException e) {
3790       throw new IOException("Failed to get master info port from MasterAddressTracker", e);
3791     }
3792   }
3793 
3794   @Override
3795   public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException {
3796     return executeCallable(new MasterCallable<Long>(getConnection()) {
3797       @Override
3798       public Long call(int callTimeout) throws ServiceException {
3799         MajorCompactionTimestampRequest req =
3800             MajorCompactionTimestampRequest.newBuilder()
3801                 .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
3802         return master.getLastMajorCompactionTimestamp(null, req).getCompactionTimestamp();
3803       }
3804     });
3805   }
3806 
3807   @Override
3808   public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException {
3809     return executeCallable(new MasterCallable<Long>(getConnection()) {
3810       @Override
3811       public Long call(int callTimeout) throws ServiceException {
3812         MajorCompactionTimestampForRegionRequest req =
3813             MajorCompactionTimestampForRegionRequest
3814                 .newBuilder()
3815                 .setRegion(
3816                   RequestConverter
3817                       .buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)).build();
3818         return master.getLastMajorCompactionTimestampForRegion(null, req).getCompactionTimestamp();
3819       }
3820     });
3821   }
3822 }