View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.client;
20  
21  import java.io.Closeable;
22  import java.io.IOException;
23  import java.io.InterruptedIOException;
24  import java.net.SocketTimeoutException;
25  import java.util.ArrayList;
26  import java.util.Arrays;
27  import java.util.HashMap;
28  import java.util.LinkedList;
29  import java.util.List;
30  import java.util.Map;
31  import java.util.Map.Entry;
32  import java.util.concurrent.atomic.AtomicInteger;
33  import java.util.concurrent.atomic.AtomicReference;
34  import java.util.regex.Pattern;
35  
36  import org.apache.commons.logging.Log;
37  import org.apache.commons.logging.LogFactory;
38  import org.apache.hadoop.classification.InterfaceAudience;
39  import org.apache.hadoop.classification.InterfaceStability;
40  import org.apache.hadoop.conf.Configuration;
41  import org.apache.hadoop.hbase.Abortable;
42  import org.apache.hadoop.hbase.ClusterStatus;
43  import org.apache.hadoop.hbase.HBaseConfiguration;
44  import org.apache.hadoop.hbase.HBaseIOException;
45  import org.apache.hadoop.hbase.HColumnDescriptor;
46  import org.apache.hadoop.hbase.HConstants;
47  import org.apache.hadoop.hbase.HRegionInfo;
48  import org.apache.hadoop.hbase.HRegionLocation;
49  import org.apache.hadoop.hbase.HTableDescriptor;
50  import org.apache.hadoop.hbase.MasterNotRunningException;
51  import org.apache.hadoop.hbase.MetaTableAccessor;
52  import org.apache.hadoop.hbase.NamespaceDescriptor;
53  import org.apache.hadoop.hbase.NotServingRegionException;
54  import org.apache.hadoop.hbase.RegionException;
55  import org.apache.hadoop.hbase.RegionLocations;
56  import org.apache.hadoop.hbase.ServerName;
57  import org.apache.hadoop.hbase.TableExistsException;
58  import org.apache.hadoop.hbase.TableName;
59  import org.apache.hadoop.hbase.TableNotDisabledException;
60  import org.apache.hadoop.hbase.TableNotEnabledException;
61  import org.apache.hadoop.hbase.TableNotFoundException;
62  import org.apache.hadoop.hbase.UnknownRegionException;
63  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
64  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
65  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
66  import org.apache.hadoop.hbase.exceptions.DeserializationException;
67  import org.apache.hadoop.hbase.exceptions.MergeRegionException;
68  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
69  import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel;
70  import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
71  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
72  import org.apache.hadoop.hbase.protobuf.RequestConverter;
73  import org.apache.hadoop.hbase.protobuf.ResponseConverter;
74  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
75  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
76  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
77  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
78  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
79  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
80  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
81  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
82  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
83  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
84  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
85  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
86  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
87  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
88  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
89  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
90  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
91  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
92  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
93  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
94  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
95  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
96  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
97  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
98  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
99  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
100 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
101 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
102 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
103 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
104 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
105 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
106 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
107 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
108 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
109 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
110 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
111 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
112 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
113 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
114 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
115 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
116 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
117 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
118 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
119 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
120 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
121 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
122 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
123 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
124 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
125 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
126 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
127 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
128 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
129 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
130 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
131 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
132 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
133 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
134 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
135 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
136 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
137 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
138 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
139 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
140 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
141 import org.apache.hadoop.hbase.util.Addressing;
142 import org.apache.hadoop.hbase.util.Bytes;
143 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
144 import org.apache.hadoop.hbase.util.Pair;
145 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
146 import org.apache.hadoop.ipc.RemoteException;
147 import org.apache.hadoop.util.StringUtils;
148 import org.apache.zookeeper.KeeperException;
149 
150 import com.google.protobuf.ByteString;
151 import com.google.protobuf.ServiceException;
152 
/**
 * Provides an interface to manage HBase database table metadata + general
 * administrative functions.  Use HBaseAdmin to create, drop, list, enable and
 * disable tables. Use it also to add and drop table column families.
 *
 * <p>See {@link HTable} to add, update, and delete data from an individual table.
 * <p>Currently HBaseAdmin instances are not expected to be long-lived.  For
 * example, an HBaseAdmin instance will not ride over a Master restart.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HBaseAdmin implements Admin {
  private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);

  // Prefix used when building this admin's identifier for ZooKeeper.
  private static final String ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";

  // We use the implementation class rather then the interface because we
  //  need the package protected functions to get the connection to master
  private ClusterConnection connection;

  // NOTE(review): volatile suggests this may be read/replaced across threads
  // — confirm against callers outside this chunk.
  private volatile Configuration conf;
  // Base sleep (ms) between retries; scaled by HConstants.RETRY_BACKOFF in getPauseTime().
  private final long pause;
  // Retry count for 'normal' operations.
  private final int numRetries;
  // Some operations can take a long time such as disable of big table.
  // numRetries is for 'normal' stuff... Multiply by this factor when
  // want to wait a long time.
  private final int retryLongerMultiplier;
  // Set once abort(String, Throwable) has been invoked.
  private boolean aborted;
  private boolean cleanupConnectionOnClose = false; // close the connection in close()
  private boolean closed = false;
  // Per-operation timeout (ms) as reported by getOperationTimeout().
  private int operationTimeout;

  // Factory producing retrying RPC callers for master operations.
  private RpcRetryingCallerFactory rpcCallerFactory;
186 
  /**
   * Constructor.
   * See {@link #HBaseAdmin(HConnection connection)}
   *
   * @param c Configuration object. Copied internally.
   */
  public HBaseAdmin(Configuration c)
  throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
    // Will not leak connections, as the new implementation of the constructor
    // does not throw exceptions anymore.
    this(ConnectionManager.getConnectionInternal(new Configuration(c)));
    // This admin created (and therefore owns) the connection, so close()
    // is responsible for tearing it down.
    this.cleanupConnectionOnClose = true;
  }
200 
201   @Override
202   public int getOperationTimeout() {
203     return operationTimeout;
204   }
205 
206 
  /**
   * Constructor for externally managed HConnections.
   * The connection to master will be created when required by admin functions.
   *
   * @param connection The HConnection instance to use
   * @throws MasterNotRunningException, ZooKeeperConnectionException are not
   *  thrown anymore but kept into the interface for backward api compatibility
   * @deprecated Do not use this internal ctor.
   */
  @Deprecated
  public HBaseAdmin(HConnection connection)
      throws MasterNotRunningException, ZooKeeperConnectionException {
    // NOTE(review): assumes every HConnection handed in is a ClusterConnection;
    // a foreign HConnection implementation would fail this cast — confirm.
    this((ClusterConnection)connection);
  }
221 
222   HBaseAdmin(ClusterConnection connection) {
223     this.conf = connection.getConfiguration();
224     this.connection = connection;
225 
226     this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
227         HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
228     this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
229         HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
230     this.retryLongerMultiplier = this.conf.getInt(
231         "hbase.client.retries.longer.multiplier", 10);
232     this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
233         HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
234 
235     this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf);
236   }
237 
238   @Override
239   public void abort(String why, Throwable e) {
240     // Currently does nothing but throw the passed message and exception
241     this.aborted = true;
242     throw new RuntimeException(why, e);
243   }
244 
245   @Override
246   public boolean isAborted(){
247     return this.aborted;
248   }
249 
250   /** @return HConnection used by this object. */
251   @Override
252   public HConnection getConnection() {
253     return connection;
254   }
255 
256   /** @return - true if the master server is running. Throws an exception
257    *  otherwise.
258    * @throws ZooKeeperConnectionException
259    * @throws MasterNotRunningException
260    */
261   @Override
262   public boolean isMasterRunning()
263   throws MasterNotRunningException, ZooKeeperConnectionException {
264     return connection.isMasterRunning();
265   }
266 
267   /**
268    * @param tableName Table to check.
269    * @return True if table exists already.
270    * @throws IOException
271    */
272   @Override
273   public boolean tableExists(final TableName tableName) throws IOException {
274     return MetaTableAccessor.tableExists(connection, tableName);
275   }
276 
277   public boolean tableExists(final byte[] tableName)
278   throws IOException {
279     return tableExists(TableName.valueOf(tableName));
280   }
281 
282   public boolean tableExists(final String tableName)
283   throws IOException {
284     return tableExists(TableName.valueOf(tableName));
285   }
286 
287   /**
288    * List all the userspace tables.  In other words, scan the hbase:meta table.
289    *
290    * If we wanted this to be really fast, we could implement a special
291    * catalog table that just contains table names and their descriptors.
292    * Right now, it only exists as part of the hbase:meta table's region info.
293    *
294    * @return - returns an array of HTableDescriptors
295    * @throws IOException if a remote or network exception occurs
296    */
297   @Override
298   public HTableDescriptor[] listTables() throws IOException {
299     return this.connection.listTables();
300   }
301 
302   /**
303    * List all the userspace tables matching the given pattern.
304    *
305    * @param pattern The compiled regular expression to match against
306    * @return - returns an array of HTableDescriptors
307    * @throws IOException if a remote or network exception occurs
308    * @see #listTables()
309    */
310   @Override
311   public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
312     List<HTableDescriptor> matched = new LinkedList<HTableDescriptor>();
313     HTableDescriptor[] tables = listTables();
314     for (HTableDescriptor table : tables) {
315       if (pattern.matcher(table.getTableName().getNameAsString()).matches()) {
316         matched.add(table);
317       }
318     }
319     return matched.toArray(new HTableDescriptor[matched.size()]);
320   }
321 
322   /**
323    * List all the userspace tables matching the given regular expression.
324    *
325    * @param regex The regular expression to match against
326    * @return - returns an array of HTableDescriptors
327    * @throws IOException if a remote or network exception occurs
328    * @see #listTables(java.util.regex.Pattern)
329    */
330   @Override
331   public HTableDescriptor[] listTables(String regex) throws IOException {
332     return listTables(Pattern.compile(regex));
333   }
334 
335   /**
336    * List all of the names of userspace tables.
337    * @return String[] table names
338    * @throws IOException if a remote or network exception occurs
339    */
340   @Deprecated
341   public String[] getTableNames() throws IOException {
342     return this.connection.getTableNames();
343   }
344 
345   /**
346    * List all of the names of userspace tables matching the given regular expression.
347    * @param pattern The regular expression to match against
348    * @return String[] table names
349    * @throws IOException if a remote or network exception occurs
350    */
351   @Deprecated
352   public String[] getTableNames(Pattern pattern) throws IOException {
353     List<String> matched = new ArrayList<String>();
354     for (String name: this.connection.getTableNames()) {
355       if (pattern.matcher(name).matches()) {
356         matched.add(name);
357       }
358     }
359     return matched.toArray(new String[matched.size()]);
360   }
361 
362   /**
363    * List all of the names of userspace tables matching the given regular expression.
364    * @param regex The regular expression to match against
365    * @return String[] table names
366    * @throws IOException if a remote or network exception occurs
367    */
368   @Deprecated
369   public String[] getTableNames(String regex) throws IOException {
370     return getTableNames(Pattern.compile(regex));
371   }
372 
373   /**
374    * List all of the names of userspace tables.
375    * @return TableName[] table names
376    * @throws IOException if a remote or network exception occurs
377    */
378   @Override
379   public TableName[] listTableNames() throws IOException {
380     return this.connection.listTableNames();
381   }
382 
383   /**
384    * Method for getting the tableDescriptor
385    * @param tableName as a byte []
386    * @return the tableDescriptor
387    * @throws TableNotFoundException
388    * @throws IOException if a remote or network exception occurs
389    */
390   @Override
391   public HTableDescriptor getTableDescriptor(final TableName tableName)
392   throws TableNotFoundException, IOException {
393     return this.connection.getHTableDescriptor(tableName);
394   }
395 
396   public HTableDescriptor getTableDescriptor(final byte[] tableName)
397   throws TableNotFoundException, IOException {
398     return getTableDescriptor(TableName.valueOf(tableName));
399   }
400 
401   private long getPauseTime(int tries) {
402     int triesCount = tries;
403     if (triesCount >= HConstants.RETRY_BACKOFF.length) {
404       triesCount = HConstants.RETRY_BACKOFF.length - 1;
405     }
406     return this.pause * HConstants.RETRY_BACKOFF[triesCount];
407   }
408 
409   /**
410    * Creates a new table.
411    * Synchronous operation.
412    *
413    * @param desc table descriptor for table
414    *
415    * @throws IllegalArgumentException if the table name is reserved
416    * @throws MasterNotRunningException if master is not running
417    * @throws TableExistsException if table already exists (If concurrent
418    * threads, the table may have been created between test-for-existence
419    * and attempt-at-creation).
420    * @throws IOException if a remote or network exception occurs
421    */
422   @Override
423   public void createTable(HTableDescriptor desc)
424   throws IOException {
425     createTable(desc, null);
426   }
427 
428   /**
429    * Creates a new table with the specified number of regions.  The start key
430    * specified will become the end key of the first region of the table, and
431    * the end key specified will become the start key of the last region of the
432    * table (the first region has a null start key and the last region has a
433    * null end key).
434    *
435    * BigInteger math will be used to divide the key range specified into
436    * enough segments to make the required number of total regions.
437    *
438    * Synchronous operation.
439    *
440    * @param desc table descriptor for table
441    * @param startKey beginning of key range
442    * @param endKey end of key range
443    * @param numRegions the total number of regions to create
444    *
445    * @throws IllegalArgumentException if the table name is reserved
446    * @throws MasterNotRunningException if master is not running
447    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
448    * threads, the table may have been created between test-for-existence
449    * and attempt-at-creation).
450    * @throws IOException
451    */
452   @Override
453   public void createTable(HTableDescriptor desc, byte [] startKey,
454       byte [] endKey, int numRegions)
455   throws IOException {
456     if(numRegions < 3) {
457       throw new IllegalArgumentException("Must create at least three regions");
458     } else if(Bytes.compareTo(startKey, endKey) >= 0) {
459       throw new IllegalArgumentException("Start key must be smaller than end key");
460     }
461     if (numRegions == 3) {
462       createTable(desc, new byte[][]{startKey, endKey});
463       return;
464     }
465     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
466     if(splitKeys == null || splitKeys.length != numRegions - 1) {
467       throw new IllegalArgumentException("Unable to split key range into enough regions");
468     }
469     createTable(desc, splitKeys);
470   }
471 
  /**
   * Creates a new table with an initial set of empty regions defined by the
   * specified split keys.  The total number of regions created will be the
   * number of split keys plus one. Synchronous operation.
   * Note : Avoid passing empty split key.
   *
   * @param desc table descriptor for table
   * @param splitKeys array of split keys for the initial regions of the table
   *
   * @throws IllegalArgumentException if the table name is reserved, if the split keys
   * are repeated and if the split key has empty byte array.
   * @throws MasterNotRunningException if master is not running
   * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
   * threads, the table may have been created between test-for-existence
   * and attempt-at-creation).
   * @throws IOException
   */
  @Override
  public void createTable(final HTableDescriptor desc, byte [][] splitKeys)
  throws IOException {
    try {
      createTableAsync(desc, splitKeys);
    } catch (SocketTimeoutException ste) {
      // Deliberate: a timeout on the async request does not mean creation
      // failed — the polling loop below still waits for the table to appear.
      LOG.warn("Creating " + desc.getTableName() + " took too long", ste);
    }
    // Expected region count: one region per split interval, times replication.
    int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication();
    int prevRegCount = 0;
    // Phase 1 counts regions via meta scan; phase 2 waits for enable.
    boolean doneWithMetaScan = false;
    for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier;
      ++tries) {
      if (!doneWithMetaScan) {
        // Wait for new table to come on-line
        final AtomicInteger actualRegCount = new AtomicInteger(0);
        // Counts this table's region replicas that have a server assignment.
        MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
          @Override
          public boolean processRow(Result rowResult) throws IOException {
            RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
            if (list == null) {
              LOG.warn("No serialized HRegionInfo in " + rowResult);
              return true;
            }
            HRegionLocation l = list.getRegionLocation();
            if (l == null) {
              return true;
            }
            // Returning false stops the scan: rows are sorted, so a row for a
            // different table means we are past this table's rows.
            if (!l.getRegionInfo().getTable().equals(desc.getTableName())) {
              return false;
            }
            if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true;
            HRegionLocation[] locations = list.getRegionLocations();
            for (HRegionLocation location : locations) {
              if (location == null) continue;
              ServerName serverName = location.getServerName();
              // Make sure that regions are assigned to server
              if (serverName != null && serverName.getHostAndPort() != null) {
                actualRegCount.incrementAndGet();
              }
            }
            return true;
          }
        };
        MetaScanner.metaScan(conf, connection, visitor, desc.getTableName());
        if (actualRegCount.get() < numRegs) {
          if (tries == this.numRetries * this.retryLongerMultiplier - 1) {
            throw new RegionOfflineException("Only " + actualRegCount.get() +
              " of " + numRegs + " regions are online; retries exhausted.");
          }
          try { // Sleep
            Thread.sleep(getPauseTime(tries));
          } catch (InterruptedException e) {
            throw new InterruptedIOException("Interrupted when opening" +
              " regions; " + actualRegCount.get() + " of " + numRegs +
              " regions processed so far");
          }
          if (actualRegCount.get() > prevRegCount) { // Making progress
            prevRegCount = actualRegCount.get();
            // Reset the retry budget: -1 so the loop's ++tries lands on 0.
            tries = -1;
          }
        } else {
          // All regions visible; switch to phase 2 and reset the retry budget.
          doneWithMetaScan = true;
          tries = -1;
        }
      } else if (isTableEnabled(desc.getTableName())) {
        return;
      } else {
        try { // Sleep
          Thread.sleep(getPauseTime(tries));
        } catch (InterruptedException e) {
          throw new InterruptedIOException("Interrupted when waiting" +
            " for table to be enabled; meta scan was done");
        }
      }
    }
    throw new TableNotEnabledException(
      "Retries exhausted while still waiting for table: "
      + desc.getTableName() + " to be enabled");
  }
569 
  /**
   * Creates a new table but does not block and wait for it to come online.
   * Asynchronous operation.  To check if the table exists, use
   * {@link #isTableAvailable} -- it is not safe to create an HTable
   * instance to this table before it is available.
   * Note : Avoid passing empty split key.
   * @param desc table descriptor for table
   *
   * @throws IllegalArgumentException Bad table name, if the split keys
   * are repeated and if the split key has empty byte array.
   * @throws MasterNotRunningException if master is not running
   * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
   * threads, the table may have been created between test-for-existence
   * and attempt-at-creation).
   * @throws IOException
   */
  @Override
  public void createTableAsync(
    final HTableDescriptor desc, final byte [][] splitKeys)
  throws IOException {
    if(desc.getTableName() == null) {
      throw new IllegalArgumentException("TableName cannot be null");
    }
    if(splitKeys != null && splitKeys.length > 0) {
      // NOTE(review): this sorts the CALLER's array in place — a visible side
      // effect on the argument; confirm no caller depends on original order.
      Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
      // Verify there are no duplicate split keys: after sorting, duplicates
      // are adjacent, so comparing each key to its predecessor suffices.
      byte [] lastKey = null;
      for(byte [] splitKey : splitKeys) {
        if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
          throw new IllegalArgumentException(
              "Empty split key must not be passed in the split keys.");
        }
        if(lastKey != null && Bytes.equals(splitKey, lastKey)) {
          throw new IllegalArgumentException("All split keys must be unique, " +
            "found duplicate: " + Bytes.toStringBinary(splitKey) +
            ", " + Bytes.toStringBinary(lastKey));
        }
        lastKey = splitKey;
      }
    }

    // Fire the create at the master through a retrying caller; returns as
    // soon as the master accepts the request.
    executeCallable(new MasterCallable<Void>(getConnection()) {
      @Override
      public Void call(int callTimeout) throws ServiceException {
        CreateTableRequest request = RequestConverter.buildCreateTableRequest(desc, splitKeys);
        master.createTable(null, request);
        return null;
      }
    });
  }
620 
621   public void deleteTable(final String tableName) throws IOException {
622     deleteTable(TableName.valueOf(tableName));
623   }
624 
625   public void deleteTable(final byte[] tableName) throws IOException {
626     deleteTable(TableName.valueOf(tableName));
627   }
628 
  /**
   * Deletes a table.
   * Synchronous operation: issues the delete to the master, then polls
   * hbase:meta and the master's table descriptors until the table is gone.
   *
   * @param tableName name of table to delete
   * @throws IOException if a remote or network exception occurs, or if the
   * retry budget is exhausted before the table disappears
   */
  @Override
  public void deleteTable(final TableName tableName) throws IOException {
    boolean tableExists = true;

    // Ask the master to delete the table; returns once the request is accepted.
    executeCallable(new MasterCallable<Void>(getConnection()) {
      @Override
      public Void call(int callTimeout) throws ServiceException {
        DeleteTableRequest req = RequestConverter.buildDeleteTableRequest(tableName);
        master.deleteTable(null,req);
        return null;
      }
    });

    int failures = 0;
    // Wait until all regions deleted
    for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
      try {
        // Scan hbase:meta for any remaining region row of this table.
        HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
        Scan scan = MetaTableAccessor.getScanForTableName(tableName);
        scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
        // Batch size 1: a single surviving row proves the table still exists.
        ScanRequest request = RequestConverter.buildScanRequest(
          firstMetaServer.getRegionInfo().getRegionName(), scan, 1, true);
        Result[] values = null;
        // Get a batch at a time.
        ClientService.BlockingInterface server = connection.getClient(firstMetaServer
            .getServerName());
        PayloadCarryingRpcController controller = new PayloadCarryingRpcController();
        try {
          controller.setPriority(tableName);
          ScanResponse response = server.scan(controller, request);
          values = ResponseConverter.getResults(controller.cellScanner(), response);
        } catch (ServiceException se) {
          throw ProtobufUtil.getRemoteException(se);
        }

        // let us wait until hbase:meta table is updated and
        // HMaster removes the table from its HTableDescriptors
        if (values == null || values.length == 0) {
          tableExists = false;
          GetTableDescriptorsResponse htds;
          MasterKeepAliveConnection master = connection.getKeepAliveMasterService();
          try {
            GetTableDescriptorsRequest req =
              RequestConverter.buildGetTableDescriptorsRequest(tableName);
            htds = master.getTableDescriptors(null, req);
          } catch (ServiceException se) {
            throw ProtobufUtil.getRemoteException(se);
          } finally {
            // Always release the keep-alive master handle.
            master.close();
          }
          // The master may still list the descriptor even after meta is clean.
          tableExists = !htds.getTableSchemaList().isEmpty();
          if (!tableExists) {
            break;
          }
        }
      } catch (IOException ex) {
        failures++;
        // NOTE(review): this rethrows on the (numRetries-1)th failure, while
        // the loop itself runs numRetries*retryLongerMultiplier times — the
        // budgets look inconsistent; confirm the intended limit.
        if(failures == numRetries - 1) {           // no more tries left
          if (ex instanceof RemoteException) {
            // Unwrap so callers see the server-side exception type.
            throw ((RemoteException) ex).unwrapRemoteException();
          } else {
            throw ex;
          }
        }
      }
      try {
        Thread.sleep(getPauseTime(tries));
      } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted when waiting" +
            " for table to be deleted");
      }
    }

    if (tableExists) {
      throw new IOException("Retries exhausted, it took too long to wait"+
        " for the table " + tableName + " to be deleted.");
    }
    // Delete cached information to prevent clients from using old locations
    this.connection.clearRegionCache(tableName);
    LOG.info("Deleted " + tableName);
  }
717 
718   /**
719    * Deletes tables matching the passed in pattern and wait on completion.
720    *
721    * Warning: Use this method carefully, there is no prompting and the effect is
722    * immediate. Consider using {@link #listTables(java.lang.String)} and
723    * {@link #deleteTable(byte[])}
724    *
725    * @param regex The regular expression to match table names against
726    * @return Table descriptors for tables that couldn't be deleted
727    * @throws IOException
728    * @see #deleteTables(java.util.regex.Pattern)
729    * @see #deleteTable(java.lang.String)
730    */
731   @Override
732   public HTableDescriptor[] deleteTables(String regex) throws IOException {
733     return deleteTables(Pattern.compile(regex));
734   }
735 
736   /**
737    * Delete tables matching the passed in pattern and wait on completion.
738    *
739    * Warning: Use this method carefully, there is no prompting and the effect is
740    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
741    * {@link #deleteTable(byte[])}
742    *
743    * @param pattern The pattern to match table names against
744    * @return Table descriptors for tables that couldn't be deleted
745    * @throws IOException
746    */
747   @Override
748   public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException {
749     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
750     for (HTableDescriptor table : listTables(pattern)) {
751       try {
752         deleteTable(table.getTableName());
753       } catch (IOException ex) {
754         LOG.info("Failed to delete table " + table.getTableName(), ex);
755         failed.add(table);
756       }
757     }
758     return failed.toArray(new HTableDescriptor[failed.size()]);
759   }
760 
761   /**
762    * Truncate a table.
763    * Synchronous operation.
764    *
765    * @param tableName name of table to truncate
766    * @param preserveSplits True if the splits should be preserved
767    * @throws IOException if a remote or network exception occurs
768    */
769   @Override
770   public void truncateTable(final TableName tableName, final boolean preserveSplits)
771       throws IOException {
772     executeCallable(new MasterCallable<Void>(getConnection()) {
773       @Override
774       public Void call(int callTimeout) throws ServiceException {
775         TruncateTableRequest req = RequestConverter.buildTruncateTableRequest(
776           tableName, preserveSplits);
777         master.truncateTable(null, req);
778         return null;
779       }
780     });
781   }
782 
783   /**
784    * Enable a table.  May timeout.  Use {@link #enableTableAsync(byte[])}
785    * and {@link #isTableEnabled(byte[])} instead.
786    * The table has to be in disabled state for it to be enabled.
787    * @param tableName name of the table
788    * @throws IOException if a remote or network exception occurs
789    * There could be couple types of IOException
790    * TableNotFoundException means the table doesn't exist.
791    * TableNotDisabledException means the table isn't in disabled state.
792    * @see #isTableEnabled(byte[])
793    * @see #disableTable(byte[])
794    * @see #enableTableAsync(byte[])
795    */
796   @Override
797   public void enableTable(final TableName tableName)
798   throws IOException {
799     enableTableAsync(tableName);
800 
801     // Wait until all regions are enabled
802     waitUntilTableIsEnabled(tableName);
803 
804     LOG.info("Enabled table " + tableName);
805   }
806 
807   public void enableTable(final byte[] tableName)
808   throws IOException {
809     enableTable(TableName.valueOf(tableName));
810   }
811 
812   public void enableTable(final String tableName)
813   throws IOException {
814     enableTable(TableName.valueOf(tableName));
815   }
816 
817   /**
818    * Wait for the table to be enabled and available
819    * If enabling the table exceeds the retry period, an exception is thrown.
820    * @param tableName name of the table
821    * @throws IOException if a remote or network exception occurs or
822    *    table is not enabled after the retries period.
823    */
824   private void waitUntilTableIsEnabled(final TableName tableName) throws IOException {
825     boolean enabled = false;
826     long start = EnvironmentEdgeManager.currentTime();
827     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
828       try {
829         enabled = isTableEnabled(tableName);
830       } catch (TableNotFoundException tnfe) {
831         // wait for table to be created
832         enabled = false;
833       }
834       enabled = enabled && isTableAvailable(tableName);
835       if (enabled) {
836         break;
837       }
838       long sleep = getPauseTime(tries);
839       if (LOG.isDebugEnabled()) {
840         LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
841           "enabled in " + tableName);
842       }
843       try {
844         Thread.sleep(sleep);
845       } catch (InterruptedException e) {
846         // Do this conversion rather than let it out because do not want to
847         // change the method signature.
848         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
849       }
850     }
851     if (!enabled) {
852       long msec = EnvironmentEdgeManager.currentTime() - start;
853       throw new IOException("Table '" + tableName +
854         "' not yet enabled, after " + msec + "ms.");
855     }
856   }
857 
858   /**
859    * Brings a table on-line (enables it).  Method returns immediately though
860    * enable of table may take some time to complete, especially if the table
861    * is large (All regions are opened as part of enabling process).  Check
862    * {@link #isTableEnabled(byte[])} to learn when table is fully online.  If
863    * table is taking too long to online, check server logs.
864    * @param tableName
865    * @throws IOException
866    * @since 0.90.0
867    */
868   @Override
869   public void enableTableAsync(final TableName tableName)
870   throws IOException {
871     TableName.isLegalFullyQualifiedTableName(tableName.getName());
872     executeCallable(new MasterCallable<Void>(getConnection()) {
873       @Override
874       public Void call(int callTimeout) throws ServiceException {
875         LOG.info("Started enable of " + tableName);
876         EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName);
877         master.enableTable(null,req);
878         return null;
879       }
880     });
881   }
882 
883   public void enableTableAsync(final byte[] tableName)
884   throws IOException {
885     enableTable(TableName.valueOf(tableName));
886   }
887 
888   public void enableTableAsync(final String tableName)
889   throws IOException {
890     enableTableAsync(TableName.valueOf(tableName));
891   }
892 
893   /**
894    * Enable tables matching the passed in pattern and wait on completion.
895    *
896    * Warning: Use this method carefully, there is no prompting and the effect is
897    * immediate. Consider using {@link #listTables(java.lang.String)} and
898    * {@link #enableTable(byte[])}
899    *
900    * @param regex The regular expression to match table names against
901    * @throws IOException
902    * @see #enableTables(java.util.regex.Pattern)
903    * @see #enableTable(java.lang.String)
904    */
905   @Override
906   public HTableDescriptor[] enableTables(String regex) throws IOException {
907     return enableTables(Pattern.compile(regex));
908   }
909 
910   /**
911    * Enable tables matching the passed in pattern and wait on completion.
912    *
913    * Warning: Use this method carefully, there is no prompting and the effect is
914    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
915    * {@link #enableTable(byte[])}
916    *
917    * @param pattern The pattern to match table names against
918    * @throws IOException
919    */
920   @Override
921   public HTableDescriptor[] enableTables(Pattern pattern) throws IOException {
922     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
923     for (HTableDescriptor table : listTables(pattern)) {
924       if (isTableDisabled(table.getTableName())) {
925         try {
926           enableTable(table.getTableName());
927         } catch (IOException ex) {
928           LOG.info("Failed to enable table " + table.getTableName(), ex);
929           failed.add(table);
930         }
931       }
932     }
933     return failed.toArray(new HTableDescriptor[failed.size()]);
934   }
935 
936   /**
937    * Starts the disable of a table.  If it is being served, the master
938    * will tell the servers to stop serving it.  This method returns immediately.
939    * The disable of a table can take some time if the table is large (all
940    * regions are closed as part of table disable operation).
941    * Call {@link #isTableDisabled(byte[])} to check for when disable completes.
942    * If table is taking too long to online, check server logs.
943    * @param tableName name of table
944    * @throws IOException if a remote or network exception occurs
945    * @see #isTableDisabled(byte[])
946    * @see #isTableEnabled(byte[])
947    * @since 0.90.0
948    */
949   @Override
950   public void disableTableAsync(final TableName tableName) throws IOException {
951     TableName.isLegalFullyQualifiedTableName(tableName.getName());
952     executeCallable(new MasterCallable<Void>(getConnection()) {
953       @Override
954       public Void call(int callTimeout) throws ServiceException {
955         LOG.info("Started disable of " + tableName);
956         DisableTableRequest req = RequestConverter.buildDisableTableRequest(tableName);
957         master.disableTable(null,req);
958         return null;
959       }
960     });
961   }
962 
963   public void disableTableAsync(final byte[] tableName) throws IOException {
964     disableTableAsync(TableName.valueOf(tableName));
965   }
966 
967   public void disableTableAsync(final String tableName) throws IOException {
968     disableTableAsync(TableName.valueOf(tableName));
969   }
970 
971   /**
972    * Disable table and wait on completion.  May timeout eventually.  Use
973    * {@link #disableTableAsync(byte[])} and {@link #isTableDisabled(String)}
974    * instead.
975    * The table has to be in enabled state for it to be disabled.
976    * @param tableName
977    * @throws IOException
978    * There could be couple types of IOException
979    * TableNotFoundException means the table doesn't exist.
980    * TableNotEnabledException means the table isn't in enabled state.
981    */
982   @Override
983   public void disableTable(final TableName tableName)
984   throws IOException {
985     disableTableAsync(tableName);
986     // Wait until table is disabled
987     boolean disabled = false;
988     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
989       disabled = isTableDisabled(tableName);
990       if (disabled) {
991         break;
992       }
993       long sleep = getPauseTime(tries);
994       if (LOG.isDebugEnabled()) {
995         LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
996           "disabled in " + tableName);
997       }
998       try {
999         Thread.sleep(sleep);
1000       } catch (InterruptedException e) {
1001         // Do this conversion rather than let it out because do not want to
1002         // change the method signature.
1003         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
1004       }
1005     }
1006     if (!disabled) {
1007       throw new RegionException("Retries exhausted, it took too long to wait"+
1008         " for the table " + tableName + " to be disabled.");
1009     }
1010     LOG.info("Disabled " + tableName);
1011   }
1012 
1013   public void disableTable(final byte[] tableName)
1014   throws IOException {
1015     disableTable(TableName.valueOf(tableName));
1016   }
1017 
1018   public void disableTable(final String tableName)
1019   throws IOException {
1020     disableTable(TableName.valueOf(tableName));
1021   }
1022 
1023   /**
1024    * Disable tables matching the passed in pattern and wait on completion.
1025    *
1026    * Warning: Use this method carefully, there is no prompting and the effect is
1027    * immediate. Consider using {@link #listTables(java.lang.String)} and
1028    * {@link #disableTable(byte[])}
1029    *
1030    * @param regex The regular expression to match table names against
1031    * @return Table descriptors for tables that couldn't be disabled
1032    * @throws IOException
1033    * @see #disableTables(java.util.regex.Pattern)
1034    * @see #disableTable(java.lang.String)
1035    */
1036   @Override
1037   public HTableDescriptor[] disableTables(String regex) throws IOException {
1038     return disableTables(Pattern.compile(regex));
1039   }
1040 
1041   /**
1042    * Disable tables matching the passed in pattern and wait on completion.
1043    *
1044    * Warning: Use this method carefully, there is no prompting and the effect is
1045    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
1046    * {@link #disableTable(byte[])}
1047    *
1048    * @param pattern The pattern to match table names against
1049    * @return Table descriptors for tables that couldn't be disabled
1050    * @throws IOException
1051    */
1052   @Override
1053   public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
1054     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1055     for (HTableDescriptor table : listTables(pattern)) {
1056       if (isTableEnabled(table.getTableName())) {
1057         try {
1058           disableTable(table.getTableName());
1059         } catch (IOException ex) {
1060           LOG.info("Failed to disable table " + table.getTableName(), ex);
1061           failed.add(table);
1062         }
1063       }
1064     }
1065     return failed.toArray(new HTableDescriptor[failed.size()]);
1066   }
1067 
1068   /*
1069    * Checks whether table exists. If not, throws TableNotFoundException
1070    * @param tableName
1071    */
1072   private void checkTableExistence(TableName tableName) throws IOException {
1073     if (!tableExists(tableName)) {
1074       throw new TableNotFoundException(tableName);
1075     }
1076   }
1077 
1078   /**
1079    * @param tableName name of table to check
1080    * @return true if table is on-line
1081    * @throws IOException if a remote or network exception occurs
1082    */
1083   @Override
1084   public boolean isTableEnabled(TableName tableName) throws IOException {
1085     checkTableExistence(tableName);
1086     return connection.isTableEnabled(tableName);
1087   }
1088 
1089   public boolean isTableEnabled(byte[] tableName) throws IOException {
1090     return isTableEnabled(TableName.valueOf(tableName));
1091   }
1092 
1093   public boolean isTableEnabled(String tableName) throws IOException {
1094     return isTableEnabled(TableName.valueOf(tableName));
1095   }
1096 
1097 
1098 
1099   /**
1100    * @param tableName name of table to check
1101    * @return true if table is off-line
1102    * @throws IOException if a remote or network exception occurs
1103    */
1104   @Override
1105   public boolean isTableDisabled(TableName tableName) throws IOException {
1106     checkTableExistence(tableName);
1107     return connection.isTableDisabled(tableName);
1108   }
1109 
1110   public boolean isTableDisabled(byte[] tableName) throws IOException {
1111     return isTableDisabled(TableName.valueOf(tableName));
1112   }
1113 
1114   public boolean isTableDisabled(String tableName) throws IOException {
1115     return isTableDisabled(TableName.valueOf(tableName));
1116   }
1117 
1118   /**
1119    * @param tableName name of table to check
1120    * @return true if all regions of the table are available
1121    * @throws IOException if a remote or network exception occurs
1122    */
1123   @Override
1124   public boolean isTableAvailable(TableName tableName) throws IOException {
1125     return connection.isTableAvailable(tableName);
1126   }
1127 
1128   public boolean isTableAvailable(byte[] tableName) throws IOException {
1129     return isTableAvailable(TableName.valueOf(tableName));
1130   }
1131 
1132   public boolean isTableAvailable(String tableName) throws IOException {
1133     return isTableAvailable(TableName.valueOf(tableName));
1134   }
1135 
1136   /**
1137    * Use this api to check if the table has been created with the specified number of
1138    * splitkeys which was used while creating the given table.
1139    * Note : If this api is used after a table's region gets splitted, the api may return
1140    * false.
1141    * @param tableName
1142    *          name of table to check
1143    * @param splitKeys
1144    *          keys to check if the table has been created with all split keys
1145    * @throws IOException
1146    *           if a remote or network excpetion occurs
1147    */
1148   @Override
1149   public boolean isTableAvailable(TableName tableName,
1150                                   byte[][] splitKeys) throws IOException {
1151     return connection.isTableAvailable(tableName, splitKeys);
1152   }
1153 
1154   public boolean isTableAvailable(byte[] tableName,
1155                                   byte[][] splitKeys) throws IOException {
1156     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1157   }
1158 
1159   public boolean isTableAvailable(String tableName,
1160                                   byte[][] splitKeys) throws IOException {
1161     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1162   }
1163 
1164   /**
1165    * Get the status of alter command - indicates how many regions have received
1166    * the updated schema Asynchronous operation.
1167    *
1168    * @param tableName TableName instance
1169    * @return Pair indicating the number of regions updated Pair.getFirst() is the
1170    *         regions that are yet to be updated Pair.getSecond() is the total number
1171    *         of regions of the table
1172    * @throws IOException
1173    *           if a remote or network exception occurs
1174    */
1175   @Override
1176   public Pair<Integer, Integer> getAlterStatus(final TableName tableName)
1177   throws IOException {
1178     return executeCallable(new MasterCallable<Pair<Integer, Integer>>(getConnection()) {
1179       @Override
1180       public Pair<Integer, Integer> call(int callTimeout) throws ServiceException {
1181         GetSchemaAlterStatusRequest req = RequestConverter
1182             .buildGetSchemaAlterStatusRequest(tableName);
1183         GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(null, req);
1184         Pair<Integer, Integer> pair = new Pair<Integer, Integer>(Integer.valueOf(ret
1185             .getYetToUpdateRegions()), Integer.valueOf(ret.getTotalRegions()));
1186         return pair;
1187       }
1188     });
1189   }
1190 
1191   /**
1192    * Get the status of alter command - indicates how many regions have received
1193    * the updated schema Asynchronous operation.
1194    *
1195    * @param tableName
1196    *          name of the table to get the status of
1197    * @return Pair indicating the number of regions updated Pair.getFirst() is the
1198    *         regions that are yet to be updated Pair.getSecond() is the total number
1199    *         of regions of the table
1200    * @throws IOException
1201    *           if a remote or network exception occurs
1202    */
1203   @Override
1204   public Pair<Integer, Integer> getAlterStatus(final byte[] tableName)
1205    throws IOException {
1206     return getAlterStatus(TableName.valueOf(tableName));
1207   }
1208 
1209   /**
1210    * Add a column to an existing table.
1211    * Asynchronous operation.
1212    *
1213    * @param tableName name of the table to add column to
1214    * @param column column descriptor of column to be added
1215    * @throws IOException if a remote or network exception occurs
1216    */
1217   public void addColumn(final byte[] tableName, HColumnDescriptor column)
1218   throws IOException {
1219     addColumn(TableName.valueOf(tableName), column);
1220   }
1221 
1222 
1223   /**
1224    * Add a column to an existing table.
1225    * Asynchronous operation.
1226    *
1227    * @param tableName name of the table to add column to
1228    * @param column column descriptor of column to be added
1229    * @throws IOException if a remote or network exception occurs
1230    */
1231   public void addColumn(final String tableName, HColumnDescriptor column)
1232   throws IOException {
1233     addColumn(TableName.valueOf(tableName), column);
1234   }
1235 
1236   /**
1237    * Add a column to an existing table.
1238    * Asynchronous operation.
1239    *
1240    * @param tableName name of the table to add column to
1241    * @param column column descriptor of column to be added
1242    * @throws IOException if a remote or network exception occurs
1243    */
1244   @Override
1245   public void addColumn(final TableName tableName, final HColumnDescriptor column)
1246   throws IOException {
1247     executeCallable(new MasterCallable<Void>(getConnection()) {
1248       @Override
1249       public Void call(int callTimeout) throws ServiceException {
1250         AddColumnRequest req = RequestConverter.buildAddColumnRequest(tableName, column);
1251         master.addColumn(null,req);
1252         return null;
1253       }
1254     });
1255   }
1256 
1257   /**
1258    * Delete a column from a table.
1259    * Asynchronous operation.
1260    *
1261    * @param tableName name of table
1262    * @param columnName name of column to be deleted
1263    * @throws IOException if a remote or network exception occurs
1264    */
1265   public void deleteColumn(final byte[] tableName, final String columnName)
1266   throws IOException {
1267     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1268   }
1269 
1270   /**
1271    * Delete a column from a table.
1272    * Asynchronous operation.
1273    *
1274    * @param tableName name of table
1275    * @param columnName name of column to be deleted
1276    * @throws IOException if a remote or network exception occurs
1277    */
1278   public void deleteColumn(final String tableName, final String columnName)
1279   throws IOException {
1280     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1281   }
1282 
1283   /**
1284    * Delete a column from a table.
1285    * Asynchronous operation.
1286    *
1287    * @param tableName name of table
1288    * @param columnName name of column to be deleted
1289    * @throws IOException if a remote or network exception occurs
1290    */
1291   @Override
1292   public void deleteColumn(final TableName tableName, final byte [] columnName)
1293   throws IOException {
1294     executeCallable(new MasterCallable<Void>(getConnection()) {
1295       @Override
1296       public Void call(int callTimeout) throws ServiceException {
1297         DeleteColumnRequest req = RequestConverter.buildDeleteColumnRequest(tableName, columnName);
1298         master.deleteColumn(null,req);
1299         return null;
1300       }
1301     });
1302   }
1303 
1304   /**
1305    * Modify an existing column family on a table.
1306    * Asynchronous operation.
1307    *
1308    * @param tableName name of table
1309    * @param descriptor new column descriptor to use
1310    * @throws IOException if a remote or network exception occurs
1311    */
1312   public void modifyColumn(final String tableName, HColumnDescriptor descriptor)
1313   throws IOException {
1314     modifyColumn(TableName.valueOf(tableName), descriptor);
1315   }
1316 
1317   /**
1318    * Modify an existing column family on a table.
1319    * Asynchronous operation.
1320    *
1321    * @param tableName name of table
1322    * @param descriptor new column descriptor to use
1323    * @throws IOException if a remote or network exception occurs
1324    */
1325   public void modifyColumn(final byte[] tableName, HColumnDescriptor descriptor)
1326   throws IOException {
1327     modifyColumn(TableName.valueOf(tableName), descriptor);
1328   }
1329 
1330 
1331 
1332   /**
1333    * Modify an existing column family on a table.
1334    * Asynchronous operation.
1335    *
1336    * @param tableName name of table
1337    * @param descriptor new column descriptor to use
1338    * @throws IOException if a remote or network exception occurs
1339    */
1340   @Override
1341   public void modifyColumn(final TableName tableName, final HColumnDescriptor descriptor)
1342   throws IOException {
1343     executeCallable(new MasterCallable<Void>(getConnection()) {
1344       @Override
1345       public Void call(int callTimeout) throws ServiceException {
1346         ModifyColumnRequest req = RequestConverter.buildModifyColumnRequest(tableName, descriptor);
1347         master.modifyColumn(null,req);
1348         return null;
1349       }
1350     });
1351   }
1352 
1353   /**
1354    * Close a region. For expert-admins.  Runs close on the regionserver.  The
1355    * master will not be informed of the close.
1356    * @param regionname region name to close
1357    * @param serverName If supplied, we'll use this location rather than
1358    * the one currently in <code>hbase:meta</code>
1359    * @throws IOException if a remote or network exception occurs
1360    */
1361   @Override
1362   public void closeRegion(final String regionname, final String serverName)
1363   throws IOException {
1364     closeRegion(Bytes.toBytes(regionname), serverName);
1365   }
1366 
1367   /**
1368    * Close a region.  For expert-admins  Runs close on the regionserver.  The
1369    * master will not be informed of the close.
1370    * @param regionname region name to close
1371    * @param serverName The servername of the regionserver.  If passed null we
1372    * will use servername found in the hbase:meta table. A server name
1373    * is made of host, port and startcode.  Here is an example:
1374    * <code> host187.example.com,60020,1289493121758</code>
1375    * @throws IOException if a remote or network exception occurs
1376    */
1377   @Override
1378   public void closeRegion(final byte [] regionname, final String serverName)
1379       throws IOException {
1380     if (serverName != null) {
1381       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1382       if (pair == null || pair.getFirst() == null) {
1383         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1384       } else {
1385         closeRegion(ServerName.valueOf(serverName), pair.getFirst());
1386       }
1387     } else {
1388       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1389       if (pair == null) {
1390         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1391       } else if (pair.getSecond() == null) {
1392         throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
1393       } else {
1394         closeRegion(pair.getSecond(), pair.getFirst());
1395       }
1396     }
1397   }
1398 
1399   /**
1400    * For expert-admins. Runs close on the regionserver. Closes a region based on
1401    * the encoded region name. The region server name is mandatory. If the
1402    * servername is provided then based on the online regions in the specified
1403    * regionserver the specified region will be closed. The master will not be
1404    * informed of the close. Note that the regionname is the encoded regionname.
1405    *
1406    * @param encodedRegionName
1407    *          The encoded region name; i.e. the hash that makes up the region
1408    *          name suffix: e.g. if regionname is
1409    *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>
1410    *          , then the encoded region name is:
1411    *          <code>527db22f95c8a9e0116f0cc13c680396</code>.
1412    * @param serverName
1413    *          The servername of the regionserver. A server name is made of host,
1414    *          port and startcode. This is mandatory. Here is an example:
1415    *          <code> host187.example.com,60020,1289493121758</code>
1416    * @return true if the region was closed, false if not.
1417    * @throws IOException
1418    *           if a remote or network exception occurs
1419    */
1420   @Override
1421   public boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
1422       final String serverName) throws IOException {
1423     if (null == serverName || ("").equals(serverName.trim())) {
1424       throw new IllegalArgumentException(
1425           "The servername cannot be null or empty.");
1426     }
1427     ServerName sn = ServerName.valueOf(serverName);
1428     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1429     // Close the region without updating zk state.
1430     CloseRegionRequest request =
1431       RequestConverter.buildCloseRegionRequest(sn, encodedRegionName);
1432     try {
1433       CloseRegionResponse response = admin.closeRegion(null, request);
1434       boolean isRegionClosed = response.getClosed();
1435       if (false == isRegionClosed) {
1436         LOG.error("Not able to close the region " + encodedRegionName + ".");
1437       }
1438       return isRegionClosed;
1439     } catch (ServiceException se) {
1440       throw ProtobufUtil.getRemoteException(se);
1441     }
1442   }
1443 
1444   /**
1445    * Close a region.  For expert-admins  Runs close on the regionserver.  The
1446    * master will not be informed of the close.
1447    * @param sn
1448    * @param hri
1449    * @throws IOException
1450    */
1451   @Override
1452   public void closeRegion(final ServerName sn, final HRegionInfo hri)
1453   throws IOException {
1454     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1455     // Close the region without updating zk state.
1456     ProtobufUtil.closeRegion(admin, sn, hri.getRegionName());
1457   }
1458 
1459   /**
1460    * Get all the online regions on a region server.
1461    */
1462   @Override
1463   public List<HRegionInfo> getOnlineRegions(
1464       final ServerName sn) throws IOException {
1465     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1466     return ProtobufUtil.getOnlineRegions(admin);
1467   }
1468 
1469   /**
1470    * {@inheritDoc}
1471    */
1472   @Override
1473   public void flush(final TableName tableName) throws IOException, InterruptedException {
1474     checkTableExists(tableName);
1475     if (isTableDisabled(tableName)) {
1476       LOG.info("Table is disabled: " + tableName.getNameAsString());
1477       return;
1478     }
1479     execProcedure("flush-table-proc", tableName.getNameAsString(),
1480       new HashMap<String, String>());
1481   }
1482 
1483   /**
1484    * {@inheritDoc}
1485    */
1486   @Override
1487   public void flushRegion(final byte[] regionName) throws IOException, InterruptedException {
1488     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
1489     if (regionServerPair == null) {
1490       throw new IllegalArgumentException("Unknown regionname: " + Bytes.toStringBinary(regionName));
1491     }
1492     if (regionServerPair.getSecond() == null) {
1493       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1494     }
1495     flush(regionServerPair.getSecond(), regionServerPair.getFirst());
1496   }
1497 
1498   /**
1499    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or {@link #flushRegion
1500    * (byte[])} instead.
1501    */
1502   @Deprecated
1503   public void flush(final String tableNameOrRegionName)
1504   throws IOException, InterruptedException {
1505     flush(Bytes.toBytes(tableNameOrRegionName));
1506   }
1507 
1508   /**
1509    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or {@link #flushRegion
1510    * (byte[])} instead.
1511    */
1512   @Deprecated
1513   public void flush(final byte[] tableNameOrRegionName)
1514   throws IOException, InterruptedException {
1515     try {
1516       flushRegion(tableNameOrRegionName);
1517     } catch (IllegalArgumentException e) {
1518       // Unknown region.  Try table.
1519       flush(TableName.valueOf(tableNameOrRegionName));
1520     }
1521   }
1522 
1523   private void flush(final ServerName sn, final HRegionInfo hri)
1524   throws IOException {
1525     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1526     FlushRegionRequest request =
1527       RequestConverter.buildFlushRegionRequest(hri.getRegionName());
1528     try {
1529       admin.flushRegion(null, request);
1530     } catch (ServiceException se) {
1531       throw ProtobufUtil.getRemoteException(se);
1532     }
1533   }
1534 
1535   /**
1536    * {@inheritDoc}
1537    */
1538   @Override
1539   public void compact(final TableName tableName)
1540     throws IOException, InterruptedException {
1541     compact(tableName, null, false);
1542   }
1543 
1544   /**
1545    * {@inheritDoc}
1546    */
1547   @Override
1548   public void compactRegion(final byte[] regionName)
1549     throws IOException, InterruptedException {
1550     compactRegion(regionName, null, false);
1551   }
1552 
1553   /**
1554    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1555    * (byte[])} instead.
1556    */
1557   @Deprecated
1558   public void compact(final String tableNameOrRegionName)
1559   throws IOException, InterruptedException {
1560     compact(Bytes.toBytes(tableNameOrRegionName));
1561   }
1562 
1563   /**
1564    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1565    * (byte[])} instead.
1566    */
1567   @Deprecated
1568   public void compact(final byte[] tableNameOrRegionName)
1569   throws IOException, InterruptedException {
1570     try {
1571       compactRegion(tableNameOrRegionName, null, false);
1572     } catch (IllegalArgumentException e) {
1573       compact(TableName.valueOf(tableNameOrRegionName), null, false);
1574     }
1575   }
1576 
1577   /**
1578    * {@inheritDoc}
1579    */
1580   @Override
1581   public void compact(final TableName tableName, final byte[] columnFamily)
1582     throws IOException, InterruptedException {
1583     compact(tableName, columnFamily, false);
1584   }
1585 
1586   /**
1587    * {@inheritDoc}
1588    */
1589   @Override
1590   public void compactRegion(final byte[] regionName, final byte[] columnFamily)
1591     throws IOException, InterruptedException {
1592     compactRegion(regionName, columnFamily, false);
1593   }
1594 
1595   /**
1596    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1597    * (byte[], byte[])} instead.
1598    */
1599   @Deprecated
1600   public void compact(String tableOrRegionName, String columnFamily)
1601     throws IOException,  InterruptedException {
1602     compact(Bytes.toBytes(tableOrRegionName), Bytes.toBytes(columnFamily));
1603   }
1604 
1605   /**
1606    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1607    * (byte[], byte[])} instead.
1608    */
1609   @Deprecated
1610   public void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
1611   throws IOException, InterruptedException {
1612     try {
1613       compactRegion(tableNameOrRegionName, columnFamily, false);
1614     } catch (IllegalArgumentException e) {
1615       // Bad region, try table
1616       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, false);
1617     }
1618   }
1619 
1620   /**
1621    * {@inheritDoc}
1622    */
1623   @Override
1624   public void majorCompact(final TableName tableName)
1625   throws IOException, InterruptedException {
1626     compact(tableName, null, true);
1627   }
1628 
1629   /**
1630    * {@inheritDoc}
1631    */
1632   @Override
1633   public void majorCompactRegion(final byte[] regionName)
1634   throws IOException, InterruptedException {
1635     compactRegion(regionName, null, true);
1636   }
1637 
1638   /**
1639    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
1640    * #majorCompactRegion(byte[])} instead.
1641    */
1642   @Deprecated
1643   public void majorCompact(final String tableNameOrRegionName)
1644   throws IOException, InterruptedException {
1645     majorCompact(Bytes.toBytes(tableNameOrRegionName));
1646   }
1647 
1648   /**
1649    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
1650    * #majorCompactRegion(byte[])} instead.
1651    */
1652   @Deprecated
1653   public void majorCompact(final byte[] tableNameOrRegionName)
1654   throws IOException, InterruptedException {
1655     try {
1656       compactRegion(tableNameOrRegionName, null, true);
1657     } catch (IllegalArgumentException e) {
1658       // Invalid region, try table
1659       compact(TableName.valueOf(tableNameOrRegionName), null, true);
1660     }
1661   }
1662 
1663   /**
1664    * {@inheritDoc}
1665    */
1666   @Override
1667   public void majorCompact(final TableName tableName, final byte[] columnFamily)
1668   throws IOException, InterruptedException {
1669     compact(tableName, columnFamily, true);
1670   }
1671 
1672   /**
1673    * {@inheritDoc}
1674    */
1675   @Override
1676   public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily)
1677   throws IOException, InterruptedException {
1678     compactRegion(regionName, columnFamily, true);
1679   }
1680 
1681   /**
1682    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
1683    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
1684    */
1685   @Deprecated
1686   public void majorCompact(final String tableNameOrRegionName, final String columnFamily)
1687   throws IOException, InterruptedException {
1688     majorCompact(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(columnFamily));
1689   }
1690 
1691   /**
1692    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
1693    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
1694    */
1695   @Deprecated
1696   public void majorCompact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
1697   throws IOException, InterruptedException {
1698     try {
1699       compactRegion(tableNameOrRegionName, columnFamily, true);
1700     } catch (IllegalArgumentException e) {
1701       // Invalid region, try table
1702       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, true);
1703     }
1704   }
1705 
  /**
   * Compact a table.
   * Asynchronous operation.
   *
   * @param tableName table or region to compact
   * @param columnFamily column family within a table or region
   * @param major True if we are to do a major compaction.
   * @throws IOException if a remote or network exception occurs
   * @throws InterruptedException
   */
  private void compact(final TableName tableName, final byte[] columnFamily,final boolean major)
  throws IOException, InterruptedException {
    // A short-lived ZK watcher is needed to look up the table's region locations.
    ZooKeeperWatcher zookeeper = null;
    try {
      checkTableExists(tableName);
      zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
          new ThrowableAbortable());
      List<Pair<HRegionInfo, ServerName>> pairs =
        MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
      for (Pair<HRegionInfo, ServerName> pair: pairs) {
        // Skip regions that are offline or currently have no hosting server.
        if (pair.getFirst().isOffline()) continue;
        if (pair.getSecond() == null) continue;
        try {
          compact(pair.getSecond(), pair.getFirst(), major, columnFamily);
        } catch (NotServingRegionException e) {
          // Region moved between the meta lookup and the RPC.  Compaction is
          // best-effort: log at debug and continue with the remaining regions.
          if (LOG.isDebugEnabled()) {
            LOG.debug("Trying to" + (major ? " major" : "") + " compact " +
              pair.getFirst() + ": " +
              StringUtils.stringifyException(e));
          }
        }
      }
    } finally {
      // Always release the watcher, even on failure part-way through.
      if (zookeeper != null) {
        zookeeper.close();
      }
    }
  }
1744 
1745   /**
1746    * Compact an individual region.
1747    * Asynchronous operation.
1748    *
1749    * @param regionName region to compact
1750    * @param columnFamily column family within a table or region
1751    * @param major True if we are to do a major compaction.
1752    * @throws IOException if a remote or network exception occurs
1753    * @throws InterruptedException
1754    */
1755   private void compactRegion(final byte[] regionName, final byte[] columnFamily,final boolean major)
1756   throws IOException, InterruptedException {
1757     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
1758     if (regionServerPair == null) {
1759       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
1760     }
1761     if (regionServerPair.getSecond() == null) {
1762       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1763     }
1764     compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
1765   }
1766 
1767   private void compact(final ServerName sn, final HRegionInfo hri,
1768       final boolean major, final byte [] family)
1769   throws IOException {
1770     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1771     CompactRegionRequest request =
1772       RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family);
1773     try {
1774       admin.compactRegion(null, request);
1775     } catch (ServiceException se) {
1776       throw ProtobufUtil.getRemoteException(se);
1777     }
1778   }
1779 
  /**
   * Move the region <code>r</code> to <code>dest</code>.
   * @param encodedRegionName The encoded region name; i.e. the hash that makes
   * up the region name suffix: e.g. if regionname is
   * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
   * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
   * @param destServerName The servername of the destination regionserver.  If
   * passed the empty byte array we'll assign to a random server.  A server name
   * is made of host, port and startcode.  Here is an example:
   * <code> host187.example.com,60020,1289493121758</code>
   * @throws UnknownRegionException Thrown if we can't find a region named
   * <code>encodedRegionName</code>
   * @throws ZooKeeperConnectionException
   * @throws MasterNotRunningException
   */
  @Override
  public void move(final byte [] encodedRegionName, final byte [] destServerName)
  throws HBaseIOException, MasterNotRunningException, ZooKeeperConnectionException {
    MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
    try {
      MoveRegionRequest request =
        RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName);
      stub.moveRegion(null, request);
    } catch (ServiceException se) {
      IOException ioe = ProtobufUtil.getRemoteException(se);
      if (ioe instanceof HBaseIOException) {
        throw (HBaseIOException)ioe;
      }
      // NOTE(review): any other remote exception is only logged, not rethrown —
      // the signature declares no plain IOException, so the move fails silently
      // from the caller's point of view.
      LOG.error("Unexpected exception: " + se + " from calling HMaster.moveRegion");
    } catch (DeserializationException de) {
      // destServerName bytes could not be parsed; logged only, move is dropped.
      LOG.error("Could not parse destination server name: " + de);
    } finally {
      // Always release the keep-alive master stub.
      stub.close();
    }
  }
1815 
1816   /**
1817    * @param regionName
1818    *          Region name to assign.
1819    * @throws MasterNotRunningException
1820    * @throws ZooKeeperConnectionException
1821    * @throws IOException
1822    */
1823   @Override
1824   public void assign(final byte[] regionName) throws MasterNotRunningException,
1825       ZooKeeperConnectionException, IOException {
1826     final byte[] toBeAssigned = getRegionName(regionName);
1827     executeCallable(new MasterCallable<Void>(getConnection()) {
1828       @Override
1829       public Void call(int callTimeout) throws ServiceException {
1830         AssignRegionRequest request =
1831           RequestConverter.buildAssignRegionRequest(toBeAssigned);
1832         master.assignRegion(null,request);
1833         return null;
1834       }
1835     });
1836   }
1837 
1838   /**
1839    * Unassign a region from current hosting regionserver.  Region will then be
1840    * assigned to a regionserver chosen at random.  Region could be reassigned
1841    * back to the same server.  Use {@link #move(byte[], byte[])} if you want
1842    * to control the region movement.
1843    * @param regionName Region to unassign. Will clear any existing RegionPlan
1844    * if one found.
1845    * @param force If true, force unassign (Will remove region from
1846    * regions-in-transition too if present. If results in double assignment
1847    * use hbck -fix to resolve. To be used by experts).
1848    * @throws MasterNotRunningException
1849    * @throws ZooKeeperConnectionException
1850    * @throws IOException
1851    */
1852   @Override
1853   public void unassign(final byte [] regionName, final boolean force)
1854   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
1855     final byte[] toBeUnassigned = getRegionName(regionName);
1856     executeCallable(new MasterCallable<Void>(getConnection()) {
1857       @Override
1858       public Void call(int callTimeout) throws ServiceException {
1859         UnassignRegionRequest request =
1860           RequestConverter.buildUnassignRegionRequest(toBeUnassigned, force);
1861         master.unassignRegion(null, request);
1862         return null;
1863       }
1864     });
1865   }
1866 
1867   /**
1868    * Offline specified region from master's in-memory state. It will not attempt to reassign the
1869    * region as in unassign. This API can be used when a region not served by any region server and
1870    * still online as per Master's in memory state. If this API is incorrectly used on active region
1871    * then master will loose track of that region.
1872    *
1873    * This is a special method that should be used by experts or hbck.
1874    *
1875    * @param regionName
1876    *          Region to offline.
1877    * @throws IOException
1878    */
1879   @Override
1880   public void offline(final byte [] regionName)
1881   throws IOException {
1882     MasterKeepAliveConnection master = connection.getKeepAliveMasterService();
1883     try {
1884       master.offlineRegion(null,RequestConverter.buildOfflineRegionRequest(regionName));
1885     } catch (ServiceException se) {
1886       throw ProtobufUtil.getRemoteException(se);
1887     } finally {
1888       master.close();
1889     }
1890   }
1891 
  /**
   * Turn the load balancer on or off.
   * @param on If true, enable balancer. If false, disable balancer.
   * @param synchronous If true, it waits until current balance() call, if outstanding, to return.
   * @return Previous balancer value
   */
  @Override
  public boolean setBalancerRunning(final boolean on, final boolean synchronous)
  throws MasterNotRunningException, ZooKeeperConnectionException {
    MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
    try {
      SetBalancerRunningRequest req =
        RequestConverter.buildSetBalancerRunningRequest(on, synchronous);
      return stub.setBalancerRunning(null, req).getPrevBalanceValue();
    } catch (ServiceException se) {
      // Unwrap the remote exception and rethrow the two declared types as-is.
      IOException ioe = ProtobufUtil.getRemoteException(se);
      if (ioe instanceof MasterNotRunningException) {
        throw (MasterNotRunningException)ioe;
      }
      if (ioe instanceof ZooKeeperConnectionException) {
        throw (ZooKeeperConnectionException)ioe;
      }

      // Throwing MasterNotRunningException even though not really valid in order to not
      // break interface by adding additional exception type.
      throw new MasterNotRunningException("Unexpected exception when calling balanceSwitch",se);
    } finally {
      // Always release the keep-alive master stub.
      stub.close();
    }
  }
1922 
1923   /**
1924    * Invoke the balancer.  Will run the balancer and if regions to move, it will
1925    * go ahead and do the reassignments.  Can NOT run for various reasons.  Check
1926    * logs.
1927    * @return True if balancer ran, false otherwise.
1928    */
1929   @Override
1930   public boolean balancer()
1931   throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException {
1932     MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
1933     try {
1934       return stub.balance(null, RequestConverter.buildBalanceRequest()).getBalancerRan();
1935     } finally {
1936       stub.close();
1937     }
1938   }
1939 
1940   /**
1941    * Enable/Disable the catalog janitor
1942    * @param enable if true enables the catalog janitor
1943    * @return the previous state
1944    * @throws ServiceException
1945    * @throws MasterNotRunningException
1946    */
1947   @Override
1948   public boolean enableCatalogJanitor(boolean enable)
1949       throws ServiceException, MasterNotRunningException {
1950     MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
1951     try {
1952       return stub.enableCatalogJanitor(null,
1953         RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
1954     } finally {
1955       stub.close();
1956     }
1957   }
1958 
1959   /**
1960    * Ask for a scan of the catalog table
1961    * @return the number of entries cleaned
1962    * @throws ServiceException
1963    * @throws MasterNotRunningException
1964    */
1965   @Override
1966   public int runCatalogScan() throws ServiceException, MasterNotRunningException {
1967     MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
1968     try {
1969       return stub.runCatalogScan(null,
1970         RequestConverter.buildCatalogScanRequest()).getScanResult();
1971     } finally {
1972       stub.close();
1973     }
1974   }
1975 
1976   /**
1977    * Query on the catalog janitor state (Enabled/Disabled?)
1978    * @throws ServiceException
1979    * @throws org.apache.hadoop.hbase.MasterNotRunningException
1980    */
1981   @Override
1982   public boolean isCatalogJanitorEnabled() throws ServiceException, MasterNotRunningException {
1983     MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
1984     try {
1985       return stub.isCatalogJanitorEnabled(null,
1986         RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
1987     } finally {
1988       stub.close();
1989     }
1990   }
1991 
  /**
   * Merge two regions. Asynchronous operation.
   * @param encodedNameOfRegionA encoded name of region a
   * @param encodedNameOfRegionB encoded name of region b
   * @param forcible true if do a compulsory merge, otherwise we will only merge
   *          two adjacent regions
   * @throws IOException
   */
  @Override
  public void mergeRegions(final byte[] encodedNameOfRegionA,
      final byte[] encodedNameOfRegionB, final boolean forcible)
      throws IOException {
    MasterKeepAliveConnection master = connection
        .getKeepAliveMasterService();
    try {
      DispatchMergingRegionsRequest request = RequestConverter
          .buildDispatchMergingRegionsRequest(encodedNameOfRegionA,
              encodedNameOfRegionB, forcible);
      master.dispatchMergingRegions(null, request);
    } catch (ServiceException se) {
      // Rethrow only the two region-merge specific exception types; everything
      // else is logged and swallowed (the merge silently does not happen).
      IOException ioe = ProtobufUtil.getRemoteException(se);
      if (ioe instanceof UnknownRegionException) {
        throw (UnknownRegionException) ioe;
      }
      if (ioe instanceof MergeRegionException) {
        throw (MergeRegionException) ioe;
      }
      LOG.error("Unexpected exception: " + se
          + " from calling HMaster.dispatchMergingRegions");
    } catch (DeserializationException de) {
      // Request bytes could not be parsed; logged only.
      LOG.error("Could not parse destination server name: " + de);
    } finally {
      // Always release the keep-alive master stub.
      master.close();
    }
  }
2027 
2028   /**
2029    * {@inheritDoc}
2030    */
2031   @Override
2032   public void split(final TableName tableName)
2033     throws IOException, InterruptedException {
2034     split(tableName, null);
2035   }
2036 
2037   /**
2038    * {@inheritDoc}
2039    */
2040   @Override
2041   public void splitRegion(final byte[] regionName)
2042     throws IOException, InterruptedException {
2043     splitRegion(regionName, null);
2044   }
2045 
2046   /**
2047    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
2048    * (byte[])} instead.
2049    */
2050   @Deprecated
2051   public void split(final String tableNameOrRegionName)
2052   throws IOException, InterruptedException {
2053     split(Bytes.toBytes(tableNameOrRegionName));
2054   }
2055 
2056   /**
2057    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
2058    * (byte[])} instead.
2059    */
2060   @Deprecated
2061   public void split(final byte[] tableNameOrRegionName)
2062   throws IOException, InterruptedException {
2063     split(tableNameOrRegionName, null);
2064   }
2065 
  /**
   * {@inheritDoc}
   */
  @Override
  public void split(final TableName tableName, final byte [] splitPoint)
  throws IOException, InterruptedException {
    // A short-lived ZK watcher is needed to look up the table's region locations.
    ZooKeeperWatcher zookeeper = null;
    try {
      checkTableExists(tableName);
      zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
        new ThrowableAbortable());
      List<Pair<HRegionInfo, ServerName>> pairs =
        MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
      for (Pair<HRegionInfo, ServerName> pair: pairs) {
        // May not be a server for a particular row
        if (pair.getSecond() == null) continue;
        HRegionInfo r = pair.getFirst();
        // check for parents
        if (r.isSplitParent()) continue;
        // if a split point given, only split that particular region
        if (splitPoint != null && !r.containsRow(splitPoint)) continue;
        // call out to region server to do split now
        split(pair.getSecond(), pair.getFirst(), splitPoint);
      }
    } finally {
      // Always release the watcher, even on failure part-way through.
      if (zookeeper != null) {
        zookeeper.close();
      }
    }
  }
2096 
2097   /**
2098    * {@inheritDoc}
2099    */
2100   @Override
2101   public void splitRegion(final byte[] regionName, final byte [] splitPoint)
2102   throws IOException, InterruptedException {
2103     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2104     if (regionServerPair == null) {
2105       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2106     }
2107     if (regionServerPair.getSecond() == null) {
2108       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2109     }
2110     split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
2111   }
2112 
2113   /**
2114    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2115    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2116    */
2117   @Deprecated
2118   public void split(final String tableNameOrRegionName,
2119     final String splitPoint) throws IOException, InterruptedException {
2120     split(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(splitPoint));
2121   }
2122 
2123   /**
2124    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2125    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2126    */
2127   @Deprecated
2128   public void split(final byte[] tableNameOrRegionName,
2129       final byte [] splitPoint) throws IOException, InterruptedException {
2130     try {
2131       splitRegion(tableNameOrRegionName, splitPoint);
2132     } catch (IllegalArgumentException e) {
2133       // Bad region, try table
2134       split(TableName.valueOf(tableNameOrRegionName), splitPoint);
2135     }
2136   }
2137 
2138   private void split(final ServerName sn, final HRegionInfo hri,
2139       byte[] splitPoint) throws IOException {
2140     if (hri.getStartKey() != null && splitPoint != null &&
2141          Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
2142        throw new IOException("should not give a splitkey which equals to startkey!");
2143     }
2144     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2145     ProtobufUtil.split(admin, hri, splitPoint);
2146   }
2147 
2148   /**
2149    * Modify an existing table, more IRB friendly version.
2150    * Asynchronous operation.  This means that it may be a while before your
2151    * schema change is updated across all of the table.
2152    *
2153    * @param tableName name of table.
2154    * @param htd modified description of the table
2155    * @throws IOException if a remote or network exception occurs
2156    */
2157   @Override
2158   public void modifyTable(final TableName tableName, final HTableDescriptor htd)
2159   throws IOException {
2160     if (!tableName.equals(htd.getTableName())) {
2161       throw new IllegalArgumentException("the specified table name '" + tableName +
2162         "' doesn't match with the HTD one: " + htd.getTableName());
2163     }
2164 
2165     executeCallable(new MasterCallable<Void>(getConnection()) {
2166       @Override
2167       public Void call(int callTimeout) throws ServiceException {
2168         ModifyTableRequest request = RequestConverter.buildModifyTableRequest(tableName, htd);
2169         master.modifyTable(null, request);
2170         return null;
2171       }
2172     });
2173   }
2174 
2175   public void modifyTable(final byte[] tableName, final HTableDescriptor htd)
2176   throws IOException {
2177     modifyTable(TableName.valueOf(tableName), htd);
2178   }
2179 
2180   public void modifyTable(final String tableName, final HTableDescriptor htd)
2181   throws IOException {
2182     modifyTable(TableName.valueOf(tableName), htd);
2183   }
2184 
  /**
   * @param regionName Name of a region.
   * @return a pair of HRegionInfo and ServerName if <code>regionName</code> is
   *  a verified region name (we call {@link
   *  MetaTableAccessor#getRegion(HConnection, byte[])}
   *  else null.
   * Throw IllegalArgumentException if <code>regionName</code> is null.
   * @throws IOException
   */
  Pair<HRegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException {
    if (regionName == null) {
      throw new IllegalArgumentException("Pass a table name or region name");
    }
    // Fast path: treat the input as a full region name and look it up directly.
    Pair<HRegionInfo, ServerName> pair =
      MetaTableAccessor.getRegion(connection, regionName);
    if (pair == null) {
      // Slow path: scan all of meta, treating the input as an encoded region name.
      final AtomicReference<Pair<HRegionInfo, ServerName>> result =
        new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
      final String encodedName = Bytes.toString(regionName);
      MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
        @Override
        public boolean processRow(Result data) throws IOException {
          HRegionInfo info = HRegionInfo.getHRegionInfo(data);
          if (info == null) {
            // Malformed meta row; skip it and keep scanning.
            LOG.warn("No serialized HRegionInfo in " + data);
            return true;
          }
          // Returning true continues the scan; false stops it.
          if (!encodedName.equals(info.getEncodedName())) return true;
          ServerName sn = HRegionInfo.getServerName(data);
          result.set(new Pair<HRegionInfo, ServerName>(info, sn));
          return false; // found the region, stop
        }
      };

      MetaScanner.metaScan(conf, connection, visitor, null);
      pair = result.get();
    }
    return pair;
  }
2224 
2225   /**
2226    * If the input is a region name, it is returned as is. If it's an
2227    * encoded region name, the corresponding region is found from meta
2228    * and its region name is returned. If we can't find any region in
2229    * meta matching the input as either region name or encoded region
2230    * name, the input is returned as is. We don't throw unknown
2231    * region exception.
2232    */
2233   private byte[] getRegionName(
2234       final byte[] regionNameOrEncodedRegionName) throws IOException {
2235     if (Bytes.equals(regionNameOrEncodedRegionName,
2236         HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
2237           || Bytes.equals(regionNameOrEncodedRegionName,
2238             HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) {
2239       return HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
2240     }
2241     byte[] tmp = regionNameOrEncodedRegionName;
2242     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName);
2243     if (regionServerPair != null && regionServerPair.getFirst() != null) {
2244       tmp = regionServerPair.getFirst().getRegionName();
2245     }
2246     return tmp;
2247   }
2248 
2249   /**
2250    * Check if table exists or not
2251    * @param tableName Name of a table.
2252    * @return tableName instance
2253    * @throws IOException if a remote or network exception occurs.
2254    * @throws TableNotFoundException if table does not exist.
2255    */
2256   private TableName checkTableExists(final TableName tableName)
2257       throws IOException {
2258     if (!MetaTableAccessor.tableExists(connection, tableName)) {
2259       throw new TableNotFoundException(tableName);
2260     }
2261     return tableName;
2262   }
2263 
2264   /**
2265    * Shuts down the HBase cluster
2266    * @throws IOException if a remote or network exception occurs
2267    */
2268   @Override
2269   public synchronized void shutdown() throws IOException {
2270     executeCallable(new MasterCallable<Void>(getConnection()) {
2271       @Override
2272       public Void call(int callTimeout) throws ServiceException {
2273         master.shutdown(null,ShutdownRequest.newBuilder().build());
2274         return null;
2275       }
2276     });
2277   }
2278 
2279   /**
2280    * Shuts down the current HBase master only.
2281    * Does not shutdown the cluster.
2282    * @see #shutdown()
2283    * @throws IOException if a remote or network exception occurs
2284    */
2285   @Override
2286   public synchronized void stopMaster() throws IOException {
2287     executeCallable(new MasterCallable<Void>(getConnection()) {
2288       @Override
2289       public Void call(int callTimeout) throws ServiceException {
2290         master.stopMaster(null, StopMasterRequest.newBuilder().build());
2291         return null;
2292       }
2293     });
2294   }
2295 
2296   /**
2297    * Stop the designated regionserver
2298    * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
2299    * <code>example.org:1234</code>
2300    * @throws IOException if a remote or network exception occurs
2301    */
2302   @Override
2303   public synchronized void stopRegionServer(final String hostnamePort)
2304   throws IOException {
2305     String hostname = Addressing.parseHostname(hostnamePort);
2306     int port = Addressing.parsePort(hostnamePort);
2307     AdminService.BlockingInterface admin =
2308       this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
2309     StopServerRequest request = RequestConverter.buildStopServerRequest(
2310       "Called by admin client " + this.connection.toString());
2311     try {
2312       admin.stopServer(null, request);
2313     } catch (ServiceException se) {
2314       throw ProtobufUtil.getRemoteException(se);
2315     }
2316   }
2317 
2318 
2319   /**
2320    * @return cluster status
2321    * @throws IOException if a remote or network exception occurs
2322    */
2323   @Override
2324   public ClusterStatus getClusterStatus() throws IOException {
2325     return executeCallable(new MasterCallable<ClusterStatus>(getConnection()) {
2326       @Override
2327       public ClusterStatus call(int callTimeout) throws ServiceException {
2328         GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest();
2329         return ClusterStatus.convert(master.getClusterStatus(null, req).getClusterStatus());
2330       }
2331     });
2332   }
2333 
2334   private HRegionLocation getFirstMetaServerForTable(final TableName tableName)
2335   throws IOException {
2336     return connection.locateRegion(TableName.META_TABLE_NAME,
2337       HRegionInfo.createRegionName(tableName, null, HConstants.NINES, false));
2338   }
2339 
2340   /**
2341    * @return Configuration used by the instance.
2342    */
2343   @Override
2344   public Configuration getConfiguration() {
2345     return this.conf;
2346   }
2347 
2348   /**
2349    * Create a new namespace
2350    * @param descriptor descriptor which describes the new namespace
2351    * @throws IOException
2352    */
2353   @Override
2354   public void createNamespace(final NamespaceDescriptor descriptor) throws IOException {
2355     executeCallable(new MasterCallable<Void>(getConnection()) {
2356       @Override
2357       public Void call(int callTimeout) throws Exception {
2358         master.createNamespace(null,
2359           CreateNamespaceRequest.newBuilder()
2360             .setNamespaceDescriptor(ProtobufUtil
2361               .toProtoNamespaceDescriptor(descriptor)).build()
2362         );
2363         return null;
2364       }
2365     });
2366   }
2367 
2368   /**
2369    * Modify an existing namespace
2370    * @param descriptor descriptor which describes the new namespace
2371    * @throws IOException
2372    */
2373   @Override
2374   public void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException {
2375     executeCallable(new MasterCallable<Void>(getConnection()) {
2376       @Override
2377       public Void call(int callTimeout) throws Exception {
2378         master.modifyNamespace(null, ModifyNamespaceRequest.newBuilder().
2379           setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build());
2380         return null;
2381       }
2382     });
2383   }
2384 
2385   /**
2386    * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
2387    * @param name namespace name
2388    * @throws IOException
2389    */
2390   @Override
2391   public void deleteNamespace(final String name) throws IOException {
2392     executeCallable(new MasterCallable<Void>(getConnection()) {
2393       @Override
2394       public Void call(int callTimeout) throws Exception {
2395         master.deleteNamespace(null, DeleteNamespaceRequest.newBuilder().
2396           setNamespaceName(name).build());
2397         return null;
2398       }
2399     });
2400   }
2401 
2402   /**
2403    * Get a namespace descriptor by name
2404    * @param name name of namespace descriptor
2405    * @return A descriptor
2406    * @throws IOException
2407    */
2408   @Override
2409   public NamespaceDescriptor getNamespaceDescriptor(final String name) throws IOException {
2410     return
2411         executeCallable(new MasterCallable<NamespaceDescriptor>(getConnection()) {
2412           @Override
2413           public NamespaceDescriptor call(int callTimeout) throws Exception {
2414             return ProtobufUtil.toNamespaceDescriptor(
2415               master.getNamespaceDescriptor(null, GetNamespaceDescriptorRequest.newBuilder().
2416                 setNamespaceName(name).build()).getNamespaceDescriptor());
2417           }
2418         });
2419   }
2420 
2421   /**
2422    * List available namespace descriptors
2423    * @return List of descriptors
2424    * @throws IOException
2425    */
2426   @Override
2427   public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
2428     return
2429         executeCallable(new MasterCallable<NamespaceDescriptor[]>(getConnection()) {
2430           @Override
2431           public NamespaceDescriptor[] call(int callTimeout) throws Exception {
2432             List<HBaseProtos.NamespaceDescriptor> list =
2433               master.listNamespaceDescriptors(null, ListNamespaceDescriptorsRequest.newBuilder().
2434                 build()).getNamespaceDescriptorList();
2435             NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()];
2436             for(int i = 0; i < list.size(); i++) {
2437               res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i));
2438             }
2439             return res;
2440           }
2441         });
2442   }
2443 
2444   /**
2445    * Get list of table descriptors by namespace
2446    * @param name namespace name
2447    * @return A descriptor
2448    * @throws IOException
2449    */
2450   @Override
2451   public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException {
2452     return
2453         executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
2454           @Override
2455           public HTableDescriptor[] call(int callTimeout) throws Exception {
2456             List<TableSchema> list =
2457               master.listTableDescriptorsByNamespace(null, ListTableDescriptorsByNamespaceRequest.
2458                 newBuilder().setNamespaceName(name).build()).getTableSchemaList();
2459             HTableDescriptor[] res = new HTableDescriptor[list.size()];
2460             for(int i=0; i < list.size(); i++) {
2461 
2462               res[i] = HTableDescriptor.convert(list.get(i));
2463             }
2464             return res;
2465           }
2466         });
2467   }
2468 
2469   /**
2470    * Get list of table names by namespace
2471    * @param name namespace name
2472    * @return The list of table names in the namespace
2473    * @throws IOException
2474    */
2475   @Override
2476   public TableName[] listTableNamesByNamespace(final String name) throws IOException {
2477     return
2478         executeCallable(new MasterCallable<TableName[]>(getConnection()) {
2479           @Override
2480           public TableName[] call(int callTimeout) throws Exception {
2481             List<HBaseProtos.TableName> tableNames =
2482               master.listTableNamesByNamespace(null, ListTableNamesByNamespaceRequest.
2483                 newBuilder().setNamespaceName(name).build())
2484                 .getTableNameList();
2485             TableName[] result = new TableName[tableNames.size()];
2486             for (int i = 0; i < tableNames.size(); i++) {
2487               result[i] = ProtobufUtil.toTableName(tableNames.get(i));
2488             }
2489             return result;
2490           }
2491         });
2492   }
2493 
  /**
   * Check to see if HBase is running. Throw an exception if not.
   * We consider that HBase is running if ZooKeeper and Master are running.
   * Uses a throwaway connection built from a copy of the configuration so the
   * caller's settings are not disturbed.
   *
   * @param conf system configuration
   * @throws MasterNotRunningException if the master is not running
   * @throws ZooKeeperConnectionException if unable to connect to zookeeper
   */
  public static void checkHBaseAvailable(Configuration conf)
    throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException {
    Configuration copyOfConf = HBaseConfiguration.create(conf);

    // We set it to make it fail as soon as possible if HBase is not available
    copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
    copyOfConf.setInt("zookeeper.recovery.retry", 0);

    ConnectionManager.HConnectionImplementation connection
      = (ConnectionManager.HConnectionImplementation)
      HConnectionManager.getConnection(copyOfConf);

    try {
      // Check ZK first.
      // If the connection exists, we may have a connection to ZK that does
      //  not work anymore
      ZooKeeperKeepAliveConnection zkw = null;
      try {
        // Probe the base znode to force an actual round-trip to ZooKeeper.
        zkw = connection.getKeepAliveZooKeeperWatcher();
        zkw.getRecoverableZooKeeper().getZooKeeper().exists(
          zkw.baseZNode, false);

      } catch (IOException e) {
        throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
      } catch (InterruptedException e) {
        // Preserve the interruption cause while meeting the IOException contract.
        throw (InterruptedIOException)
            new InterruptedIOException("Can't connect to ZooKeeper").initCause(e);
      } catch (KeeperException e) {
        throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
      } finally {
        // Always release the keep-alive watcher, even on failure.
        if (zkw != null) {
          zkw.close();
        }
      }

      // Check Master
      connection.isMasterRunning();

    } finally {
      connection.close();
    }
  }
2544 
2545   /**
2546    * get the regions of a given table.
2547    *
2548    * @param tableName the name of the table
2549    * @return Ordered list of {@link HRegionInfo}.
2550    * @throws IOException
2551    */
2552   @Override
2553   public List<HRegionInfo> getTableRegions(final TableName tableName)
2554   throws IOException {
2555     ZooKeeperWatcher zookeeper =
2556       new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2557         new ThrowableAbortable());
2558     List<HRegionInfo> Regions = null;
2559     try {
2560       Regions = MetaTableAccessor.getTableRegions(zookeeper, connection, tableName, true);
2561     } finally {
2562       zookeeper.close();
2563     }
2564     return Regions;
2565   }
2566 
  /**
   * Get the regions of a given table, identified by its name as raw bytes.
   * Convenience overload that delegates to {@link #getTableRegions(TableName)}.
   */
  public List<HRegionInfo> getTableRegions(final byte[] tableName)
  throws IOException {
    return getTableRegions(TableName.valueOf(tableName));
  }
2571 
2572   @Override
2573   public synchronized void close() throws IOException {
2574     if (cleanupConnectionOnClose && this.connection != null && !this.closed) {
2575       this.connection.close();
2576       this.closed = true;
2577     }
2578   }
2579 
  /**
   * Get tableDescriptors. Delegates the batch lookup to the connection.
   * @param tableNames List of table names
   * @return HTD[] the tableDescriptor
   * @throws IOException if a remote or network exception occurs
   */
  @Override
  public HTableDescriptor[] getTableDescriptorsByTableName(List<TableName> tableNames)
  throws IOException {
    return this.connection.getHTableDescriptorsByTableName(tableNames);
  }
2591 
2592   /**
2593    * Get tableDescriptors
2594    * @param names List of table names
2595    * @return HTD[] the tableDescriptor
2596    * @throws IOException if a remote or network exception occurs
2597    */
2598   @Override
2599   public HTableDescriptor[] getTableDescriptors(List<String> names)
2600   throws IOException {
2601     List<TableName> tableNames = new ArrayList<TableName>(names.size());
2602     for(String name : names) {
2603       tableNames.add(TableName.valueOf(name));
2604     }
2605     return getTableDescriptorsByTableName(tableNames);
2606   }
2607 
2608   /**
2609    * Roll the log writer. That is, start writing log messages to a new file.
2610    *
2611    * @param serverName
2612    *          The servername of the regionserver. A server name is made of host,
2613    *          port and startcode. This is mandatory. Here is an example:
2614    *          <code> host187.example.com,60020,1289493121758</code>
2615    * @return If lots of logs, flush the returned regions so next time through
2616    * we can clean logs. Returns null if nothing to flush.  Names are actual
2617    * region names as returned by {@link HRegionInfo#getEncodedName()}
2618    * @throws IOException if a remote or network exception occurs
2619    * @throws FailedLogCloseException
2620    */
2621   @Override
2622   public synchronized  byte[][] rollHLogWriter(String serverName)
2623       throws IOException, FailedLogCloseException {
2624     ServerName sn = ServerName.valueOf(serverName);
2625     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2626     RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest();
2627     try {
2628       RollWALWriterResponse response = admin.rollWALWriter(null, request);
2629       int regionCount = response.getRegionToFlushCount();
2630       byte[][] regionsToFlush = new byte[regionCount][];
2631       for (int i = 0; i < regionCount; i++) {
2632         ByteString region = response.getRegionToFlush(i);
2633         regionsToFlush[i] = region.toByteArray();
2634       }
2635       return regionsToFlush;
2636     } catch (ServiceException se) {
2637       throw ProtobufUtil.getRemoteException(se);
2638     }
2639   }
2640 
2641   @Override
2642   public String[] getMasterCoprocessors() {
2643     try {
2644       return getClusterStatus().getMasterCoprocessors();
2645     } catch (IOException e) {
2646       LOG.error("Could not getClusterStatus()",e);
2647       return null;
2648     }
2649   }
2650 
  /**
   * Get the aggregate compaction state of a table: asks every online,
   * deployed region of the table for its compaction state and merges the
   * answers (any region compacting both ways, or one region MAJOR while
   * another is MINOR, yields MAJOR_AND_MINOR).
   * {@inheritDoc}
   */
  @Override
  public CompactionState getCompactionState(final TableName tableName)
  throws IOException, InterruptedException {
    CompactionState state = CompactionState.NONE;
    ZooKeeperWatcher zookeeper =
      new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
        new ThrowableAbortable());
    try {
      checkTableExists(tableName);
      List<Pair<HRegionInfo, ServerName>> pairs =
        MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
      for (Pair<HRegionInfo, ServerName> pair: pairs) {
        // Skip regions that are offline or currently unassigned.
        if (pair.getFirst().isOffline()) continue;
        if (pair.getSecond() == null) continue;
        try {
          ServerName sn = pair.getSecond();
          AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
          GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
            pair.getFirst().getRegionName(), true);
          GetRegionInfoResponse response = admin.getRegionInfo(null, request);
          // Merge this region's state into the running aggregate; return
          // early once we know the answer can't change any further.
          switch (response.getCompactionState()) {
          case MAJOR_AND_MINOR:
            return CompactionState.MAJOR_AND_MINOR;
          case MAJOR:
            if (state == CompactionState.MINOR) {
              return CompactionState.MAJOR_AND_MINOR;
            }
            state = CompactionState.MAJOR;
            break;
          case MINOR:
            if (state == CompactionState.MAJOR) {
              return CompactionState.MAJOR_AND_MINOR;
            }
            state = CompactionState.MINOR;
            break;
          case NONE:
          default: // nothing, continue
          }
        } catch (NotServingRegionException e) {
          // Region moved between the meta scan and the RPC; best-effort, skip it.
          if (LOG.isDebugEnabled()) {
            LOG.debug("Trying to get compaction state of " +
              pair.getFirst() + ": " +
              StringUtils.stringifyException(e));
          }
        } catch (RemoteException e) {
          // A remote NotServingRegionException arrives wrapped; treat it the
          // same as the local case, rethrow anything else.
          if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Trying to get compaction state of " + pair.getFirst() + ": "
                + StringUtils.stringifyException(e));
            }
          } else {
            throw e;
          }
        }
      }
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    } finally {
      zookeeper.close();
    }
    return state;
  }
2716 
2717   /**
2718    * {@inheritDoc}
2719    */
2720   @Override
2721   public CompactionState getCompactionStateForRegion(final byte[] regionName)
2722   throws IOException, InterruptedException {
2723     try {
2724       Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2725       if (regionServerPair == null) {
2726         throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2727       }
2728       if (regionServerPair.getSecond() == null) {
2729         throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2730       }
2731       ServerName sn = regionServerPair.getSecond();
2732       AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2733       GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
2734         regionServerPair.getFirst().getRegionName(), true);
2735       GetRegionInfoResponse response = admin.getRegionInfo(null, request);
2736       return response.getCompactionState();
2737     } catch (ServiceException se) {
2738       throw ProtobufUtil.getRemoteException(se);
2739     }
2740   }
2741 
  /**
   * Get the compaction state for a table or region named by a string.
   * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
   * #getCompactionStateForRegion(byte[])} instead.
   */
  @Deprecated
  public CompactionState getCompactionState(final String tableNameOrRegionName)
  throws IOException, InterruptedException {
    return getCompactionState(Bytes.toBytes(tableNameOrRegionName));
  }
2751 
  /**
   * Get the compaction state for a table or region named by raw bytes.
   * First tries the input as a region name; if that is rejected, falls back
   * to treating it as a table name.
   * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
   * #getCompactionStateForRegion(byte[])} instead.
   */
  @Deprecated
  public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
  throws IOException, InterruptedException {
    try {
      return getCompactionStateForRegion(tableNameOrRegionName);
    } catch (IllegalArgumentException e) {
      // Invalid region, try table
      return getCompactionState(TableName.valueOf(tableNameOrRegionName));
    }
  }
2766 
  /**
   * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
   * taken. If the table is disabled, an offline snapshot is taken.
   * <p>
   * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
   * snapshot with the same name (even a different type or with different parameters) will fail with
   * a {@link SnapshotCreationException} indicating the duplicate naming.
   * <p>
   * Snapshot names follow the same naming constraints as tables in HBase. See
   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
   * @param snapshotName name of the snapshot to be created
   * @param tableName name of the table for which snapshot is created
   * @throws IOException if a remote or network exception occurs
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  @Override
  public void snapshot(final String snapshotName,
                       final TableName tableName) throws IOException,
      SnapshotCreationException, IllegalArgumentException {
    // FLUSH is requested here; the blocking snapshot(SnapshotDescription) does the work.
    snapshot(snapshotName, tableName, SnapshotDescription.Type.FLUSH);
  }
2789 
  /**
   * Take a FLUSH-type snapshot for the given table, both named by strings.
   * Convenience overload of {@link #snapshot(String, TableName)}.
   */
  public void snapshot(final String snapshotName,
                       final String tableName) throws IOException,
      SnapshotCreationException, IllegalArgumentException {
    snapshot(snapshotName, TableName.valueOf(tableName),
        SnapshotDescription.Type.FLUSH);
  }
2796 
  /**
   * Create snapshot for the given table of given flush type.
   * <p>
   * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
   * snapshot with the same name (even a different type or with different parameters) will fail with
   * a {@link SnapshotCreationException} indicating the duplicate naming.
   * <p>
   * Snapshot names follow the same naming constraints as tables in HBase.
   * @param snapshotName name of the snapshot to be created
   * @param tableName name of the table for which snapshot is created
   * @param flushType if the snapshot should be taken without flush memstore first
   * @throws IOException if a remote or network exception occurs
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
   public void snapshot(final byte[] snapshotName, final byte[] tableName,
                       final SnapshotDescription.Type flushType) throws
      IOException, SnapshotCreationException, IllegalArgumentException {
      // Convert both byte[] arguments to Strings and delegate.
      snapshot(Bytes.toString(snapshotName), Bytes.toString(tableName), flushType);
  }
  public void snapshot(final String snapshotName,
                       final byte[] tableName) throws IOException,
      SnapshotCreationException, IllegalArgumentException {
    snapshot(snapshotName, TableName.valueOf(tableName),
        SnapshotDescription.Type.FLUSH);
  }

  /**
   * Create a timestamp consistent snapshot for the given table.
   * <p>
   * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
   * snapshot with the same name (even a different type or with different parameters) will fail with
   * a {@link SnapshotCreationException} indicating the duplicate naming.
   * <p>
   * Snapshot names follow the same naming constraints as tables in HBase.
   * @param snapshotName name of the snapshot to be created
   * @param tableName name of the table for which snapshot is created
   * @throws IOException if a remote or network exception occurs
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  @Override
  public void snapshot(final byte[] snapshotName,
                       final TableName tableName) throws IOException,
      SnapshotCreationException, IllegalArgumentException {
    // Snapshot name arrives as bytes; convert and take a FLUSH-type snapshot.
    snapshot(Bytes.toString(snapshotName), tableName, SnapshotDescription.Type.FLUSH);
  }
2839 
  /**
   * Take a FLUSH-type snapshot for the given table, both named by raw bytes.
   * Convenience overload of {@link #snapshot(String, TableName)}.
   */
  public void snapshot(final byte[] snapshotName,
                       final byte[] tableName) throws IOException,
      SnapshotCreationException, IllegalArgumentException {
    snapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName),
      SnapshotDescription.Type.FLUSH);
  }
2846 
2847   /**
2848    * Create typed snapshot of the table.
2849    * <p>
2850    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2851    * snapshot with the same name (even a different type or with different parameters) will fail with
2852    * a {@link SnapshotCreationException} indicating the duplicate naming.
2853    * <p>
2854    * Snapshot names follow the same naming constraints as tables in HBase. See
2855    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
2856    * <p>
2857    * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
2858    *          snapshots stored on the cluster
2859    * @param tableName name of the table to snapshot
2860    * @param type type of snapshot to take
2861    * @throws IOException we fail to reach the master
2862    * @throws SnapshotCreationException if snapshot creation failed
2863    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2864    */
2865   @Override
2866   public void snapshot(final String snapshotName,
2867                        final TableName tableName,
2868                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
2869       IllegalArgumentException {
2870     SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
2871     builder.setTable(tableName.getNameAsString());
2872     builder.setName(snapshotName);
2873     builder.setType(type);
2874     snapshot(builder.build());
2875   }
2876 
  /**
   * Create a typed snapshot of the table, both names given as strings.
   * Convenience overload of {@link #snapshot(String, TableName,
   * SnapshotDescription.Type)}.
   */
  public void snapshot(final String snapshotName,
                       final String tableName,
                      SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
      IllegalArgumentException {
    snapshot(snapshotName, TableName.valueOf(tableName), type);
  }
2883 
  /**
   * Create a typed snapshot of the table, the table name given as raw bytes.
   * Convenience overload of {@link #snapshot(String, TableName,
   * SnapshotDescription.Type)}.
   */
  public void snapshot(final String snapshotName,
                       final byte[] tableName,
                      SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
      IllegalArgumentException {
    snapshot(snapshotName, TableName.valueOf(tableName), type);
  }
2890 
2891   /**
2892    * Take a snapshot and wait for the server to complete that snapshot (blocking).
2893    * <p>
2894    * Only a single snapshot should be taken at a time for an instance of HBase, or results may be
2895    * undefined (you can tell multiple HBase clusters to snapshot at the same time, but only one at a
2896    * time for a single cluster).
2897    * <p>
2898    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2899    * snapshot with the same name (even a different type or with different parameters) will fail with
2900    * a {@link SnapshotCreationException} indicating the duplicate naming.
2901    * <p>
2902    * Snapshot names follow the same naming constraints as tables in HBase. See
2903    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
2904    * <p>
2905    * You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])}
2906    * unless you are sure about the type of snapshot that you want to take.
2907    * @param snapshot snapshot to take
2908    * @throws IOException or we lose contact with the master.
2909    * @throws SnapshotCreationException if snapshot failed to be taken
2910    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2911    */
2912   @Override
2913   public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException,
2914       IllegalArgumentException {
2915     // actually take the snapshot
2916     SnapshotResponse response = takeSnapshotAsync(snapshot);
2917     final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot)
2918         .build();
2919     IsSnapshotDoneResponse done = null;
2920     long start = EnvironmentEdgeManager.currentTime();
2921     long max = response.getExpectedTimeout();
2922     long maxPauseTime = max / this.numRetries;
2923     int tries = 0;
2924     LOG.debug("Waiting a max of " + max + " ms for snapshot '" +
2925         ClientSnapshotDescriptionUtils.toString(snapshot) + "'' to complete. (max " +
2926         maxPauseTime + " ms per retry)");
2927     while (tries == 0
2928         || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) {
2929       try {
2930         // sleep a backoff <= pauseTime amount
2931         long sleep = getPauseTime(tries++);
2932         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
2933         LOG.debug("(#" + tries + ") Sleeping: " + sleep +
2934           "ms while waiting for snapshot completion.");
2935         Thread.sleep(sleep);
2936       } catch (InterruptedException e) {
2937         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
2938       }
2939       LOG.debug("Getting current status of snapshot from master...");
2940       done = executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
2941         @Override
2942         public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
2943           return master.isSnapshotDone(null, request);
2944         }
2945       });
2946     }
2947     if (!done.getDone()) {
2948       throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
2949           + "' wasn't completed in expectedTime:" + max + " ms", snapshot);
2950     }
2951   }
2952 
2953   /**
2954    * Take a snapshot without waiting for the server to complete that snapshot (asynchronous)
2955    * <p>
2956    * Only a single snapshot should be taken at a time, or results may be undefined.
2957    * @param snapshot snapshot to take
2958    * @return response from the server indicating the max time to wait for the snapshot
2959    * @throws IOException if the snapshot did not succeed or we lose contact with the master.
2960    * @throws SnapshotCreationException if snapshot creation failed
2961    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2962    */
2963   @Override
2964   public SnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot) throws IOException,
2965       SnapshotCreationException {
2966     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
2967     final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot)
2968         .build();
2969     // run the snapshot on the master
2970     return executeCallable(new MasterCallable<SnapshotResponse>(getConnection()) {
2971       @Override
2972       public SnapshotResponse call(int callTimeout) throws ServiceException {
2973         return master.snapshot(null, request);
2974       }
2975     });
2976   }
2977 
2978   /**
2979    * Check the current state of the passed snapshot.
2980    * <p>
2981    * There are three possible states:
2982    * <ol>
2983    * <li>running - returns <tt>false</tt></li>
2984    * <li>finished - returns <tt>true</tt></li>
2985    * <li>finished with error - throws the exception that caused the snapshot to fail</li>
2986    * </ol>
2987    * <p>
2988    * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
2989    * run/started since the snapshot your are checking, you will recieve an
2990    * {@link UnknownSnapshotException}.
2991    * @param snapshot description of the snapshot to check
2992    * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
2993    *         running
2994    * @throws IOException if we have a network issue
2995    * @throws HBaseSnapshotException if the snapshot failed
2996    * @throws UnknownSnapshotException if the requested snapshot is unknown
2997    */
2998   @Override
2999   public boolean isSnapshotFinished(final SnapshotDescription snapshot)
3000       throws IOException, HBaseSnapshotException, UnknownSnapshotException {
3001 
3002     return executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
3003       @Override
3004       public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3005         return master.isSnapshotDone(null,
3006           IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build());
3007       }
3008     }).getDone();
3009   }
3010 
3011   /**
3012    * Restore the specified snapshot on the original table. (The table must be disabled)
3013    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3014    * is set to true, a snapshot of the current table is taken
3015    * before executing the restore operation.
3016    * In case of restore failure, the failsafe snapshot will be restored.
3017    * If the restore completes without problem the failsafe snapshot is deleted.
3018    *
3019    * @param snapshotName name of the snapshot to restore
3020    * @throws IOException if a remote or network exception occurs
3021    * @throws RestoreSnapshotException if snapshot failed to be restored
3022    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3023    */
3024   @Override
3025   public void restoreSnapshot(final byte[] snapshotName)
3026       throws IOException, RestoreSnapshotException {
3027     restoreSnapshot(Bytes.toString(snapshotName));
3028   }
3029 
3030   /**
3031    * Restore the specified snapshot on the original table. (The table must be disabled)
3032    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3033    * is set to true, a snapshot of the current table is taken
3034    * before executing the restore operation.
3035    * In case of restore failure, the failsafe snapshot will be restored.
3036    * If the restore completes without problem the failsafe snapshot is deleted.
3037    *
3038    * @param snapshotName name of the snapshot to restore
3039    * @throws IOException if a remote or network exception occurs
3040    * @throws RestoreSnapshotException if snapshot failed to be restored
3041    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3042    */
3043   @Override
3044   public void restoreSnapshot(final String snapshotName)
3045       throws IOException, RestoreSnapshotException {
3046     boolean takeFailSafeSnapshot =
3047       conf.getBoolean("hbase.snapshot.restore.take.failsafe.snapshot", false);
3048     restoreSnapshot(snapshotName, takeFailSafeSnapshot);
3049   }
3050 
3051   /**
3052    * Restore the specified snapshot on the original table. (The table must be disabled)
3053    * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
3054    * before executing the restore operation.
3055    * In case of restore failure, the failsafe snapshot will be restored.
3056    * If the restore completes without problem the failsafe snapshot is deleted.
3057    *
3058    * The failsafe snapshot name is configurable by using the property
3059    * "hbase.snapshot.restore.failsafe.name".
3060    *
3061    * @param snapshotName name of the snapshot to restore
3062    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
3063    * @throws IOException if a remote or network exception occurs
3064    * @throws RestoreSnapshotException if snapshot failed to be restored
3065    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3066    */
3067   @Override
3068   public void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot)
3069       throws IOException, RestoreSnapshotException {
3070     restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
3071   }
3072 
  /**
   * Restore the specified snapshot on the original table. (The table must be disabled)
   * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
   * before executing the restore operation.
   * In case of restore failure, the failsafe snapshot will be restored.
   * If the restore completes without problem the failsafe snapshot is deleted.
   *
   * The failsafe snapshot name is configurable by using the property
   * "hbase.snapshot.restore.failsafe.name"; the placeholders {snapshot.name},
   * {table.name} and {restore.timestamp} in that value are substituted below.
   *
   * @param snapshotName name of the snapshot to restore
   * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
   * @throws IOException if a remote or network exception occurs
   * @throws RestoreSnapshotException if snapshot failed to be restored
   * @throws IllegalArgumentException if the restore request is formatted incorrectly
   */
  @Override
  public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
      throws IOException, RestoreSnapshotException {
    // Resolve the table the snapshot was taken from by scanning the completed-snapshot list.
    TableName tableName = null;
    for (SnapshotDescription snapshotInfo: listSnapshots()) {
      if (snapshotInfo.getName().equals(snapshotName)) {
        tableName = TableName.valueOf(snapshotInfo.getTable());
        break;
      }
    }

    if (tableName == null) {
      throw new RestoreSnapshotException(
        "Unable to find the table name for snapshot=" + snapshotName);
    }

    // The table does not exist: switch to a clone instead of a restore.
    if (!tableExists(tableName)) {
      try {
        cloneSnapshot(snapshotName, tableName);
      } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted when restoring a nonexistent table: " +
          e.getMessage());
      }
      return;
    }

    // A restore is only legal on a disabled table.
    if (!isTableDisabled(tableName)) {
      throw new TableNotDisabledException(tableName);
    }

    // Optionally snapshot the current state first, so a failed restore can be rolled back.
    String failSafeSnapshotSnapshotName = null;
    if (takeFailSafeSnapshot) {
      failSafeSnapshotSnapshotName = conf.get("hbase.snapshot.restore.failsafe.name",
        "hbase-failsafe-{snapshot.name}-{restore.timestamp}");
      failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotName
        .replace("{snapshot.name}", snapshotName)
        .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
        .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime()))
      LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
      snapshot(failSafeSnapshotSnapshotName, tableName);
    }

    try {
      // Run the (blocking) restore.
      internalRestoreSnapshot(snapshotName, tableName);
    } catch (IOException e) {
      // Something went wrong during the restore: if the pre-restore failsafe snapshot is
      // available, try to roll back to it.
      if (takeFailSafeSnapshot) {
        try {
          internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName);
          String msg = "Restore snapshot=" + snapshotName +
            " failed. Rollback to snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
          LOG.error(msg, e);
          throw new RestoreSnapshotException(msg, e);
        } catch (IOException ex) {
          // NOTE(review): the rollback failure 'ex' is only logged; the exception rethrown to
          // the caller carries the original restore failure 'e' as its cause.
          String msg = "Failed to restore and rollback to snapshot=" + failSafeSnapshotSnapshotName;
          LOG.error(msg, ex);
          throw new RestoreSnapshotException(msg, e);
        }
      } else {
        throw new RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e);
      }
    }

    // The restore succeeded: delete the pre-restore failsafe snapshot (best effort).
    if (takeFailSafeSnapshot) {
      try {
        LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
        deleteSnapshot(failSafeSnapshotSnapshotName);
      } catch (IOException e) {
        LOG.error("Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName, e);
      }
    }
  }
3167 
3168   /**
3169    * Create a new table by cloning the snapshot content.
3170    *
3171    * @param snapshotName name of the snapshot to be cloned
3172    * @param tableName name of the table where the snapshot will be restored
3173    * @throws IOException if a remote or network exception occurs
3174    * @throws TableExistsException if table to be created already exists
3175    * @throws RestoreSnapshotException if snapshot failed to be cloned
3176    * @throws IllegalArgumentException if the specified table has not a valid name
3177    */
3178   public void cloneSnapshot(final byte[] snapshotName, final byte[] tableName)
3179       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
3180     cloneSnapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName));
3181   }
3182 
3183   /**
3184    * Create a new table by cloning the snapshot content.
3185    *
3186    * @param snapshotName name of the snapshot to be cloned
3187    * @param tableName name of the table where the snapshot will be restored
3188    * @throws IOException if a remote or network exception occurs
3189    * @throws TableExistsException if table to be created already exists
3190    * @throws RestoreSnapshotException if snapshot failed to be cloned
3191    * @throws IllegalArgumentException if the specified table has not a valid name
3192    */
3193   @Override
3194   public void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
3195       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
3196     cloneSnapshot(Bytes.toString(snapshotName), tableName);
3197   }
3198 
3199 
3200 
3201   /**
3202    * Create a new table by cloning the snapshot content.
3203    *
3204    * @param snapshotName name of the snapshot to be cloned
3205    * @param tableName name of the table where the snapshot will be restored
3206    * @throws IOException if a remote or network exception occurs
3207    * @throws TableExistsException if table to be created already exists
3208    * @throws RestoreSnapshotException if snapshot failed to be cloned
3209    * @throws IllegalArgumentException if the specified table has not a valid name
3210    */
3211   public void cloneSnapshot(final String snapshotName, final String tableName)
3212       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
3213     cloneSnapshot(snapshotName, TableName.valueOf(tableName));
3214   }
3215 
3216   /**
3217    * Create a new table by cloning the snapshot content.
3218    *
3219    * @param snapshotName name of the snapshot to be cloned
3220    * @param tableName name of the table where the snapshot will be restored
3221    * @throws IOException if a remote or network exception occurs
3222    * @throws TableExistsException if table to be created already exists
3223    * @throws RestoreSnapshotException if snapshot failed to be cloned
3224    * @throws IllegalArgumentException if the specified table has not a valid name
3225    */
3226   @Override
3227   public void cloneSnapshot(final String snapshotName, final TableName tableName)
3228       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
3229     if (tableExists(tableName)) {
3230       throw new TableExistsException(tableName);
3231     }
3232     internalRestoreSnapshot(snapshotName, tableName);
3233     waitUntilTableIsEnabled(tableName);
3234   }
3235 
3236   /**
3237    * Execute a distributed procedure on a cluster synchronously with return data
3238    *
3239    * @param signature A distributed procedure is uniquely identified
3240    * by its signature (default the root ZK node name of the procedure).
3241    * @param instance The instance name of the procedure. For some procedures, this parameter is
3242    * optional.
3243    * @param props Property/Value pairs of properties passing to the procedure
3244    * @return data returned after procedure execution. null if no return data.
3245    * @throws IOException
3246    */
3247   @Override
3248   public byte[] execProcedureWithRet(String signature, String instance,
3249       Map<String, String> props) throws IOException {
3250     ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3251     builder.setSignature(signature).setInstance(instance);
3252     for (Entry<String, String> entry : props.entrySet()) {
3253       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3254           .setValue(entry.getValue()).build();
3255       builder.addConfiguration(pair);
3256     }
3257 
3258     final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
3259         .setProcedure(builder.build()).build();
3260     // run the procedure on the master
3261     ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
3262         getConnection()) {
3263       @Override
3264       public ExecProcedureResponse call(int callTimeout) throws ServiceException {
3265         return master.execProcedureWithRet(null, request);
3266       }
3267     });
3268 
3269     return response.hasReturnData() ? response.getReturnData().toByteArray() : null;
3270   }
3271   /**
3272    * Execute a distributed procedure on a cluster.
3273    *
3274    * @param signature A distributed procedure is uniquely identified
3275    * by its signature (default the root ZK node name of the procedure).
3276    * @param instance The instance name of the procedure. For some procedures, this parameter is
3277    * optional.
3278    * @param props Property/Value pairs of properties passing to the procedure
3279    * @throws IOException
3280    */
3281   @Override
3282   public void execProcedure(String signature, String instance,
3283       Map<String, String> props) throws IOException {
3284     ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3285     builder.setSignature(signature).setInstance(instance);
3286     for (Entry<String, String> entry : props.entrySet()) {
3287       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3288           .setValue(entry.getValue()).build();
3289       builder.addConfiguration(pair);
3290     }
3291 
3292     final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
3293         .setProcedure(builder.build()).build();
3294     // run the procedure on the master
3295     ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
3296         getConnection()) {
3297       @Override
3298       public ExecProcedureResponse call(int callTimeout) throws ServiceException {
3299         return master.execProcedure(null, request);
3300       }
3301     });
3302 
3303     long start = EnvironmentEdgeManager.currentTime();
3304     long max = response.getExpectedTimeout();
3305     long maxPauseTime = max / this.numRetries;
3306     int tries = 0;
3307     LOG.debug("Waiting a max of " + max + " ms for procedure '" +
3308         signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms per retry)");
3309     boolean done = false;
3310     while (tries == 0
3311         || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
3312       try {
3313         // sleep a backoff <= pauseTime amount
3314         long sleep = getPauseTime(tries++);
3315         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
3316         LOG.debug("(#" + tries + ") Sleeping: " + sleep +
3317           "ms while waiting for procedure completion.");
3318         Thread.sleep(sleep);
3319       } catch (InterruptedException e) {
3320         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
3321       }
3322       LOG.debug("Getting current status of procedure from master...");
3323       done = isProcedureFinished(signature, instance, props);
3324     }
3325     if (!done) {
3326       throw new IOException("Procedure '" + signature + " : " + instance
3327           + "' wasn't completed in expectedTime:" + max + " ms");
3328     }
3329   }
3330 
3331   /**
3332    * Check the current state of the specified procedure.
3333    * <p>
3334    * There are three possible states:
3335    * <ol>
3336    * <li>running - returns <tt>false</tt></li>
3337    * <li>finished - returns <tt>true</tt></li>
3338    * <li>finished with error - throws the exception that caused the procedure to fail</li>
3339    * </ol>
3340    * <p>
3341    *
3342    * @param signature The signature that uniquely identifies a procedure
3343    * @param instance The instance name of the procedure
3344    * @param props Property/Value pairs of properties passing to the procedure
3345    * @return true if the specified procedure is finished successfully, false if it is still running
3346    * @throws IOException if the specified procedure finished with error
3347    */
3348   @Override
3349   public boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
3350       throws IOException {
3351     final ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3352     builder.setSignature(signature).setInstance(instance);
3353     for (Entry<String, String> entry : props.entrySet()) {
3354       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3355           .setValue(entry.getValue()).build();
3356       builder.addConfiguration(pair);
3357     }
3358     final ProcedureDescription desc = builder.build();
3359     return executeCallable(
3360         new MasterCallable<IsProcedureDoneResponse>(getConnection()) {
3361           @Override
3362           public IsProcedureDoneResponse call(int callTimeout) throws ServiceException {
3363             return master.isProcedureDone(null, IsProcedureDoneRequest
3364                 .newBuilder().setProcedure(desc).build());
3365           }
3366         }).getDone();
3367   }
3368 
3369   /**
3370    * Execute Restore/Clone snapshot and wait for the server to complete (blocking).
3371    * To check if the cloned table exists, use {@link #isTableAvailable} -- it is not safe to
3372    * create an HTable instance to this table before it is available.
3373    * @param snapshotName snapshot to restore
3374    * @param tableName table name to restore the snapshot on
3375    * @throws IOException if a remote or network exception occurs
3376    * @throws RestoreSnapshotException if snapshot failed to be restored
3377    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3378    */
3379   private void internalRestoreSnapshot(final String snapshotName, final TableName
3380       tableName)
3381       throws IOException, RestoreSnapshotException {
3382     SnapshotDescription snapshot = SnapshotDescription.newBuilder()
3383         .setName(snapshotName).setTable(tableName.getNameAsString()).build();
3384 
3385     // actually restore the snapshot
3386     internalRestoreSnapshotAsync(snapshot);
3387 
3388     final IsRestoreSnapshotDoneRequest request = IsRestoreSnapshotDoneRequest.newBuilder()
3389         .setSnapshot(snapshot).build();
3390     IsRestoreSnapshotDoneResponse done = IsRestoreSnapshotDoneResponse.newBuilder()
3391         .setDone(false).buildPartial();
3392     final long maxPauseTime = 5000;
3393     int tries = 0;
3394     while (!done.getDone()) {
3395       try {
3396         // sleep a backoff <= pauseTime amount
3397         long sleep = getPauseTime(tries++);
3398         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
3399         LOG.debug(tries + ") Sleeping: " + sleep + " ms while we wait for snapshot restore to complete.");
3400         Thread.sleep(sleep);
3401       } catch (InterruptedException e) {
3402         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
3403       }
3404       LOG.debug("Getting current status of snapshot restore from master...");
3405       done = executeCallable(new MasterCallable<IsRestoreSnapshotDoneResponse>(
3406           getConnection()) {
3407         @Override
3408         public IsRestoreSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3409           return master.isRestoreSnapshotDone(null, request);
3410         }
3411       });
3412     }
3413     if (!done.getDone()) {
3414       throw new RestoreSnapshotException("Snapshot '" + snapshot.getName() + "' wasn't restored.");
3415     }
3416   }
3417 
3418   /**
3419    * Execute Restore/Clone snapshot and wait for the server to complete (asynchronous)
3420    * <p>
3421    * Only a single snapshot should be restored at a time, or results may be undefined.
3422    * @param snapshot snapshot to restore
3423    * @return response from the server indicating the max time to wait for the snapshot
3424    * @throws IOException if a remote or network exception occurs
3425    * @throws RestoreSnapshotException if snapshot failed to be restored
3426    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3427    */
3428   private RestoreSnapshotResponse internalRestoreSnapshotAsync(final SnapshotDescription snapshot)
3429       throws IOException, RestoreSnapshotException {
3430     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
3431 
3432     final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot)
3433         .build();
3434 
3435     // run the snapshot restore on the master
3436     return executeCallable(new MasterCallable<RestoreSnapshotResponse>(getConnection()) {
3437       @Override
3438       public RestoreSnapshotResponse call(int callTimeout) throws ServiceException {
3439         return master.restoreSnapshot(null, request);
3440       }
3441     });
3442   }
3443 
3444   /**
3445    * List completed snapshots.
3446    * @return a list of snapshot descriptors for completed snapshots
3447    * @throws IOException if a network error occurs
3448    */
3449   @Override
3450   public List<SnapshotDescription> listSnapshots() throws IOException {
3451     return executeCallable(new MasterCallable<List<SnapshotDescription>>(getConnection()) {
3452       @Override
3453       public List<SnapshotDescription> call(int callTimeout) throws ServiceException {
3454         return master.getCompletedSnapshots(null, GetCompletedSnapshotsRequest.newBuilder().build())
3455             .getSnapshotsList();
3456       }
3457     });
3458   }
3459 
3460   /**
3461    * List all the completed snapshots matching the given regular expression.
3462    *
3463    * @param regex The regular expression to match against
3464    * @return - returns a List of SnapshotDescription
3465    * @throws IOException if a remote or network exception occurs
3466    */
3467   @Override
3468   public List<SnapshotDescription> listSnapshots(String regex) throws IOException {
3469     return listSnapshots(Pattern.compile(regex));
3470   }
3471 
3472   /**
3473    * List all the completed snapshots matching the given pattern.
3474    *
3475    * @param pattern The compiled regular expression to match against
3476    * @return - returns a List of SnapshotDescription
3477    * @throws IOException if a remote or network exception occurs
3478    */
3479   @Override
3480   public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException {
3481     List<SnapshotDescription> matched = new LinkedList<SnapshotDescription>();
3482     List<SnapshotDescription> snapshots = listSnapshots();
3483     for (SnapshotDescription snapshot : snapshots) {
3484       if (pattern.matcher(snapshot.getName()).matches()) {
3485         matched.add(snapshot);
3486       }
3487     }
3488     return matched;
3489   }
3490 
3491   /**
3492    * Delete an existing snapshot.
3493    * @param snapshotName name of the snapshot
3494    * @throws IOException if a remote or network exception occurs
3495    */
3496   @Override
3497   public void deleteSnapshot(final byte[] snapshotName) throws IOException {
3498     deleteSnapshot(Bytes.toString(snapshotName));
3499   }
3500 
3501   /**
3502    * Delete an existing snapshot.
3503    * @param snapshotName name of the snapshot
3504    * @throws IOException if a remote or network exception occurs
3505    */
3506   @Override
3507   public void deleteSnapshot(final String snapshotName) throws IOException {
3508     // make sure the snapshot is possibly valid
3509     TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName));
3510     // do the delete
3511     executeCallable(new MasterCallable<Void>(getConnection()) {
3512       @Override
3513       public Void call(int callTimeout) throws ServiceException {
3514         master.deleteSnapshot(null,
3515           DeleteSnapshotRequest.newBuilder().
3516             setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build()
3517         );
3518         return null;
3519       }
3520     });
3521   }
3522 
3523   /**
3524    * Delete existing snapshots whose names match the pattern passed.
3525    * @param regex The regular expression to match against
3526    * @throws IOException if a remote or network exception occurs
3527    */
3528   @Override
3529   public void deleteSnapshots(final String regex) throws IOException {
3530     deleteSnapshots(Pattern.compile(regex));
3531   }
3532 
3533   /**
3534    * Delete existing snapshots whose names match the pattern passed.
3535    * @param pattern pattern for names of the snapshot to match
3536    * @throws IOException if a remote or network exception occurs
3537    */
3538   @Override
3539   public void deleteSnapshots(final Pattern pattern) throws IOException {
3540     List<SnapshotDescription> snapshots = listSnapshots(pattern);
3541     for (final SnapshotDescription snapshot : snapshots) {
3542       // do the delete
3543       executeCallable(new MasterCallable<Void>(getConnection()) {
3544         @Override
3545         public Void call(int callTimeout) throws ServiceException {
3546           this.master.deleteSnapshot(null,
3547             DeleteSnapshotRequest.newBuilder().setSnapshot(snapshot).build());
3548           return null;
3549         }
3550       });
3551     }
3552   }
3553 
  /**
   * Base class for retryable operations against the active master, used via
   * {@code executeCallable}. Subclasses implement {@code call} using the {@code master}
   * stub that {@link #prepare(boolean)} obtains from the connection.
   * @param <V> result type of the call
   */
  abstract static class MasterCallable<V> implements RetryingCallable<V>, Closeable {
    // Connection used to (re)acquire the master stub on each prepare().
    protected HConnection connection;
    // Keep-alive master stub; set by prepare(), released by close().
    protected MasterKeepAliveConnection master;

    public MasterCallable(final HConnection connection) {
      this.connection = connection;
    }

    @Override
    public void prepare(boolean reload) throws IOException {
      this.master = this.connection.getKeepAliveMasterService();
    }

    @Override
    public void close() throws IOException {
      // prepare() may have failed before assigning 'master', and close() is still called
      // on that path -- so guard against null before releasing the keep-alive stub.
      if (this.master != null) this.master.close();
    }

    @Override
    public void throwable(Throwable t, boolean retrying) {
      // No per-attempt error handling; retries are governed by the RpcRetryingCaller.
    }

    @Override
    public String getExceptionMessageAdditionalDetail() {
      return "";
    }

    @Override
    public long sleep(long pause, int tries) {
      return ConnectionUtils.getPauseTime(pause, tries);
    }
  }
3592 
3593   private <V> V executeCallable(MasterCallable<V> callable) throws IOException {
3594     RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller();
3595     try {
3596       return caller.callWithRetries(callable, operationTimeout);
3597     } finally {
3598       callable.close();
3599     }
3600   }
3601 
3602   /**
3603    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
3604    * connected to the active master.
3605    *
3606    * <p>
3607    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
3608    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
3609    * </p>
3610    *
3611    * <div style="background-color: #cccccc; padding: 2px">
3612    * <blockquote><pre>
3613    * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
3614    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
3615    * MyCallRequest request = MyCallRequest.newBuilder()
3616    *     ...
3617    *     .build();
3618    * MyCallResponse response = service.myCall(null, request);
3619    * </pre></blockquote></div>
3620    *
3621    * @return A MasterCoprocessorRpcChannel instance
3622    */
3623   @Override
3624   public CoprocessorRpcChannel coprocessorService() {
3625     return new MasterCoprocessorRpcChannel(connection);
3626   }
3627 
3628   /**
3629    * Simple {@link Abortable}, throwing RuntimeException on abort.
3630    */
3631   private static class ThrowableAbortable implements Abortable {
3632 
3633     @Override
3634     public void abort(String why, Throwable e) {
3635       throw new RuntimeException(why, e);
3636     }
3637 
3638     @Override
3639     public boolean isAborted() {
3640       return true;
3641     }
3642   }
3643 }