View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.client;
20  
21  
22  import java.io.Closeable;
23  import java.io.IOException;
24  import java.io.InterruptedIOException;
25  import java.net.SocketTimeoutException;
26  import java.util.ArrayList;
27  import java.util.Arrays;
28  import java.util.HashMap;
29  import java.util.LinkedList;
30  import java.util.List;
31  import java.util.Map;
32  import java.util.Map.Entry;
33  import java.util.concurrent.atomic.AtomicInteger;
34  import java.util.concurrent.atomic.AtomicReference;
35  import java.util.regex.Pattern;
36  
37  import org.apache.commons.logging.Log;
38  import org.apache.commons.logging.LogFactory;
39  import org.apache.hadoop.conf.Configuration;
40  import org.apache.hadoop.hbase.Abortable;
41  import org.apache.hadoop.hbase.ClusterStatus;
42  import org.apache.hadoop.hbase.HBaseConfiguration;
43  import org.apache.hadoop.hbase.HBaseIOException;
44  import org.apache.hadoop.hbase.HColumnDescriptor;
45  import org.apache.hadoop.hbase.HConstants;
46  import org.apache.hadoop.hbase.HRegionInfo;
47  import org.apache.hadoop.hbase.HRegionLocation;
48  import org.apache.hadoop.hbase.HTableDescriptor;
49  import org.apache.hadoop.hbase.MasterNotRunningException;
50  import org.apache.hadoop.hbase.MetaTableAccessor;
51  import org.apache.hadoop.hbase.NamespaceDescriptor;
52  import org.apache.hadoop.hbase.NotServingRegionException;
53  import org.apache.hadoop.hbase.RegionException;
54  import org.apache.hadoop.hbase.RegionLocations;
55  import org.apache.hadoop.hbase.ServerName;
56  import org.apache.hadoop.hbase.TableExistsException;
57  import org.apache.hadoop.hbase.TableName;
58  import org.apache.hadoop.hbase.TableNotDisabledException;
59  import org.apache.hadoop.hbase.TableNotEnabledException;
60  import org.apache.hadoop.hbase.TableNotFoundException;
61  import org.apache.hadoop.hbase.UnknownRegionException;
62  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
63  import org.apache.hadoop.hbase.classification.InterfaceAudience;
64  import org.apache.hadoop.hbase.classification.InterfaceStability;
65  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
66  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
67  import org.apache.hadoop.hbase.exceptions.DeserializationException;
68  import org.apache.hadoop.hbase.exceptions.MergeRegionException;
69  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
70  import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel;
71  import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel;
72  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
73  import org.apache.hadoop.hbase.protobuf.RequestConverter;
74  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
75  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
76  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
77  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
78  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
79  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
80  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
81  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
82  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
83  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
84  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
85  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
86  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
87  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
88  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
89  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
90  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
91  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
92  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
93  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
94  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
95  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
96  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
97  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
98  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
99  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
100 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
101 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
102 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
103 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
104 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
105 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
106 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
107 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
108 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
109 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
110 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
111 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
112 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
113 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
114 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
115 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
116 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
117 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
118 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
119 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
120 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
121 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
122 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
123 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
124 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
125 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
126 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
127 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
128 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
129 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
130 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
131 import org.apache.hadoop.hbase.quotas.QuotaFilter;
132 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
133 import org.apache.hadoop.hbase.quotas.QuotaSettings;
134 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
135 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
136 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
137 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
138 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
139 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
140 import org.apache.hadoop.hbase.util.Addressing;
141 import org.apache.hadoop.hbase.util.Bytes;
142 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
143 import org.apache.hadoop.hbase.util.Pair;
144 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
145 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
146 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
147 import org.apache.hadoop.ipc.RemoteException;
148 import org.apache.hadoop.util.StringUtils;
149 import org.apache.zookeeper.KeeperException;
150 
151 import com.google.protobuf.ByteString;
152 import com.google.protobuf.ServiceException;
153 
154 /**
155  * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
156  * this is an HBase-internal class as defined in
157  * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
158  * There are no guarantees for backwards source / binary compatibility and methods or class can
159  * change or go away without deprecation.
160  * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing
161  * an HBaseAdmin directly.
162  *
163  * <p>Connection should be an <i>unmanaged</i> connection obtained via
164  * {@link ConnectionFactory#createConnection(Configuration)}
165  *
166  * @see ConnectionFactory
167  * @see Connection
168  * @see Admin
169  */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HBaseAdmin implements Admin {
  private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);

  // Prefix used when building this admin's ZooKeeper watcher identifier.
  private static final String ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";

  // Connection all admin RPCs go through; may be externally managed (shared).
  private ClusterConnection connection;

  // Configuration taken from the connection at construction time.
  private volatile Configuration conf;
  // Base retry pause in ms; scaled by HConstants.RETRY_BACKOFF in getPauseTime().
  private final long pause;
  // Number of retries for 'normal' operations.
  private final int numRetries;
  // Some operations can take a long time such as disable of big table.
  // numRetries is for 'normal' stuff... Multiply by this factor when
  // want to wait a long time.
  private final int retryLongerMultiplier;
  // Set once abort() is called; reported via isAborted().
  private boolean aborted;
  private boolean cleanupConnectionOnClose = false; // close the connection in close()
  private boolean closed = false;
  // Per-operation RPC timeout in ms, from hbase.client.operation.timeout.
  private int operationTimeout;

  // Factory for retrying RPC callers used by executeCallable().
  private RpcRetryingCallerFactory rpcCallerFactory;
  /**
   * Constructor.
   * See {@link #HBaseAdmin(Connection connection)}
   *
   * @param c Configuration object. Copied internally.
   * @throws MasterNotRunningException not actually thrown; kept for compatibility
   * @throws ZooKeeperConnectionException not actually thrown; kept for compatibility
   * @throws IOException if the connection cannot be created
   * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
   * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
   */
  @Deprecated
  public HBaseAdmin(Configuration c)
  throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
    // Will not leak connections, as the new implementation of the constructor
    // does not throw exceptions anymore.
    this(ConnectionManager.getConnectionInternal(new Configuration(c)));
    // This admin created its own connection, so close() must also close it.
    this.cleanupConnectionOnClose = true;
  }
209 
  /** @return the per-operation RPC timeout in milliseconds configured at construction. */
  @Override
  public int getOperationTimeout() {
    return operationTimeout;
  }
214 
215 
  /**
   * Constructor for externally managed Connections.
   * The connection to master will be created when required by admin functions.
   *
   * @param connection The Connection instance to use; must be a {@link ClusterConnection}
   * @throws MasterNotRunningException, ZooKeeperConnectionException are not
   *  thrown anymore but kept into the interface for backward api compatibility
   * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
   * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
   */
  @Deprecated
  public HBaseAdmin(Connection connection)
      throws MasterNotRunningException, ZooKeeperConnectionException {
    // NOTE(review): assumes the supplied Connection is a ClusterConnection;
    // anything else fails with a ClassCastException here.
    this((ClusterConnection)connection);
  }
231 
  /**
   * Package-private constructor used internally. Reads retry and timeout
   * settings from the connection's configuration; the connection is not
   * closed by this admin unless {@code cleanupConnectionOnClose} is set.
   *
   * @param connection the cluster connection to operate through
   */
  HBaseAdmin(ClusterConnection connection) {
    this.conf = connection.getConfiguration();
    this.connection = connection;

    // Base pause (ms) between retries; scaled by the backoff table in getPauseTime().
    this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
        HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    // Retry budget for 'normal' operations.
    this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // Multiplier applied to numRetries for long-running waits (e.g. disable of a big table).
    this.retryLongerMultiplier = this.conf.getInt(
        "hbase.client.retries.longer.multiplier", 10);
    this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);

    this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf);
  }
247 
  /**
   * Marks this admin as aborted and rethrows the cause as a RuntimeException.
   * Does not release any resources.
   */
  @Override
  public void abort(String why, Throwable e) {
    // Currently does nothing but throw the passed message and exception
    this.aborted = true;
    throw new RuntimeException(why, e);
  }
254 
  /** @return true if {@link #abort(String, Throwable)} was ever called on this instance. */
  @Override
  public boolean isAborted(){
    return this.aborted;
  }
259 
  /** @return HConnection used by this object. */
  @Override
  public HConnection getConnection() {
    return connection;
  }
265 
  /** @return - true if the master server is running. Throws an exception
   *  otherwise.
   * @throws ZooKeeperConnectionException if ZooKeeper cannot be reached
   * @throws MasterNotRunningException if the master is not running
   */
  @Override
  public boolean isMasterRunning()
  throws MasterNotRunningException, ZooKeeperConnectionException {
    // Delegates entirely to the connection's master liveness check.
    return connection.isMasterRunning();
  }
276 
  /**
   * @param tableName Table to check.
   * @return True if table exists already.
   * @throws IOException if a remote or network exception occurs
   */
  @Override
  public boolean tableExists(final TableName tableName) throws IOException {
    // Checks hbase:meta directly rather than asking the master.
    return MetaTableAccessor.tableExists(connection, tableName);
  }
286 
  /** Overload of {@link #tableExists(TableName)} taking the table name as bytes. */
  public boolean tableExists(final byte[] tableName)
  throws IOException {
    return tableExists(TableName.valueOf(tableName));
  }
291 
  /** Overload of {@link #tableExists(TableName)} taking the table name as a String. */
  public boolean tableExists(final String tableName)
  throws IOException {
    return tableExists(TableName.valueOf(tableName));
  }
296 
  /**
   * List all the userspace tables.  In other words, scan the hbase:meta table.
   *
   * If we wanted this to be really fast, we could implement a special
   * catalog table that just contains table names and their descriptors.
   * Right now, it only exists as part of the hbase:meta table's region info.
   *
   * @return - returns an array of HTableDescriptors
   * @throws IOException if a remote or network exception occurs
   */
  @Override
  public HTableDescriptor[] listTables() throws IOException {
    return this.connection.listTables();
  }
311 
312   /**
313    * List all the userspace tables matching the given pattern.
314    *
315    * @param pattern The compiled regular expression to match against
316    * @return - returns an array of HTableDescriptors
317    * @throws IOException if a remote or network exception occurs
318    * @see #listTables()
319    */
320   @Override
321   public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
322     List<HTableDescriptor> matched = new LinkedList<HTableDescriptor>();
323     HTableDescriptor[] tables = listTables();
324     for (HTableDescriptor table : tables) {
325       if (pattern.matcher(table.getTableName().getNameAsString()).matches()) {
326         matched.add(table);
327       }
328     }
329     return matched.toArray(new HTableDescriptor[matched.size()]);
330   }
331 
  /**
   * List all the userspace tables matching the given regular expression.
   *
   * @param regex The regular expression to match against
   * @return - returns an array of HTableDescriptors
   * @throws IOException if a remote or network exception occurs
   * @see #listTables(java.util.regex.Pattern)
   */
  @Override
  public HTableDescriptor[] listTables(String regex) throws IOException {
    return listTables(Pattern.compile(regex));
  }
344 
  /**
   * List all of the names of userspace tables.
   * @return String[] table names
   * @throws IOException if a remote or network exception occurs
   * @deprecated use {@link #listTableNames()} instead
   */
  @Deprecated
  public String[] getTableNames() throws IOException {
    return this.connection.getTableNames();
  }
354 
355   /**
356    * List all of the names of userspace tables matching the given regular expression.
357    * @param pattern The regular expression to match against
358    * @return String[] table names
359    * @throws IOException if a remote or network exception occurs
360    */
361   @Deprecated
362   public String[] getTableNames(Pattern pattern) throws IOException {
363     List<String> matched = new ArrayList<String>();
364     for (String name: this.connection.getTableNames()) {
365       if (pattern.matcher(name).matches()) {
366         matched.add(name);
367       }
368     }
369     return matched.toArray(new String[matched.size()]);
370   }
371 
  /**
   * List all of the names of userspace tables matching the given regular expression.
   * @param regex The regular expression to match against
   * @return String[] table names
   * @throws IOException if a remote or network exception occurs
   * @deprecated use {@link #listTableNames()} instead
   */
  @Deprecated
  public String[] getTableNames(String regex) throws IOException {
    return getTableNames(Pattern.compile(regex));
  }
382 
  /**
   * List all of the names of userspace tables.
   * @return TableName[] table names
   * @throws IOException if a remote or network exception occurs
   */
  @Override
  public TableName[] listTableNames() throws IOException {
    return this.connection.listTableNames();
  }
392 
  /**
   * Method for getting the tableDescriptor
   * @param tableName as a byte []
   * @return the tableDescriptor
   * @throws TableNotFoundException if the table does not exist
   * @throws IOException if a remote or network exception occurs
   */
  @Override
  public HTableDescriptor getTableDescriptor(final TableName tableName)
  throws TableNotFoundException, IOException {
    return this.connection.getHTableDescriptor(tableName);
  }
405 
  /** Overload of {@link #getTableDescriptor(TableName)} taking the table name as bytes. */
  public HTableDescriptor getTableDescriptor(final byte[] tableName)
  throws TableNotFoundException, IOException {
    return getTableDescriptor(TableName.valueOf(tableName));
  }
410 
411   private long getPauseTime(int tries) {
412     int triesCount = tries;
413     if (triesCount >= HConstants.RETRY_BACKOFF.length) {
414       triesCount = HConstants.RETRY_BACKOFF.length - 1;
415     }
416     return this.pause * HConstants.RETRY_BACKOFF[triesCount];
417   }
418 
  /**
   * Creates a new table.
   * Synchronous operation.
   *
   * @param desc table descriptor for table
   *
   * @throws IllegalArgumentException if the table name is reserved
   * @throws MasterNotRunningException if master is not running
   * @throws TableExistsException if table already exists (If concurrent
   * threads, the table may have been created between test-for-existence
   * and attempt-at-creation).
   * @throws IOException if a remote or network exception occurs
   */
  @Override
  public void createTable(HTableDescriptor desc)
  throws IOException {
    // No split keys: the table starts with a single region.
    createTable(desc, null);
  }
437 
438   /**
439    * Creates a new table with the specified number of regions.  The start key
440    * specified will become the end key of the first region of the table, and
441    * the end key specified will become the start key of the last region of the
442    * table (the first region has a null start key and the last region has a
443    * null end key).
444    *
445    * BigInteger math will be used to divide the key range specified into
446    * enough segments to make the required number of total regions.
447    *
448    * Synchronous operation.
449    *
450    * @param desc table descriptor for table
451    * @param startKey beginning of key range
452    * @param endKey end of key range
453    * @param numRegions the total number of regions to create
454    *
455    * @throws IllegalArgumentException if the table name is reserved
456    * @throws MasterNotRunningException if master is not running
457    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
458    * threads, the table may have been created between test-for-existence
459    * and attempt-at-creation).
460    * @throws IOException
461    */
462   @Override
463   public void createTable(HTableDescriptor desc, byte [] startKey,
464       byte [] endKey, int numRegions)
465   throws IOException {
466     if(numRegions < 3) {
467       throw new IllegalArgumentException("Must create at least three regions");
468     } else if(Bytes.compareTo(startKey, endKey) >= 0) {
469       throw new IllegalArgumentException("Start key must be smaller than end key");
470     }
471     if (numRegions == 3) {
472       createTable(desc, new byte[][]{startKey, endKey});
473       return;
474     }
475     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
476     if(splitKeys == null || splitKeys.length != numRegions - 1) {
477       throw new IllegalArgumentException("Unable to split key range into enough regions");
478     }
479     createTable(desc, splitKeys);
480   }
481 
  /**
   * Creates a new table with an initial set of empty regions defined by the
   * specified split keys.  The total number of regions created will be the
   * number of split keys plus one. Synchronous operation.
   * Note : Avoid passing empty split key.
   *
   * @param desc table descriptor for table
   * @param splitKeys array of split keys for the initial regions of the table
   *
   * @throws IllegalArgumentException if the table name is reserved, if the split keys
   * are repeated and if the split key has empty byte array.
   * @throws MasterNotRunningException if master is not running
   * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
   * threads, the table may have been created between test-for-existence
   * and attempt-at-creation).
   * @throws IOException if a remote or network exception occurs
   */
  @Override
  public void createTable(final HTableDescriptor desc, byte [][] splitKeys)
  throws IOException {
    try {
      createTableAsync(desc, splitKeys);
    } catch (SocketTimeoutException ste) {
      // The master may still be creating the table; fall through and poll below.
      LOG.warn("Creating " + desc.getTableName() + " took too long", ste);
    }
    // Expected region count includes region replicas.
    int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication();
    int prevRegCount = 0;
    boolean doneWithMetaScan = false;
    // Phase 1: scan hbase:meta until all regions are assigned.
    // Phase 2: wait until the table reports enabled.
    for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier;
      ++tries) {
      if (!doneWithMetaScan) {
        // Wait for new table to come on-line
        final AtomicInteger actualRegCount = new AtomicInteger(0);
        MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
          @Override
          public boolean processRow(Result rowResult) throws IOException {
            RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
            if (list == null) {
              LOG.warn("No serialized HRegionInfo in " + rowResult);
              return true;
            }
            HRegionLocation l = list.getRegionLocation();
            if (l == null) {
              return true;
            }
            if (!l.getRegionInfo().getTable().equals(desc.getTableName())) {
              // Scanned past this table's rows; stop the meta scan.
              return false;
            }
            if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true;
            HRegionLocation[] locations = list.getRegionLocations();
            for (HRegionLocation location : locations) {
              if (location == null) continue;
              ServerName serverName = location.getServerName();
              // Make sure that regions are assigned to server
              if (serverName != null && serverName.getHostAndPort() != null) {
                actualRegCount.incrementAndGet();
              }
            }
            return true;
          }
        };
        MetaScanner.metaScan(conf, connection, visitor, desc.getTableName());
        if (actualRegCount.get() < numRegs) {
          if (tries == this.numRetries * this.retryLongerMultiplier - 1) {
            throw new RegionOfflineException("Only " + actualRegCount.get() +
              " of " + numRegs + " regions are online; retries exhausted.");
          }
          try { // Sleep
            Thread.sleep(getPauseTime(tries));
          } catch (InterruptedException e) {
            throw new InterruptedIOException("Interrupted when opening" +
              " regions; " + actualRegCount.get() + " of " + numRegs +
              " regions processed so far");
          }
          if (actualRegCount.get() > prevRegCount) { // Making progress
            prevRegCount = actualRegCount.get();
            // Reset the retry budget while region count keeps increasing.
            tries = -1;
          }
        } else {
          // All regions assigned; switch to waiting for the table to be enabled.
          doneWithMetaScan = true;
          tries = -1;
        }
      } else if (isTableEnabled(desc.getTableName())) {
        return;
      } else {
        try { // Sleep
          Thread.sleep(getPauseTime(tries));
        } catch (InterruptedException e) {
          throw new InterruptedIOException("Interrupted when waiting" +
            " for table to be enabled; meta scan was done");
        }
      }
    }
    throw new TableNotEnabledException(
      "Retries exhausted while still waiting for table: "
      + desc.getTableName() + " to be enabled");
  }
579 
580   /**
581    * Creates a new table but does not block and wait for it to come online.
582    * Asynchronous operation.  To check if the table exists, use
583    * {@link #isTableAvailable} -- it is not safe to create an HTable
584    * instance to this table before it is available.
585    * Note : Avoid passing empty split key.
586    * @param desc table descriptor for table
587    *
588    * @throws IllegalArgumentException Bad table name, if the split keys
589    * are repeated and if the split key has empty byte array.
590    * @throws MasterNotRunningException if master is not running
591    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
592    * threads, the table may have been created between test-for-existence
593    * and attempt-at-creation).
594    * @throws IOException
595    */
596   @Override
597   public void createTableAsync(
598     final HTableDescriptor desc, final byte [][] splitKeys)
599   throws IOException {
600     if(desc.getTableName() == null) {
601       throw new IllegalArgumentException("TableName cannot be null");
602     }
603     if(splitKeys != null && splitKeys.length > 0) {
604       Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
605       // Verify there are no duplicate split keys
606       byte [] lastKey = null;
607       for(byte [] splitKey : splitKeys) {
608         if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
609           throw new IllegalArgumentException(
610               "Empty split key must not be passed in the split keys.");
611         }
612         if(lastKey != null && Bytes.equals(splitKey, lastKey)) {
613           throw new IllegalArgumentException("All split keys must be unique, " +
614             "found duplicate: " + Bytes.toStringBinary(splitKey) +
615             ", " + Bytes.toStringBinary(lastKey));
616         }
617         lastKey = splitKey;
618       }
619     }
620 
621     executeCallable(new MasterCallable<Void>(getConnection()) {
622       @Override
623       public Void call(int callTimeout) throws ServiceException {
624         CreateTableRequest request = RequestConverter.buildCreateTableRequest(desc, splitKeys);
625         master.createTable(null, request);
626         return null;
627       }
628     });
629   }
630 
  /** Overload of {@link #deleteTable(TableName)} taking the table name as a String. */
  public void deleteTable(final String tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }
634 
  /** Overload of {@link #deleteTable(TableName)} taking the table name as bytes. */
  public void deleteTable(final byte[] tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }
638 
639   /**
640    * Deletes a table.
641    * Synchronous operation.
642    *
643    * @param tableName name of table to delete
644    * @throws IOException if a remote or network exception occurs
645    */
646   @Override
647   public void deleteTable(final TableName tableName) throws IOException {
648     boolean tableExists = true;
649 
650     executeCallable(new MasterCallable<Void>(getConnection()) {
651       @Override
652       public Void call(int callTimeout) throws ServiceException {
653         DeleteTableRequest req = RequestConverter.buildDeleteTableRequest(tableName);
654         master.deleteTable(null,req);
655         return null;
656       }
657     });
658 
659     int failures = 0;
660     // Wait until all regions deleted
661     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
662       try {
663         // Find whether all regions are deleted.
664         List<RegionLocations> regionLations =
665             MetaScanner.listTableRegionLocations(conf, connection, tableName);
666 
667         // let us wait until hbase:meta table is updated and
668         // HMaster removes the table from its HTableDescriptors
669         if (regionLations == null || regionLations.size() == 0) {
670           HTableDescriptor htd = getTableDescriptorByTableName(tableName);
671 
672           if (htd == null) {
673             // table could not be found in master - we are done.
674             tableExists = false;
675             break;
676           }
677         }
678       } catch (IOException ex) {
679         failures++;
680         if(failures >= numRetries - 1) {           // no more tries left
681           if (ex instanceof RemoteException) {
682             throw ((RemoteException) ex).unwrapRemoteException();
683           } else {
684             throw ex;
685           }
686         }
687       }
688       try {
689         Thread.sleep(getPauseTime(tries));
690       } catch (InterruptedException e) {
691         throw new InterruptedIOException("Interrupted when waiting" +
692             " for table to be deleted");
693       }
694     }
695 
696     if (tableExists) {
697       throw new IOException("Retries exhausted, it took too long to wait"+
698         " for the table " + tableName + " to be deleted.");
699     }
700     // Delete cached information to prevent clients from using old locations
701     this.connection.clearRegionCache(tableName);
702     LOG.info("Deleted " + tableName);
703   }
704 
705   /**
706    * Deletes tables matching the passed in pattern and wait on completion.
707    *
708    * Warning: Use this method carefully, there is no prompting and the effect is
709    * immediate. Consider using {@link #listTables(java.lang.String)} and
710    * {@link #deleteTable(byte[])}
711    *
712    * @param regex The regular expression to match table names against
713    * @return Table descriptors for tables that couldn't be deleted
714    * @throws IOException
715    * @see #deleteTables(java.util.regex.Pattern)
716    * @see #deleteTable(java.lang.String)
717    */
718   @Override
719   public HTableDescriptor[] deleteTables(String regex) throws IOException {
720     return deleteTables(Pattern.compile(regex));
721   }
722 
723   /**
724    * Delete tables matching the passed in pattern and wait on completion.
725    *
726    * Warning: Use this method carefully, there is no prompting and the effect is
727    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
728    * {@link #deleteTable(byte[])}
729    *
730    * @param pattern The pattern to match table names against
731    * @return Table descriptors for tables that couldn't be deleted
732    * @throws IOException
733    */
734   @Override
735   public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException {
736     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
737     for (HTableDescriptor table : listTables(pattern)) {
738       try {
739         deleteTable(table.getTableName());
740       } catch (IOException ex) {
741         LOG.info("Failed to delete table " + table.getTableName(), ex);
742         failed.add(table);
743       }
744     }
745     return failed.toArray(new HTableDescriptor[failed.size()]);
746   }
747 
748   /**
749    * Truncate a table.
750    * Synchronous operation.
751    *
752    * @param tableName name of table to truncate
753    * @param preserveSplits True if the splits should be preserved
754    * @throws IOException if a remote or network exception occurs
755    */
756   @Override
757   public void truncateTable(final TableName tableName, final boolean preserveSplits)
758       throws IOException {
759     executeCallable(new MasterCallable<Void>(getConnection()) {
760       @Override
761       public Void call(int callTimeout) throws ServiceException {
762         TruncateTableRequest req = RequestConverter.buildTruncateTableRequest(
763           tableName, preserveSplits);
764         master.truncateTable(null, req);
765         return null;
766       }
767     });
768   }
769 
770   /**
771    * Enable a table.  May timeout.  Use {@link #enableTableAsync(byte[])}
772    * and {@link #isTableEnabled(byte[])} instead.
773    * The table has to be in disabled state for it to be enabled.
774    * @param tableName name of the table
775    * @throws IOException if a remote or network exception occurs
776    * There could be couple types of IOException
777    * TableNotFoundException means the table doesn't exist.
778    * TableNotDisabledException means the table isn't in disabled state.
779    * @see #isTableEnabled(byte[])
780    * @see #disableTable(byte[])
781    * @see #enableTableAsync(byte[])
782    */
783   @Override
784   public void enableTable(final TableName tableName)
785   throws IOException {
786     enableTableAsync(tableName);
787 
788     // Wait until all regions are enabled
789     waitUntilTableIsEnabled(tableName);
790 
791     LOG.info("Enabled table " + tableName);
792   }
793 
794   public void enableTable(final byte[] tableName)
795   throws IOException {
796     enableTable(TableName.valueOf(tableName));
797   }
798 
799   public void enableTable(final String tableName)
800   throws IOException {
801     enableTable(TableName.valueOf(tableName));
802   }
803 
804   /**
805    * Wait for the table to be enabled and available
806    * If enabling the table exceeds the retry period, an exception is thrown.
807    * @param tableName name of the table
808    * @throws IOException if a remote or network exception occurs or
809    *    table is not enabled after the retries period.
810    */
811   private void waitUntilTableIsEnabled(final TableName tableName) throws IOException {
812     boolean enabled = false;
813     long start = EnvironmentEdgeManager.currentTime();
814     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
815       try {
816         enabled = isTableEnabled(tableName);
817       } catch (TableNotFoundException tnfe) {
818         // wait for table to be created
819         enabled = false;
820       }
821       enabled = enabled && isTableAvailable(tableName);
822       if (enabled) {
823         break;
824       }
825       long sleep = getPauseTime(tries);
826       if (LOG.isDebugEnabled()) {
827         LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
828           "enabled in " + tableName);
829       }
830       try {
831         Thread.sleep(sleep);
832       } catch (InterruptedException e) {
833         // Do this conversion rather than let it out because do not want to
834         // change the method signature.
835         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
836       }
837     }
838     if (!enabled) {
839       long msec = EnvironmentEdgeManager.currentTime() - start;
840       throw new IOException("Table '" + tableName +
841         "' not yet enabled, after " + msec + "ms.");
842     }
843   }
844 
845   /**
846    * Brings a table on-line (enables it).  Method returns immediately though
847    * enable of table may take some time to complete, especially if the table
848    * is large (All regions are opened as part of enabling process).  Check
849    * {@link #isTableEnabled(byte[])} to learn when table is fully online.  If
850    * table is taking too long to online, check server logs.
851    * @param tableName
852    * @throws IOException
853    * @since 0.90.0
854    */
855   @Override
856   public void enableTableAsync(final TableName tableName)
857   throws IOException {
858     TableName.isLegalFullyQualifiedTableName(tableName.getName());
859     executeCallable(new MasterCallable<Void>(getConnection()) {
860       @Override
861       public Void call(int callTimeout) throws ServiceException {
862         LOG.info("Started enable of " + tableName);
863         EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName);
864         master.enableTable(null,req);
865         return null;
866       }
867     });
868   }
869 
870   public void enableTableAsync(final byte[] tableName)
871   throws IOException {
872     enableTable(TableName.valueOf(tableName));
873   }
874 
875   public void enableTableAsync(final String tableName)
876   throws IOException {
877     enableTableAsync(TableName.valueOf(tableName));
878   }
879 
880   /**
881    * Enable tables matching the passed in pattern and wait on completion.
882    *
883    * Warning: Use this method carefully, there is no prompting and the effect is
884    * immediate. Consider using {@link #listTables(java.lang.String)} and
885    * {@link #enableTable(byte[])}
886    *
887    * @param regex The regular expression to match table names against
888    * @throws IOException
889    * @see #enableTables(java.util.regex.Pattern)
890    * @see #enableTable(java.lang.String)
891    */
892   @Override
893   public HTableDescriptor[] enableTables(String regex) throws IOException {
894     return enableTables(Pattern.compile(regex));
895   }
896 
897   /**
898    * Enable tables matching the passed in pattern and wait on completion.
899    *
900    * Warning: Use this method carefully, there is no prompting and the effect is
901    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
902    * {@link #enableTable(byte[])}
903    *
904    * @param pattern The pattern to match table names against
905    * @throws IOException
906    */
907   @Override
908   public HTableDescriptor[] enableTables(Pattern pattern) throws IOException {
909     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
910     for (HTableDescriptor table : listTables(pattern)) {
911       if (isTableDisabled(table.getTableName())) {
912         try {
913           enableTable(table.getTableName());
914         } catch (IOException ex) {
915           LOG.info("Failed to enable table " + table.getTableName(), ex);
916           failed.add(table);
917         }
918       }
919     }
920     return failed.toArray(new HTableDescriptor[failed.size()]);
921   }
922 
923   /**
924    * Starts the disable of a table.  If it is being served, the master
925    * will tell the servers to stop serving it.  This method returns immediately.
926    * The disable of a table can take some time if the table is large (all
927    * regions are closed as part of table disable operation).
928    * Call {@link #isTableDisabled(byte[])} to check for when disable completes.
929    * If table is taking too long to online, check server logs.
930    * @param tableName name of table
931    * @throws IOException if a remote or network exception occurs
932    * @see #isTableDisabled(byte[])
933    * @see #isTableEnabled(byte[])
934    * @since 0.90.0
935    */
936   @Override
937   public void disableTableAsync(final TableName tableName) throws IOException {
938     TableName.isLegalFullyQualifiedTableName(tableName.getName());
939     executeCallable(new MasterCallable<Void>(getConnection()) {
940       @Override
941       public Void call(int callTimeout) throws ServiceException {
942         LOG.info("Started disable of " + tableName);
943         DisableTableRequest req = RequestConverter.buildDisableTableRequest(tableName);
944         master.disableTable(null,req);
945         return null;
946       }
947     });
948   }
949 
950   public void disableTableAsync(final byte[] tableName) throws IOException {
951     disableTableAsync(TableName.valueOf(tableName));
952   }
953 
954   public void disableTableAsync(final String tableName) throws IOException {
955     disableTableAsync(TableName.valueOf(tableName));
956   }
957 
958   /**
959    * Disable table and wait on completion.  May timeout eventually.  Use
960    * {@link #disableTableAsync(byte[])} and {@link #isTableDisabled(String)}
961    * instead.
962    * The table has to be in enabled state for it to be disabled.
963    * @param tableName
964    * @throws IOException
965    * There could be couple types of IOException
966    * TableNotFoundException means the table doesn't exist.
967    * TableNotEnabledException means the table isn't in enabled state.
968    */
969   @Override
970   public void disableTable(final TableName tableName)
971   throws IOException {
972     disableTableAsync(tableName);
973     // Wait until table is disabled
974     boolean disabled = false;
975     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
976       disabled = isTableDisabled(tableName);
977       if (disabled) {
978         break;
979       }
980       long sleep = getPauseTime(tries);
981       if (LOG.isDebugEnabled()) {
982         LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
983           "disabled in " + tableName);
984       }
985       try {
986         Thread.sleep(sleep);
987       } catch (InterruptedException e) {
988         // Do this conversion rather than let it out because do not want to
989         // change the method signature.
990         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
991       }
992     }
993     if (!disabled) {
994       throw new RegionException("Retries exhausted, it took too long to wait"+
995         " for the table " + tableName + " to be disabled.");
996     }
997     LOG.info("Disabled " + tableName);
998   }
999 
1000   public void disableTable(final byte[] tableName)
1001   throws IOException {
1002     disableTable(TableName.valueOf(tableName));
1003   }
1004 
1005   public void disableTable(final String tableName)
1006   throws IOException {
1007     disableTable(TableName.valueOf(tableName));
1008   }
1009 
1010   /**
1011    * Disable tables matching the passed in pattern and wait on completion.
1012    *
1013    * Warning: Use this method carefully, there is no prompting and the effect is
1014    * immediate. Consider using {@link #listTables(java.lang.String)} and
1015    * {@link #disableTable(byte[])}
1016    *
1017    * @param regex The regular expression to match table names against
1018    * @return Table descriptors for tables that couldn't be disabled
1019    * @throws IOException
1020    * @see #disableTables(java.util.regex.Pattern)
1021    * @see #disableTable(java.lang.String)
1022    */
1023   @Override
1024   public HTableDescriptor[] disableTables(String regex) throws IOException {
1025     return disableTables(Pattern.compile(regex));
1026   }
1027 
1028   /**
1029    * Disable tables matching the passed in pattern and wait on completion.
1030    *
1031    * Warning: Use this method carefully, there is no prompting and the effect is
1032    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
1033    * {@link #disableTable(byte[])}
1034    *
1035    * @param pattern The pattern to match table names against
1036    * @return Table descriptors for tables that couldn't be disabled
1037    * @throws IOException
1038    */
1039   @Override
1040   public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
1041     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1042     for (HTableDescriptor table : listTables(pattern)) {
1043       if (isTableEnabled(table.getTableName())) {
1044         try {
1045           disableTable(table.getTableName());
1046         } catch (IOException ex) {
1047           LOG.info("Failed to disable table " + table.getTableName(), ex);
1048           failed.add(table);
1049         }
1050       }
1051     }
1052     return failed.toArray(new HTableDescriptor[failed.size()]);
1053   }
1054 
1055   /*
1056    * Checks whether table exists. If not, throws TableNotFoundException
1057    * @param tableName
1058    */
1059   private void checkTableExistence(TableName tableName) throws IOException {
1060     if (!tableExists(tableName)) {
1061       throw new TableNotFoundException(tableName);
1062     }
1063   }
1064 
1065   /**
1066    * @param tableName name of table to check
1067    * @return true if table is on-line
1068    * @throws IOException if a remote or network exception occurs
1069    */
1070   @Override
1071   public boolean isTableEnabled(TableName tableName) throws IOException {
1072     checkTableExistence(tableName);
1073     return connection.isTableEnabled(tableName);
1074   }
1075 
1076   public boolean isTableEnabled(byte[] tableName) throws IOException {
1077     return isTableEnabled(TableName.valueOf(tableName));
1078   }
1079 
1080   public boolean isTableEnabled(String tableName) throws IOException {
1081     return isTableEnabled(TableName.valueOf(tableName));
1082   }
1083 
1084 
1085 
1086   /**
1087    * @param tableName name of table to check
1088    * @return true if table is off-line
1089    * @throws IOException if a remote or network exception occurs
1090    */
1091   @Override
1092   public boolean isTableDisabled(TableName tableName) throws IOException {
1093     checkTableExistence(tableName);
1094     return connection.isTableDisabled(tableName);
1095   }
1096 
1097   public boolean isTableDisabled(byte[] tableName) throws IOException {
1098     return isTableDisabled(TableName.valueOf(tableName));
1099   }
1100 
1101   public boolean isTableDisabled(String tableName) throws IOException {
1102     return isTableDisabled(TableName.valueOf(tableName));
1103   }
1104 
1105   /**
1106    * @param tableName name of table to check
1107    * @return true if all regions of the table are available
1108    * @throws IOException if a remote or network exception occurs
1109    */
1110   @Override
1111   public boolean isTableAvailable(TableName tableName) throws IOException {
1112     return connection.isTableAvailable(tableName);
1113   }
1114 
1115   public boolean isTableAvailable(byte[] tableName) throws IOException {
1116     return isTableAvailable(TableName.valueOf(tableName));
1117   }
1118 
1119   public boolean isTableAvailable(String tableName) throws IOException {
1120     return isTableAvailable(TableName.valueOf(tableName));
1121   }
1122 
1123   /**
1124    * Use this api to check if the table has been created with the specified number of
1125    * splitkeys which was used while creating the given table.
1126    * Note : If this api is used after a table's region gets splitted, the api may return
1127    * false.
1128    * @param tableName
1129    *          name of table to check
1130    * @param splitKeys
1131    *          keys to check if the table has been created with all split keys
1132    * @throws IOException
1133    *           if a remote or network excpetion occurs
1134    */
1135   @Override
1136   public boolean isTableAvailable(TableName tableName,
1137                                   byte[][] splitKeys) throws IOException {
1138     return connection.isTableAvailable(tableName, splitKeys);
1139   }
1140 
1141   public boolean isTableAvailable(byte[] tableName,
1142                                   byte[][] splitKeys) throws IOException {
1143     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1144   }
1145 
1146   public boolean isTableAvailable(String tableName,
1147                                   byte[][] splitKeys) throws IOException {
1148     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1149   }
1150 
1151   /**
1152    * Get the status of alter command - indicates how many regions have received
1153    * the updated schema Asynchronous operation.
1154    *
1155    * @param tableName TableName instance
1156    * @return Pair indicating the number of regions updated Pair.getFirst() is the
1157    *         regions that are yet to be updated Pair.getSecond() is the total number
1158    *         of regions of the table
1159    * @throws IOException
1160    *           if a remote or network exception occurs
1161    */
1162   @Override
1163   public Pair<Integer, Integer> getAlterStatus(final TableName tableName)
1164   throws IOException {
1165     return executeCallable(new MasterCallable<Pair<Integer, Integer>>(getConnection()) {
1166       @Override
1167       public Pair<Integer, Integer> call(int callTimeout) throws ServiceException {
1168         GetSchemaAlterStatusRequest req = RequestConverter
1169             .buildGetSchemaAlterStatusRequest(tableName);
1170         GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(null, req);
1171         Pair<Integer, Integer> pair = new Pair<Integer, Integer>(Integer.valueOf(ret
1172             .getYetToUpdateRegions()), Integer.valueOf(ret.getTotalRegions()));
1173         return pair;
1174       }
1175     });
1176   }
1177 
1178   /**
1179    * Get the status of alter command - indicates how many regions have received
1180    * the updated schema Asynchronous operation.
1181    *
1182    * @param tableName
1183    *          name of the table to get the status of
1184    * @return Pair indicating the number of regions updated Pair.getFirst() is the
1185    *         regions that are yet to be updated Pair.getSecond() is the total number
1186    *         of regions of the table
1187    * @throws IOException
1188    *           if a remote or network exception occurs
1189    */
1190   @Override
1191   public Pair<Integer, Integer> getAlterStatus(final byte[] tableName)
1192    throws IOException {
1193     return getAlterStatus(TableName.valueOf(tableName));
1194   }
1195 
1196   /**
1197    * Add a column to an existing table.
1198    * Asynchronous operation.
1199    *
1200    * @param tableName name of the table to add column to
1201    * @param column column descriptor of column to be added
1202    * @throws IOException if a remote or network exception occurs
1203    */
1204   public void addColumn(final byte[] tableName, HColumnDescriptor column)
1205   throws IOException {
1206     addColumn(TableName.valueOf(tableName), column);
1207   }
1208 
1209 
1210   /**
1211    * Add a column to an existing table.
1212    * Asynchronous operation.
1213    *
1214    * @param tableName name of the table to add column to
1215    * @param column column descriptor of column to be added
1216    * @throws IOException if a remote or network exception occurs
1217    */
1218   public void addColumn(final String tableName, HColumnDescriptor column)
1219   throws IOException {
1220     addColumn(TableName.valueOf(tableName), column);
1221   }
1222 
1223   /**
1224    * Add a column to an existing table.
1225    * Asynchronous operation.
1226    *
1227    * @param tableName name of the table to add column to
1228    * @param column column descriptor of column to be added
1229    * @throws IOException if a remote or network exception occurs
1230    */
1231   @Override
1232   public void addColumn(final TableName tableName, final HColumnDescriptor column)
1233   throws IOException {
1234     executeCallable(new MasterCallable<Void>(getConnection()) {
1235       @Override
1236       public Void call(int callTimeout) throws ServiceException {
1237         AddColumnRequest req = RequestConverter.buildAddColumnRequest(tableName, column);
1238         master.addColumn(null,req);
1239         return null;
1240       }
1241     });
1242   }
1243 
1244   /**
1245    * Delete a column from a table.
1246    * Asynchronous operation.
1247    *
1248    * @param tableName name of table
1249    * @param columnName name of column to be deleted
1250    * @throws IOException if a remote or network exception occurs
1251    */
1252   public void deleteColumn(final byte[] tableName, final String columnName)
1253   throws IOException {
1254     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1255   }
1256 
1257   /**
1258    * Delete a column from a table.
1259    * Asynchronous operation.
1260    *
1261    * @param tableName name of table
1262    * @param columnName name of column to be deleted
1263    * @throws IOException if a remote or network exception occurs
1264    */
1265   public void deleteColumn(final String tableName, final String columnName)
1266   throws IOException {
1267     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1268   }
1269 
1270   /**
1271    * Delete a column from a table.
1272    * Asynchronous operation.
1273    *
1274    * @param tableName name of table
1275    * @param columnName name of column to be deleted
1276    * @throws IOException if a remote or network exception occurs
1277    */
1278   @Override
1279   public void deleteColumn(final TableName tableName, final byte [] columnName)
1280   throws IOException {
1281     executeCallable(new MasterCallable<Void>(getConnection()) {
1282       @Override
1283       public Void call(int callTimeout) throws ServiceException {
1284         DeleteColumnRequest req = RequestConverter.buildDeleteColumnRequest(tableName, columnName);
1285         master.deleteColumn(null,req);
1286         return null;
1287       }
1288     });
1289   }
1290 
1291   /**
1292    * Modify an existing column family on a table.
1293    * Asynchronous operation.
1294    *
1295    * @param tableName name of table
1296    * @param descriptor new column descriptor to use
1297    * @throws IOException if a remote or network exception occurs
1298    */
1299   public void modifyColumn(final String tableName, HColumnDescriptor descriptor)
1300   throws IOException {
1301     modifyColumn(TableName.valueOf(tableName), descriptor);
1302   }
1303 
1304   /**
1305    * Modify an existing column family on a table.
1306    * Asynchronous operation.
1307    *
1308    * @param tableName name of table
1309    * @param descriptor new column descriptor to use
1310    * @throws IOException if a remote or network exception occurs
1311    */
1312   public void modifyColumn(final byte[] tableName, HColumnDescriptor descriptor)
1313   throws IOException {
1314     modifyColumn(TableName.valueOf(tableName), descriptor);
1315   }
1316 
1317 
1318 
1319   /**
1320    * Modify an existing column family on a table.
1321    * Asynchronous operation.
1322    *
1323    * @param tableName name of table
1324    * @param descriptor new column descriptor to use
1325    * @throws IOException if a remote or network exception occurs
1326    */
1327   @Override
1328   public void modifyColumn(final TableName tableName, final HColumnDescriptor descriptor)
1329   throws IOException {
1330     executeCallable(new MasterCallable<Void>(getConnection()) {
1331       @Override
1332       public Void call(int callTimeout) throws ServiceException {
1333         ModifyColumnRequest req = RequestConverter.buildModifyColumnRequest(tableName, descriptor);
1334         master.modifyColumn(null,req);
1335         return null;
1336       }
1337     });
1338   }
1339 
1340   /**
1341    * Close a region. For expert-admins.  Runs close on the regionserver.  The
1342    * master will not be informed of the close.
1343    * @param regionname region name to close
1344    * @param serverName If supplied, we'll use this location rather than
1345    * the one currently in <code>hbase:meta</code>
1346    * @throws IOException if a remote or network exception occurs
1347    */
1348   @Override
1349   public void closeRegion(final String regionname, final String serverName)
1350   throws IOException {
1351     closeRegion(Bytes.toBytes(regionname), serverName);
1352   }
1353 
1354   /**
1355    * Close a region.  For expert-admins  Runs close on the regionserver.  The
1356    * master will not be informed of the close.
1357    * @param regionname region name to close
1358    * @param serverName The servername of the regionserver.  If passed null we
1359    * will use servername found in the hbase:meta table. A server name
1360    * is made of host, port and startcode.  Here is an example:
1361    * <code> host187.example.com,60020,1289493121758</code>
1362    * @throws IOException if a remote or network exception occurs
1363    */
1364   @Override
1365   public void closeRegion(final byte [] regionname, final String serverName)
1366       throws IOException {
1367     if (serverName != null) {
1368       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1369       if (pair == null || pair.getFirst() == null) {
1370         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1371       } else {
1372         closeRegion(ServerName.valueOf(serverName), pair.getFirst());
1373       }
1374     } else {
1375       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1376       if (pair == null) {
1377         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1378       } else if (pair.getSecond() == null) {
1379         throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
1380       } else {
1381         closeRegion(pair.getSecond(), pair.getFirst());
1382       }
1383     }
1384   }
1385 
1386   /**
1387    * For expert-admins. Runs close on the regionserver. Closes a region based on
1388    * the encoded region name. The region server name is mandatory. If the
1389    * servername is provided then based on the online regions in the specified
1390    * regionserver the specified region will be closed. The master will not be
1391    * informed of the close. Note that the regionname is the encoded regionname.
1392    *
1393    * @param encodedRegionName
1394    *          The encoded region name; i.e. the hash that makes up the region
1395    *          name suffix: e.g. if regionname is
1396    *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>
1397    *          , then the encoded region name is:
1398    *          <code>527db22f95c8a9e0116f0cc13c680396</code>.
1399    * @param serverName
1400    *          The servername of the regionserver. A server name is made of host,
1401    *          port and startcode. This is mandatory. Here is an example:
1402    *          <code> host187.example.com,60020,1289493121758</code>
1403    * @return true if the region was closed, false if not.
1404    * @throws IOException
1405    *           if a remote or network exception occurs
1406    */
1407   @Override
1408   public boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
1409       final String serverName) throws IOException {
1410     if (null == serverName || ("").equals(serverName.trim())) {
1411       throw new IllegalArgumentException(
1412           "The servername cannot be null or empty.");
1413     }
1414     ServerName sn = ServerName.valueOf(serverName);
1415     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1416     // Close the region without updating zk state.
1417     CloseRegionRequest request =
1418       RequestConverter.buildCloseRegionRequest(sn, encodedRegionName);
1419     try {
1420       CloseRegionResponse response = admin.closeRegion(null, request);
1421       boolean isRegionClosed = response.getClosed();
1422       if (false == isRegionClosed) {
1423         LOG.error("Not able to close the region " + encodedRegionName + ".");
1424       }
1425       return isRegionClosed;
1426     } catch (ServiceException se) {
1427       throw ProtobufUtil.getRemoteException(se);
1428     }
1429   }
1430 
1431   /**
1432    * Close a region.  For expert-admins  Runs close on the regionserver.  The
1433    * master will not be informed of the close.
1434    * @param sn
1435    * @param hri
1436    * @throws IOException
1437    */
1438   @Override
1439   public void closeRegion(final ServerName sn, final HRegionInfo hri)
1440   throws IOException {
1441     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1442     // Close the region without updating zk state.
1443     ProtobufUtil.closeRegion(admin, sn, hri.getRegionName());
1444   }
1445 
1446   /**
1447    * Get all the online regions on a region server.
1448    */
1449   @Override
1450   public List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException {
1451     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1452     return ProtobufUtil.getOnlineRegions(admin);
1453   }
1454 
1455   /**
1456    * {@inheritDoc}
1457    */
1458   @Override
1459   public void flush(final TableName tableName) throws IOException, InterruptedException {
1460     checkTableExists(tableName);
1461     if (isTableDisabled(tableName)) {
1462       LOG.info("Table is disabled: " + tableName.getNameAsString());
1463       return;
1464     }
1465     execProcedure("flush-table-proc", tableName.getNameAsString(),
1466       new HashMap<String, String>());
1467   }
1468 
1469   /**
1470    * {@inheritDoc}
1471    */
1472   @Override
1473   public void flushRegion(final byte[] regionName) throws IOException, InterruptedException {
1474     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
1475     if (regionServerPair == null) {
1476       throw new IllegalArgumentException("Unknown regionname: " + Bytes.toStringBinary(regionName));
1477     }
1478     if (regionServerPair.getSecond() == null) {
1479       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1480     }
1481     flush(regionServerPair.getSecond(), regionServerPair.getFirst());
1482   }
1483 
1484   /**
1485    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or {@link #flushRegion
1486    * (byte[])} instead.
1487    */
1488   @Deprecated
1489   public void flush(final String tableNameOrRegionName)
1490   throws IOException, InterruptedException {
1491     flush(Bytes.toBytes(tableNameOrRegionName));
1492   }
1493 
1494   /**
1495    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or {@link #flushRegion
1496    * (byte[])} instead.
1497    */
1498   @Deprecated
1499   public void flush(final byte[] tableNameOrRegionName)
1500   throws IOException, InterruptedException {
1501     try {
1502       flushRegion(tableNameOrRegionName);
1503     } catch (IllegalArgumentException e) {
1504       // Unknown region.  Try table.
1505       flush(TableName.valueOf(tableNameOrRegionName));
1506     }
1507   }
1508 
1509   private void flush(final ServerName sn, final HRegionInfo hri)
1510   throws IOException {
1511     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1512     FlushRegionRequest request =
1513       RequestConverter.buildFlushRegionRequest(hri.getRegionName());
1514     try {
1515       admin.flushRegion(null, request);
1516     } catch (ServiceException se) {
1517       throw ProtobufUtil.getRemoteException(se);
1518     }
1519   }
1520 
1521   /**
1522    * {@inheritDoc}
1523    */
1524   @Override
1525   public void compact(final TableName tableName)
1526     throws IOException, InterruptedException {
1527     compact(tableName, null, false);
1528   }
1529 
1530   /**
1531    * {@inheritDoc}
1532    */
1533   @Override
1534   public void compactRegion(final byte[] regionName)
1535     throws IOException, InterruptedException {
1536     compactRegion(regionName, null, false);
1537   }
1538 
1539   /**
1540    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1541    * (byte[])} instead.
1542    */
1543   @Deprecated
1544   public void compact(final String tableNameOrRegionName)
1545   throws IOException, InterruptedException {
1546     compact(Bytes.toBytes(tableNameOrRegionName));
1547   }
1548 
1549   /**
1550    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1551    * (byte[])} instead.
1552    */
1553   @Deprecated
1554   public void compact(final byte[] tableNameOrRegionName)
1555   throws IOException, InterruptedException {
1556     try {
1557       compactRegion(tableNameOrRegionName, null, false);
1558     } catch (IllegalArgumentException e) {
1559       compact(TableName.valueOf(tableNameOrRegionName), null, false);
1560     }
1561   }
1562 
1563   /**
1564    * {@inheritDoc}
1565    */
1566   @Override
1567   public void compact(final TableName tableName, final byte[] columnFamily)
1568     throws IOException, InterruptedException {
1569     compact(tableName, columnFamily, false);
1570   }
1571 
1572   /**
1573    * {@inheritDoc}
1574    */
1575   @Override
1576   public void compactRegion(final byte[] regionName, final byte[] columnFamily)
1577     throws IOException, InterruptedException {
1578     compactRegion(regionName, columnFamily, false);
1579   }
1580 
1581   /**
1582    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1583    * (byte[], byte[])} instead.
1584    */
1585   @Deprecated
1586   public void compact(String tableOrRegionName, String columnFamily)
1587     throws IOException,  InterruptedException {
1588     compact(Bytes.toBytes(tableOrRegionName), Bytes.toBytes(columnFamily));
1589   }
1590 
1591   /**
1592    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1593    * (byte[], byte[])} instead.
1594    */
1595   @Deprecated
1596   public void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
1597   throws IOException, InterruptedException {
1598     try {
1599       compactRegion(tableNameOrRegionName, columnFamily, false);
1600     } catch (IllegalArgumentException e) {
1601       // Bad region, try table
1602       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, false);
1603     }
1604   }
1605 
1606   /**
1607    * {@inheritDoc}
1608    */
1609   @Override
1610   public void compactRegionServer(final ServerName sn, boolean major)
1611   throws IOException, InterruptedException {
1612     for (HRegionInfo region : getOnlineRegions(sn)) {
1613       compact(sn, region, major, null);
1614     }
1615   }
1616 
1617   /**
1618    * {@inheritDoc}
1619    */
1620   @Override
1621   public void majorCompact(final TableName tableName)
1622   throws IOException, InterruptedException {
1623     compact(tableName, null, true);
1624   }
1625 
1626   /**
1627    * {@inheritDoc}
1628    */
1629   @Override
1630   public void majorCompactRegion(final byte[] regionName)
1631   throws IOException, InterruptedException {
1632     compactRegion(regionName, null, true);
1633   }
1634 
1635   /**
1636    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
1637    * #majorCompactRegion(byte[])} instead.
1638    */
1639   @Deprecated
1640   public void majorCompact(final String tableNameOrRegionName)
1641   throws IOException, InterruptedException {
1642     majorCompact(Bytes.toBytes(tableNameOrRegionName));
1643   }
1644 
1645   /**
1646    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
1647    * #majorCompactRegion(byte[])} instead.
1648    */
1649   @Deprecated
1650   public void majorCompact(final byte[] tableNameOrRegionName)
1651   throws IOException, InterruptedException {
1652     try {
1653       compactRegion(tableNameOrRegionName, null, true);
1654     } catch (IllegalArgumentException e) {
1655       // Invalid region, try table
1656       compact(TableName.valueOf(tableNameOrRegionName), null, true);
1657     }
1658   }
1659 
1660   /**
1661    * {@inheritDoc}
1662    */
1663   @Override
1664   public void majorCompact(final TableName tableName, final byte[] columnFamily)
1665   throws IOException, InterruptedException {
1666     compact(tableName, columnFamily, true);
1667   }
1668 
1669   /**
1670    * {@inheritDoc}
1671    */
1672   @Override
1673   public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily)
1674   throws IOException, InterruptedException {
1675     compactRegion(regionName, columnFamily, true);
1676   }
1677 
1678   /**
1679    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
1680    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
1681    */
1682   @Deprecated
1683   public void majorCompact(final String tableNameOrRegionName, final String columnFamily)
1684   throws IOException, InterruptedException {
1685     majorCompact(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(columnFamily));
1686   }
1687 
1688   /**
1689    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
1690    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
1691    */
1692   @Deprecated
1693   public void majorCompact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
1694   throws IOException, InterruptedException {
1695     try {
1696       compactRegion(tableNameOrRegionName, columnFamily, true);
1697     } catch (IllegalArgumentException e) {
1698       // Invalid region, try table
1699       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, true);
1700     }
1701   }
1702 
1703   /**
1704    * Compact a table.
1705    * Asynchronous operation.
1706    *
1707    * @param tableName table or region to compact
1708    * @param columnFamily column family within a table or region
1709    * @param major True if we are to do a major compaction.
1710    * @throws IOException if a remote or network exception occurs
1711    * @throws InterruptedException
1712    */
1713   private void compact(final TableName tableName, final byte[] columnFamily,final boolean major)
1714   throws IOException, InterruptedException {
1715     ZooKeeperWatcher zookeeper = null;
1716     try {
1717       checkTableExists(tableName);
1718       zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
1719           new ThrowableAbortable());
1720       List<Pair<HRegionInfo, ServerName>> pairs;
1721       if (TableName.META_TABLE_NAME.equals(tableName)) {
1722         pairs = new MetaTableLocator().getMetaRegionsAndLocations(zookeeper);
1723       } else {
1724         pairs = MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
1725       }
1726       for (Pair<HRegionInfo, ServerName> pair: pairs) {
1727         if (pair.getFirst().isOffline()) continue;
1728         if (pair.getSecond() == null) continue;
1729         try {
1730           compact(pair.getSecond(), pair.getFirst(), major, columnFamily);
1731         } catch (NotServingRegionException e) {
1732           if (LOG.isDebugEnabled()) {
1733             LOG.debug("Trying to" + (major ? " major" : "") + " compact " +
1734               pair.getFirst() + ": " +
1735               StringUtils.stringifyException(e));
1736           }
1737         }
1738       }
1739     } finally {
1740       if (zookeeper != null) {
1741         zookeeper.close();
1742       }
1743     }
1744   }
1745 
1746   /**
1747    * Compact an individual region.
1748    * Asynchronous operation.
1749    *
1750    * @param regionName region to compact
1751    * @param columnFamily column family within a table or region
1752    * @param major True if we are to do a major compaction.
1753    * @throws IOException if a remote or network exception occurs
1754    * @throws InterruptedException
1755    */
1756   private void compactRegion(final byte[] regionName, final byte[] columnFamily,final boolean major)
1757   throws IOException, InterruptedException {
1758     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
1759     if (regionServerPair == null) {
1760       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
1761     }
1762     if (regionServerPair.getSecond() == null) {
1763       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1764     }
1765     compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
1766   }
1767 
1768   private void compact(final ServerName sn, final HRegionInfo hri,
1769       final boolean major, final byte [] family)
1770   throws IOException {
1771     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1772     CompactRegionRequest request =
1773       RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family);
1774     try {
1775       admin.compactRegion(null, request);
1776     } catch (ServiceException se) {
1777       throw ProtobufUtil.getRemoteException(se);
1778     }
1779   }
1780 
1781   /**
1782    * Move the region <code>r</code> to <code>dest</code>.
1783    * @param encodedRegionName The encoded region name; i.e. the hash that makes
1784    * up the region name suffix: e.g. if regionname is
1785    * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
1786    * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
1787    * @param destServerName The servername of the destination regionserver.  If
1788    * passed the empty byte array we'll assign to a random server.  A server name
1789    * is made of host, port and startcode.  Here is an example:
1790    * <code> host187.example.com,60020,1289493121758</code>
1791    * @throws UnknownRegionException Thrown if we can't find a region named
1792    * <code>encodedRegionName</code>
1793    * @throws ZooKeeperConnectionException
1794    * @throws MasterNotRunningException
1795    */
1796   @Override
1797   public void move(final byte [] encodedRegionName, final byte [] destServerName)
1798   throws HBaseIOException, MasterNotRunningException, ZooKeeperConnectionException {
1799     MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
1800     try {
1801       MoveRegionRequest request =
1802         RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName);
1803       stub.moveRegion(null, request);
1804     } catch (ServiceException se) {
1805       IOException ioe = ProtobufUtil.getRemoteException(se);
1806       if (ioe instanceof HBaseIOException) {
1807         throw (HBaseIOException)ioe;
1808       }
1809       LOG.error("Unexpected exception: " + se + " from calling HMaster.moveRegion");
1810     } catch (DeserializationException de) {
1811       LOG.error("Could not parse destination server name: " + de);
1812     } finally {
1813       stub.close();
1814     }
1815   }
1816 
1817   /**
1818    * @param regionName
1819    *          Region name to assign.
1820    * @throws MasterNotRunningException
1821    * @throws ZooKeeperConnectionException
1822    * @throws IOException
1823    */
1824   @Override
1825   public void assign(final byte[] regionName) throws MasterNotRunningException,
1826       ZooKeeperConnectionException, IOException {
1827     final byte[] toBeAssigned = getRegionName(regionName);
1828     executeCallable(new MasterCallable<Void>(getConnection()) {
1829       @Override
1830       public Void call(int callTimeout) throws ServiceException {
1831         AssignRegionRequest request =
1832           RequestConverter.buildAssignRegionRequest(toBeAssigned);
1833         master.assignRegion(null,request);
1834         return null;
1835       }
1836     });
1837   }
1838 
1839   /**
1840    * Unassign a region from current hosting regionserver.  Region will then be
1841    * assigned to a regionserver chosen at random.  Region could be reassigned
1842    * back to the same server.  Use {@link #move(byte[], byte[])} if you want
1843    * to control the region movement.
1844    * @param regionName Region to unassign. Will clear any existing RegionPlan
1845    * if one found.
1846    * @param force If true, force unassign (Will remove region from
1847    * regions-in-transition too if present. If results in double assignment
1848    * use hbck -fix to resolve. To be used by experts).
1849    * @throws MasterNotRunningException
1850    * @throws ZooKeeperConnectionException
1851    * @throws IOException
1852    */
1853   @Override
1854   public void unassign(final byte [] regionName, final boolean force)
1855   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
1856     final byte[] toBeUnassigned = getRegionName(regionName);
1857     executeCallable(new MasterCallable<Void>(getConnection()) {
1858       @Override
1859       public Void call(int callTimeout) throws ServiceException {
1860         UnassignRegionRequest request =
1861           RequestConverter.buildUnassignRegionRequest(toBeUnassigned, force);
1862         master.unassignRegion(null, request);
1863         return null;
1864       }
1865     });
1866   }
1867 
1868   /**
1869    * Offline specified region from master's in-memory state. It will not attempt to reassign the
1870    * region as in unassign. This API can be used when a region not served by any region server and
1871    * still online as per Master's in memory state. If this API is incorrectly used on active region
1872    * then master will loose track of that region.
1873    *
1874    * This is a special method that should be used by experts or hbck.
1875    *
1876    * @param regionName
1877    *          Region to offline.
1878    * @throws IOException
1879    */
1880   @Override
1881   public void offline(final byte [] regionName)
1882   throws IOException {
1883     MasterKeepAliveConnection master = connection.getKeepAliveMasterService();
1884     try {
1885       master.offlineRegion(null,RequestConverter.buildOfflineRegionRequest(regionName));
1886     } catch (ServiceException se) {
1887       throw ProtobufUtil.getRemoteException(se);
1888     } finally {
1889       master.close();
1890     }
1891   }
1892 
1893   /**
1894    * Turn the load balancer on or off.
1895    * @param on If true, enable balancer. If false, disable balancer.
1896    * @param synchronous If true, it waits until current balance() call, if outstanding, to return.
1897    * @return Previous balancer value
1898    */
1899   @Override
1900   public boolean setBalancerRunning(final boolean on, final boolean synchronous)
1901   throws MasterNotRunningException, ZooKeeperConnectionException {
1902     MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
1903     try {
1904       SetBalancerRunningRequest req =
1905         RequestConverter.buildSetBalancerRunningRequest(on, synchronous);
1906       return stub.setBalancerRunning(null, req).getPrevBalanceValue();
1907     } catch (ServiceException se) {
1908       IOException ioe = ProtobufUtil.getRemoteException(se);
1909       if (ioe instanceof MasterNotRunningException) {
1910         throw (MasterNotRunningException)ioe;
1911       }
1912       if (ioe instanceof ZooKeeperConnectionException) {
1913         throw (ZooKeeperConnectionException)ioe;
1914       }
1915 
1916       // Throwing MasterNotRunningException even though not really valid in order to not
1917       // break interface by adding additional exception type.
1918       throw new MasterNotRunningException("Unexpected exception when calling balanceSwitch",se);
1919     } finally {
1920       stub.close();
1921     }
1922   }
1923 
1924   /**
1925    * Invoke the balancer.  Will run the balancer and if regions to move, it will
1926    * go ahead and do the reassignments.  Can NOT run for various reasons.  Check
1927    * logs.
1928    * @return True if balancer ran, false otherwise.
1929    */
1930   @Override
1931   public boolean balancer()
1932   throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException {
1933     MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
1934     try {
1935       return stub.balance(null, RequestConverter.buildBalanceRequest()).getBalancerRan();
1936     } finally {
1937       stub.close();
1938     }
1939   }
1940 
1941   /**
1942    * Enable/Disable the catalog janitor
1943    * @param enable if true enables the catalog janitor
1944    * @return the previous state
1945    * @throws ServiceException
1946    * @throws MasterNotRunningException
1947    */
1948   @Override
1949   public boolean enableCatalogJanitor(boolean enable)
1950       throws ServiceException, MasterNotRunningException {
1951     MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
1952     try {
1953       return stub.enableCatalogJanitor(null,
1954         RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
1955     } finally {
1956       stub.close();
1957     }
1958   }
1959 
1960   /**
1961    * Ask for a scan of the catalog table
1962    * @return the number of entries cleaned
1963    * @throws ServiceException
1964    * @throws MasterNotRunningException
1965    */
1966   @Override
1967   public int runCatalogScan() throws ServiceException, MasterNotRunningException {
1968     MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
1969     try {
1970       return stub.runCatalogScan(null,
1971         RequestConverter.buildCatalogScanRequest()).getScanResult();
1972     } finally {
1973       stub.close();
1974     }
1975   }
1976 
1977   /**
1978    * Query on the catalog janitor state (Enabled/Disabled?)
1979    * @throws ServiceException
1980    * @throws org.apache.hadoop.hbase.MasterNotRunningException
1981    */
1982   @Override
1983   public boolean isCatalogJanitorEnabled() throws ServiceException, MasterNotRunningException {
1984     MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
1985     try {
1986       return stub.isCatalogJanitorEnabled(null,
1987         RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
1988     } finally {
1989       stub.close();
1990     }
1991   }
1992 
1993   /**
1994    * Merge two regions. Asynchronous operation.
1995    * @param encodedNameOfRegionA encoded name of region a
1996    * @param encodedNameOfRegionB encoded name of region b
1997    * @param forcible true if do a compulsory merge, otherwise we will only merge
1998    *          two adjacent regions
1999    * @throws IOException
2000    */
2001   @Override
2002   public void mergeRegions(final byte[] encodedNameOfRegionA,
2003       final byte[] encodedNameOfRegionB, final boolean forcible)
2004       throws IOException {
2005     MasterKeepAliveConnection master = connection
2006         .getKeepAliveMasterService();
2007     try {
2008       DispatchMergingRegionsRequest request = RequestConverter
2009           .buildDispatchMergingRegionsRequest(encodedNameOfRegionA,
2010               encodedNameOfRegionB, forcible);
2011       master.dispatchMergingRegions(null, request);
2012     } catch (ServiceException se) {
2013       IOException ioe = ProtobufUtil.getRemoteException(se);
2014       if (ioe instanceof UnknownRegionException) {
2015         throw (UnknownRegionException) ioe;
2016       }
2017       if (ioe instanceof MergeRegionException) {
2018         throw (MergeRegionException) ioe;
2019       }
2020       LOG.error("Unexpected exception: " + se
2021           + " from calling HMaster.dispatchMergingRegions");
2022     } catch (DeserializationException de) {
2023       LOG.error("Could not parse destination server name: " + de);
2024     } finally {
2025       master.close();
2026     }
2027   }
2028 
2029   /**
2030    * {@inheritDoc}
2031    */
2032   @Override
2033   public void split(final TableName tableName)
2034     throws IOException, InterruptedException {
2035     split(tableName, null);
2036   }
2037 
2038   /**
2039    * {@inheritDoc}
2040    */
2041   @Override
2042   public void splitRegion(final byte[] regionName)
2043     throws IOException, InterruptedException {
2044     splitRegion(regionName, null);
2045   }
2046 
2047   /**
2048    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
2049    * (byte[])} instead.
2050    */
2051   @Deprecated
2052   public void split(final String tableNameOrRegionName)
2053   throws IOException, InterruptedException {
2054     split(Bytes.toBytes(tableNameOrRegionName));
2055   }
2056 
2057   /**
2058    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
2059    * (byte[])} instead.
2060    */
2061   @Deprecated
2062   public void split(final byte[] tableNameOrRegionName)
2063   throws IOException, InterruptedException {
2064     split(tableNameOrRegionName, null);
2065   }
2066 
2067   /**
2068    * {@inheritDoc}
2069    */
2070   @Override
2071   public void split(final TableName tableName, final byte [] splitPoint)
2072   throws IOException, InterruptedException {
2073     ZooKeeperWatcher zookeeper = null;
2074     try {
2075       checkTableExists(tableName);
2076       zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2077         new ThrowableAbortable());
2078       List<Pair<HRegionInfo, ServerName>> pairs;
2079       if (TableName.META_TABLE_NAME.equals(tableName)) {
2080         pairs = new MetaTableLocator().getMetaRegionsAndLocations(zookeeper);
2081       } else {
2082         pairs = MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
2083       }
2084       for (Pair<HRegionInfo, ServerName> pair: pairs) {
2085         // May not be a server for a particular row
2086         if (pair.getSecond() == null) continue;
2087         HRegionInfo r = pair.getFirst();
2088         // check for parents
2089         if (r.isSplitParent()) continue;
2090         // if a split point given, only split that particular region
2091         if (splitPoint != null && !r.containsRow(splitPoint)) continue;
2092         // call out to region server to do split now
2093         split(pair.getSecond(), pair.getFirst(), splitPoint);
2094       }
2095     } finally {
2096       if (zookeeper != null) {
2097         zookeeper.close();
2098       }
2099     }
2100   }
2101 
2102   /**
2103    * {@inheritDoc}
2104    */
2105   @Override
2106   public void splitRegion(final byte[] regionName, final byte [] splitPoint)
2107   throws IOException, InterruptedException {
2108     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2109     if (regionServerPair == null) {
2110       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2111     }
2112     if (regionServerPair.getSecond() == null) {
2113       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2114     }
2115     split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
2116   }
2117 
2118   /**
2119    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2120    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2121    */
2122   @Deprecated
2123   public void split(final String tableNameOrRegionName,
2124     final String splitPoint) throws IOException, InterruptedException {
2125     split(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(splitPoint));
2126   }
2127 
2128   /**
2129    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2130    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2131    */
2132   @Deprecated
2133   public void split(final byte[] tableNameOrRegionName,
2134       final byte [] splitPoint) throws IOException, InterruptedException {
2135     try {
2136       splitRegion(tableNameOrRegionName, splitPoint);
2137     } catch (IllegalArgumentException e) {
2138       // Bad region, try table
2139       split(TableName.valueOf(tableNameOrRegionName), splitPoint);
2140     }
2141   }
2142 
2143   private void split(final ServerName sn, final HRegionInfo hri,
2144       byte[] splitPoint) throws IOException {
2145     if (hri.getStartKey() != null && splitPoint != null &&
2146          Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
2147        throw new IOException("should not give a splitkey which equals to startkey!");
2148     }
2149     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2150     ProtobufUtil.split(admin, hri, splitPoint);
2151   }
2152 
2153   /**
2154    * Modify an existing table, more IRB friendly version.
2155    * Asynchronous operation.  This means that it may be a while before your
2156    * schema change is updated across all of the table.
2157    *
2158    * @param tableName name of table.
2159    * @param htd modified description of the table
2160    * @throws IOException if a remote or network exception occurs
2161    */
2162   @Override
2163   public void modifyTable(final TableName tableName, final HTableDescriptor htd)
2164   throws IOException {
2165     if (!tableName.equals(htd.getTableName())) {
2166       throw new IllegalArgumentException("the specified table name '" + tableName +
2167         "' doesn't match with the HTD one: " + htd.getTableName());
2168     }
2169 
2170     executeCallable(new MasterCallable<Void>(getConnection()) {
2171       @Override
2172       public Void call(int callTimeout) throws ServiceException {
2173         ModifyTableRequest request = RequestConverter.buildModifyTableRequest(tableName, htd);
2174         master.modifyTable(null, request);
2175         return null;
2176       }
2177     });
2178   }
2179 
  /**
   * Modify an existing table; byte[] table name flavor of
   * {@link #modifyTable(org.apache.hadoop.hbase.TableName, HTableDescriptor)}.
   * @param tableName name of table.
   * @param htd modified description of the table
   * @throws IOException if a remote or network exception occurs
   */
  public void modifyTable(final byte[] tableName, final HTableDescriptor htd)
  throws IOException {
    modifyTable(TableName.valueOf(tableName), htd);
  }
2184 
  /**
   * Modify an existing table; String table name flavor of
   * {@link #modifyTable(org.apache.hadoop.hbase.TableName, HTableDescriptor)}.
   * @param tableName name of table.
   * @param htd modified description of the table
   * @throws IOException if a remote or network exception occurs
   */
  public void modifyTable(final String tableName, final HTableDescriptor htd)
  throws IOException {
    modifyTable(TableName.valueOf(tableName), htd);
  }
2189 
2190   /**
2191    * @param regionName Name of a region.
2192    * @return a pair of HRegionInfo and ServerName if <code>regionName</code> is
2193    *  a verified region name (we call {@link
2194    *  MetaTableAccessor#getRegion(HConnection, byte[])}
2195    *  else null.
2196    * Throw IllegalArgumentException if <code>regionName</code> is null.
2197    * @throws IOException
2198    */
2199   Pair<HRegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException {
2200     if (regionName == null) {
2201       throw new IllegalArgumentException("Pass a table name or region name");
2202     }
2203     Pair<HRegionInfo, ServerName> pair =
2204       MetaTableAccessor.getRegion(connection, regionName);
2205     if (pair == null) {
2206       final AtomicReference<Pair<HRegionInfo, ServerName>> result =
2207         new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
2208       final String encodedName = Bytes.toString(regionName);
2209       MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
2210         @Override
2211         public boolean processRow(Result data) throws IOException {
2212           HRegionInfo info = HRegionInfo.getHRegionInfo(data);
2213           if (info == null) {
2214             LOG.warn("No serialized HRegionInfo in " + data);
2215             return true;
2216           }
2217           if (!encodedName.equals(info.getEncodedName())) return true;
2218           ServerName sn = HRegionInfo.getServerName(data);
2219           result.set(new Pair<HRegionInfo, ServerName>(info, sn));
2220           return false; // found the region, stop
2221         }
2222       };
2223 
2224       MetaScanner.metaScan(conf, connection, visitor, null);
2225       pair = result.get();
2226     }
2227     return pair;
2228   }
2229 
2230   /**
2231    * If the input is a region name, it is returned as is. If it's an
2232    * encoded region name, the corresponding region is found from meta
2233    * and its region name is returned. If we can't find any region in
2234    * meta matching the input as either region name or encoded region
2235    * name, the input is returned as is. We don't throw unknown
2236    * region exception.
2237    */
2238   private byte[] getRegionName(
2239       final byte[] regionNameOrEncodedRegionName) throws IOException {
2240     if (Bytes.equals(regionNameOrEncodedRegionName,
2241         HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
2242           || Bytes.equals(regionNameOrEncodedRegionName,
2243             HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) {
2244       return HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
2245     }
2246     byte[] tmp = regionNameOrEncodedRegionName;
2247     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName);
2248     if (regionServerPair != null && regionServerPair.getFirst() != null) {
2249       tmp = regionServerPair.getFirst().getRegionName();
2250     }
2251     return tmp;
2252   }
2253 
2254   /**
2255    * Check if table exists or not
2256    * @param tableName Name of a table.
2257    * @return tableName instance
2258    * @throws IOException if a remote or network exception occurs.
2259    * @throws TableNotFoundException if table does not exist.
2260    */
2261   private TableName checkTableExists(final TableName tableName)
2262       throws IOException {
2263     if (!MetaTableAccessor.tableExists(connection, tableName)) {
2264       throw new TableNotFoundException(tableName);
2265     }
2266     return tableName;
2267   }
2268 
2269   /**
2270    * Shuts down the HBase cluster
2271    * @throws IOException if a remote or network exception occurs
2272    */
2273   @Override
2274   public synchronized void shutdown() throws IOException {
2275     executeCallable(new MasterCallable<Void>(getConnection()) {
2276       @Override
2277       public Void call(int callTimeout) throws ServiceException {
2278         master.shutdown(null,ShutdownRequest.newBuilder().build());
2279         return null;
2280       }
2281     });
2282   }
2283 
2284   /**
2285    * Shuts down the current HBase master only.
2286    * Does not shutdown the cluster.
2287    * @see #shutdown()
2288    * @throws IOException if a remote or network exception occurs
2289    */
2290   @Override
2291   public synchronized void stopMaster() throws IOException {
2292     executeCallable(new MasterCallable<Void>(getConnection()) {
2293       @Override
2294       public Void call(int callTimeout) throws ServiceException {
2295         master.stopMaster(null, StopMasterRequest.newBuilder().build());
2296         return null;
2297       }
2298     });
2299   }
2300 
2301   /**
2302    * Stop the designated regionserver
2303    * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
2304    * <code>example.org:1234</code>
2305    * @throws IOException if a remote or network exception occurs
2306    */
2307   @Override
2308   public synchronized void stopRegionServer(final String hostnamePort)
2309   throws IOException {
2310     String hostname = Addressing.parseHostname(hostnamePort);
2311     int port = Addressing.parsePort(hostnamePort);
2312     AdminService.BlockingInterface admin =
2313       this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
2314     StopServerRequest request = RequestConverter.buildStopServerRequest(
2315       "Called by admin client " + this.connection.toString());
2316     try {
2317       admin.stopServer(null, request);
2318     } catch (ServiceException se) {
2319       throw ProtobufUtil.getRemoteException(se);
2320     }
2321   }
2322 
2323 
2324   /**
2325    * @return cluster status
2326    * @throws IOException if a remote or network exception occurs
2327    */
2328   @Override
2329   public ClusterStatus getClusterStatus() throws IOException {
2330     return executeCallable(new MasterCallable<ClusterStatus>(getConnection()) {
2331       @Override
2332       public ClusterStatus call(int callTimeout) throws ServiceException {
2333         GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest();
2334         return ClusterStatus.convert(master.getClusterStatus(null, req).getClusterStatus());
2335       }
2336     });
2337   }
2338 
2339   /**
2340    * @return Configuration used by the instance.
2341    */
2342   @Override
2343   public Configuration getConfiguration() {
2344     return this.conf;
2345   }
2346 
2347   /**
2348    * Create a new namespace
2349    * @param descriptor descriptor which describes the new namespace
2350    * @throws IOException
2351    */
2352   @Override
2353   public void createNamespace(final NamespaceDescriptor descriptor) throws IOException {
2354     executeCallable(new MasterCallable<Void>(getConnection()) {
2355       @Override
2356       public Void call(int callTimeout) throws Exception {
2357         master.createNamespace(null,
2358           CreateNamespaceRequest.newBuilder()
2359             .setNamespaceDescriptor(ProtobufUtil
2360               .toProtoNamespaceDescriptor(descriptor)).build()
2361         );
2362         return null;
2363       }
2364     });
2365   }
2366 
2367   /**
2368    * Modify an existing namespace
2369    * @param descriptor descriptor which describes the new namespace
2370    * @throws IOException
2371    */
2372   @Override
2373   public void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException {
2374     executeCallable(new MasterCallable<Void>(getConnection()) {
2375       @Override
2376       public Void call(int callTimeout) throws Exception {
2377         master.modifyNamespace(null, ModifyNamespaceRequest.newBuilder().
2378           setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build());
2379         return null;
2380       }
2381     });
2382   }
2383 
2384   /**
2385    * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
2386    * @param name namespace name
2387    * @throws IOException
2388    */
2389   @Override
2390   public void deleteNamespace(final String name) throws IOException {
2391     executeCallable(new MasterCallable<Void>(getConnection()) {
2392       @Override
2393       public Void call(int callTimeout) throws Exception {
2394         master.deleteNamespace(null, DeleteNamespaceRequest.newBuilder().
2395           setNamespaceName(name).build());
2396         return null;
2397       }
2398     });
2399   }
2400 
2401   /**
2402    * Get a namespace descriptor by name
2403    * @param name name of namespace descriptor
2404    * @return A descriptor
2405    * @throws IOException
2406    */
2407   @Override
2408   public NamespaceDescriptor getNamespaceDescriptor(final String name) throws IOException {
2409     return
2410         executeCallable(new MasterCallable<NamespaceDescriptor>(getConnection()) {
2411           @Override
2412           public NamespaceDescriptor call(int callTimeout) throws Exception {
2413             return ProtobufUtil.toNamespaceDescriptor(
2414               master.getNamespaceDescriptor(null, GetNamespaceDescriptorRequest.newBuilder().
2415                 setNamespaceName(name).build()).getNamespaceDescriptor());
2416           }
2417         });
2418   }
2419 
2420   /**
2421    * List available namespace descriptors
2422    * @return List of descriptors
2423    * @throws IOException
2424    */
2425   @Override
2426   public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
2427     return
2428         executeCallable(new MasterCallable<NamespaceDescriptor[]>(getConnection()) {
2429           @Override
2430           public NamespaceDescriptor[] call(int callTimeout) throws Exception {
2431             List<HBaseProtos.NamespaceDescriptor> list =
2432               master.listNamespaceDescriptors(null, ListNamespaceDescriptorsRequest.newBuilder().
2433                 build()).getNamespaceDescriptorList();
2434             NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()];
2435             for(int i = 0; i < list.size(); i++) {
2436               res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i));
2437             }
2438             return res;
2439           }
2440         });
2441   }
2442 
2443   /**
2444    * Get list of table descriptors by namespace
2445    * @param name namespace name
2446    * @return A descriptor
2447    * @throws IOException
2448    */
2449   @Override
2450   public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException {
2451     return
2452         executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
2453           @Override
2454           public HTableDescriptor[] call(int callTimeout) throws Exception {
2455             List<TableSchema> list =
2456               master.listTableDescriptorsByNamespace(null, ListTableDescriptorsByNamespaceRequest.
2457                 newBuilder().setNamespaceName(name).build()).getTableSchemaList();
2458             HTableDescriptor[] res = new HTableDescriptor[list.size()];
2459             for(int i=0; i < list.size(); i++) {
2460 
2461               res[i] = HTableDescriptor.convert(list.get(i));
2462             }
2463             return res;
2464           }
2465         });
2466   }
2467 
2468   /**
2469    * Get list of table names by namespace
2470    * @param name namespace name
2471    * @return The list of table names in the namespace
2472    * @throws IOException
2473    */
2474   @Override
2475   public TableName[] listTableNamesByNamespace(final String name) throws IOException {
2476     return
2477         executeCallable(new MasterCallable<TableName[]>(getConnection()) {
2478           @Override
2479           public TableName[] call(int callTimeout) throws Exception {
2480             List<HBaseProtos.TableName> tableNames =
2481               master.listTableNamesByNamespace(null, ListTableNamesByNamespaceRequest.
2482                 newBuilder().setNamespaceName(name).build())
2483                 .getTableNameList();
2484             TableName[] result = new TableName[tableNames.size()];
2485             for (int i = 0; i < tableNames.size(); i++) {
2486               result[i] = ProtobufUtil.toTableName(tableNames.get(i));
2487             }
2488             return result;
2489           }
2490         });
2491   }
2492 
  /**
   * Check to see if HBase is running. Throw an exception if not.
   * Probes ZooKeeper first (with retries disabled for fast failure), then the master.
   * @param conf system configuration
   * @throws MasterNotRunningException if the master is not running
   * @throws ZooKeeperConnectionException if unable to connect to zookeeper
   * @throws IOException if creating the probe connection fails
   */
  // Used by tests and by the Merge tool. Merge tool uses it to figure if HBase is up or not.
  public static void checkHBaseAvailable(Configuration conf)
  throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException {
    Configuration copyOfConf = HBaseConfiguration.create(conf);
    // We set it to make it fail as soon as possible if HBase is not available
    copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
    copyOfConf.setInt("zookeeper.recovery.retry", 0);
    try (ClusterConnection connection =
        (ClusterConnection)ConnectionFactory.createConnection(copyOfConf)) {
        // Check ZK first.
        // If the connection exists, we may have a connection to ZK that does not work anymore
        ZooKeeperKeepAliveConnection zkw = null;
        try {
          // This is NASTY. FIX!!!! Dependent on internal implementation! TODO
          zkw = ((ConnectionManager.HConnectionImplementation)connection).
            getKeepAliveZooKeeperWatcher();
          // A bare exists() probe on the base znode verifies the ZK quorum is reachable.
          zkw.getRecoverableZooKeeper().getZooKeeper().exists(zkw.baseZNode, false);
        } catch (IOException e) {
          throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
        } catch (InterruptedException e) {
          // Surface the interrupt as an IOException subtype to fit the throws clause.
          throw (InterruptedIOException)
            new InterruptedIOException("Can't connect to ZooKeeper").initCause(e);
        } catch (KeeperException e) {
          throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
        } finally {
          if (zkw != null) {
            zkw.close();
          }
        }
      // ZK is reachable; now verify the master itself is up.
      connection.isMasterRunning();
    }
  }
2531 
2532   /**
2533    * get the regions of a given table.
2534    *
2535    * @param tableName the name of the table
2536    * @return Ordered list of {@link HRegionInfo}.
2537    * @throws IOException
2538    */
2539   @Override
2540   public List<HRegionInfo> getTableRegions(final TableName tableName)
2541   throws IOException {
2542     ZooKeeperWatcher zookeeper =
2543       new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2544         new ThrowableAbortable());
2545     List<HRegionInfo> regions = null;
2546     try {
2547       if (TableName.META_TABLE_NAME.equals(tableName)) {
2548         regions = new MetaTableLocator().getMetaRegions(zookeeper);
2549       } else {
2550         regions = MetaTableAccessor.getTableRegions(connection, tableName, true);
2551       }
2552     } finally {
2553       zookeeper.close();
2554     }
2555     return regions;
2556   }
2557 
2558   public List<HRegionInfo> getTableRegions(final byte[] tableName)
2559   throws IOException {
2560     return getTableRegions(TableName.valueOf(tableName));
2561   }
2562 
2563   @Override
2564   public synchronized void close() throws IOException {
2565     if (cleanupConnectionOnClose && this.connection != null && !this.closed) {
2566       this.connection.close();
2567       this.closed = true;
2568     }
2569   }
2570 
2571   /**
2572    * Get tableDescriptors
2573    * @param tableNames List of table names
2574    * @return HTD[] the tableDescriptor
2575    * @throws IOException if a remote or network exception occurs
2576    */
2577   @Override
2578   public HTableDescriptor[] getTableDescriptorsByTableName(List<TableName> tableNames)
2579   throws IOException {
2580     return this.connection.getHTableDescriptorsByTableName(tableNames);
2581   }
2582 
2583   /**
2584    * Get tableDescriptor
2585    * @param tableName one table name
2586    * @return HTD the HTableDescriptor or null if the table not exists
2587    * @throws IOException if a remote or network exception occurs
2588    */
2589   private HTableDescriptor getTableDescriptorByTableName(TableName tableName)
2590       throws IOException {
2591     List<TableName> tableNames = new ArrayList<TableName>(1);
2592     tableNames.add(tableName);
2593 
2594     HTableDescriptor[] htdl = getTableDescriptorsByTableName(tableNames);
2595 
2596     if (htdl == null || htdl.length == 0) {
2597       return null;
2598     }
2599     else {
2600       return htdl[0];
2601     }
2602   }
2603 
2604   /**
2605    * Get tableDescriptors
2606    * @param names List of table names
2607    * @return HTD[] the tableDescriptor
2608    * @throws IOException if a remote or network exception occurs
2609    */
2610   @Override
2611   public HTableDescriptor[] getTableDescriptors(List<String> names)
2612   throws IOException {
2613     List<TableName> tableNames = new ArrayList<TableName>(names.size());
2614     for(String name : names) {
2615       tableNames.add(TableName.valueOf(name));
2616     }
2617     return getTableDescriptorsByTableName(tableNames);
2618   }
2619 
2620   private RollWALWriterResponse rollWALWriterImpl(final ServerName sn) throws IOException,
2621       FailedLogCloseException {
2622     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2623     RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest();
2624     try {
2625       return admin.rollWALWriter(null, request);
2626     } catch (ServiceException se) {
2627       throw ProtobufUtil.getRemoteException(se);
2628     }
2629   }
2630 
2631   /**
2632    * Roll the log writer. I.e. when using a file system based write ahead log, 
2633    * start writing log messages to a new file.
2634    *
2635    * Note that when talking to a version 1.0+ HBase deployment, the rolling is asynchronous.
2636    * This method will return as soon as the roll is requested and the return value will
2637    * always be null. Additionally, the named region server may schedule store flushes at the
2638    * request of the wal handling the roll request.
2639    *
2640    * When talking to a 0.98 or older HBase deployment, the rolling is synchronous and the
2641    * return value may be either null or a list of encoded region names.
2642    *
2643    * @param serverName
2644    *          The servername of the regionserver. A server name is made of host,
2645    *          port and startcode. This is mandatory. Here is an example:
2646    *          <code> host187.example.com,60020,1289493121758</code>
2647    * @return a set of {@link HRegionInfo#getEncodedName()} that would allow the wal to
2648    *         clean up some underlying files. null if there's nothing to flush.
2649    * @throws IOException if a remote or network exception occurs
2650    * @throws FailedLogCloseException
2651    * @deprecated use {@link #rollWALWriter(ServerName)}
2652    */
2653   @Deprecated
2654   public synchronized byte[][] rollHLogWriter(String serverName)
2655       throws IOException, FailedLogCloseException {
2656     ServerName sn = ServerName.valueOf(serverName);
2657     final RollWALWriterResponse response = rollWALWriterImpl(sn);
2658     int regionCount = response.getRegionToFlushCount();
2659     if (0 == regionCount) {
2660       return null;
2661     }
2662     byte[][] regionsToFlush = new byte[regionCount][];
2663     for (int i = 0; i < regionCount; i++) {
2664       ByteString region = response.getRegionToFlush(i);
2665       regionsToFlush[i] = region.toByteArray();
2666     }
2667     return regionsToFlush;
2668   }
2669 
2670   @Override
2671   public synchronized void rollWALWriter(ServerName serverName)
2672       throws IOException, FailedLogCloseException {
2673     rollWALWriterImpl(serverName);
2674   }
2675 
2676   @Override
2677   public String[] getMasterCoprocessors() {
2678     try {
2679       return getClusterStatus().getMasterCoprocessors();
2680     } catch (IOException e) {
2681       LOG.error("Could not getClusterStatus()",e);
2682       return null;
2683     }
2684   }
2685 
  /**
   * {@inheritDoc}
   *
   * Aggregates per-region compaction states into one table-level state:
   * returns MAJOR_AND_MINOR as soon as both kinds are seen (or one region
   * reports both); otherwise returns the single kind observed, or NONE.
   */
  @Override
  public CompactionState getCompactionState(final TableName tableName)
  throws IOException, InterruptedException {
    CompactionState state = CompactionState.NONE;
    ZooKeeperWatcher zookeeper =
      new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
        new ThrowableAbortable());
    try {
      checkTableExists(tableName);
      List<Pair<HRegionInfo, ServerName>> pairs;
      // hbase:meta is not listed in meta itself; locate its regions via ZooKeeper.
      if (TableName.META_TABLE_NAME.equals(tableName)) {
        pairs = new MetaTableLocator().getMetaRegionsAndLocations(zookeeper);
      } else {
        pairs = MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
      }
      for (Pair<HRegionInfo, ServerName> pair: pairs) {
        // Skip regions that are offline or currently unassigned.
        if (pair.getFirst().isOffline()) continue;
        if (pair.getSecond() == null) continue;
        try {
          ServerName sn = pair.getSecond();
          AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
          GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
            pair.getFirst().getRegionName(), true);
          GetRegionInfoResponse response = admin.getRegionInfo(null, request);
          switch (response.getCompactionState()) {
          case MAJOR_AND_MINOR:
            // Both kinds seen in one region: no stronger answer is possible.
            return CompactionState.MAJOR_AND_MINOR;
          case MAJOR:
            if (state == CompactionState.MINOR) {
              return CompactionState.MAJOR_AND_MINOR;
            }
            state = CompactionState.MAJOR;
            break;
          case MINOR:
            if (state == CompactionState.MAJOR) {
              return CompactionState.MAJOR_AND_MINOR;
            }
            state = CompactionState.MINOR;
            break;
          case NONE:
          default: // nothing, continue
          }
        } catch (NotServingRegionException e) {
          // The region moved between the meta scan and the RPC; best-effort, skip it.
          if (LOG.isDebugEnabled()) {
            LOG.debug("Trying to get compaction state of " +
              pair.getFirst() + ": " +
              StringUtils.stringifyException(e));
          }
        } catch (RemoteException e) {
          // A remote NotServingRegionException is likewise skipped; anything else propagates.
          if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Trying to get compaction state of " + pair.getFirst() + ": "
                + StringUtils.stringifyException(e));
            }
          } else {
            throw e;
          }
        }
      }
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    } finally {
      zookeeper.close();
    }
    return state;
  }
2755 
2756   /**
2757    * {@inheritDoc}
2758    */
2759   @Override
2760   public CompactionState getCompactionStateForRegion(final byte[] regionName)
2761   throws IOException, InterruptedException {
2762     try {
2763       Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2764       if (regionServerPair == null) {
2765         throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2766       }
2767       if (regionServerPair.getSecond() == null) {
2768         throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2769       }
2770       ServerName sn = regionServerPair.getSecond();
2771       AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2772       GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
2773         regionServerPair.getFirst().getRegionName(), true);
2774       GetRegionInfoResponse response = admin.getRegionInfo(null, request);
2775       return response.getCompactionState();
2776     } catch (ServiceException se) {
2777       throw ProtobufUtil.getRemoteException(se);
2778     }
2779   }
2780 
2781   /**
2782    * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
2783    * #getCompactionStateForRegion(byte[])} instead.
2784    */
2785   @Deprecated
2786   public CompactionState getCompactionState(final String tableNameOrRegionName)
2787   throws IOException, InterruptedException {
2788     return getCompactionState(Bytes.toBytes(tableNameOrRegionName));
2789   }
2790 
2791   /**
2792    * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
2793    * #getCompactionStateForRegion(byte[])} instead.
2794    */
2795   @Deprecated
2796   public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
2797   throws IOException, InterruptedException {
2798     try {
2799       return getCompactionStateForRegion(tableNameOrRegionName);
2800     } catch (IllegalArgumentException e) {
2801       // Invalid region, try table
2802       return getCompactionState(TableName.valueOf(tableNameOrRegionName));
2803     }
2804   }
2805 
2806   /**
2807    * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
2808    * taken. If the table is disabled, an offline snapshot is taken.
2809    * <p>
2810    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2811    * snapshot with the same name (even a different type or with different parameters) will fail with
2812    * a {@link SnapshotCreationException} indicating the duplicate naming.
2813    * <p>
2814    * Snapshot names follow the same naming constraints as tables in HBase. See
2815    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
2816    * @param snapshotName name of the snapshot to be created
2817    * @param tableName name of the table for which snapshot is created
2818    * @throws IOException if a remote or network exception occurs
2819    * @throws SnapshotCreationException if snapshot creation failed
2820    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2821    */
2822   @Override
2823   public void snapshot(final String snapshotName,
2824                        final TableName tableName) throws IOException,
2825       SnapshotCreationException, IllegalArgumentException {
2826     snapshot(snapshotName, tableName, SnapshotDescription.Type.FLUSH);
2827   }
2828 
2829   public void snapshot(final String snapshotName,
2830                        final String tableName) throws IOException,
2831       SnapshotCreationException, IllegalArgumentException {
2832     snapshot(snapshotName, TableName.valueOf(tableName),
2833         SnapshotDescription.Type.FLUSH);
2834   }
2835 
2836   /**
2837    * Create snapshot for the given table of given flush type.
2838    * <p>
2839    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2840    * snapshot with the same name (even a different type or with different parameters) will fail with
2841    * a {@link SnapshotCreationException} indicating the duplicate naming.
2842    * <p>
2843    * Snapshot names follow the same naming constraints as tables in HBase.
2844    * @param snapshotName name of the snapshot to be created
2845    * @param tableName name of the table for which snapshot is created
2846    * @param flushType if the snapshot should be taken without flush memstore first
2847    * @throws IOException if a remote or network exception occurs
2848    * @throws SnapshotCreationException if snapshot creation failed
2849    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2850    */
2851    public void snapshot(final byte[] snapshotName, final byte[] tableName,
2852                        final SnapshotDescription.Type flushType) throws
2853       IOException, SnapshotCreationException, IllegalArgumentException {
2854       snapshot(Bytes.toString(snapshotName), Bytes.toString(tableName), flushType);
2855   }
  /**
   * Create a timestamp consistent snapshot for the given table.
   * <p>
   * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
   * snapshot with the same name (even a different type or with different parameters) will fail
   * with a {@link SnapshotCreationException} indicating the duplicate naming.
   * <p>
   * Snapshot names follow the same naming constraints as tables in HBase.
   * @param snapshotName name of the snapshot to be created
   * @param tableName name of the table for which snapshot is created
   * @throws IOException if a remote or network exception occurs
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
2872   @Override
2873   public void snapshot(final byte[] snapshotName,
2874                        final TableName tableName) throws IOException,
2875       SnapshotCreationException, IllegalArgumentException {
2876     snapshot(Bytes.toString(snapshotName), tableName, SnapshotDescription.Type.FLUSH);
2877   }
2878 
2879   public void snapshot(final byte[] snapshotName,
2880                        final byte[] tableName) throws IOException,
2881       SnapshotCreationException, IllegalArgumentException {
2882     snapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName),
2883       SnapshotDescription.Type.FLUSH);
2884   }
2885 
2886   /**
2887    * Create typed snapshot of the table.
2888    * <p>
2889    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2890    * snapshot with the same name (even a different type or with different parameters) will fail with
2891    * a {@link SnapshotCreationException} indicating the duplicate naming.
2892    * <p>
2893    * Snapshot names follow the same naming constraints as tables in HBase. See
2894    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
2895    * <p>
2896    * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
2897    *          snapshots stored on the cluster
2898    * @param tableName name of the table to snapshot
2899    * @param type type of snapshot to take
2900    * @throws IOException we fail to reach the master
2901    * @throws SnapshotCreationException if snapshot creation failed
2902    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2903    */
2904   @Override
2905   public void snapshot(final String snapshotName,
2906                        final TableName tableName,
2907                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
2908       IllegalArgumentException {
2909     SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
2910     builder.setTable(tableName.getNameAsString());
2911     builder.setName(snapshotName);
2912     builder.setType(type);
2913     snapshot(builder.build());
2914   }
2915 
2916   public void snapshot(final String snapshotName,
2917                        final String tableName,
2918                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
2919       IllegalArgumentException {
2920     snapshot(snapshotName, TableName.valueOf(tableName), type);
2921   }
2922 
2923   public void snapshot(final String snapshotName,
2924                        final byte[] tableName,
2925                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
2926       IllegalArgumentException {
2927     snapshot(snapshotName, TableName.valueOf(tableName), type);
2928   }
2929 
2930   /**
2931    * Take a snapshot and wait for the server to complete that snapshot (blocking).
2932    * <p>
2933    * Only a single snapshot should be taken at a time for an instance of HBase, or results may be
2934    * undefined (you can tell multiple HBase clusters to snapshot at the same time, but only one at a
2935    * time for a single cluster).
2936    * <p>
2937    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2938    * snapshot with the same name (even a different type or with different parameters) will fail with
2939    * a {@link SnapshotCreationException} indicating the duplicate naming.
2940    * <p>
2941    * Snapshot names follow the same naming constraints as tables in HBase. See
2942    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
2943    * <p>
2944    * You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])}
2945    * unless you are sure about the type of snapshot that you want to take.
2946    * @param snapshot snapshot to take
2947    * @throws IOException or we lose contact with the master.
2948    * @throws SnapshotCreationException if snapshot failed to be taken
2949    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2950    */
2951   @Override
  public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException,
      IllegalArgumentException {
    // actually take the snapshot
    SnapshotResponse response = takeSnapshotAsync(snapshot);
    final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot)
        .build();
    IsSnapshotDoneResponse done = null;
    // Poll the master until the snapshot completes or the master-supplied
    // expected timeout elapses. The per-retry backoff is capped so that
    // numRetries attempts fit within the overall timeout.
    long start = EnvironmentEdgeManager.currentTime();
    long max = response.getExpectedTimeout();
    long maxPauseTime = max / this.numRetries;
    int tries = 0;
    LOG.debug("Waiting a max of " + max + " ms for snapshot '" +
        ClientSnapshotDescriptionUtils.toString(snapshot) + "'' to complete. (max " +
        maxPauseTime + " ms per retry)");
    // First iteration always polls (done is still null here); afterwards loop
    // while the snapshot is unfinished and the deadline has not passed.
    while (tries == 0
        || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) {
      try {
        // sleep a backoff <= pauseTime amount
        long sleep = getPauseTime(tries++);
        sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
        LOG.debug("(#" + tries + ") Sleeping: " + sleep +
          "ms while waiting for snapshot completion.");
        Thread.sleep(sleep);
      } catch (InterruptedException e) {
        throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
      }
      LOG.debug("Getting current status of snapshot from master...");
      done = executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
        @Override
        public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
          return master.isSnapshotDone(null, request);
        }
      });
    }
    if (!done.getDone()) {
      throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
          + "' wasn't completed in expectedTime:" + max + " ms", snapshot);
    }
  }
2991 
2992   /**
2993    * Take a snapshot without waiting for the server to complete that snapshot (asynchronous)
2994    * <p>
2995    * Only a single snapshot should be taken at a time, or results may be undefined.
2996    * @param snapshot snapshot to take
2997    * @return response from the server indicating the max time to wait for the snapshot
2998    * @throws IOException if the snapshot did not succeed or we lose contact with the master.
2999    * @throws SnapshotCreationException if snapshot creation failed
3000    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3001    */
3002   @Override
3003   public SnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot) throws IOException,
3004       SnapshotCreationException {
3005     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
3006     final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot)
3007         .build();
3008     // run the snapshot on the master
3009     return executeCallable(new MasterCallable<SnapshotResponse>(getConnection()) {
3010       @Override
3011       public SnapshotResponse call(int callTimeout) throws ServiceException {
3012         return master.snapshot(null, request);
3013       }
3014     });
3015   }
3016 
3017   /**
3018    * Check the current state of the passed snapshot.
3019    * <p>
3020    * There are three possible states:
3021    * <ol>
3022    * <li>running - returns <tt>false</tt></li>
3023    * <li>finished - returns <tt>true</tt></li>
3024    * <li>finished with error - throws the exception that caused the snapshot to fail</li>
3025    * </ol>
3026    * <p>
3027    * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
3028    * run/started since the snapshot your are checking, you will recieve an
3029    * {@link UnknownSnapshotException}.
3030    * @param snapshot description of the snapshot to check
3031    * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
3032    *         running
3033    * @throws IOException if we have a network issue
3034    * @throws HBaseSnapshotException if the snapshot failed
3035    * @throws UnknownSnapshotException if the requested snapshot is unknown
3036    */
3037   @Override
3038   public boolean isSnapshotFinished(final SnapshotDescription snapshot)
3039       throws IOException, HBaseSnapshotException, UnknownSnapshotException {
3040 
3041     return executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
3042       @Override
3043       public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3044         return master.isSnapshotDone(null,
3045           IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build());
3046       }
3047     }).getDone();
3048   }
3049 
3050   /**
3051    * Restore the specified snapshot on the original table. (The table must be disabled)
3052    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3053    * is set to true, a snapshot of the current table is taken
3054    * before executing the restore operation.
3055    * In case of restore failure, the failsafe snapshot will be restored.
3056    * If the restore completes without problem the failsafe snapshot is deleted.
3057    *
3058    * @param snapshotName name of the snapshot to restore
3059    * @throws IOException if a remote or network exception occurs
3060    * @throws RestoreSnapshotException if snapshot failed to be restored
3061    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3062    */
3063   @Override
3064   public void restoreSnapshot(final byte[] snapshotName)
3065       throws IOException, RestoreSnapshotException {
3066     restoreSnapshot(Bytes.toString(snapshotName));
3067   }
3068 
3069   /**
3070    * Restore the specified snapshot on the original table. (The table must be disabled)
3071    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3072    * is set to true, a snapshot of the current table is taken
3073    * before executing the restore operation.
3074    * In case of restore failure, the failsafe snapshot will be restored.
3075    * If the restore completes without problem the failsafe snapshot is deleted.
3076    *
3077    * @param snapshotName name of the snapshot to restore
3078    * @throws IOException if a remote or network exception occurs
3079    * @throws RestoreSnapshotException if snapshot failed to be restored
3080    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3081    */
3082   @Override
3083   public void restoreSnapshot(final String snapshotName)
3084       throws IOException, RestoreSnapshotException {
3085     boolean takeFailSafeSnapshot =
3086       conf.getBoolean("hbase.snapshot.restore.take.failsafe.snapshot", false);
3087     restoreSnapshot(snapshotName, takeFailSafeSnapshot);
3088   }
3089 
3090   /**
3091    * Restore the specified snapshot on the original table. (The table must be disabled)
3092    * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
3093    * before executing the restore operation.
3094    * In case of restore failure, the failsafe snapshot will be restored.
3095    * If the restore completes without problem the failsafe snapshot is deleted.
3096    *
3097    * The failsafe snapshot name is configurable by using the property
3098    * "hbase.snapshot.restore.failsafe.name".
3099    *
3100    * @param snapshotName name of the snapshot to restore
3101    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
3102    * @throws IOException if a remote or network exception occurs
3103    * @throws RestoreSnapshotException if snapshot failed to be restored
3104    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3105    */
3106   @Override
3107   public void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot)
3108       throws IOException, RestoreSnapshotException {
3109     restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
3110   }
3111 
3112   /**
3113    * Restore the specified snapshot on the original table. (The table must be disabled)
3114    * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
3115    * before executing the restore operation.
3116    * In case of restore failure, the failsafe snapshot will be restored.
3117    * If the restore completes without problem the failsafe snapshot is deleted.
3118    *
3119    * The failsafe snapshot name is configurable by using the property
3120    * "hbase.snapshot.restore.failsafe.name".
3121    *
3122    * @param snapshotName name of the snapshot to restore
3123    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
3124    * @throws IOException if a remote or network exception occurs
3125    * @throws RestoreSnapshotException if snapshot failed to be restored
3126    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3127    */
3128   @Override
3129   public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
3130       throws IOException, RestoreSnapshotException {
3131     TableName tableName = null;
3132     for (SnapshotDescription snapshotInfo: listSnapshots()) {
3133       if (snapshotInfo.getName().equals(snapshotName)) {
3134         tableName = TableName.valueOf(snapshotInfo.getTable());
3135         break;
3136       }
3137     }
3138 
3139     if (tableName == null) {
3140       throw new RestoreSnapshotException(
3141         "Unable to find the table name for snapshot=" + snapshotName);
3142     }
3143 
3144     // The table does not exists, switch to clone.
3145     if (!tableExists(tableName)) {
3146       try {
3147         cloneSnapshot(snapshotName, tableName);
3148       } catch (InterruptedException e) {
3149         throw new InterruptedIOException("Interrupted when restoring a nonexistent table: " +
3150           e.getMessage());
3151       }
3152       return;
3153     }
3154 
3155     // Check if the table is disabled
3156     if (!isTableDisabled(tableName)) {
3157       throw new TableNotDisabledException(tableName);
3158     }
3159 
3160     // Take a snapshot of the current state
3161     String failSafeSnapshotSnapshotName = null;
3162     if (takeFailSafeSnapshot) {
3163       failSafeSnapshotSnapshotName = conf.get("hbase.snapshot.restore.failsafe.name",
3164         "hbase-failsafe-{snapshot.name}-{restore.timestamp}");
3165       failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotName
3166         .replace("{snapshot.name}", snapshotName)
3167         .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
3168         .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime()));
3169       LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
3170       snapshot(failSafeSnapshotSnapshotName, tableName);
3171     }
3172 
3173     try {
3174       // Restore snapshot
3175       internalRestoreSnapshot(snapshotName, tableName);
3176     } catch (IOException e) {
3177       // Somthing went wrong during the restore...
3178       // if the pre-restore snapshot is available try to rollback
3179       if (takeFailSafeSnapshot) {
3180         try {
3181           internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName);
3182           String msg = "Restore snapshot=" + snapshotName +
3183             " failed. Rollback to snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
3184           LOG.error(msg, e);
3185           throw new RestoreSnapshotException(msg, e);
3186         } catch (IOException ex) {
3187           String msg = "Failed to restore and rollback to snapshot=" + failSafeSnapshotSnapshotName;
3188           LOG.error(msg, ex);
3189           throw new RestoreSnapshotException(msg, e);
3190         }
3191       } else {
3192         throw new RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e);
3193       }
3194     }
3195 
3196     // If the restore is succeeded, delete the pre-restore snapshot
3197     if (takeFailSafeSnapshot) {
3198       try {
3199         LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
3200         deleteSnapshot(failSafeSnapshotSnapshotName);
3201       } catch (IOException e) {
3202         LOG.error("Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName, e);
3203       }
3204     }
3205   }
3206 
3207   /**
3208    * Create a new table by cloning the snapshot content.
3209    *
3210    * @param snapshotName name of the snapshot to be cloned
3211    * @param tableName name of the table where the snapshot will be restored
3212    * @throws IOException if a remote or network exception occurs
3213    * @throws TableExistsException if table to be created already exists
3214    * @throws RestoreSnapshotException if snapshot failed to be cloned
3215    * @throws IllegalArgumentException if the specified table has not a valid name
3216    */
3217   public void cloneSnapshot(final byte[] snapshotName, final byte[] tableName)
3218       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
3219     cloneSnapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName));
3220   }
3221 
3222   /**
3223    * Create a new table by cloning the snapshot content.
3224    *
3225    * @param snapshotName name of the snapshot to be cloned
3226    * @param tableName name of the table where the snapshot will be restored
3227    * @throws IOException if a remote or network exception occurs
3228    * @throws TableExistsException if table to be created already exists
3229    * @throws RestoreSnapshotException if snapshot failed to be cloned
3230    * @throws IllegalArgumentException if the specified table has not a valid name
3231    */
3232   @Override
3233   public void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
3234       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
3235     cloneSnapshot(Bytes.toString(snapshotName), tableName);
3236   }
3237 
3238 
3239 
3240   /**
3241    * Create a new table by cloning the snapshot content.
3242    *
3243    * @param snapshotName name of the snapshot to be cloned
3244    * @param tableName name of the table where the snapshot will be restored
3245    * @throws IOException if a remote or network exception occurs
3246    * @throws TableExistsException if table to be created already exists
3247    * @throws RestoreSnapshotException if snapshot failed to be cloned
3248    * @throws IllegalArgumentException if the specified table has not a valid name
3249    */
3250   public void cloneSnapshot(final String snapshotName, final String tableName)
3251       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
3252     cloneSnapshot(snapshotName, TableName.valueOf(tableName));
3253   }
3254 
3255   /**
3256    * Create a new table by cloning the snapshot content.
3257    *
3258    * @param snapshotName name of the snapshot to be cloned
3259    * @param tableName name of the table where the snapshot will be restored
3260    * @throws IOException if a remote or network exception occurs
3261    * @throws TableExistsException if table to be created already exists
3262    * @throws RestoreSnapshotException if snapshot failed to be cloned
3263    * @throws IllegalArgumentException if the specified table has not a valid name
3264    */
3265   @Override
3266   public void cloneSnapshot(final String snapshotName, final TableName tableName)
3267       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
3268     if (tableExists(tableName)) {
3269       throw new TableExistsException(tableName);
3270     }
3271     internalRestoreSnapshot(snapshotName, tableName);
3272     waitUntilTableIsEnabled(tableName);
3273   }
3274 
3275   /**
3276    * Execute a distributed procedure on a cluster synchronously with return data
3277    *
3278    * @param signature A distributed procedure is uniquely identified
3279    * by its signature (default the root ZK node name of the procedure).
3280    * @param instance The instance name of the procedure. For some procedures, this parameter is
3281    * optional.
3282    * @param props Property/Value pairs of properties passing to the procedure
3283    * @return data returned after procedure execution. null if no return data.
3284    * @throws IOException
3285    */
3286   @Override
3287   public byte[] execProcedureWithRet(String signature, String instance,
3288       Map<String, String> props) throws IOException {
3289     ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3290     builder.setSignature(signature).setInstance(instance);
3291     for (Entry<String, String> entry : props.entrySet()) {
3292       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3293           .setValue(entry.getValue()).build();
3294       builder.addConfiguration(pair);
3295     }
3296 
3297     final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
3298         .setProcedure(builder.build()).build();
3299     // run the procedure on the master
3300     ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
3301         getConnection()) {
3302       @Override
3303       public ExecProcedureResponse call(int callTimeout) throws ServiceException {
3304         return master.execProcedureWithRet(null, request);
3305       }
3306     });
3307 
3308     return response.hasReturnData() ? response.getReturnData().toByteArray() : null;
3309   }
3310   /**
3311    * Execute a distributed procedure on a cluster.
3312    *
3313    * @param signature A distributed procedure is uniquely identified
3314    * by its signature (default the root ZK node name of the procedure).
3315    * @param instance The instance name of the procedure. For some procedures, this parameter is
3316    * optional.
3317    * @param props Property/Value pairs of properties passing to the procedure
3318    * @throws IOException
3319    */
  @Override
  public void execProcedure(String signature, String instance,
      Map<String, String> props) throws IOException {
    // Translate signature/instance/properties into the protobuf description.
    ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
    builder.setSignature(signature).setInstance(instance);
    for (Entry<String, String> entry : props.entrySet()) {
      NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
          .setValue(entry.getValue()).build();
      builder.addConfiguration(pair);
    }

    final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
        .setProcedure(builder.build()).build();
    // run the procedure on the master
    ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
        getConnection()) {
      @Override
      public ExecProcedureResponse call(int callTimeout) throws ServiceException {
        return master.execProcedure(null, request);
      }
    });

    // Poll the master until the procedure completes or the master-supplied
    // expected timeout elapses. Per-retry backoff is capped so numRetries
    // attempts fit within the overall timeout.
    long start = EnvironmentEdgeManager.currentTime();
    long max = response.getExpectedTimeout();
    long maxPauseTime = max / this.numRetries;
    int tries = 0;
    LOG.debug("Waiting a max of " + max + " ms for procedure '" +
        signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms per retry)");
    boolean done = false;
    // First iteration always polls; afterwards loop while the procedure is
    // unfinished and the deadline has not passed.
    while (tries == 0
        || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
      try {
        // sleep a backoff <= pauseTime amount
        long sleep = getPauseTime(tries++);
        sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
        LOG.debug("(#" + tries + ") Sleeping: " + sleep +
          "ms while waiting for procedure completion.");
        Thread.sleep(sleep);
      } catch (InterruptedException e) {
        throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
      }
      LOG.debug("Getting current status of procedure from master...");
      done = isProcedureFinished(signature, instance, props);
    }
    if (!done) {
      throw new IOException("Procedure '" + signature + " : " + instance
          + "' wasn't completed in expectedTime:" + max + " ms");
    }
  }
3369 
3370   /**
3371    * Check the current state of the specified procedure.
3372    * <p>
3373    * There are three possible states:
3374    * <ol>
3375    * <li>running - returns <tt>false</tt></li>
3376    * <li>finished - returns <tt>true</tt></li>
3377    * <li>finished with error - throws the exception that caused the procedure to fail</li>
3378    * </ol>
3379    * <p>
3380    *
3381    * @param signature The signature that uniquely identifies a procedure
3382    * @param instance The instance name of the procedure
3383    * @param props Property/Value pairs of properties passing to the procedure
3384    * @return true if the specified procedure is finished successfully, false if it is still running
3385    * @throws IOException if the specified procedure finished with error
3386    */
3387   @Override
3388   public boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
3389       throws IOException {
3390     final ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3391     builder.setSignature(signature).setInstance(instance);
3392     for (Entry<String, String> entry : props.entrySet()) {
3393       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3394           .setValue(entry.getValue()).build();
3395       builder.addConfiguration(pair);
3396     }
3397     final ProcedureDescription desc = builder.build();
3398     return executeCallable(
3399         new MasterCallable<IsProcedureDoneResponse>(getConnection()) {
3400           @Override
3401           public IsProcedureDoneResponse call(int callTimeout) throws ServiceException {
3402             return master.isProcedureDone(null, IsProcedureDoneRequest
3403                 .newBuilder().setProcedure(desc).build());
3404           }
3405         }).getDone();
3406   }
3407 
3408   /**
3409    * Execute Restore/Clone snapshot and wait for the server to complete (blocking).
3410    * To check if the cloned table exists, use {@link #isTableAvailable} -- it is not safe to
3411    * create an HTable instance to this table before it is available.
3412    * @param snapshotName snapshot to restore
3413    * @param tableName table name to restore the snapshot on
3414    * @throws IOException if a remote or network exception occurs
3415    * @throws RestoreSnapshotException if snapshot failed to be restored
3416    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3417    */
  private void internalRestoreSnapshot(final String snapshotName, final TableName
      tableName)
      throws IOException, RestoreSnapshotException {
    SnapshotDescription snapshot = SnapshotDescription.newBuilder()
        .setName(snapshotName).setTable(tableName.getNameAsString()).build();

    // actually restore the snapshot
    internalRestoreSnapshotAsync(snapshot);

    final IsRestoreSnapshotDoneRequest request = IsRestoreSnapshotDoneRequest.newBuilder()
        .setSnapshot(snapshot).build();
    // Seed with done=false so the poll loop below runs at least once.
    IsRestoreSnapshotDoneResponse done = IsRestoreSnapshotDoneResponse.newBuilder()
        .setDone(false).buildPartial();
    final long maxPauseTime = 5000;
    int tries = 0;
    // NOTE(review): unlike snapshot(), this loop has no overall deadline; it
    // polls (with capped backoff) until the master reports the restore done.
    while (!done.getDone()) {
      try {
        // sleep a backoff <= pauseTime amount
        long sleep = getPauseTime(tries++);
        sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
        LOG.debug(tries + ") Sleeping: " + sleep + " ms while we wait for snapshot restore to complete.");
        Thread.sleep(sleep);
      } catch (InterruptedException e) {
        throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
      }
      LOG.debug("Getting current status of snapshot restore from master...");
      done = executeCallable(new MasterCallable<IsRestoreSnapshotDoneResponse>(
          getConnection()) {
        @Override
        public IsRestoreSnapshotDoneResponse call(int callTimeout) throws ServiceException {
          return master.isRestoreSnapshotDone(null, request);
        }
      });
    }
    // NOTE(review): unreachable in practice — the loop above only exits once
    // getDone() is true, so this guard can never throw.
    if (!done.getDone()) {
      throw new RestoreSnapshotException("Snapshot '" + snapshot.getName() + "' wasn't restored.");
    }
  }
3456 
3457   /**
3458    * Execute Restore/Clone snapshot and wait for the server to complete (asynchronous)
3459    * <p>
3460    * Only a single snapshot should be restored at a time, or results may be undefined.
3461    * @param snapshot snapshot to restore
3462    * @return response from the server indicating the max time to wait for the snapshot
3463    * @throws IOException if a remote or network exception occurs
3464    * @throws RestoreSnapshotException if snapshot failed to be restored
3465    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3466    */
3467   private RestoreSnapshotResponse internalRestoreSnapshotAsync(final SnapshotDescription snapshot)
3468       throws IOException, RestoreSnapshotException {
3469     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
3470 
3471     final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot)
3472         .build();
3473 
3474     // run the snapshot restore on the master
3475     return executeCallable(new MasterCallable<RestoreSnapshotResponse>(getConnection()) {
3476       @Override
3477       public RestoreSnapshotResponse call(int callTimeout) throws ServiceException {
3478         return master.restoreSnapshot(null, request);
3479       }
3480     });
3481   }
3482 
3483   /**
3484    * List completed snapshots.
3485    * @return a list of snapshot descriptors for completed snapshots
3486    * @throws IOException if a network error occurs
3487    */
3488   @Override
3489   public List<SnapshotDescription> listSnapshots() throws IOException {
3490     return executeCallable(new MasterCallable<List<SnapshotDescription>>(getConnection()) {
3491       @Override
3492       public List<SnapshotDescription> call(int callTimeout) throws ServiceException {
3493         return master.getCompletedSnapshots(null, GetCompletedSnapshotsRequest.newBuilder().build())
3494             .getSnapshotsList();
3495       }
3496     });
3497   }
3498 
3499   /**
3500    * List all the completed snapshots matching the given regular expression.
3501    *
3502    * @param regex The regular expression to match against
3503    * @return - returns a List of SnapshotDescription
3504    * @throws IOException if a remote or network exception occurs
3505    */
3506   @Override
3507   public List<SnapshotDescription> listSnapshots(String regex) throws IOException {
3508     return listSnapshots(Pattern.compile(regex));
3509   }
3510 
3511   /**
3512    * List all the completed snapshots matching the given pattern.
3513    *
3514    * @param pattern The compiled regular expression to match against
3515    * @return - returns a List of SnapshotDescription
3516    * @throws IOException if a remote or network exception occurs
3517    */
3518   @Override
3519   public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException {
3520     List<SnapshotDescription> matched = new LinkedList<SnapshotDescription>();
3521     List<SnapshotDescription> snapshots = listSnapshots();
3522     for (SnapshotDescription snapshot : snapshots) {
3523       if (pattern.matcher(snapshot.getName()).matches()) {
3524         matched.add(snapshot);
3525       }
3526     }
3527     return matched;
3528   }
3529 
3530   /**
3531    * Delete an existing snapshot.
3532    * @param snapshotName name of the snapshot
3533    * @throws IOException if a remote or network exception occurs
3534    */
3535   @Override
3536   public void deleteSnapshot(final byte[] snapshotName) throws IOException {
3537     deleteSnapshot(Bytes.toString(snapshotName));
3538   }
3539 
3540   /**
3541    * Delete an existing snapshot.
3542    * @param snapshotName name of the snapshot
3543    * @throws IOException if a remote or network exception occurs
3544    */
3545   @Override
3546   public void deleteSnapshot(final String snapshotName) throws IOException {
3547     // make sure the snapshot is possibly valid
3548     TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName));
3549     // do the delete
3550     executeCallable(new MasterCallable<Void>(getConnection()) {
3551       @Override
3552       public Void call(int callTimeout) throws ServiceException {
3553         master.deleteSnapshot(null,
3554           DeleteSnapshotRequest.newBuilder().
3555             setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build()
3556         );
3557         return null;
3558       }
3559     });
3560   }
3561 
3562   /**
3563    * Delete existing snapshots whose names match the pattern passed.
3564    * @param regex The regular expression to match against
3565    * @throws IOException if a remote or network exception occurs
3566    */
3567   @Override
3568   public void deleteSnapshots(final String regex) throws IOException {
3569     deleteSnapshots(Pattern.compile(regex));
3570   }
3571 
3572   /**
3573    * Delete existing snapshots whose names match the pattern passed.
3574    * @param pattern pattern for names of the snapshot to match
3575    * @throws IOException if a remote or network exception occurs
3576    */
3577   @Override
3578   public void deleteSnapshots(final Pattern pattern) throws IOException {
3579     List<SnapshotDescription> snapshots = listSnapshots(pattern);
3580     for (final SnapshotDescription snapshot : snapshots) {
3581       // do the delete
3582       executeCallable(new MasterCallable<Void>(getConnection()) {
3583         @Override
3584         public Void call(int callTimeout) throws ServiceException {
3585           this.master.deleteSnapshot(null,
3586             DeleteSnapshotRequest.newBuilder().setSnapshot(snapshot).build());
3587           return null;
3588         }
3589       });
3590     }
3591   }
3592 
3593   /**
3594    * Apply the new quota settings.
3595    *
3596    * @param quota the quota settings
3597    * @throws IOException if a remote or network exception occurs
3598    */
3599   @Override
3600   public void setQuota(final QuotaSettings quota) throws IOException {
3601     executeCallable(new MasterCallable<Void>(getConnection()) {
3602       @Override
3603       public Void call(int callTimeout) throws ServiceException {
3604         this.master.setQuota(null, QuotaSettings.buildSetQuotaRequestProto(quota));
3605         return null;
3606       }
3607     });
3608   }
3609 
3610   /**
3611    * Return a Quota Scanner to list the quotas based on the filter.
3612    *
3613    * @param filter the quota settings filter
3614    * @return the quota scanner
3615    * @throws IOException if a remote or network exception occurs
3616    */
3617   @Override
3618   public QuotaRetriever getQuotaRetriever(final QuotaFilter filter) throws IOException {
3619     return QuotaRetriever.open(conf, filter);
3620   }
3621 
  /**
   * Base class for admin operations that run against the active master.
   * Holds the {@link HConnection} and acquires a keep-alive master stub in
   * {@link #prepare(boolean)}, releasing it again in {@link #close()}.
   * Has common methods.
   * @param <V> the RPC response type returned by {@link #call(int)}
   */
  abstract static class MasterCallable<V> implements RetryingCallable<V>, Closeable {
    protected HConnection connection;
    protected MasterKeepAliveConnection master;

    public MasterCallable(final HConnection connection) {
      this.connection = connection;
    }

    @Override
    public void prepare(boolean reload) throws IOException {
      this.master = this.connection.getKeepAliveMasterService();
    }

    @Override
    public void close() throws IOException {
      // The above prepare could fail but this would still be called though masterAdmin is null
      if (this.master != null) this.master.close();
    }

    @Override
    public void throwable(Throwable t, boolean retrying) {
      // No per-attempt bookkeeping needed; retry decisions are made by the caller.
    }

    @Override
    public String getExceptionMessageAdditionalDetail() {
      return "";
    }

    @Override
    public long sleep(long pause, int tries) {
      return ConnectionUtils.getPauseTime(pause, tries);
    }
  }
3660 
3661   private <V> V executeCallable(MasterCallable<V> callable) throws IOException {
3662     RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller();
3663     try {
3664       return caller.callWithRetries(callable, operationTimeout);
3665     } finally {
3666       callable.close();
3667     }
3668   }
3669 
3670   /**
3671    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
3672    * connected to the active master.
3673    *
3674    * <p>
3675    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
3676    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
3677    * </p>
3678    *
3679    * <div style="background-color: #cccccc; padding: 2px">
3680    * <blockquote><pre>
3681    * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
3682    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
3683    * MyCallRequest request = MyCallRequest.newBuilder()
3684    *     ...
3685    *     .build();
3686    * MyCallResponse response = service.myCall(null, request);
3687    * </pre></blockquote></div>
3688    *
3689    * @return A MasterCoprocessorRpcChannel instance
3690    */
3691   @Override
3692   public CoprocessorRpcChannel coprocessorService() {
3693     return new MasterCoprocessorRpcChannel(connection);
3694   }
3695 
3696   /**
3697    * Simple {@link Abortable}, throwing RuntimeException on abort.
3698    */
3699   private static class ThrowableAbortable implements Abortable {
3700 
3701     @Override
3702     public void abort(String why, Throwable e) {
3703       throw new RuntimeException(why, e);
3704     }
3705 
3706     @Override
3707     public boolean isAborted() {
3708       return true;
3709     }
3710   }
3711 
3712   /**
3713    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
3714    * connected to the passed region server.
3715    *
3716    * <p>
3717    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
3718    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
3719    * </p>
3720    *
3721    * <div style="background-color: #cccccc; padding: 2px">
3722    * <blockquote><pre>
3723    * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
3724    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
3725    * MyCallRequest request = MyCallRequest.newBuilder()
3726    *     ...
3727    *     .build();
3728    * MyCallResponse response = service.myCall(null, request);
3729    * </pre></blockquote></div>
3730    *
3731    * @param sn the server name to which the endpoint call is made
3732    * @return A RegionServerCoprocessorRpcChannel instance
3733    */
3734   @Override
3735   public CoprocessorRpcChannel coprocessorService(ServerName sn) {
3736     return new RegionServerCoprocessorRpcChannel(connection, sn);
3737   }
3738 
3739   @Override
3740   public void updateConfiguration(ServerName server) throws IOException {
3741     try {
3742       this.connection.getAdmin(server).updateConfiguration(null,
3743         UpdateConfigurationRequest.getDefaultInstance());
3744     } catch (ServiceException e) {
3745       throw ProtobufUtil.getRemoteException(e);
3746     }
3747   }
3748 
3749   @Override
3750   public void updateConfiguration() throws IOException {
3751     for (ServerName server : this.getClusterStatus().getServers()) {
3752       updateConfiguration(server);
3753     }
3754   }
3755 
3756   @Override
3757   public int getMasterInfoPort() throws IOException {
3758     // TODO: Fix!  Reaching into internal implementation!!!!
3759     ConnectionManager.HConnectionImplementation connection =
3760         (ConnectionManager.HConnectionImplementation)this.connection;
3761     ZooKeeperKeepAliveConnection zkw = connection.getKeepAliveZooKeeperWatcher();
3762     try {
3763       return MasterAddressTracker.getMasterInfoPort(zkw);
3764     } catch (KeeperException e) {
3765       throw new IOException("Failed to get master info port from MasterAddressTracker", e);
3766     }
3767   }
3768 }