
1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.client;
20  
21  import com.google.protobuf.ServiceException;
22  import org.apache.hadoop.conf.Configuration;
23  import org.apache.hadoop.hbase.Abortable;
24  import org.apache.hadoop.hbase.ClusterStatus;
25  import org.apache.hadoop.hbase.HBaseIOException;
26  import org.apache.hadoop.hbase.HColumnDescriptor;
27  import org.apache.hadoop.hbase.HRegionInfo;
28  import org.apache.hadoop.hbase.HTableDescriptor;
29  import org.apache.hadoop.hbase.MasterNotRunningException;
30  import org.apache.hadoop.hbase.NamespaceDescriptor;
31  import org.apache.hadoop.hbase.ServerName;
32  import org.apache.hadoop.hbase.TableExistsException;
33  import org.apache.hadoop.hbase.TableName;
34  import org.apache.hadoop.hbase.TableNotFoundException;
35  import org.apache.hadoop.hbase.UnknownRegionException;
36  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
37  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
38  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
39  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
40  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
41  import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
42  import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
43  import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
44  import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
45  import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
46  import org.apache.hadoop.hbase.util.Pair;
47  
48  import java.io.Closeable;
49  import java.io.IOException;
50  import java.util.List;
51  import java.util.Map;
52  import java.util.regex.Pattern;
53  
54  /**
55   * The administrative API for HBase. Obtain an instance from an {@link HConnection}.
56   *
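     * <p>A hedged usage sketch (it assumes a {@code Connection} named {@code connection} has been
     * obtained elsewhere and exposes {@code getAdmin()}; the table iteration is illustrative):</p>
     * <blockquote><pre>
     * Admin admin = connection.getAdmin();
     * try {
     *   for (HTableDescriptor htd : admin.listTables()) {
     *     System.out.println(htd.getTableName());
     *   }
     * } finally {
     *   admin.close();
     * }
     * </pre></blockquote>
     *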
57   * @since 0.99.0
58   */
59  public interface Admin extends Abortable, Closeable {
60    int getOperationTimeout();
61  
62    @Override
63    void abort(String why, Throwable e);
64  
65    @Override
66    boolean isAborted();
67  
68    /**
69     * @return Connection used by this object.
70     */
71    Connection getConnection();
72  
73    /**
74     * @return - true if the master server is running. Throws an exception otherwise.
75     * @throws ZooKeeperConnectionException
76     * @throws MasterNotRunningException
77     */
78    boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException;
79  
80    /**
81     * @param tableName Table to check.
82     * @return True if table exists already.
83     * @throws IOException
84     */
85    boolean tableExists(final TableName tableName) throws IOException;
86  
87    /**
88     * List all the userspace tables.  In other words, scan the hbase:meta table. If we wanted this to
89     * be really fast, we could implement a special catalog table that just contains table names and
90     * their descriptors. Right now, it only exists as part of the hbase:meta table's region info.
91     *
92     * @return - returns an array of HTableDescriptors
93     * @throws IOException if a remote or network exception occurs
94     */
95    HTableDescriptor[] listTables() throws IOException;
96  
97    /**
98     * List all the userspace tables matching the given pattern.
99     *
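     * <p>For example, a hedged sketch (the pattern literal is illustrative only):</p>
     * <blockquote><pre>
     * HTableDescriptor[] matched = admin.listTables(Pattern.compile("myapp_.*"));
     * </pre></blockquote>
     *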
100    * @param pattern The compiled regular expression to match against
101    * @return - returns an array of HTableDescriptors
102    * @throws IOException if a remote or network exception occurs
103    * @see #listTables()
104    */
105   HTableDescriptor[] listTables(Pattern pattern) throws IOException;
106 
107   /**
108    * List all the userspace tables matching the given regular expression.
109    *
110    * @param regex The regular expression to match against
111    * @return - returns an array of HTableDescriptors
112    * @throws IOException if a remote or network exception occurs
113    * @see #listTables(java.util.regex.Pattern)
114    */
115   HTableDescriptor[] listTables(String regex) throws IOException;
116 
117   /**
118    * List all of the names of userspace tables.
119    *
120    * @return TableName[] table names
121    * @throws IOException if a remote or network exception occurs
122    */
123   TableName[] listTableNames() throws IOException;
124 
125   /**
126    * Method for getting the tableDescriptor
127    *
128    * @param tableName as a byte []
129    * @return the tableDescriptor
130    * @throws org.apache.hadoop.hbase.TableNotFoundException
131    * @throws IOException if a remote or network exception occurs
132    */
133   HTableDescriptor getTableDescriptor(final TableName tableName)
134       throws TableNotFoundException, IOException;
135 
136   /**
137    * Creates a new table. Synchronous operation.
138    *
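     * <p>A minimal sketch (the table and column family names are illustrative assumptions):</p>
     * <blockquote><pre>
     * HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("myTable"));
     * desc.addFamily(new HColumnDescriptor("cf"));
     * admin.createTable(desc);
     * </pre></blockquote>
     *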
139    * @param desc table descriptor for table
140    * @throws IllegalArgumentException if the table name is reserved
141    * @throws MasterNotRunningException if master is not running
142    * @throws org.apache.hadoop.hbase.TableExistsException if the table already exists (with concurrent
143    * callers, the table may have been created between the test-for-existence and the attempt-at-creation).
144    * @throws IOException if a remote or network exception occurs
145    */
146   void createTable(HTableDescriptor desc) throws IOException;
147 
148   /**
149    * Creates a new table with the specified number of regions.  The start key specified will become
150    * the end key of the first region of the table, and the end key specified will become the start
151    * key of the last region of the table (the first region has a null start key and the last region
152    * has a null end key). BigInteger math will be used to divide the key range specified into enough
153    * segments to make the required number of total regions. Synchronous operation.
154    *
155    * @param desc table descriptor for table
156    * @param startKey beginning of key range
157    * @param endKey end of key range
158    * @param numRegions the total number of regions to create
159    * @throws IllegalArgumentException if the table name is reserved
160    * @throws MasterNotRunningException if master is not running
161    * @throws org.apache.hadoop.hbase.TableExistsException if the table already exists (with concurrent
162    * callers, the table may have been created between the test-for-existence and the attempt-at-creation).
163    * @throws IOException
164    */
165   void createTable(HTableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions)
166       throws IOException;
167 
168   /**
169    * Creates a new table with an initial set of empty regions defined by the specified split keys.
170    * The total number of regions created will be the number of split keys plus one. Synchronous
171    * operation. Note: avoid passing an empty split key.
172    *
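     * <p>A hedged sketch of pre-splitting a table into three regions ({@code Bytes} refers to
     * org.apache.hadoop.hbase.util.Bytes; the split keys are illustrative):</p>
     * <blockquote><pre>
     * byte[][] splitKeys = new byte[][] { Bytes.toBytes("g"), Bytes.toBytes("p") };
     * admin.createTable(desc, splitKeys);
     * </pre></blockquote>
     *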
173    * @param desc table descriptor for table
174    * @param splitKeys array of split keys for the initial regions of the table
175    * @throws IllegalArgumentException if the table name is reserved, if the split keys are repeated,
176    * or if a split key is an empty byte array.
177    * @throws MasterNotRunningException if master is not running
178    * @throws org.apache.hadoop.hbase.TableExistsException if the table already exists (with concurrent
179    * callers, the table may have been created between the test-for-existence and the attempt-at-creation).
180    * @throws IOException
181    */
182   void createTable(final HTableDescriptor desc, byte[][] splitKeys) throws IOException;
183 
184   /**
185    * Creates a new table but does not block and wait for it to come online. Asynchronous operation.
186    * To check if the table exists, use {@link #isTableAvailable} -- it is not safe to create an
187    * HTable instance for this table before it is available. Note: avoid passing an empty split key.
188    *
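     * <p>A hedged polling sketch ({@code desc} and {@code splitKeys} are assumed to be prepared as for
     * {@link #createTable(HTableDescriptor, byte[][])}; the sleep interval is an arbitrary choice):</p>
     * <blockquote><pre>
     * admin.createTableAsync(desc, splitKeys);
     * while (!admin.isTableAvailable(desc.getTableName())) {
     *   Thread.sleep(500);  // simplified; handle InterruptedException appropriately
     * }
     * </pre></blockquote>
     *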
189    * @param desc table descriptor for table
190    * @throws IllegalArgumentException if the table name is invalid, if the split keys are repeated,
191    * or if a split key is an empty byte array.
192    * @throws MasterNotRunningException if master is not running
193    * @throws org.apache.hadoop.hbase.TableExistsException if the table already exists (with concurrent
194    * callers, the table may have been created between the test-for-existence and the attempt-at-creation).
195    * @throws IOException
196    */
197   void createTableAsync(final HTableDescriptor desc, final byte[][] splitKeys) throws IOException;
198 
199   /**
200    * Deletes a table. Synchronous operation.
201    *
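     * <p>A table generally has to be disabled before it can be deleted; a hedged sketch:</p>
     * <blockquote><pre>
     * TableName tn = TableName.valueOf("myTable");
     * if (admin.isTableEnabled(tn)) {
     *   admin.disableTable(tn);
     * }
     * admin.deleteTable(tn);
     * </pre></blockquote>
     *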
202    * @param tableName name of table to delete
203    * @throws IOException if a remote or network exception occurs
204    */
205   void deleteTable(final TableName tableName) throws IOException;
206 
207   /**
208    * Deletes tables matching the passed in pattern and waits on completion. Warning: Use this method
209    * carefully, there is no prompting and the effect is immediate. Consider using {@link
210    * #listTables(java.lang.String)} and {@link #deleteTable(org.apache.hadoop.hbase.TableName)}
211    *
212    * @param regex The regular expression to match table names against
213    * @return Table descriptors for tables that couldn't be deleted
214    * @throws IOException
215    * @see #deleteTables(java.util.regex.Pattern)
216    * @see #deleteTable(org.apache.hadoop.hbase.TableName)
217    */
218   HTableDescriptor[] deleteTables(String regex) throws IOException;
219 
220   /**
221    * Delete tables matching the passed in pattern and wait on completion. Warning: Use this method
222    * carefully, there is no prompting and the effect is immediate. Consider using {@link
223    * #listTables(java.util.regex.Pattern) } and
224    * {@link #deleteTable(org.apache.hadoop.hbase.TableName)}
225    *
226    * @param pattern The pattern to match table names against
227    * @return Table descriptors for tables that couldn't be deleted
228    * @throws IOException
229    */
230   HTableDescriptor[] deleteTables(Pattern pattern) throws IOException;
231 
232   /**
233    * Truncate a table.
234    * Synchronous operation.
235    *
236    * @param tableName name of table to truncate
237    * @param preserveSplits True if the splits should be preserved
238    * @throws IOException if a remote or network exception occurs
239    */
240   void truncateTable(final TableName tableName, final boolean preserveSplits)
241       throws IOException;
242 
243   /**
244    * Enable a table.  May timeout.  Use {@link #enableTableAsync(org.apache.hadoop.hbase.TableName)}
245    * and {@link #isTableEnabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
246    * disabled state for it to be enabled.
247    *
248    * @param tableName name of the table
249    * @throws IOException if a remote or network exception occurs. There could be a couple of types of
250    * IOException: a TableNotFoundException means the table doesn't exist; a TableNotDisabledException
251    * means the table isn't in the disabled state.
252    * @see #isTableEnabled(org.apache.hadoop.hbase.TableName)
253    * @see #disableTable(org.apache.hadoop.hbase.TableName)
254    * @see #enableTableAsync(org.apache.hadoop.hbase.TableName)
255    */
256   void enableTable(final TableName tableName) throws IOException;
257 
258   /**
259    * Brings a table on-line (enables it).  The method returns immediately, though enabling the table may
260    * take some time to complete, especially if the table is large (all regions are opened as part of
261    * the enabling process).  Check {@link #isTableEnabled(org.apache.hadoop.hbase.TableName)} to learn
262    * when the table is fully online.  If the table is taking too long to come online, check the server logs.
263    *
264    * @param tableName
265    * @throws IOException
266    * @since 0.90.0
267    */
268   void enableTableAsync(final TableName tableName) throws IOException;
269 
270   /**
271    * Enable tables matching the passed in pattern and wait on completion. Warning: Use this method
272    * carefully, there is no prompting and the effect is immediate. Consider using {@link
273    * #listTables(java.lang.String)} and {@link #enableTable(org.apache.hadoop.hbase.TableName)}
274    *
275    * @param regex The regular expression to match table names against
276    * @throws IOException
277    * @see #enableTables(java.util.regex.Pattern)
278    * @see #enableTable(org.apache.hadoop.hbase.TableName)
279    */
280   HTableDescriptor[] enableTables(String regex) throws IOException;
281 
282   /**
283    * Enable tables matching the passed in pattern and wait on completion. Warning: Use this method
284    * carefully, there is no prompting and the effect is immediate. Consider using {@link
285    * #listTables(java.util.regex.Pattern) } and
286    * {@link #enableTable(org.apache.hadoop.hbase.TableName)}
287    *
288    * @param pattern The pattern to match table names against
289    * @throws IOException
290    */
291   HTableDescriptor[] enableTables(Pattern pattern) throws IOException;
292 
293   /**
294    * Starts the disable of a table.  If it is being served, the master will tell the servers to stop
295    * serving it.  This method returns immediately. The disable of a table can take some time if the
296    * table is large (all regions are closed as part of table disable operation). Call {@link
297    * #isTableDisabled(org.apache.hadoop.hbase.TableName)} to check for when the disable completes. If
298    * the table is taking too long to go offline, check the server logs.
299    *
300    * @param tableName name of table
301    * @throws IOException if a remote or network exception occurs
302    * @see #isTableDisabled(org.apache.hadoop.hbase.TableName)
303    * @see #isTableEnabled(org.apache.hadoop.hbase.TableName)
304    * @since 0.90.0
305    */
306   void disableTableAsync(final TableName tableName) throws IOException;
307 
308   /**
309    * Disable table and wait on completion.  May timeout eventually.  Use {@link
310    * #disableTableAsync(org.apache.hadoop.hbase.TableName)} and
311    * {@link #isTableDisabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
312    * enabled state for it to be disabled.
313    *
314    * @param tableName
315    * @throws IOException There could be a couple of types of IOException: a TableNotFoundException means the
316    * table doesn't exist; a TableNotEnabledException means the table isn't in the enabled state.
317    */
318   void disableTable(final TableName tableName) throws IOException;
319 
320   /**
321    * Disable tables matching the passed in pattern and wait on completion. Warning: Use this method
322    * carefully, there is no prompting and the effect is immediate. Consider using {@link
323    * #listTables(java.lang.String)} and {@link #disableTable(org.apache.hadoop.hbase.TableName)}
324    *
325    * @param regex The regular expression to match table names against
326    * @return Table descriptors for tables that couldn't be disabled
327    * @throws IOException
328    * @see #disableTables(java.util.regex.Pattern)
329    * @see #disableTable(org.apache.hadoop.hbase.TableName)
330    */
331   HTableDescriptor[] disableTables(String regex) throws IOException;
332 
333   /**
334    * Disable tables matching the passed in pattern and wait on completion. Warning: Use this method
335    * carefully, there is no prompting and the effect is immediate. Consider using {@link
336    * #listTables(java.util.regex.Pattern) } and
337    * {@link #disableTable(org.apache.hadoop.hbase.TableName)}
338    *
339    * @param pattern The pattern to match table names against
340    * @return Table descriptors for tables that couldn't be disabled
341    * @throws IOException
342    */
343   HTableDescriptor[] disableTables(Pattern pattern) throws IOException;
344 
345   /**
346    * @param tableName name of table to check
347    * @return true if table is on-line
348    * @throws IOException if a remote or network exception occurs
349    */
350   boolean isTableEnabled(TableName tableName) throws IOException;
351 
352   /**
353    * @param tableName name of table to check
354    * @return true if table is off-line
355    * @throws IOException if a remote or network exception occurs
356    */
357   boolean isTableDisabled(TableName tableName) throws IOException;
358 
359   /**
360    * @param tableName name of table to check
361    * @return true if all regions of the table are available
362    * @throws IOException if a remote or network exception occurs
363    */
364   boolean isTableAvailable(TableName tableName) throws IOException;
365 
366   /**
367    * Use this API to check if the table has been created with the split keys that were specified
368    * while creating the given table. Note: if this API is used after any of the table's
369    * regions has been split, it may return false.
370    *
371    * @param tableName name of table to check
372    * @param splitKeys keys to check if the table has been created with all split keys
373    * @throws IOException if a remote or network exception occurs
374    */
375   boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException;
376 
377   /**
378    * Get the status of the alter command - indicates how many regions have received the updated schema.
379    * Asynchronous operation.
380    *
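     * <p>A hedged sketch of reading the status after a schema change (it assumes {@code tableName}
     * refers to the table that was just altered):</p>
     * <blockquote><pre>
     * Pair&lt;Integer, Integer&gt; status = admin.getAlterStatus(tableName);
     * int pending = status.getFirst();  // regions not yet updated
     * int total = status.getSecond();   // total regions in the table
     * </pre></blockquote>
     *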
381    * @param tableName TableName instance
382    * @return Pair indicating the number of regions updated: Pair.getFirst() is the number of regions that are
383    * yet to be updated, Pair.getSecond() is the total number of regions of the table
384    * @throws IOException if a remote or network exception occurs
385    */
386   Pair<Integer, Integer> getAlterStatus(final TableName tableName) throws IOException;
387 
388   /**
389    * Get the status of the alter command - indicates how many regions have received the updated schema.
390    * Asynchronous operation.
391    *
392    * @param tableName name of the table to get the status of
393    * @return Pair indicating the number of regions updated: Pair.getFirst() is the number of regions that are
394    * yet to be updated, Pair.getSecond() is the total number of regions of the table
395    * @throws IOException if a remote or network exception occurs
396    */
397   Pair<Integer, Integer> getAlterStatus(final byte[] tableName) throws IOException;
398 
399   /**
400    * Add a column to an existing table. Asynchronous operation.
401    *
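     * <p>For example, a hedged sketch (the column family name is illustrative):</p>
     * <blockquote><pre>
     * admin.addColumn(tableName, new HColumnDescriptor("new_family"));
     * </pre></blockquote>
     *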
402    * @param tableName name of the table to add column to
403    * @param column column descriptor of column to be added
404    * @throws IOException if a remote or network exception occurs
405    */
406   void addColumn(final TableName tableName, final HColumnDescriptor column) throws IOException;
407 
408   /**
409    * Delete a column from a table. Asynchronous operation.
410    *
411    * @param tableName name of table
412    * @param columnName name of column to be deleted
413    * @throws IOException if a remote or network exception occurs
414    */
415   void deleteColumn(final TableName tableName, final byte[] columnName) throws IOException;
416 
417   /**
418    * Modify an existing column family on a table. Asynchronous operation.
419    *
420    * @param tableName name of table
421    * @param descriptor new column descriptor to use
422    * @throws IOException if a remote or network exception occurs
423    */
424   void modifyColumn(final TableName tableName, final HColumnDescriptor descriptor)
425       throws IOException;
426 
427   /**
428    * Close a region. For expert-admins.  Runs close on the regionserver.  The master will not be
429    * informed of the close.
430    *
431    * @param regionname region name to close
432    * @param serverName If supplied, we'll use this location rather than the one currently in
433    * <code>hbase:meta</code>
434    * @throws IOException if a remote or network exception occurs
435    */
436   void closeRegion(final String regionname, final String serverName) throws IOException;
437 
438   /**
439    * Close a region.  For expert-admins.  Runs close on the regionserver.  The master will not be
440    * informed of the close.
441    *
442    * @param regionname region name to close
443    * @param serverName The servername of the regionserver.  If passed null we will use servername
444    * found in the hbase:meta table. A server name is made of host, port and startcode.  Here is an
445    * example: <code> host187.example.com,60020,1289493121758</code>
446    * @throws IOException if a remote or network exception occurs
447    */
448   void closeRegion(final byte[] regionname, final String serverName) throws IOException;
449 
450   /**
451    * For expert-admins. Runs close on the regionserver. Closes a region based on the encoded region
452    * name. The region server name is mandatory. Given the servername, the region is closed if it is
453    * among the online regions of the specified regionserver. The master
454    * will not be informed of the close. Note that the regionname is the encoded regionname.
455    *
456    * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name
457    * suffix: e.g. if regionname is
458    * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
459    * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
460    * @param serverName The servername of the regionserver. A server name is made of host, port and
461    * startcode. This is mandatory. Here is an example:
462    * <code> host187.example.com,60020,1289493121758</code>
463    * @return true if the region was closed, false if not.
464    * @throws IOException if a remote or network exception occurs
465    */
466   boolean closeRegionWithEncodedRegionName(final String encodedRegionName, final String serverName)
467       throws IOException;
468 
469   /**
470    * Close a region.  For expert-admins.  Runs close on the regionserver.  The master will not be
471    * informed of the close.
472    *
473    * @param sn
474    * @param hri
475    * @throws IOException
476    */
477   void closeRegion(final ServerName sn, final HRegionInfo hri) throws IOException;
478 
479   /**
480    * Get all the online regions on a region server.
481    */
482   List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException;
483 
484   /**
485    * Flush a table. Synchronous operation.
486    *
487    * @param tableName table to flush
488    * @throws IOException if a remote or network exception occurs
489    * @throws InterruptedException
490    */
491   void flush(final TableName tableName) throws IOException, InterruptedException;
492 
493   /**
494    * Flush an individual region. Synchronous operation.
495    *
496    * @param regionName region to flush
497    * @throws IOException if a remote or network exception occurs
498    * @throws InterruptedException
499    */
500   void flushRegion(final byte[] regionName) throws IOException, InterruptedException;
501 
502   /**
503    * Compact a table. Asynchronous operation.
504    *
505    * @param tableName table to compact
506    * @throws IOException if a remote or network exception occurs
507    * @throws InterruptedException
508    */
509   void compact(final TableName tableName) throws IOException, InterruptedException;
510 
511   /**
512    * Compact an individual region. Asynchronous operation.
513    *
514    * @param regionName region to compact
515    * @throws IOException if a remote or network exception occurs
516    * @throws InterruptedException
517    */
518   void compactRegion(final byte[] regionName) throws IOException, InterruptedException;
519 
520   /**
521    * Compact a column family within a table. Asynchronous operation.
522    *
523    * @param tableName table to compact
524    * @param columnFamily column family within a table
525    * @throws IOException if a remote or network exception occurs
526    * @throws InterruptedException
527    */
528   void compact(final TableName tableName, final byte[] columnFamily)
529     throws IOException, InterruptedException;
530 
531   /**
532    * Compact a column family within a region. Asynchronous operation.
533    *
534    * @param regionName region to compact
535    * @param columnFamily column family within a region
536    * @throws IOException if a remote or network exception occurs
537    * @throws InterruptedException
538    */
539   void compactRegion(final byte[] regionName, final byte[] columnFamily)
540     throws IOException, InterruptedException;
541 
542   /**
543    * Major compact a table. Asynchronous operation.
544    *
545    * @param tableName table to major compact
546    * @throws IOException if a remote or network exception occurs
547    * @throws InterruptedException
548    */
549   void majorCompact(TableName tableName) throws IOException, InterruptedException;
550 
551   /**
552    * Major compact an individual region. Asynchronous operation.
553    *
554    * @param regionName region to major compact
555    * @throws IOException if a remote or network exception occurs
556    * @throws InterruptedException
557    */
558   void majorCompactRegion(final byte[] regionName) throws IOException, InterruptedException;
559 
560   /**
561    * Major compact a column family within a table. Asynchronous operation.
562    *
563    * @param tableName table to major compact
564    * @param columnFamily column family within a table
565    * @throws IOException if a remote or network exception occurs
566    * @throws InterruptedException
567    */
568   void majorCompact(TableName tableName, final byte[] columnFamily)
569     throws IOException, InterruptedException;
570 
571   /**
572    * Major compact a column family within a region. Asynchronous operation.
573    *
574    * @param regionName region to major compact
575    * @param columnFamily column family within a region
576    * @throws IOException if a remote or network exception occurs
577    * @throws InterruptedException
578    */
579   void majorCompactRegion(final byte[] regionName, final byte[] columnFamily)
580     throws IOException, InterruptedException;
581 
582   /**
583    * Move the region <code>encodedRegionName</code> to <code>destServerName</code>.
584    *
585    * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name
586    * suffix: e.g. if regionname is
587    * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
588    * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
589    * @param destServerName The servername of the destination regionserver.  If passed the empty byte
590    * array we'll assign to a random server.  A server name is made of host, port and startcode.
591    * Here is an example: <code> host187.example.com,60020,1289493121758</code>
592    * @throws UnknownRegionException Thrown if we can't find a region named
593    * <code>encodedRegionName</code>
594    * @throws ZooKeeperConnectionException
595    * @throws MasterNotRunningException
596    */
597   void move(final byte[] encodedRegionName, final byte[] destServerName)
598       throws HBaseIOException, MasterNotRunningException, ZooKeeperConnectionException;
599 
600   /**
601    * @param regionName Region name to assign.
602    * @throws MasterNotRunningException
603    * @throws ZooKeeperConnectionException
604    * @throws IOException
605    */
606   void assign(final byte[] regionName)
607       throws MasterNotRunningException, ZooKeeperConnectionException, IOException;
608 
609   /**
610    * Unassign a region from current hosting regionserver.  Region will then be assigned to a
611    * regionserver chosen at random.  Region could be reassigned back to the same server.  Use {@link
612    * #move(byte[], byte[])} if you want to control the region movement.
613    *
614    * @param regionName Region to unassign. Will clear any existing RegionPlan if one found.
615    * @param force If true, force unassign (Will remove region from regions-in-transition too if
616    * present. If this results in double assignment, use hbck -fix to resolve. To be used by experts).
617    * @throws MasterNotRunningException
618    * @throws ZooKeeperConnectionException
619    * @throws IOException
620    */
621   void unassign(final byte[] regionName, final boolean force)
622       throws MasterNotRunningException, ZooKeeperConnectionException, IOException;
623 
624   /**
625    * Offline specified region from master's in-memory state. It will not attempt to reassign the
626    * region as in unassign. This API can be used when a region is not served by any region server but is
627    * still online as per the master's in-memory state. If this API is incorrectly used on an active region
628    * then the master will lose track of that region. This is a special method that should be used by
629    * experts or hbck.
630    *
631    * @param regionName Region to offline.
632    * @throws IOException
633    */
634   void offline(final byte[] regionName) throws IOException;
635 
636   /**
637    * Turn the load balancer on or off.
638    *
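     * <p>A hedged sketch of temporarily disabling the balancer around manual region moves:</p>
     * <blockquote><pre>
     * boolean previous = admin.setBalancerRunning(false, true);
     * // ... perform manual moves ...
     * admin.setBalancerRunning(previous, true);
     * </pre></blockquote>
     *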
639    * @param on If true, enable balancer. If false, disable balancer.
640    * @param synchronous If true, waits until any outstanding balance() call has returned.
641    * @return Previous balancer value
642    */
643   boolean setBalancerRunning(final boolean on, final boolean synchronous)
644       throws MasterNotRunningException, ZooKeeperConnectionException;
645 
646   /**
647    * Invoke the balancer.  Will run the balancer and, if there are regions to move, will go ahead and do the
648    * reassignments.  It may not be able to run for various reasons.  Check the logs.
649    *
650    * @return True if balancer ran, false otherwise.
651    */
652   boolean balancer()
653       throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException;
654 
655   /**
656    * Enable/Disable the catalog janitor
657    *
658    * @param enable if true enables the catalog janitor
659    * @return the previous state
660    * @throws ServiceException
661    * @throws MasterNotRunningException
662    */
663   boolean enableCatalogJanitor(boolean enable) throws ServiceException, MasterNotRunningException;
664 
665   /**
666    * Ask for a scan of the catalog table
667    *
668    * @return the number of entries cleaned
669    * @throws ServiceException
670    * @throws MasterNotRunningException
671    */
672   int runCatalogScan() throws ServiceException, MasterNotRunningException;
673 
674   /**
675    * Query on the catalog janitor state (Enabled/Disabled?)
676    *
677    * @throws ServiceException
678    * @throws org.apache.hadoop.hbase.MasterNotRunningException
679    */
680   boolean isCatalogJanitorEnabled() throws ServiceException, MasterNotRunningException;
681 
682   /**
683    * Merge two regions. Asynchronous operation.
684    *
685    * @param encodedNameOfRegionA encoded name of region a
686    * @param encodedNameOfRegionB encoded name of region b
687    * @param forcible true to do a compulsory merge, otherwise we will only merge two adjacent
688    * regions
689    * @throws IOException
690    */
691   void mergeRegions(final byte[] encodedNameOfRegionA, final byte[] encodedNameOfRegionB,
692       final boolean forcible) throws IOException;
693 
694   /**
695    * Split a table. Asynchronous operation.
696    *
697    * @param tableName table to split
698    * @throws IOException if a remote or network exception occurs
699    * @throws InterruptedException
700    */
701   void split(final TableName tableName) throws IOException, InterruptedException;
702 
703   /**
704    * Split an individual region. Asynchronous operation.
705    *
706    * @param regionName region to split
707    * @throws IOException if a remote or network exception occurs
708    * @throws InterruptedException
709    */
710   void splitRegion(final byte[] regionName) throws IOException, InterruptedException;
711 
712   /**
713    * Split a table. Asynchronous operation.
714    *
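     * <p>A hedged sketch ({@code Bytes} refers to org.apache.hadoop.hbase.util.Bytes; the table name and
     * split point are illustrative):</p>
     * <blockquote><pre>
     * admin.split(TableName.valueOf("myTable"), Bytes.toBytes("row5000"));
     * </pre></blockquote>
     *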
715    * @param tableName table to split
716    * @param splitPoint the explicit position to split on
717    * @throws IOException if a remote or network exception occurs
718    * @throws InterruptedException interrupt exception occurred
719    */
720   void split(final TableName tableName, final byte[] splitPoint)
721     throws IOException, InterruptedException;
722 
723   /**
724    * Split an individual region. Asynchronous operation.
725    *
726    * @param regionName region to split
727    * @param splitPoint the explicit position to split on
728    * @throws IOException if a remote or network exception occurs
729    * @throws InterruptedException interrupt exception occurred
730    */
731   void splitRegion(final byte[] regionName, final byte[] splitPoint)
732     throws IOException, InterruptedException;
733 
734   /**
735    * Modify an existing table, more IRB friendly version. Asynchronous operation.  This means that
736    * it may be a while before your schema change has been applied across all regions of the table.
737    *
738    * @param tableName name of table.
739    * @param htd modified description of the table
740    * @throws IOException if a remote or network exception occurs
741    */
742   void modifyTable(final TableName tableName, final HTableDescriptor htd) throws IOException;
743 
744   /**
745    * Shuts down the HBase cluster
746    *
747    * @throws IOException if a remote or network exception occurs
748    */
749   void shutdown() throws IOException;
750 
751   /**
752    * Shuts down the current HBase master only. Does not shutdown the cluster.
753    *
754    * @throws IOException if a remote or network exception occurs
755    * @see #shutdown()
756    */
757   void stopMaster() throws IOException;
758 
759   /**
760    * Stop the designated regionserver
761    *
762    * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
763    * <code>example.org:1234</code>
764    * @throws IOException if a remote or network exception occurs
765    */
766   void stopRegionServer(final String hostnamePort) throws IOException;
767 
768   /**
769    * @return cluster status
770    * @throws IOException if a remote or network exception occurs
771    */
772   ClusterStatus getClusterStatus() throws IOException;
773 
774   /**
775    * @return Configuration used by the instance.
776    */
777   Configuration getConfiguration();
778 
779   /**
780    * Create a new namespace
781    *
782    * @param descriptor descriptor which describes the new namespace
783    * @throws IOException
784    */
785   void createNamespace(final NamespaceDescriptor descriptor) throws IOException;
786 
787   /**
788    * Modify an existing namespace
789    *
790    * @param descriptor descriptor which describes the new namespace
791    * @throws IOException
792    */
793   void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException;
794 
795   /**
796    * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
797    *
798    * @param name namespace name
799    * @throws IOException
800    */
801   void deleteNamespace(final String name) throws IOException;
802 
803   /**
804    * Get a namespace descriptor by name
805    *
806    * @param name name of namespace descriptor
807    * @return A descriptor
808    * @throws IOException
809    */
810   NamespaceDescriptor getNamespaceDescriptor(final String name) throws IOException;
811 
812   /**
813    * List available namespace descriptors
814    *
815    * @return List of descriptors
816    * @throws IOException
817    */
818   NamespaceDescriptor[] listNamespaceDescriptors() throws IOException;
819 
820   /**
821    * Get list of table descriptors by namespace
822    *
823    * @param name namespace name
824    * @return A descriptor
825    * @throws IOException
826    */
827   HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException;
828 
829   /**
830    * Get list of table names by namespace
831    *
832    * @param name namespace name
833    * @return The list of table names in the namespace
834    * @throws IOException
835    */
836   TableName[] listTableNamesByNamespace(final String name) throws IOException;
837 
838   /**
839    * Get the regions of a given table.
840    *
841    * @param tableName the name of the table
842    * @return List of {@link HRegionInfo}.
843    * @throws IOException
844    */
845   List<HRegionInfo> getTableRegions(final TableName tableName) throws IOException;
846 
847   @Override
848   void close() throws IOException;
849 
850   /**
851    * Get tableDescriptors
852    *
853    * @param tableNames List of table names
854    * @return HTD[] the tableDescriptor
855    * @throws IOException if a remote or network exception occurs
856    */
857   HTableDescriptor[] getTableDescriptorsByTableName(List<TableName> tableNames) throws IOException;
858 
859   /**
860    * Get tableDescriptors
861    *
862    * @param names List of table names
863    * @return HTD[] the tableDescriptor
864    * @throws IOException if a remote or network exception occurs
865    */
866   HTableDescriptor[] getTableDescriptors(List<String> names) throws IOException;
867 
868   /**
869    * Roll the log writer. That is, start writing log messages to a new file.
870    *
871    * @param serverName The servername of the regionserver. A server name is made of host, port and
872    * startcode. This is mandatory. Here is an example:
873    * <code> host187.example.com,60020,1289493121758</code>
874    * @return If there are lots of logs, flush the returned regions so that next time through we can clean
875    * the logs. Returns null if there is nothing to flush.  Names are actual region names as returned by {@link
876    * HRegionInfo#getEncodedName()}
877    * @throws IOException if a remote or network exception occurs
878    * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException
879    */
880   byte[][] rollHLogWriter(String serverName) throws IOException, FailedLogCloseException;
881 
882   /**
883    * Helper delegate to getClusterStatus().getMasterCoprocessors().
884    * @return an array of master coprocessors
885    * @see org.apache.hadoop.hbase.ClusterStatus#getMasterCoprocessors()
886    */
887   String[] getMasterCoprocessors();
888 
889   /**
890    * Get the current compaction state of a table. It could be in a major compaction, a minor
891    * compaction, both, or none.
892    *
893    * @param tableName table to examine
894    * @return the current compaction state
895    * @throws IOException if a remote or network exception occurs
896    * @throws InterruptedException
897    */
898   AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState(final TableName tableName)
899     throws IOException, InterruptedException;
900 
901   /**
902    * Get the current compaction state of region. It could be in a major compaction, a minor
903    * compaction, both, or none.
904    *
905    * @param regionName region to examine
906    * @return the current compaction state
907    * @throws IOException if a remote or network exception occurs
908    * @throws InterruptedException
909    */
910   AdminProtos.GetRegionInfoResponse.CompactionState getCompactionStateForRegion(
911     final byte[] regionName) throws IOException, InterruptedException;
912 
913   /**
914    * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
915    * taken. If the table is disabled, an offline snapshot is taken. Snapshots are considered unique
916    * based on <b>the name of the snapshot</b>. Attempts to take a snapshot with the same name (even
917    * a different type or with different parameters) will fail with a {@link
918    * org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate naming.
919    * Snapshot names follow the same naming constraints as tables in HBase. See {@link
920    * org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
921    *
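     * <p>For example, a hedged sketch (the snapshot and table names are illustrative):</p>
     * <blockquote><pre>
     * admin.snapshot("myTable_snapshot_1", TableName.valueOf("myTable"));
     * </pre></blockquote>
     *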
922    * @param snapshotName name of the snapshot to be created
923    * @param tableName name of the table for which snapshot is created
924    * @throws IOException if a remote or network exception occurs
925    * @throws org.apache.hadoop.hbase.snapshot.SnapshotCreationException if snapshot creation failed
926    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
927    */
928   void snapshot(final String snapshotName, final TableName tableName)
929       throws IOException, SnapshotCreationException, IllegalArgumentException;
930 
931   /**
932    * Create a timestamp consistent snapshot for the
933    * given table. Snapshots are considered unique based
934    * on <b>the name of the snapshot</b>. Attempts to take a snapshot with the same name (even a
935    * different type or with different parameters) will fail with a {@link SnapshotCreationException}
936    * indicating the duplicate naming. Snapshot names follow the same naming constraints as tables in
937    * HBase.
938    *
939    * @param snapshotName name of the snapshot to be created
940    * @param tableName name of the table for which snapshot is created
941    * @throws IOException if a remote or network exception occurs
942    * @throws SnapshotCreationException if snapshot creation failed
943    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
944    */
945   void snapshot(final byte[] snapshotName, final TableName tableName)
946       throws IOException, SnapshotCreationException, IllegalArgumentException;
947 
948   /**
949    * Create typed snapshot of the table. Snapshots are considered unique based on <b>the name of the
950    * snapshot</b>. Attempts to take a snapshot with the same name (even a different type or with
951    * different parameters) will fail with a {@link SnapshotCreationException} indicating the
952    * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See
953    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
954    *
955    * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
956    * snapshots stored on the cluster
957    * @param tableName name of the table to snapshot
958    * @param type type of snapshot to take
959    * @throws IOException if we fail to reach the master
960    * @throws SnapshotCreationException if snapshot creation failed
961    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
962    */
963   void snapshot(final String snapshotName,
964       final TableName tableName,
965       HBaseProtos.SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
966       IllegalArgumentException;
967 
968   /**
969    * Take a snapshot and wait for the server to complete that snapshot (blocking). Only a single
970    * snapshot should be taken at a time for an instance of HBase, or results may be undefined (you
971    * can tell multiple HBase clusters to snapshot at the same time, but only one at a time for a
972    * single cluster). Snapshots are considered unique based on <b>the name of the snapshot</b>.
973    * Attempts to take a snapshot with the same name (even a different type or with different
974    * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming.
975    * Snapshot names follow the same naming constraints as tables in HBase. See {@link
976    * org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. You should probably
977    * use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} or
978    * {@link #snapshot(byte[], org.apache.hadoop.hbase.TableName)} unless you are sure about the type
979    * of snapshot that you want to take.
980    *
981    * @param snapshot snapshot to take
982    * @throws IOException if we lose contact with the master.
983    * @throws SnapshotCreationException if snapshot failed to be taken
984    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
985    */
986   void snapshot(HBaseProtos.SnapshotDescription snapshot)
987       throws IOException, SnapshotCreationException, IllegalArgumentException;
988 
989   /**
990    * Take a snapshot without waiting for the server to complete that snapshot (asynchronous). Only a
991    * single snapshot should be taken at a time, or results may be undefined.
992    *
993    * @param snapshot snapshot to take
994    * @return response from the server indicating the max time to wait for the snapshot
995    * @throws IOException if the snapshot did not succeed or we lose contact with the master.
996    * @throws SnapshotCreationException if snapshot creation failed
997    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
998    */
999   MasterProtos.SnapshotResponse takeSnapshotAsync(HBaseProtos.SnapshotDescription snapshot)
1000       throws IOException, SnapshotCreationException;
1001 
1002   /**
1003    * Check the current state of the passed snapshot. There are three possible states: <ol>
1004    * <li>running - returns <tt>false</tt></li> <li>finished - returns <tt>true</tt></li>
1005    * <li>finished with error - throws the exception that caused the snapshot to fail</li> </ol> The
1006    * cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
1007   * run/started since the snapshot you are checking, you will receive an {@link
1008    * org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}.
1009    *
1010    * @param snapshot description of the snapshot to check
1011    * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
1012    * running
1013    * @throws IOException if we have a network issue
1014    * @throws org.apache.hadoop.hbase.snapshot.HBaseSnapshotException if the snapshot failed
1015    * @throws org.apache.hadoop.hbase.snapshot.UnknownSnapshotException if the requested snapshot is
1016    * unknown
1017    */
1018   boolean isSnapshotFinished(final HBaseProtos.SnapshotDescription snapshot)
1019       throws IOException, HBaseSnapshotException, UnknownSnapshotException;
1020 
1021   /**
1022    * Restore the specified snapshot on the original table. (The table must be disabled) If the
1023    * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to true, a
1024    * snapshot of the current table is taken before executing the restore operation. In case of
1025    * restore failure, the failsafe snapshot will be restored. If the restore completes without
1026    * problem the failsafe snapshot is deleted.
1027    *
1028    * @param snapshotName name of the snapshot to restore
1029    * @throws IOException if a remote or network exception occurs
1030    * @throws org.apache.hadoop.hbase.snapshot.RestoreSnapshotException if snapshot failed to be
1031    * restored
1032    * @throws IllegalArgumentException if the restore request is formatted incorrectly
1033    */
1034   void restoreSnapshot(final byte[] snapshotName) throws IOException, RestoreSnapshotException;
1035 
1036   /**
1037    * Restore the specified snapshot on the original table. (The table must be disabled) If the
1038    * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to true, a
1039    * snapshot of the current table is taken before executing the restore operation. In case of
1040    * restore failure, the failsafe snapshot will be restored. If the restore completes without
1041    * problem the failsafe snapshot is deleted.
1042    *
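     * <p>A hedged sketch (it assumes {@code tableName} is the snapshot's source table, which must be
     * disabled before the restore):</p>
     * <blockquote><pre>
     * admin.disableTable(tableName);
     * admin.restoreSnapshot("myTable_snapshot_1");
     * admin.enableTable(tableName);
     * </pre></blockquote>
     *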
1043    * @param snapshotName name of the snapshot to restore
1044    * @throws IOException if a remote or network exception occurs
1045    * @throws RestoreSnapshotException if snapshot failed to be restored
1046    * @throws IllegalArgumentException if the restore request is formatted incorrectly
1047    */
1048   void restoreSnapshot(final String snapshotName) throws IOException, RestoreSnapshotException;
1049 
1050   /**
1051    * Restore the specified snapshot on the original table. (The table must be disabled) If
1052    * 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
1053    * executing the restore operation. In case of restore failure, the failsafe snapshot will be
1054    * restored. If the restore completes without problem the failsafe snapshot is deleted. The
1055    * failsafe snapshot name is configurable by using the property
1056    * "hbase.snapshot.restore.failsafe.name".
1057    *
1058    * @param snapshotName name of the snapshot to restore
1059    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
1060    * @throws IOException if a remote or network exception occurs
1061    * @throws RestoreSnapshotException if snapshot failed to be restored
1062    * @throws IllegalArgumentException if the restore request is formatted incorrectly
1063    */
1064   void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot)
1065       throws IOException, RestoreSnapshotException;
1066 
1067   /**
1068    * Restore the specified snapshot on the original table. (The table must be disabled) If
1069    * 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
1070    * executing the restore operation. In case of restore failure, the failsafe snapshot will be
1071    * restored. If the restore completes without problem the failsafe snapshot is deleted. The
1072    * failsafe snapshot name is configurable by using the property
1073    * "hbase.snapshot.restore.failsafe.name".
1074    *
1075    * @param snapshotName name of the snapshot to restore
1076    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
1077    * @throws IOException if a remote or network exception occurs
1078    * @throws RestoreSnapshotException if snapshot failed to be restored
1079    * @throws IllegalArgumentException if the restore request is formatted incorrectly
1080    */
1081   void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
1082       throws IOException, RestoreSnapshotException;
1083 
1084   /**
1085    * Create a new table by cloning the snapshot content.
1086    *
1087    * @param snapshotName name of the snapshot to be cloned
1088    * @param tableName name of the table where the snapshot will be restored
1089    * @throws IOException if a remote or network exception occurs
1090    * @throws TableExistsException if table to be created already exists
1091    * @throws RestoreSnapshotException if snapshot failed to be cloned
1092   * @throws IllegalArgumentException if the specified table does not have a valid name
1093    */
1094   void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
1095       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException;
1096 
1097   /**
1098    * Create a new table by cloning the snapshot content.
1099    *
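     * <p>A hedged sketch (the target table name is illustrative and must not already exist):</p>
     * <blockquote><pre>
     * admin.cloneSnapshot("myTable_snapshot_1", TableName.valueOf("myTable_restored"));
     * </pre></blockquote>
     *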
1100    * @param snapshotName name of the snapshot to be cloned
1101    * @param tableName name of the table where the snapshot will be restored
1102    * @throws IOException if a remote or network exception occurs
1103    * @throws TableExistsException if table to be created already exists
1104    * @throws RestoreSnapshotException if snapshot failed to be cloned
1105   * @throws IllegalArgumentException if the specified table does not have a valid name
1106    */
1107   void cloneSnapshot(final String snapshotName, final TableName tableName)
1108       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException;
1109 
1110   /**
1111    * Execute a distributed procedure on a cluster.
1112    *
1113    * @param signature A distributed procedure is uniquely identified by its signature (default the
1114    * root ZK node name of the procedure).
1115    * @param instance The instance name of the procedure. For some procedures, this parameter is
1116    * optional.
1117   * @param props Property/Value pairs of properties passed to the procedure
1118    * @throws IOException
1119    */
1120   void execProcedure(String signature, String instance, Map<String, String> props)
1121       throws IOException;
1122 
1123   /**
1124    * Execute a distributed procedure on a cluster.
1125    *
1126    * @param signature A distributed procedure is uniquely identified by its signature (default the
1127    * root ZK node name of the procedure).
1128    * @param instance The instance name of the procedure. For some procedures, this parameter is
1129    * optional.
1130   * @param props Property/Value pairs of properties passed to the procedure
1131    * @return data returned after procedure execution. null if no return data.
1132    * @throws IOException
1133    */
1134   byte[] execProcedureWithRet(String signature, String instance, Map<String, String> props)
1135       throws IOException;
1136 
1137   /**
1138    * Check the current state of the specified procedure. There are three possible states: <ol>
1139    * <li>running - returns <tt>false</tt></li> <li>finished - returns <tt>true</tt></li>
1140    * <li>finished with error - throws the exception that caused the procedure to fail</li> </ol>
1141    *
1142    * @param signature The signature that uniquely identifies a procedure
1143    * @param instance The instance name of the procedure
1144   * @param props Property/Value pairs of properties passed to the procedure
1145    * @return true if the specified procedure is finished successfully, false if it is still running
1146    * @throws IOException if the specified procedure finished with error
1147    */
1148   boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
1149       throws IOException;
1150 
1151   /**
1152    * List completed snapshots.
1153    *
1154    * @return a list of snapshot descriptors for completed snapshots
1155    * @throws IOException if a network error occurs
1156    */
1157   List<HBaseProtos.SnapshotDescription> listSnapshots() throws IOException;
1158 
1159   /**
1160    * List all the completed snapshots matching the given regular expression.
1161    *
1162    * @param regex The regular expression to match against
1163    * @return - returns a List of SnapshotDescription
1164    * @throws IOException if a remote or network exception occurs
1165    */
1166   List<HBaseProtos.SnapshotDescription> listSnapshots(String regex) throws IOException;
1167 
1168   /**
1169    * List all the completed snapshots matching the given pattern.
1170    *
1171    * @param pattern The compiled regular expression to match against
1172    * @return - returns a List of SnapshotDescription
1173    * @throws IOException if a remote or network exception occurs
1174    */
1175   List<HBaseProtos.SnapshotDescription> listSnapshots(Pattern pattern) throws IOException;
1176 
1177   /**
1178    * Delete an existing snapshot.
1179    *
1180    * @param snapshotName name of the snapshot
1181    * @throws IOException if a remote or network exception occurs
1182    */
1183   void deleteSnapshot(final byte[] snapshotName) throws IOException;
1184 
1185   /**
1186    * Delete an existing snapshot.
1187    *
1188    * @param snapshotName name of the snapshot
1189    * @throws IOException if a remote or network exception occurs
1190    */
1191   void deleteSnapshot(final String snapshotName) throws IOException;
1192 
1193   /**
1194    * Delete existing snapshots whose names match the pattern passed.
1195    *
1196    * @param regex The regular expression to match against
1197    * @throws IOException if a remote or network exception occurs
1198    */
1199   void deleteSnapshots(final String regex) throws IOException;
1200 
1201   /**
1202    * Delete existing snapshots whose names match the pattern passed.
1203    *
1204    * @param pattern pattern for names of the snapshot to match
1205    * @throws IOException if a remote or network exception occurs
1206    */
1207   void deleteSnapshots(final Pattern pattern) throws IOException;
1208 
1209   /**
1210    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the active
1211    * master. <p> The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access
1212    * a published coprocessor {@link com.google.protobuf.Service} using standard protobuf service
1213    * invocations: </p> <div style="background-color: #cccccc; padding: 2px">
1214    * <blockquote><pre>
1215    * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
1216    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
1217    * MyCallRequest request = MyCallRequest.newBuilder()
1218    *     ...
1219    *     .build();
1220    * MyCallResponse response = service.myCall(null, request);
1221    * </pre></blockquote></div>
1222    *
1223    * @return A MasterCoprocessorRpcChannel instance
1224    */
1225   CoprocessorRpcChannel coprocessorService();
1226 }