/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * Used to communicate with a single HBase table.
 * Obtain an instance from an {@link HConnection}.
 *
 * @since 0.21.0
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface HTableInterface extends Closeable {

  /**
   * Gets the name of this table.
   *
   * @return the table name.
   */
  byte[] getTableName();

  /**
   * Gets the fully qualified table name instance of this table.
   */
  TableName getName();

  /**
   * Returns the {@link Configuration} object used by this instance.
   * <p>
   * The reference returned is not a copy, so any change made to it will
   * affect this instance.
   */
  Configuration getConfiguration();

  /**
   * Gets the {@link HTableDescriptor table descriptor} for this table.
   * @throws IOException if a remote or network exception occurs.
   */
  HTableDescriptor getTableDescriptor() throws IOException;

  /**
   * Test for the existence of columns in the table, as specified by the Get.
   * <p>
   *
   * This will return true if the Get matches one or more keys, false if not.
   * <p>
   *
   * This is a server-side call so it prevents any data from being transferred to
   * the client.
   *
   * @param get the Get
   * @return true if the specified Get matches one or more keys, false if not
   * @throws IOException e
   */
  boolean exists(Get get) throws IOException;

  /**
   * Test for the existence of columns in the table, as specified by the Gets.
   * <p>
   *
   * This will return an array of booleans. Each value will be true if the related Get matches
   * one or more keys, false if not.
   * <p>
   *
   * This is a server-side call so it prevents any data from being transferred to
   * the client.
   *
   * @param gets the Gets
   * @return an array of Booleans; each entry is true if the corresponding Get matches one or
   *         more keys, false if not
   * @throws IOException e
   */
  Boolean[] exists(List<Get> gets) throws IOException;

  /**
   * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends and RowMutations.
   * The ordering of execution of the actions is not defined. This means that if you do a Put
   * and a Get in the same {@link #batch} call, the Get is not necessarily guaranteed to
   * return what the Put had put.
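   * <p>
   * A minimal usage sketch (illustrative only; it assumes an {@code HTableInterface} named
   * {@code table} and made-up row, family and qualifier names):
   * <blockquote><pre>
   * List&lt;Row&gt; actions = new ArrayList&lt;Row&gt;();
   * actions.add(new Put(Bytes.toBytes("row1"))
   *     .add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
   * actions.add(new Get(Bytes.toBytes("row2")));
   * Object[] results = new Object[actions.size()];
   * table.batch(actions, results);  // a null entry means that action failed after retries
   * </pre></blockquote>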
   *
   * @param actions list of Get, Put, Delete, Increment, Append, RowMutations objects
   * @param results Empty Object[], same size as actions. Provides access to partial
   *                results, in case an exception is thrown. A null in the result array means that
   *                the call for that action failed, even after retries
   * @throws IOException
   * @since 0.90.0
   */
  void batch(final List<? extends Row> actions, final Object[] results)
    throws IOException, InterruptedException;

  /**
   * Same as {@link #batch(List, Object[])}, but returns an array of
   * results instead of using a results parameter reference.
   *
   * @param actions list of Get, Put, Delete, Increment, Append, RowMutations objects
   * @return the results from the actions. A null in the return array means that
   *         the call for that action failed, even after retries
   * @throws IOException
   * @since 0.90.0
   * @deprecated If any exception is thrown by one of the actions, there is no way to
   * retrieve the partially executed results. Use {@link #batch(List, Object[])} instead.
   */
  Object[] batch(final List<? extends Row> actions) throws IOException, InterruptedException;

  /**
   * Same as {@link #batch(List, Object[])}, but with a callback.
   * @since 0.96.0
   */
  <R> void batchCallback(
    final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback
  ) throws IOException, InterruptedException;

  /**
   * Same as {@link #batch(List)}, but with a callback.
   * @since 0.96.0
   * @deprecated If any exception is thrown by one of the actions, there is no way to
   * retrieve the partially executed results. Use
   * {@link #batchCallback(List, Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
   * instead.
   */
  <R> Object[] batchCallback(
    List<? extends Row> actions, Batch.Callback<R> callback
  ) throws IOException, InterruptedException;

  /**
   * Extracts certain cells from a given row.
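   * <p>
   * A minimal usage sketch (illustrative only; it assumes an {@code HTableInterface} named
   * {@code table} and a made-up column {@code cf:q}):
   * <blockquote><pre>
   * Get get = new Get(Bytes.toBytes("row1"));
   * get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));  // optional: restrict to one column
   * Result result = table.get(get);
   * byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));  // null if absent
   * </pre></blockquote>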
   * @param get The object that specifies what data to fetch and from which row.
   * @return The data coming from the specified row, if it exists.  If the row
   * specified doesn't exist, the {@link Result} instance returned won't
   * contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  Result get(Get get) throws IOException;

  /**
   * Extracts certain cells from the given rows, in batch.
   *
   * @param gets The objects that specify what data to fetch and from which rows.
   *
   * @return The data coming from the specified rows, if it exists.  If the row
   *         specified doesn't exist, the {@link Result} instance returned won't
   *         contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}.
   *         If there are any failures even after retries, there will be a null in
   *         the results array for those Gets, AND an exception will be thrown.
   * @throws IOException if a remote or network exception occurs.
   *
   * @since 0.90.0
   */
  Result[] get(List<Get> gets) throws IOException;

  /**
   * Returns the row that matches <i>row</i> exactly,
   * or the one that immediately precedes it.
   *
   * @param row A row key.
   * @param family Column family to include in the {@link Result}.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   *
   * @deprecated As of version 0.92 this method is deprecated without
   * replacement.
   * getRowOrBefore is used internally to find entries in hbase:meta and makes
   * various assumptions about the table (which hold for hbase:meta but not
   * in general) in order to be efficient.
   */
  Result getRowOrBefore(byte[] row, byte[] family) throws IOException;

  /**
   * Returns a scanner on the current table as specified by the {@link Scan}
   * object.
   * Note that the passed {@link Scan}'s start row and caching properties
   * may be changed.
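   * <p>
   * A minimal usage sketch (illustrative only; it assumes an {@code HTableInterface} named
   * {@code table} and a made-up column family {@code "cf"}):
   * <blockquote><pre>
   * Scan scan = new Scan();
   * scan.addFamily(Bytes.toBytes("cf"));
   * scan.setCaching(100);  // rows fetched per RPC; tune for the workload
   * ResultScanner scanner = table.getScanner(scan);
   * try {
   *   for (Result r : scanner) {
   *     // process each row
   *   }
   * } finally {
   *   scanner.close();  // always release the scanner
   * }
   * </pre></blockquote>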
   *
   * @param scan A configured {@link Scan} object.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(Scan scan) throws IOException;

  /**
   * Gets a scanner on the current table for the given family.
   *
   * @param family The column family to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(byte[] family) throws IOException;

  /**
   * Gets a scanner on the current table for the given family and qualifier.
   *
   * @param family The column family to scan.
   * @param qualifier The column qualifier to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;

  /**
   * Puts some data in the table.
   * <p>
   * If {@link #isAutoFlush isAutoFlush} is false, the update is buffered
   * until the internal buffer is full.
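   * <p>
   * A minimal usage sketch (illustrative only; it assumes an {@code HTableInterface} named
   * {@code table} and a made-up column family {@code "cf"}):
   * <blockquote><pre>
   * Put put = new Put(Bytes.toBytes("row1"));
   * put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
   * table.put(put);
   * </pre></blockquote>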
   * @param put The data to put.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void put(Put put) throws IOException;

  /**
   * Puts some data in the table, in batch.
   * <p>
   * If {@link #isAutoFlush isAutoFlush} is false, the update is buffered
   * until the internal buffer is full.
   * <p>
   * This can be used for group commit, or for submitting user defined
   * batches.  The writeBuffer will be periodically inspected while the List
   * is processed, so depending on the List size the writeBuffer may not flush
   * at all, or may flush more than once.
   * @param puts The list of mutations to apply. The batch put is done by
   * aggregating the Puts in the client-side write buffer before they are
   * sent to the servers.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void put(List<Put> puts) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the put.  If the passed value is null, the check
   * is for the lack of column (i.e., non-existence).
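   * <p>
   * A minimal usage sketch (illustrative only; it assumes an {@code HTableInterface} named
   * {@code table} and made-up row, family, qualifier and values):
   * <blockquote><pre>
   * Put put = new Put(Bytes.toBytes("row1"));
   * put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("new"));
   * // apply the Put only if cf:q currently holds "old"
   * boolean applied = table.checkAndPut(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
   *     Bytes.toBytes("q"), Bytes.toBytes("old"), put);
   * </pre></blockquote>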
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param value the expected value
   * @param put data to put if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   */
  boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
      byte[] value, Put put) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value satisfies the given
   * {@link CompareOp} comparison against the expected value. If it does, it adds the put.
   *
   * @param compareOp the comparison operator to use for the check
   * @return true if the new put was executed, false otherwise
   * @throws IOException e
   */
  boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
      CompareOp compareOp, byte[] value, Put put) throws IOException;

  /**
   * Deletes the specified cells/row.
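   * <p>
   * A minimal usage sketch (illustrative only; it assumes an {@code HTableInterface} named
   * {@code table}):
   * <blockquote><pre>
   * Delete delete = new Delete(Bytes.toBytes("row1"));  // with no further spec, deletes the whole row
   * table.delete(delete);
   * </pre></blockquote>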
   *
   * @param delete The object that specifies what to delete.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void delete(Delete delete) throws IOException;

  /**
   * Deletes the specified cells/rows in bulk.
   * @param deletes List of things to delete.  List gets modified by this
   * method (in particular it gets re-ordered, so the order in which the elements
   * are inserted in the list gives no guarantee as to the order in which the
   * {@link Delete}s are executed).
   * @throws IOException if a remote or network exception occurs. In that case
   * the {@code deletes} argument will contain the {@link Delete} instances
   * that have not been successfully applied.
   * @since 0.20.1
   */
  void delete(List<Delete> deletes) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the delete.  If the passed value is null, the
   * check is for the lack of column (i.e., non-existence).
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param value the expected value
   * @param delete data to delete if check succeeds
   * @throws IOException e
   * @return true if the new delete was executed, false otherwise
   */
  boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
      byte[] value, Delete delete) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value satisfies the given
   * {@link CompareOp} comparison against the expected value. If it does, it adds the delete.
   *
   * @param compareOp the comparison operator to use for the check
   * @return true if the new delete was executed, false otherwise
   * @throws IOException e
   */
  boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
      CompareOp compareOp, byte[] value, Delete delete) throws IOException;

  /**
   * Performs multiple mutations atomically on a single row. Currently
   * {@link Put} and {@link Delete} are supported.
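   * <p>
   * A minimal usage sketch (illustrative only; it assumes an {@code HTableInterface} named
   * {@code table} and a made-up column family {@code "cf"}):
   * <blockquote><pre>
   * Delete delete = new Delete(Bytes.toBytes("row1"));
   * delete.deleteColumns(Bytes.toBytes("cf"), Bytes.toBytes("old"));
   * RowMutations rm = new RowMutations(Bytes.toBytes("row1"));
   * rm.add(new Put(Bytes.toBytes("row1"))
   *     .add(Bytes.toBytes("cf"), Bytes.toBytes("new"), Bytes.toBytes("v")));
   * rm.add(delete);
   * table.mutateRow(rm);  // both mutations are applied atomically
   * </pre></blockquote>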
   *
   * @param rm object that specifies the set of mutations to perform atomically
   * @throws IOException
   */
  void mutateRow(final RowMutations rm) throws IOException;

  /**
   * Appends values to one or more columns within a single row.
   * <p>
   * This operation does not appear atomic to readers.  Appends are done
   * under a single row lock, so write operations to a row are synchronized, but
   * readers do not take row locks so get and scan operations can see this
   * operation partially completed.
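   * <p>
   * A minimal usage sketch (illustrative only; it assumes an {@code HTableInterface} named
   * {@code table} and a made-up column {@code cf:q}):
   * <blockquote><pre>
   * Append append = new Append(Bytes.toBytes("row1"));
   * append.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
   * Result result = table.append(append);  // holds the column values after the append
   * </pre></blockquote>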
   *
   * @param append object that specifies the columns and values to be used
   *                  for the append operation
   * @throws IOException e
   * @return values of columns after the append operation (may be null)
   */
  Result append(final Append append) throws IOException;

  /**
   * Increments one or more columns within a single row.
   * <p>
   * This operation does not appear atomic to readers.  Increments are done
   * under a single row lock, so write operations to a row are synchronized, but
   * readers do not take row locks so get and scan operations can see this
   * operation partially completed.
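   * <p>
   * A minimal usage sketch (illustrative only; it assumes an {@code HTableInterface} named
   * {@code table} and a made-up counter column {@code cf:hits}):
   * <blockquote><pre>
   * Increment increment = new Increment(Bytes.toBytes("row1"));
   * increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
   * Result result = table.increment(increment);  // holds the new counter values
   * </pre></blockquote>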
   *
   * @param increment object that specifies the columns and amounts to be used
   *                  for the increment operations
   * @throws IOException e
   * @return values of columns after the increment
   */
  Result increment(final Increment increment) throws IOException;

  /**
   * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
   * <p>
   * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the
   * amount is negative).
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
      long amount) throws IOException;

  /**
   * Atomically increments a column value. If the column value already exists
   * and is not a big-endian long, this could throw an exception. If the column
   * value does not yet exist it is initialized to <code>amount</code> and
   * written to the specified column.
   *
   * <p>Setting durability to {@link Durability#SKIP_WAL} means that in a fail
   * scenario you will lose any increments that have not been flushed.
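   * <p>
   * A minimal usage sketch (illustrative only; it assumes an {@code HTableInterface} named
   * {@code table} and a made-up counter column {@code cf:hits}):
   * <blockquote><pre>
   * long newValue = table.incrementColumnValue(Bytes.toBytes("row1"),
   *     Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L, Durability.SYNC_WAL);
   * </pre></blockquote>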
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the
   * amount is negative).
   * @param durability The persistence guarantee for this increment.
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
      long amount, Durability durability) throws IOException;

  /**
   * @deprecated Use {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
   */
  @Deprecated
  long incrementColumnValue(final byte [] row, final byte [] family,
      final byte [] qualifier, final long amount, final boolean writeToWAL)
  throws IOException;

  /**
   * Tells whether or not 'auto-flush' is turned on.
   *
   * @return {@code true} if 'auto-flush' is enabled (default), meaning
   * {@link Put} operations don't get buffered/delayed and are immediately
   * executed.
   */
  boolean isAutoFlush();

  /**
   * Executes all the buffered {@link Put} operations.
   * <p>
   * This method gets called once automatically for every {@link Put} or batch
   * of {@link Put}s (when <code>put(List&lt;Put&gt;)</code> is used) when
   * {@link #isAutoFlush} is {@code true}.
   * @throws IOException if a remote or network exception occurs.
   */
  void flushCommits() throws IOException;

  /**
   * Releases any resources held or pending changes in internal buffers.
   *
   * @throws IOException if a remote or network exception occurs.
   */
  void close() throws IOException;

  /**
   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the
   * table region containing the specified row.  The row given does not actually have
   * to exist.  Whichever region would contain the row based on start and end keys will
   * be used.  Note that the {@code row} parameter is also not passed to the
   * coprocessor handler registered for this protocol, unless the {@code row}
   * is separately passed as an argument in the service request.  The parameter
   * here is only used to locate the region used to handle the call.
   *
   * <p>
   * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
   * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
   * </p>
   *
   * <div style="background-color: #cccccc; padding: 2px">
   * <blockquote><pre>
   * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
   * MyCallRequest request = MyCallRequest.newBuilder()
   *     ...
   *     .build();
   * MyCallResponse response = service.myCall(null, request);
   * </pre></blockquote></div>
   *
   * @param row The row key used to identify the remote region location
   * @return A CoprocessorRpcChannel instance
   */
  @InterfaceAudience.Private // TODO add coproc audience level
  CoprocessorRpcChannel coprocessorService(byte[] row);

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive),
   * and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
   * method with each {@link Service} instance.
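   * <p>
   * A sketch of a typical invocation (illustrative only; {@code MyService}, {@code MyCallRequest}
   * and {@code MyCallResponse} are the same hypothetical generated protobuf classes used in the
   * example above, and the HBase {@code ServerRpcController} and {@code BlockingRpcCallback}
   * helpers from {@code org.apache.hadoop.hbase.ipc} are used to drive the non-blocking stub):
   * <blockquote><pre>
   * final MyCallRequest request = MyCallRequest.newBuilder().build();
   * Map&lt;byte[], MyCallResponse&gt; results = table.coprocessorService(MyService.class,
   *     null, null,  // null start/end keys select every region of the table
   *     new Batch.Call&lt;MyService, MyCallResponse&gt;() {
   *       public MyCallResponse call(MyService instance) throws IOException {
   *         ServerRpcController controller = new ServerRpcController();
   *         BlockingRpcCallback&lt;MyCallResponse&gt; rpcCallback =
   *             new BlockingRpcCallback&lt;MyCallResponse&gt;();
   *         instance.myCall(controller, request, rpcCallback);
   *         return rpcCallback.get();
   *       }
   *     });
   * </pre></blockquote>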
   *
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row.  If {@code null}, the
   *                 selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row.
   *               If {@code null}, selection will continue through the last table region.
   * @param callable this instance's
   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
   *                 method will be invoked once per table region, using the {@link Service}
   *                 instance connected to that region.
   * @param <T> the {@link Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * @return a map of result values keyed by region name
   */
  @InterfaceAudience.Private // TODO add coproc audience level
  <T extends Service, R> Map<byte[],R> coprocessorService(final Class<T> service,
      byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable)
      throws ServiceException, Throwable;

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive),
   * and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
   * method with each {@link Service} instance.
   *
   * <p>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
   * method will be called with the return value from each region's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.
   * </p>
   *
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row.  If {@code null}, the
   *                 selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row.
   *               If {@code null}, selection will continue through the last table region.
   * @param callable this instance's
   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   *                 will be invoked once per table region, using the {@link Service} instance
   *                 connected to that region.
   * @param callback the callback to invoke with the result from each region
   * @param <T> the {@link Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   */
  @InterfaceAudience.Private // TODO add coproc audience level
  <T extends Service, R> void coprocessorService(final Class<T> service,
      byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable,
      final Batch.Callback<R> callback) throws ServiceException, Throwable;

  /**
   * See {@link #setAutoFlush(boolean, boolean)}
   *
   * @param autoFlush
   *          Whether or not to enable 'auto-flush'.
   * @deprecated in 0.96. When called with setAutoFlush(false), this method also
   *  sets clearBufferOnFail to true, which is unexpected but kept for historical reasons.
   *  Replace it with setAutoFlush(false, false) if this is exactly what you want, or with
   *  {@link #setAutoFlushTo(boolean)} for all other cases.
   */
  @Deprecated
  void setAutoFlush(boolean autoFlush);

  /**
   * Turns 'auto-flush' on or off.
   * <p>
   * When enabled (default), {@link Put} operations don't get buffered/delayed
   * and are immediately executed. Failed operations are not retried. This is
   * slower but safer.
   * <p>
   * Turning off {@code autoFlush} means that multiple {@link Put}s will be
   * accepted before any RPC is actually sent to do the write operations. If the
   * application dies before pending writes get flushed to HBase, data will be
   * lost.
   * <p>
   * When you turn {@code autoFlush} off, you should also consider the
   * {@code clearBufferOnFail} option. By default, asynchronous {@link Put}
   * requests will be retried on failure until successful. However, this can
   * pollute the writeBuffer and slow down batching performance. Additionally,
   * you may want to issue a number of Put requests and call
   * {@link #flushCommits()} as a barrier. In both use cases, consider setting
   * clearBufferOnFail to true to erase the buffer after {@link #flushCommits()}
   * has been called, regardless of success.
   * <p>
   * In other words, if you call {@code setAutoFlush(false)}, HBase will retry N times for each
   *  flushCommits, including the last one when closing the table. This is NOT recommended;
   *  most of the time you want to call {@code setAutoFlush(false, true)}.
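   * <p>
   * A minimal buffered-write sketch (illustrative only; it assumes an {@code HTableInterface}
   * named {@code table} and a made-up column family {@code "cf"}):
   * <blockquote><pre>
   * table.setAutoFlush(false, true);            // buffer Puts on the client side
   * table.setWriteBufferSize(4 * 1024 * 1024);  // optional: 4 MB write buffer
   * for (int i = 0; i &lt; 1000; i++) {
   *   Put put = new Put(Bytes.toBytes("row-" + i));
   *   put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v-" + i));
   *   table.put(put);                           // buffered; flushed when the buffer fills
   * }
   * table.flushCommits();                       // push any remaining buffered Puts
   * </pre></blockquote>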
   *
   * @param autoFlush
   *          Whether or not to enable 'auto-flush'.
   * @param clearBufferOnFail
   *          Whether to keep Put failures in the writeBuffer. If autoFlush is true, then
   *          the value of this parameter is ignored and clearBufferOnFail is set to true.
   *          Setting clearBufferOnFail to false is deprecated since 0.96.
   * @see #flushCommits
   */
  void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail);

  /**
   * Sets the autoFlush behavior, without changing the value of {@code clearBufferOnFail}.
   */
  void setAutoFlushTo(boolean autoFlush);

  /**
   * Returns the maximum size in bytes of the write buffer for this HTable.
   * <p>
   * The default value comes from the configuration parameter
   * {@code hbase.client.write.buffer}.
   * @return The size of the write buffer in bytes.
   */
  long getWriteBufferSize();

  /**
   * Sets the size of the write buffer in bytes.
   * <p>
   * If the new size is less than the current amount of data in the
   * write buffer, the buffer gets flushed.
   * @param writeBufferSize The new write buffer size, in bytes.
   * @throws IOException if a remote or network exception occurs.
   */
  void setWriteBufferSize(long writeBufferSize) throws IOException;

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive).
   * All invocations to the same region server are batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   *
   * @param methodDescriptor
   *          the descriptor for the protobuf service method to call.
   * @param request
   *          the method call parameters
   * @param startKey
   *          start region selection with region containing this row. If {@code null}, the
   *          selection will start with the first table region.
   * @param endKey
   *          select regions up to and including the region containing this row. If {@code null},
   *          selection will continue through the last table region.
   * @param responsePrototype
   *          the protobuf type of the response of the method in Service.
   * @param <R>
   *          the response type for the coprocessor Service method
   * @throws ServiceException
   * @throws Throwable
   * @return a map of result values keyed by region name
   */
  @InterfaceAudience.Private
  <R extends Message> Map<byte[], R> batchCoprocessorService(
      Descriptors.MethodDescriptor methodDescriptor, Message request,
      byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable;

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive).
   * All invocations to the same region server are batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   *
   * <p>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
   * method will be called with the return value from each region's invocation.
   * </p>
   *
   * @param methodDescriptor
   *          the descriptor for the protobuf service method to call.
   * @param request
   *          the method call parameters
   * @param startKey
   *          start region selection with region containing this row. If {@code null}, the
   *          selection will start with the first table region.
   * @param endKey
   *          select regions up to and including the region containing this row. If {@code null},
   *          selection will continue through the last table region.
   * @param responsePrototype
   *          the protobuf type of the response of the method in Service.
   * @param callback
   *          callback to invoke with the response for each region
   * @param <R>
   *          the response type for the coprocessor Service method
   * @throws ServiceException
   * @throws Throwable
   */
  @InterfaceAudience.Private
  <R extends Message> void batchCoprocessorService(Descriptors.MethodDescriptor methodDescriptor,
      Message request, byte[] startKey, byte[] endKey, R responsePrototype,
      Batch.Callback<R> callback) throws ServiceException, Throwable;
}