001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.client;
019
020import java.io.Closeable;
021import java.io.IOException;
022import java.util.Collections;
023import java.util.List;
024import java.util.Map;
025import java.util.TreeMap;
026import java.util.concurrent.TimeUnit;
027import org.apache.commons.lang3.NotImplementedException;
028import org.apache.hadoop.conf.Configuration;
029import org.apache.hadoop.hbase.Cell;
030import org.apache.hadoop.hbase.CompareOperator;
031import org.apache.hadoop.hbase.TableName;
032import org.apache.hadoop.hbase.client.coprocessor.Batch;
033import org.apache.hadoop.hbase.filter.Filter;
034import org.apache.hadoop.hbase.io.TimeRange;
035import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
036import org.apache.hadoop.hbase.util.Bytes;
037import org.apache.yetus.audience.InterfaceAudience;
038
039import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors;
040import org.apache.hbase.thirdparty.com.google.protobuf.Message;
041import org.apache.hbase.thirdparty.com.google.protobuf.Service;
042import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
043
044/**
045 * Used to communicate with a single HBase table. Obtain an instance from a {@link Connection} and
046 * call {@link #close()} afterwards.
047 * <p>
048 * <code>Table</code> can be used to get, put, delete or scan data from a table.
049 * @see ConnectionFactory
050 * @see Connection
051 * @see Admin
052 * @see RegionLocator
053 * @since 0.99.0
054 */
055@InterfaceAudience.Public
056public interface Table extends Closeable {
057  /**
058   * Gets the fully qualified table name instance of this table.
059   */
060  TableName getName();
061
062  /**
063   * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance.
064   * <p>
065   * The reference returned is not a copy, so any change made to it will affect this instance.
066   */
067  Configuration getConfiguration();
068
069  /**
070   * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this
071   * table.
072   * @throws java.io.IOException if a remote or network exception occurs.
073   */
074  TableDescriptor getDescriptor() throws IOException;
075
076  /**
077   * Gets the {@link RegionLocator} for this table.
078   */
079  RegionLocator getRegionLocator() throws IOException;
080
081  /**
082   * Test for the existence of columns in the table, as specified by the Get.
083   * <p>
084   * This will return true if the Get matches one or more keys, false if not.
085   * <p>
   * This is a server-side call so it prevents any data from being transferred to the client.
087   * @param get the Get
088   * @return true if the specified Get matches one or more keys, false if not
089   * @throws IOException e
090   */
091  default boolean exists(Get get) throws IOException {
092    return exists(Collections.singletonList(get))[0];
093  }
094
095  /**
096   * Test for the existence of columns in the table, as specified by the Gets.
097   * <p>
098   * This will return an array of booleans. Each value will be true if the related Get matches one
099   * or more keys, false if not.
100   * <p>
101   * This is a server-side call so it prevents any data from being transferred to the client.
102   * @param gets the Gets
103   * @return Array of boolean. True if the specified Get matches one or more keys, false if not.
104   * @throws IOException e
105   */
  default boolean[] exists(List<Get> gets) throws IOException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
109
110  /**
111   * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations. The
112   * ordering of execution of the actions is not defined. Meaning if you do a Put and a Get in the
113   * same {@link #batch} call, you will not necessarily be guaranteed that the Get returns what the
114   * Put had put.
115   * @param actions list of Get, Put, Delete, Increment, Append, RowMutations.
116   * @param results Empty Object[], same size as actions. Provides access to partial results, in
117   *                case an exception is thrown. On per-action failure after retries, the
118   *                corresponding slot contains the Throwable (typically an IOException). On
119   *                success, the slot contains a {@link Result} (possibly empty for a Get with no
120   *                match, or empty for successful mutations). A null slot may appear if the entire
121   *                batch was rejected before dispatch (for example, IllegalArgumentException from
122   *                input validation), in which case all slots will be null. The order of the
123   *                objects in the results array corresponds to the order of actions in the request
124   *                list.
125   * @since 0.90.0
126   */
  default void batch(final List<? extends Row> actions, final Object[] results)
    throws IOException, InterruptedException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
131
132  /**
133   * Same as {@link #batch(List, Object[])}, but with a callback.
134   * @since 0.96.0
   * @deprecated since 3.0.0, will be removed in 4.0.0. Please use the batch related methods in
136   *             {@link AsyncTable} directly if you want to use callback. We reuse the callback for
137   *             coprocessor here, and the problem is that for batch operation, the
138   *             {@link AsyncTable} does not tell us the region, so in this method we need an extra
139   *             locating after we get the result, which is not good.
140   */
  @Deprecated
  default <R> void batchCallback(final List<? extends Row> actions, final Object[] results,
    final Batch.Callback<R> callback) throws IOException, InterruptedException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
146
147  /**
148   * Extracts certain cells from a given row.
149   * @param get The object that specifies what data to fetch and from which row.
150   * @return The data coming from the specified row, if it exists. If the row specified doesn't
151   *         exist, the {@link Result} instance returned won't contain any
152   *         {@link org.apache.hadoop.hbase.KeyValue}, as indicated by {@link Result#isEmpty()}.
153   * @throws IOException if a remote or network exception occurs.
154   * @since 0.20.0
155   */
156  default Result get(Get get) throws IOException {
157    return get(Collections.singletonList(get))[0];
158  }
159
160  /**
161   * Extracts specified cells from the given rows, as a batch.
162   * @param gets The objects that specify what data to fetch and from which rows.
163   * @return The data coming from the specified rows, if it exists. If the row specified doesn't
164   *         exist, the {@link Result} instance returned won't contain any
165   *         {@link org.apache.hadoop.hbase.Cell}s, as indicated by {@link Result#isEmpty()}. If
166   *         there are any failures even after retries, there will be a <code>null</code> in the
167   *         results' array for those Gets, AND an exception will be thrown. The ordering of the
168   *         Result array corresponds to the order of the list of passed in Gets.
169   * @throws IOException if a remote or network exception occurs.
170   * @since 0.90.0
171   * @apiNote {@link #put(List)} runs pre-flight validations on the input list on client. Currently
   *          {@link #get(List)} doesn't run any validations on the client-side; there is no need
   *          for now, but this may change in the future. An {@link IllegalArgumentException} will
174   *          be thrown in this case.
175   */
  default Result[] get(List<Get> gets) throws IOException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
179
180  /**
181   * Returns a scanner on the current table as specified by the {@link Scan} object. Note that the
182   * passed {@link Scan}'s start row and caching properties maybe changed.
183   * @param scan A configured {@link Scan} object.
184   * @return A scanner.
185   * @throws IOException if a remote or network exception occurs.
186   * @since 0.20.0
187   */
  default ResultScanner getScanner(Scan scan) throws IOException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
191
192  /**
193   * Gets a scanner on the current table for the given family.
194   * @param family The column family to scan.
195   * @return A scanner.
196   * @throws IOException if a remote or network exception occurs.
197   * @since 0.20.0
198   */
  default ResultScanner getScanner(byte[] family) throws IOException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
202
203  /**
204   * Gets a scanner on the current table for the given family and qualifier.
205   * @param family    The column family to scan.
206   * @param qualifier The column qualifier to scan.
207   * @return A scanner.
208   * @throws IOException if a remote or network exception occurs.
209   * @since 0.20.0
210   */
  default ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
214
215  /**
216   * Puts some data in the table.
217   * @param put The data to put.
218   * @throws IOException if a remote or network exception occurs.
219   * @since 0.20.0
220   */
221  default void put(Put put) throws IOException {
222    put(Collections.singletonList(put));
223  }
224
225  /**
226   * Batch puts the specified data into the table.
227   * <p>
228   * This can be used for group commit, or for submitting user defined batches. Before sending a
229   * batch of mutations to the server, the client runs a few validations on the input list. If an
   * error is found, for example, a mutation was supplied but was missing its column, an
231   * {@link IllegalArgumentException} will be thrown and no mutations will be applied. If there are
232   * any failures even after retries, a {@link RetriesExhaustedWithDetailsException} will be thrown.
233   * RetriesExhaustedWithDetailsException contains lists of failed mutations and corresponding
234   * remote exceptions. The ordering of mutations and exceptions in the encapsulating exception
235   * corresponds to the order of the input list of Put requests.
236   * @param puts The list of mutations to apply.
237   * @throws IOException if a remote or network exception occurs.
238   * @since 0.20.0
239   */
  default void put(List<Put> puts) throws IOException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
243
244  /**
245   * Deletes the specified cells/row.
246   * @param delete The object that specifies what to delete.
247   * @throws IOException if a remote or network exception occurs.
248   * @since 0.20.0
249   */
  default void delete(Delete delete) throws IOException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
253
254  /**
255   * Batch Deletes the specified cells/rows from the table.
256   * <p>
   * If a specified row does not exist, {@link Delete} will report as though a successful delete
   * occurred; no exception will be thrown. If there are any failures even after retries, a
259   * {@link RetriesExhaustedWithDetailsException} will be thrown.
260   * RetriesExhaustedWithDetailsException contains lists of failed {@link Delete}s and corresponding
261   * remote exceptions.
262   * @param deletes List of things to delete. The input list gets modified by this method. All
263   *                successfully applied {@link Delete}s in the list are removed (in particular it
264   *                gets re-ordered, so the order in which the elements are inserted in the list
265   *                gives no guarantee as to the order in which the {@link Delete}s are executed).
266   * @throws IOException if a remote or network exception occurs. In that case the {@code deletes}
   *                     argument will contain the {@link Delete} instances that have not been
268   *                     successfully applied.
269   * @since 0.20.1
270   * @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also,
271   *          {@link #put(List)} runs pre-flight validations on the input list on client. Currently
272   *          {@link #delete(List)} doesn't run validations on the client, there is no need
273   *          currently, but this may change in the future. An {@link IllegalArgumentException} will
274   *          be thrown in this case.
275   */
  default void delete(List<Delete> deletes) throws IOException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
279
280  /**
281   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
282   * adds the Put/Delete/RowMutations.
283   * <p>
284   * Use the returned {@link CheckAndMutateBuilder} to construct your request and then execute it.
285   * This is a fluent style API, the code is like:
286   *
287   * <pre>
288   * <code>
289   * table.checkAndMutate(row, family).qualifier(qualifier).ifNotExists().thenPut(put);
290   * </code>
291   * </pre>
292   *
293   * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it
294   *             any more.
295   */
  @Deprecated
  default CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
300
301  /**
302   * A helper class for sending checkAndMutate request.
303   * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it
304   *             any more.
305   */
  @Deprecated
  interface CheckAndMutateBuilder {

    /**
     * Specify a column qualifier
     * @param qualifier column qualifier to check.
     */
    CheckAndMutateBuilder qualifier(byte[] qualifier);

    /**
     * Specify a timerange
     * @param timeRange timeRange to check
     */
    CheckAndMutateBuilder timeRange(TimeRange timeRange);

    /**
     * Check for lack of column.
     */
    CheckAndMutateBuilder ifNotExists();

    /**
     * Check for equality.
     * @param value the expected value
     */
    default CheckAndMutateBuilder ifEquals(byte[] value) {
      return ifMatches(CompareOperator.EQUAL, value);
    }

    /**
     * Check for match.
     * @param compareOp comparison operator to use
     * @param value     the expected value
     */
    CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value);

    /**
     * Specify a Put to commit if the check succeeds.
     * @param put data to put if check succeeds
     * @return {@code true} if the new put was executed, {@code false} otherwise.
     * @throws IOException if a remote or network exception occurs.
     */
    boolean thenPut(Put put) throws IOException;

    /**
     * Specify a Delete to commit if the check succeeds.
     * @param delete data to delete if check succeeds
     * @return {@code true} if the new delete was executed, {@code false} otherwise.
     * @throws IOException if a remote or network exception occurs.
     */
    boolean thenDelete(Delete delete) throws IOException;

    /**
     * Specify a RowMutations to commit if the check succeeds.
     * @param mutation mutations to perform if check succeeds
     * @return true if the new mutation was executed, false otherwise.
     * @throws IOException if a remote or network exception occurs.
     */
    boolean thenMutate(RowMutations mutation) throws IOException;
  }
362
363  /**
364   * Atomically checks if a row matches the specified filter. If it does, it adds the
365   * Put/Delete/RowMutations.
366   * <p>
367   * Use the returned {@link CheckAndMutateWithFilterBuilder} to construct your request and then
368   * execute it. This is a fluent style API, the code is like:
369   *
370   * <pre>
371   * <code>
372   * table.checkAndMutate(row, filter).thenPut(put);
373   * </code>
374   * </pre>
375   *
376   * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it
377   *             any more.
378   */
  @Deprecated
  default CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter) {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
383
384  /**
385   * A helper class for sending checkAndMutate request with a filter.
386   * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it
387   *             any more.
388   */
  @Deprecated
  interface CheckAndMutateWithFilterBuilder {

    /**
     * Specify a timerange.
     * @param timeRange timeRange to check
     */
    CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange);

    /**
     * Specify a Put to commit if the check succeeds.
     * @param put data to put if check succeeds
     * @return {@code true} if the new put was executed, {@code false} otherwise.
     * @throws IOException if a remote or network exception occurs.
     */
    boolean thenPut(Put put) throws IOException;

    /**
     * Specify a Delete to commit if the check succeeds.
     * @param delete data to delete if check succeeds
     * @return {@code true} if the new delete was executed, {@code false} otherwise.
     * @throws IOException if a remote or network exception occurs.
     */
    boolean thenDelete(Delete delete) throws IOException;

    /**
     * Specify a RowMutations to commit if the check succeeds.
     * @param mutation mutations to perform if check succeeds
     * @return true if the new mutation was executed, false otherwise.
     * @throws IOException if a remote or network exception occurs.
     */
    boolean thenMutate(RowMutations mutation) throws IOException;
  }
419
420  /**
421   * checkAndMutate that atomically checks if a row matches the specified condition. If it does, it
422   * performs the specified action.
423   * @param checkAndMutate The CheckAndMutate object.
424   * @return A CheckAndMutateResult object that represents the result for the CheckAndMutate.
425   * @throws IOException if a remote or network exception occurs.
426   */
427  default CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws IOException {
428    return checkAndMutate(Collections.singletonList(checkAndMutate)).get(0);
429  }
430
431  /**
432   * Batch version of checkAndMutate. The specified CheckAndMutates are batched only in the sense
433   * that they are sent to a RS in one RPC, but each CheckAndMutate operation is still executed
434   * atomically (and thus, each may fail independently of others).
435   * @param checkAndMutates The list of CheckAndMutate.
436   * @return A list of CheckAndMutateResult objects that represents the result for each
437   *         CheckAndMutate.
438   * @throws IOException if a remote or network exception occurs.
439   */
  default List<CheckAndMutateResult> checkAndMutate(List<CheckAndMutate> checkAndMutates)
    throws IOException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
444
445  /**
446   * Performs multiple mutations atomically on a single row. Currently {@link Put} and
447   * {@link Delete} are supported.
448   * @param rm object that specifies the set of mutations to perform atomically
449   * @return results of Increment/Append operations
450   * @throws IOException if a remote or network exception occurs.
451   */
  default Result mutateRow(final RowMutations rm) throws IOException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
455
456  /**
457   * Appends values to one or more columns within a single row.
458   * <p>
   * This operation guarantees atomicity to readers. Appends are done under a single row lock, so
460   * write operations to a row are synchronized, and readers are guaranteed to see this operation
461   * fully completed.
462   * @param append object that specifies the columns and values to be appended
463   * @throws IOException e
464   * @return values of columns after the append operation (maybe null)
465   */
  default Result append(final Append append) throws IOException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
469
470  /**
471   * Increments one or more columns within a single row.
472   * <p>
473   * This operation ensures atomicity to readers. Increments are done under a single row lock, so
474   * write operations to a row are synchronized, and readers are guaranteed to see this operation
475   * fully completed.
476   * @param increment object that specifies the columns and amounts to be used for the increment
477   *                  operations
478   * @throws IOException e
479   * @return values of columns after the increment
480   */
  default Result increment(final Increment increment) throws IOException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
484
485  /**
486   * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
487   * <p>
488   * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
489   * @param row       The row that contains the cell to increment.
490   * @param family    The column family of the cell to increment.
491   * @param qualifier The column qualifier of the cell to increment.
492   * @param amount    The amount to increment the cell with (or decrement, if the amount is
493   *                  negative).
494   * @return The new value, post increment.
495   * @throws IOException if a remote or network exception occurs.
496   */
497  default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount)
498    throws IOException {
499    Increment increment = new Increment(row).addColumn(family, qualifier, amount);
500    Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
501    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
502  }
503
504  /**
505   * Atomically increments a column value. If the column value already exists and is not a
506   * big-endian long, this could throw an exception. If the column value does not yet exist it is
507   * initialized to <code>amount</code> and written to the specified column.
508   * <p>
509   * Setting durability to {@link Durability#SKIP_WAL} means that in a fail scenario you will lose
510   * any increments that have not been flushed.
511   * @param row        The row that contains the cell to increment.
512   * @param family     The column family of the cell to increment.
513   * @param qualifier  The column qualifier of the cell to increment.
514   * @param amount     The amount to increment the cell with (or decrement, if the amount is
515   *                   negative).
516   * @param durability The persistence guarantee for this increment.
517   * @return The new value, post increment.
518   * @throws IOException if a remote or network exception occurs.
519   */
520  default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount,
521    Durability durability) throws IOException {
522    Increment increment =
523      new Increment(row).addColumn(family, qualifier, amount).setDurability(durability);
524    Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
525    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
526  }
527
528  /**
529   * Releases any resources held or pending changes in internal buffers.
530   * @throws IOException if a remote or network exception occurs.
531   */
  @Override
  default void close() throws IOException {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
536
537  /**
538   * Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel}
539   * instance connected to the table region containing the specified row. The row given does not
540   * actually have to exist. Whichever region would contain the row based on start and end keys will
541   * be used. Note that the {@code row} parameter is also not passed to the coprocessor handler
542   * registered for this protocol, unless the {@code row} is separately passed as an argument in the
543   * service request. The parameter here is only used to locate the region used to handle the call.
544   * <p/>
545   * The obtained {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} instance can be
546   * used to access a published coprocessor {@link Service} using standard protobuf service
547   * invocations:
548   * <p/>
549   * <div style="background-color: #cccccc; padding: 2px"> <blockquote>
550   *
551   * <pre>
552   * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
553   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
554   * MyCallRequest request = MyCallRequest.newBuilder()
555   *     ...
556   *     .build();
557   * MyCallResponse response = service.myCall(null, request);
558   * </pre>
559   *
560   * </blockquote> </div>
561   * @param row The row key used to identify the remote region location
562   * @return A CoprocessorRpcChannel instance
   * @deprecated since 3.0.0, will be removed in 4.0.0. This is too low level, please stop using it
564   *             more. Use the coprocessorService methods in {@link AsyncTable} instead.
565   * @see Connection#toAsyncConnection()
566   */
  @Deprecated
  default CoprocessorRpcChannel coprocessorService(byte[] row) {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
571
572  /**
573   * Creates an instance of the given {@link Service} subclass for each table region spanning the
574   * range from the {@code startKey} row to {@code endKey} row (inclusive), and invokes the passed
575   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method with each
576   * {@link Service} instance.
577   * @param service  the protocol buffer {@code Service} implementation to call
578   * @param startKey start region selection with region containing this row. If {@code null}, the
579   *                 selection will start with the first table region.
580   * @param endKey   select regions up to and including the region containing this row. If
581   *                 {@code null}, selection will continue through the last table region.
582   * @param callable this instance's
583   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will
584   *                 be invoked once per table region, using the {@link Service} instance connected
585   *                 to that region.
586   * @param <T>      the {@link Service} subclass to connect to
587   * @param <R>      Return type for the {@code callable} parameter's
588   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
589   * @return a map of result values keyed by region name
   * @deprecated since 3.0.0, will be removed in 4.0.0. The batch call here references the blocking
   *             interface of a protobuf stub, so it is not possible to do it in an asynchronous
592   *             way, even if now we are building the {@link Table} implementation based on the
593   *             {@link AsyncTable}, which is not good. Use the coprocessorService methods in
594   *             {@link AsyncTable} directly instead.
595   * @see Connection#toAsyncConnection()
596   */
597  @Deprecated
598  default <T extends Service, R> Map<byte[], R> coprocessorService(final Class<T> service,
599    byte[] startKey, byte[] endKey, final Batch.Call<T, R> callable)
600    throws ServiceException, Throwable {
601    Map<byte[], R> results =
602      Collections.synchronizedMap(new TreeMap<byte[], R>(Bytes.BYTES_COMPARATOR));
603    coprocessorService(service, startKey, endKey, callable, new Batch.Callback<R>() {
604      @Override
605      public void update(byte[] region, byte[] row, R value) {
606        if (region != null) {
607          results.put(region, value);
608        }
609      }
610    });
611    return results;
612  }
613
614  /**
615   * Creates an instance of the given {@link Service} subclass for each table region spanning the
616   * range from the {@code startKey} row to {@code endKey} row (inclusive), and invokes the passed
617   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method with each
618   * {@link Service} instance.
619   * <p/>
620   * The given
621   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
622   * method will be called with the return value from each region's
623   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.
624   * @param service  the protocol buffer {@code Service} implementation to call
625   * @param startKey start region selection with region containing this row. If {@code null}, the
626   *                 selection will start with the first table region.
627   * @param endKey   select regions up to and including the region containing this row. If
628   *                 {@code null}, selection will continue through the last table region.
629   * @param callable this instance's
630   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will
631   *                 be invoked once per table region, using the {@link Service} instance connected
632   *                 to that region.
633   * @param <T>      the {@link Service} subclass to connect to
634   * @param <R>      Return type for the {@code callable} parameter's
635   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * @deprecated since 3.0.0, will be removed in 4.0.0. The batch call here references the blocking
   *             interface of a protobuf stub, so it is not possible to do it in an asynchronous
638   *             way, even if now we are building the {@link Table} implementation based on the
639   *             {@link AsyncTable}, which is not good. Use the coprocessorService methods in
640   *             {@link AsyncTable} directly instead.
641   * @see Connection#toAsyncConnection()
642   */
  @Deprecated
  default <T extends Service, R> void coprocessorService(final Class<T> service, byte[] startKey,
    byte[] endKey, final Batch.Call<T, R> callable, final Batch.Callback<R> callback)
    throws ServiceException, Throwable {
    // Default implementation is a stub; concrete Table implementations must override.
    throw new NotImplementedException("Add an implementation!");
  }
649
650  /**
651   * Creates an instance of the given {@link Service} subclass for each table region spanning the
652   * range from the {@code startKey} row to {@code endKey} row (inclusive), all the invocations to
653   * the same region server will be batched into one call. The coprocessor service is invoked
654   * according to the service instance, method name and parameters.
655   * @param methodDescriptor  the descriptor for the protobuf service method to call.
656   * @param request           the method call parameters
657   * @param startKey          start region selection with region containing this row. If
658   *                          {@code null}, the selection will start with the first table region.
659   * @param endKey            select regions up to and including the region containing this row. If
660   *                          {@code null}, selection will continue through the last table region.
661   * @param responsePrototype the proto type of the response of the method in Service.
662   * @param <R>               the response type for the coprocessor Service method
663   * @return a map of result values keyed by region name
664   * @deprecated since 3.0.0, will removed in 4.0.0. The batch call here references the blocking
665   *             interface for of a protobuf stub, so it is not possible to do it in an asynchronous
666   *             way, even if now we are building the {@link Table} implementation based on the
667   *             {@link AsyncTable}, which is not good. Use the coprocessorService methods in
668   *             {@link AsyncTable} directly instead.
669   * @see Connection#toAsyncConnection()
670   */
671  @Deprecated
672  default <R extends Message> Map<byte[], R> batchCoprocessorService(
673    Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey,
674    R responsePrototype) throws ServiceException, Throwable {
675    final Map<byte[], R> results =
676      Collections.synchronizedMap(new TreeMap<byte[], R>(Bytes.BYTES_COMPARATOR));
677    batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype,
678      new Batch.Callback<R>() {
679        @Override
680        public void update(byte[] region, byte[] row, R result) {
681          if (region != null) {
682            results.put(region, result);
683          }
684        }
685      });
686    return results;
687  }
688
  /**
   * Creates an instance of the given {@link Service} subclass for each table region spanning the
   * range from the {@code startKey} row to {@code endKey} row (inclusive), all the invocations to
   * the same region server will be batched into one call. The coprocessor service is invoked
   * according to the service instance, method name and parameters.
   * <p/>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
   * method will be called with the return value from each region's invocation.
   * @param methodDescriptor  the descriptor for the protobuf service method to call.
   * @param request           the method call parameters
   * @param startKey          start region selection with region containing this row. If
   *                          {@code null}, the selection will start with the first table region.
   * @param endKey            select regions up to and including the region containing this row. If
   *                          {@code null}, selection will continue through the last table region.
   * @param responsePrototype the proto type of the response of the method in Service.
   * @param callback          callback to invoke with the response for each region
   * @param <R>               the response type for the coprocessor Service method
   * @deprecated since 3.0.0, will be removed in 4.0.0. The batch call here references the blocking
   *             interface of a protobuf stub, so it is not possible to do it in an asynchronous
   *             way, even if now we are building the {@link Table} implementation based on the
   *             {@link AsyncTable}, which is not good. Use the coprocessorService methods in
   *             {@link AsyncTable} directly instead.
   * @see Connection#toAsyncConnection()
   */
  @Deprecated
  default <R extends Message> void batchCoprocessorService(
    Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey,
    R responsePrototype, Batch.Callback<R> callback) throws ServiceException, Throwable {
    // No default behavior is possible at the interface level; implementations that support
    // batched coprocessor execution must override this method.
    throw new NotImplementedException("Add an implementation!");
  }
720
  /**
   * Get timeout of each rpc request in this Table instance. It will be overridden by a more
   * specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
   * @see #getReadRpcTimeout(TimeUnit)
   * @see #getWriteRpcTimeout(TimeUnit)
   * @param unit the unit of time in which the timeout should be represented
   * @return rpc timeout in the specified time unit
   */
  default long getRpcTimeout(TimeUnit unit) {
    // No interface-level default; implementations must supply their configured timeout.
    throw new NotImplementedException("Add an implementation!");
  }
732
  /**
   * Get timeout of each rpc read request in this Table instance.
   * @param unit the unit of time in which the timeout should be represented
   * @return read rpc timeout in the specified time unit
   */
  default long getReadRpcTimeout(TimeUnit unit) {
    // No interface-level default; implementations must supply their configured timeout.
    throw new NotImplementedException("Add an implementation!");
  }
741
  /**
   * Get timeout of each rpc write request in this Table instance.
   * @param unit the unit of time in which the timeout should be represented
   * @return write rpc timeout in the specified time unit
   */
  default long getWriteRpcTimeout(TimeUnit unit) {
    // No interface-level default; implementations must supply their configured timeout.
    throw new NotImplementedException("Add an implementation!");
  }
750
  /**
   * Get timeout of each operation in Table instance.
   * @param unit the unit of time in which the timeout should be represented
   * @return operation rpc timeout in the specified time unit
   */
  default long getOperationTimeout(TimeUnit unit) {
    // No interface-level default; implementations must supply their configured timeout.
    throw new NotImplementedException("Add an implementation!");
  }
759
  /**
   * Get the attributes to be submitted with requests.
   * @return map of request attributes; the default is the shared immutable empty map, so
   *         implementations that carry attributes must override this method
   */
  default Map<String, byte[]> getRequestAttributes() {
    return Collections.emptyMap();
  }
767}