/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;

/**
 * Used to communicate with a single HBase table.
 * Obtain an instance from a {@link Connection} and call {@link #close()} afterwards.
 *
 * <p>Table can be used to get, put, delete or scan data from a table.
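 *
 * <p>A minimal usage sketch (assuming {@code conf} is an existing
 * {@link org.apache.hadoop.conf.Configuration}, {@code "mytable"} an existing table, and
 * {@code Bytes} the {@code org.apache.hadoop.hbase.util.Bytes} helper):
 * <pre>
 * try (Connection connection = ConnectionFactory.createConnection(conf);
 *      Table table = connection.getTable(TableName.valueOf("mytable"))) {
 *   Result result = table.get(new Get(Bytes.toBytes("row1")));
 * }
 * </pre>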
 * @see ConnectionFactory
 * @see Connection
 * @see Admin
 * @see RegionLocator
 * @since 0.99.0
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface Table extends Closeable {
  /**
   * Gets the fully qualified table name instance of this table.
   */
  TableName getName();

  /**
   * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance.
   * <p>
   * The reference returned is not a copy, so any change made to it will
   * affect this instance.
   */
  Configuration getConfiguration();

  /**
   * Gets the {@link org.apache.hadoop.hbase.HTableDescriptor table descriptor} for this table.
   * @throws java.io.IOException if a remote or network exception occurs.
   */
  HTableDescriptor getTableDescriptor() throws IOException;

  /**
   * Test for the existence of columns in the table, as specified by the Get.
   * <p>
   * This will return true if the Get matches one or more keys, false if not.
   * <p>
   * This is a server-side call so it prevents any data from being transferred to
   * the client.
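   *
   * <p>A minimal sketch ({@code table} is this {@code Table}; {@code ROW}, {@code FAMILY} and
   * {@code QUALIFIER} are assumed {@code byte[]} constants):
   * <pre>
   * Get get = new Get(ROW);
   * get.addColumn(FAMILY, QUALIFIER);
   * boolean present = table.exists(get);  // no cell data is transferred back
   * </pre>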
   *
   * @param get the Get
   * @return true if the specified Get matches one or more keys, false if not
   * @throws IOException e
   */
  boolean exists(Get get) throws IOException;

  /**
   * Test for the existence of columns in the table, as specified by the Gets.
   * <p>
   * This will return an array of booleans. Each value will be true if the related Get matches
   * one or more keys, false if not.
   * <p>
   * This is a server-side call so it prevents any data from being transferred to
   * the client.
   *
   * @param gets the Gets
   * @return Array of boolean.  True if the specified Get matches one or more keys, false if not.
   * @throws IOException e
   */
  boolean[] existsAll(List<Get> gets) throws IOException;

  /**
   * Method that does a batch call on Deletes, Gets, Puts, Increments and Appends.
   * The ordering of execution of the actions is not defined: if you do a Put and a Get in the
   * same {@link #batch} call, the Get is not guaranteed to see what the Put wrote.
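   *
   * <p>A minimal sketch ({@code table} is this {@code Table}; {@code ROW1}, {@code ROW2},
   * {@code FAMILY}, {@code QUALIFIER} and {@code VALUE} are assumed {@code byte[]} constants):
   * <pre>
   * List&lt;Row&gt; actions = new ArrayList&lt;Row&gt;();
   * actions.add(new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE));
   * actions.add(new Get(ROW2));
   * Object[] results = new Object[actions.size()];
   * table.batch(actions, results);
   * </pre>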
   *
   * @param actions list of Get, Put, Delete, Increment, Append objects
   * @param results Empty Object[], same size as actions. Provides access to partial
   *                results, in case an exception is thrown. A null in the result array means that
   *                the call for that action failed, even after retries. The order of the objects
   *                in the results array corresponds to the order of actions in the request list.
   * @throws IOException
   * @since 0.90.0
   */
  void batch(final List<? extends Row> actions, final Object[] results) throws IOException,
    InterruptedException;

  /**
   * Same as {@link #batch(List, Object[])}, but with a callback.
   * @since 0.96.0
   */
  <R> void batchCallback(
    final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback)
    throws IOException, InterruptedException;

  /**
   * Extracts certain cells from a given row.
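   *
   * <p>A minimal sketch ({@code table} is this {@code Table}; {@code ROW}, {@code FAMILY} and
   * {@code QUALIFIER} are assumed {@code byte[]} constants):
   * <pre>
   * Get get = new Get(ROW);
   * get.addColumn(FAMILY, QUALIFIER);
   * Result result = table.get(get);
   * byte[] value = result.getValue(FAMILY, QUALIFIER);  // null if the cell does not exist
   * </pre>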
   * @param get The object that specifies what data to fetch and from which row.
   * @return The data coming from the specified row, if it exists.  If the row
   * specified doesn't exist, the {@link Result} instance returned won't
   * contain any {@link org.apache.hadoop.hbase.KeyValue}, as indicated by {@link Result#isEmpty()}.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  Result get(Get get) throws IOException;

  /**
   * Extracts certain cells from the given rows, in batch.
   *
   * @param gets The objects that specify what data to fetch and from which rows.
   * @return The data coming from the specified rows, if it exists.  If the row specified doesn't
   * exist, the {@link Result} instance returned won't contain any {@link
   * org.apache.hadoop.hbase.KeyValue}, as indicated by {@link Result#isEmpty()}. If there are any
   * failures even after retries, there will be a null in the results array for those Gets, AND an
   * exception will be thrown. The ordering of the Result array corresponds to the order of the
   * list of Get requests.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.90.0
   */
  Result[] get(List<Get> gets) throws IOException;

  /**
   * Returns a scanner on the current table as specified by the {@link Scan}
   * object.
   * Note that the passed {@link Scan}'s start row and caching properties
   * may be changed.
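   *
   * <p>A minimal sketch ({@code table} is this {@code Table}; {@code FAMILY} is an assumed
   * {@code byte[]} constant):
   * <pre>
   * Scan scan = new Scan();
   * scan.addFamily(FAMILY);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // process each row
   *   }
   * }
   * </pre>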
   *
   * @param scan A configured {@link Scan} object.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(Scan scan) throws IOException;

  /**
   * Gets a scanner on the current table for the given family.
   *
   * @param family The column family to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(byte[] family) throws IOException;

  /**
   * Gets a scanner on the current table for the given family and qualifier.
   *
   * @param family The column family to scan.
   * @param qualifier The column qualifier to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;

  /**
   * Puts some data in the table.
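   *
   * <p>A minimal sketch ({@code table} is this {@code Table}; {@code ROW}, {@code FAMILY},
   * {@code QUALIFIER} and {@code VALUE} are assumed {@code byte[]} constants):
   * <pre>
   * Put put = new Put(ROW);
   * put.addColumn(FAMILY, QUALIFIER, VALUE);
   * table.put(put);
   * </pre>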
   *
   * @param put The data to put.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void put(Put put) throws IOException;

  /**
   * Puts some data in the table, in batch.
   * <p>
   * This can be used for group commit, or for submitting user defined
   * batches.  The writeBuffer will be periodically inspected while the List
   * is processed, so depending on the List size the writeBuffer may not flush
   * at all, or may flush more than once.
   * @param puts The list of mutations to apply. The batch put is done by
   * aggregating the Puts in the client-side write buffer and sending them
   * to the server in a single RPC call.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void put(List<Put> puts) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the put.  If the passed value is null, the check
   * is for the lack of column (i.e. non-existence).
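   *
   * <p>A minimal sketch ({@code table} is this {@code Table}; {@code ROW}, {@code FAMILY},
   * {@code QUALIFIER}, {@code EXPECTED} and {@code VALUE} are assumed {@code byte[]} constants):
   * <pre>
   * Put put = new Put(ROW);
   * put.addColumn(FAMILY, QUALIFIER, VALUE);
   * // the put is applied only if the current cell value equals EXPECTED
   * boolean applied = table.checkAndPut(ROW, FAMILY, QUALIFIER, EXPECTED, put);
   * </pre>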
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param value the expected value
   * @param put data to put if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   */
  boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
    byte[] value, Put put) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the put.  If the passed value is null, the check
   * is for the lack of column (i.e. non-existence).
   *
   * The expected value argument of this call is on the left and the current
   * value of the cell is on the right side of the comparison operator.
   *
   * For example, the GREATER operator means: add the put if expected value > existing value.
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param compareOp comparison operator to use
   * @param value the expected value
   * @param put data to put if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   */
  boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
    CompareFilter.CompareOp compareOp, byte[] value, Put put) throws IOException;

  /**
   * Deletes the specified cells/row.
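   *
   * <p>A minimal sketch ({@code table} is this {@code Table}; {@code ROW} and {@code FAMILY}
   * are assumed {@code byte[]} constants):
   * <pre>
   * Delete delete = new Delete(ROW);
   * delete.addFamily(FAMILY);  // omit to delete the whole row
   * table.delete(delete);
   * </pre>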
   *
   * @param delete The object that specifies what to delete.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void delete(Delete delete) throws IOException;

  /**
   * Deletes the specified cells/rows in bulk.
   * @param deletes List of things to delete.  List gets modified by this
   * method (in particular it gets re-ordered, so the order in which the elements
   * are inserted in the list gives no guarantee as to the order in which the
   * {@link Delete}s are executed).
   * @throws IOException if a remote or network exception occurs. In that case
   * the {@code deletes} argument will contain the {@link Delete} instances
   * that have not been successfully applied.
   * @since 0.20.1
   */
  void delete(List<Delete> deletes) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the delete.  If the passed value is null, the
   * check is for the lack of column (i.e. non-existence).
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param value the expected value
   * @param delete data to delete if check succeeds
   * @throws IOException e
   * @return true if the new delete was executed, false otherwise
   */
  boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
    byte[] value, Delete delete) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the delete.  If the passed value is null, the
   * check is for the lack of column (i.e. non-existence).
   *
   * The expected value argument of this call is on the left and the current
   * value of the cell is on the right side of the comparison operator.
   *
   * For example, the GREATER operator means: add the delete if expected value > existing value.
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param compareOp comparison operator to use
   * @param value the expected value
   * @param delete data to delete if check succeeds
   * @throws IOException e
   * @return true if the new delete was executed, false otherwise
   */
  boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
    CompareFilter.CompareOp compareOp, byte[] value, Delete delete) throws IOException;

  /**
   * Performs multiple mutations atomically on a single row. Currently
   * {@link Put} and {@link Delete} are supported.
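   *
   * <p>A minimal sketch ({@code table} is this {@code Table}; {@code ROW}, {@code FAMILY},
   * {@code QUAL_A}, {@code QUAL_B} and {@code VALUE} are assumed {@code byte[]} constants):
   * <pre>
   * RowMutations mutations = new RowMutations(ROW);
   * mutations.add(new Put(ROW).addColumn(FAMILY, QUAL_A, VALUE));
   * mutations.add(new Delete(ROW).addColumn(FAMILY, QUAL_B));
   * table.mutateRow(mutations);
   * </pre>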
   *
   * @param rm object that specifies the set of mutations to perform atomically
   * @throws IOException
   */
  void mutateRow(final RowMutations rm) throws IOException;

  /**
   * Appends values to one or more columns within a single row.
   * <p>
   * This operation does not appear atomic to readers.  Appends are done
   * under a single row lock, so write operations to a row are synchronized, but
   * readers do not take row locks so get and scan operations can see this
   * operation partially completed.
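   *
   * <p>A minimal sketch ({@code table} is this {@code Table}; {@code ROW}, {@code FAMILY},
   * {@code QUALIFIER} and {@code SUFFIX} are assumed {@code byte[]} constants):
   * <pre>
   * Append append = new Append(ROW);
   * append.add(FAMILY, QUALIFIER, SUFFIX);  // SUFFIX is appended to any existing value
   * Result result = table.append(append);
   * </pre>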
   *
   * @param append object that specifies the columns and values to be appended
   * @throws IOException e
   * @return values of columns after the append operation (may be null)
   */
  Result append(final Append append) throws IOException;

  /**
   * Increments one or more columns within a single row.
   * <p>
   * This operation does not appear atomic to readers.  Increments are done
   * under a single row lock, so write operations to a row are synchronized, but
   * readers do not take row locks so get and scan operations can see this
   * operation partially completed.
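   *
   * <p>A minimal sketch ({@code table} is this {@code Table}; {@code ROW}, {@code FAMILY} and
   * {@code QUALIFIER} are assumed {@code byte[]} constants; {@code Bytes} is
   * {@code org.apache.hadoop.hbase.util.Bytes}):
   * <pre>
   * Increment increment = new Increment(ROW);
   * increment.addColumn(FAMILY, QUALIFIER, 1L);
   * Result result = table.increment(increment);
   * long current = Bytes.toLong(result.getValue(FAMILY, QUALIFIER));
   * </pre>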
   *
   * @param increment object that specifies the columns and amounts to be used
   *                  for the increment operations
   * @throws IOException e
   * @return values of columns after the increment
   */
  Result increment(final Increment increment) throws IOException;

  /**
   * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
   * <p>
   * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the
   * amount is negative).
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
    long amount) throws IOException;

  /**
   * Atomically increments a column value. If the column value already exists
   * and is not a big-endian long, this could throw an exception. If the column
   * value does not yet exist it is initialized to <code>amount</code> and
   * written to the specified column.
   *
   * <p>Setting durability to {@link Durability#SKIP_WAL} means that in a failure
   * scenario you will lose any increments that have not been flushed.
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the
   * amount is negative).
   * @param durability The persistence guarantee for this increment.
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
    long amount, Durability durability) throws IOException;

  /**
   * Releases any resources held by this instance and flushes any pending changes in internal
   * buffers.
   *
   * @throws IOException if a remote or network exception occurs.
   */
  @Override
  void close() throws IOException;

  /**
   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the
   * table region containing the specified row.  The row given does not actually have
   * to exist.  Whichever region would contain the row based on start and end keys will
   * be used.  Note that the {@code row} parameter is also not passed to the
   * coprocessor handler registered for this protocol, unless the {@code row}
   * is separately passed as an argument in the service request.  The parameter
   * here is only used to locate the region used to handle the call.
   *
   * <p>
   * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
   * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
   * </p>
   *
   * <div style="background-color: #cccccc; padding: 2px">
   * <blockquote><pre>
   * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
   * MyCallRequest request = MyCallRequest.newBuilder()
   *     ...
   *     .build();
   * MyCallResponse response = service.myCall(null, request);
   * </pre></blockquote></div>
   *
   * @param row The row key used to identify the remote region location
   * @return A CoprocessorRpcChannel instance
   */
  CoprocessorRpcChannel coprocessorService(byte[] row);

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
   * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * with each {@link com.google.protobuf.Service} instance.
   *
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row.  If {@code null}, the
   * selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row. If {@code
   * null}, selection will continue through the last table region.
   * @param callable this instance's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be invoked
   * once per table region, using the {@link com.google.protobuf.Service} instance connected to
   * that region.
   * @param <T> the {@link com.google.protobuf.Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's {@link
   * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * @return a map of result values keyed by region name
   */
  <T extends Service, R> Map<byte[],R> coprocessorService(final Class<T> service,
    byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable)
    throws ServiceException, Throwable;

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
   * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * with each {@link Service} instance.
   *
   * <p> The given {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],
   * byte[], Object)} method will be called with the return value from each region's {@link
   * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation. </p>
   *
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row.  If {@code null}, the
   * selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row. If {@code
   * null}, selection will continue through the last table region.
   * @param callable this instance's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be invoked
   * once per table region, using the {@link Service} instance connected to that region.
   * @param callback the callback invoked with the return value from each region's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation
   * @param <T> the {@link Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's {@link
   * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   */
  <T extends Service, R> void coprocessorService(final Class<T> service,
    byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable,
    final Batch.Callback<R> callback) throws ServiceException, Throwable;

  /**
   * Returns the maximum size in bytes of the write buffer for this Table instance.
   * <p>
   * The default value comes from the configuration parameter
   * {@code hbase.client.write.buffer}.
   * @return The size of the write buffer in bytes.
   * @deprecated as of 1.0.1 (should not have been in 1.0.0). Replaced by
   * {@link BufferedMutator#getWriteBufferSize()}
   */
  @Deprecated
  long getWriteBufferSize();

  /**
   * Sets the size of the write buffer in bytes.
   * <p>
   * If the new size is less than the current amount of data in the
   * write buffer, the buffer gets flushed.
   * @param writeBufferSize The new write buffer size, in bytes.
   * @throws IOException if a remote or network exception occurs.
   * @deprecated as of 1.0.1 (should not have been in 1.0.0). Replaced by {@link BufferedMutator} and
   * {@link BufferedMutatorParams#writeBufferSize(long)}
   */
  @Deprecated
  void setWriteBufferSize(long writeBufferSize) throws IOException;

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive).
   * All invocations to the same region server are batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   *
   * @param methodDescriptor
   *          the descriptor for the protobuf service method to call.
   * @param request
   *          the method call parameters
   * @param startKey
   *          start region selection with region containing this row. If {@code null}, the
   *          selection will start with the first table region.
   * @param endKey
   *          select regions up to and including the region containing this row. If {@code null},
   *          selection will continue through the last table region.
   * @param responsePrototype
   *          the protobuf prototype of the response message of the Service method.
   * @param <R>
   *          the response type for the coprocessor Service method
   * @throws ServiceException
   * @throws Throwable
   * @return a map of result values keyed by region name
   */
  <R extends Message> Map<byte[], R> batchCoprocessorService(
    Descriptors.MethodDescriptor methodDescriptor, Message request,
    byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable;

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive).
   * All invocations to the same region server are batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   *
   * <p>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
   * method will be called with the return value from each region's invocation.
   * </p>
   *
   * @param methodDescriptor
   *          the descriptor for the protobuf service method to call.
   * @param request
   *          the method call parameters
   * @param startKey
   *          start region selection with region containing this row. If {@code null}, the
   *          selection will start with the first table region.
   * @param endKey
   *          select regions up to and including the region containing this row. If {@code null},
   *          selection will continue through the last table region.
   * @param responsePrototype
   *          the protobuf prototype of the response message of the Service method.
   * @param callback
   *          callback to invoke with the response for each region
   * @param <R>
   *          the response type for the coprocessor Service method
   * @throws ServiceException
   * @throws Throwable
   */
  <R extends Message> void batchCoprocessorService(Descriptors.MethodDescriptor methodDescriptor,
    Message request, byte[] startKey, byte[] endKey, R responsePrototype,
    Batch.Callback<R> callback) throws ServiceException, Throwable;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value.
   * If it does, it performs the row mutations.  If the passed value is null, the check
   * is for the lack of column (i.e. non-existence).
   *
   * The expected value argument of this call is on the left and the current
   * value of the cell is on the right side of the comparison operator.
   *
   * For example, the GREATER operator means: perform the row mutations if expected value >
   * existing value.
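   *
   * <p>A minimal sketch ({@code table} is this {@code Table}; {@code ROW}, {@code FAMILY},
   * {@code QUALIFIER}, {@code EXPECTED} and {@code VALUE} are assumed {@code byte[]} constants):
   * <pre>
   * RowMutations mutations = new RowMutations(ROW);
   * mutations.add(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE));
   * boolean applied = table.checkAndMutate(ROW, FAMILY, QUALIFIER,
   *     CompareFilter.CompareOp.EQUAL, EXPECTED, mutations);
   * </pre>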
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param compareOp the comparison operator
   * @param value the expected value
   * @param mutation  mutations to perform if check succeeds
   * @throws IOException e
   * @return true if the mutations were executed, false otherwise
   */
  boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
      CompareFilter.CompareOp compareOp, byte[] value, RowMutations mutation) throws IOException;

  /**
   * Set the timeout, in milliseconds, of each operation in this Table instance, overriding the
   * value of {@code hbase.client.operation.timeout} in the configuration.
   * The operation timeout is a top-level restriction that ensures a blocking method is not
   * blocked for longer than this. Within each operation, if an rpc request fails because of a
   * timeout or another reason, the client retries until it succeeds or throws a
   * RetriesExhaustedException. However, if the total time spent blocking reaches the operation
   * timeout before the retries are exhausted, the operation breaks out early and throws a
   * SocketTimeoutException.
   * @param operationTimeout the total timeout of each operation in milliseconds.
   * @deprecated since 2.0.0, use {@link TableBuilder#setOperationTimeout} instead
   */
  @Deprecated
  void setOperationTimeout(int operationTimeout);

  /**
   * Get the timeout, in milliseconds, of each operation in this Table instance.
   */
  int getOperationTimeout();

  /**
   * Get the timeout, in milliseconds, of each rpc request in this Table instance.
   *
   * @return the currently configured read timeout
   * @deprecated Use getReadRpcTimeout or getWriteRpcTimeout instead
   */
  @Deprecated
  int getRpcTimeout();

  /**
   * Set the timeout, in milliseconds, of each rpc request in operations of this Table instance,
   * overriding the value of {@code hbase.rpc.timeout} in the configuration.
   * If an rpc request waits too long, the client stops waiting and sends a new request to retry,
   * until the retries are exhausted or the operation timeout is reached.
   * <p>
   * NOTE: This will set both the read and write timeout settings to the provided value.
   *
   * @param rpcTimeout the timeout of each rpc request in milliseconds.
   *
   * @deprecated Use setReadRpcTimeout or setWriteRpcTimeout instead
   */
  @Deprecated
  void setRpcTimeout(int rpcTimeout);

  /**
   * Get the timeout, in milliseconds, of each rpc read request in this Table instance.
   */
  int getReadRpcTimeout();

  /**
   * Set the timeout, in milliseconds, of each rpc read request in operations of this Table
   * instance, overriding the value of {@code hbase.rpc.read.timeout} in the configuration.
   * If an rpc read request waits too long, the client stops waiting and sends a new request to
   * retry, until the retries are exhausted or the operation timeout is reached.
   *
   * @param readRpcTimeout the timeout of each rpc read request, in milliseconds
   * @deprecated since 2.0.0, use {@link TableBuilder#setReadRpcTimeout} instead
   */
  @Deprecated
  void setReadRpcTimeout(int readRpcTimeout);

  /**
   * Get the timeout, in milliseconds, of each rpc write request in this Table instance.
   */
  int getWriteRpcTimeout();

  /**
   * Set the timeout, in milliseconds, of each rpc write request in operations of this Table
   * instance, overriding the value of {@code hbase.rpc.write.timeout} in the configuration.
   * If an rpc write request waits too long, the client stops waiting and sends a new request to
   * retry, until the retries are exhausted or the operation timeout is reached.
   *
   * @param writeRpcTimeout the timeout of each rpc write request, in milliseconds
   * @deprecated since 2.0.0, use {@link TableBuilder#setWriteRpcTimeout} instead
   */
  @Deprecated
  void setWriteRpcTimeout(int writeRpcTimeout);
}