/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.NotImplementedException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Used to communicate with a single HBase table. Obtain an instance from a {@link Connection} and
 * call {@link #close()} afterwards.
 * <p>
 * <code>Table</code> can be used to get, put, delete or scan data from a table.
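 * <p>
 * A minimal usage sketch (the table, family and qualifier names below are illustrative only):
 *
 * <pre>
 * <code>
 * try (Connection connection = ConnectionFactory.createConnection(conf);
 *   Table table = connection.getTable(TableName.valueOf("mytable"))) {
 *   Put put = new Put(Bytes.toBytes("row1"));
 *   put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
 *   table.put(put);
 * }
 * </code>
 * </pre>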
 * @see ConnectionFactory
 * @see Connection
 * @see Admin
 * @see RegionLocator
 * @since 0.99.0
 */
@InterfaceAudience.Public
public interface Table extends Closeable {
  /**
   * Gets the fully qualified table name instance of this table.
   */
  TableName getName();

  /**
   * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance.
   * <p>
   * The reference returned is not a copy, so any change made to it will affect this instance.
   */
  Configuration getConfiguration();

  /**
   * Gets the {@link org.apache.hadoop.hbase.HTableDescriptor table descriptor} for this table.
   * @throws java.io.IOException if a remote or network exception occurs.
   * @deprecated since 2.0 version and will be removed in 3.0 version. use {@link #getDescriptor()}
   */
  @Deprecated
  default HTableDescriptor getTableDescriptor() throws IOException {
    TableDescriptor descriptor = getDescriptor();

    if (descriptor instanceof HTableDescriptor) {
      return (HTableDescriptor) descriptor;
    } else {
      return new HTableDescriptor(descriptor);
    }
  }

  /**
   * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this
   * table.
   * @throws java.io.IOException if a remote or network exception occurs.
   */
  TableDescriptor getDescriptor() throws IOException;

  /**
   * Gets the {@link RegionLocator} for this table.
   */
  RegionLocator getRegionLocator() throws IOException;

  /**
   * Test for the existence of columns in the table, as specified by the Get.
   * <p>
   * This will return true if the Get matches one or more keys, false if not.
   * <p>
   * This is a server-side call so it prevents any data from being transferred to the client.
   * @param get the Get
   * @return true if the specified Get matches one or more keys, false if not
   * @throws IOException e
   */
  default boolean exists(Get get) throws IOException {
    return exists(Collections.singletonList(get))[0];
  }

  /**
   * Test for the existence of columns in the table, as specified by the Gets.
   * <p>
   * This will return an array of booleans. Each value will be true if the related Get matches one
   * or more keys, false if not.
   * <p>
   * This is a server-side call so it prevents any data from being transferred to the client.
   * @param gets the Gets
   * @return Array of boolean. True if the specified Get matches one or more keys, false if not.
   * @throws IOException e
   */
  default boolean[] exists(List<Get> gets) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Test for the existence of columns in the table, as specified by the Gets. This will return an
   * array of booleans. Each value will be true if the related Get matches one or more keys, false
   * if not. This is a server-side call so it prevents any data from being transferred to the
   * client.
   * @param gets the Gets
   * @return Array of boolean. True if the specified Get matches one or more keys, false if not.
   * @throws IOException e
   * @deprecated since 2.0 version and will be removed in 3.0 version. use {@link #exists(List)}
   */
  @Deprecated
  default boolean[] existsAll(List<Get> gets) throws IOException {
    return exists(gets);
  }

  /**
   * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations. The
   * ordering of execution of the actions is not defined. This means that if you do a Put and a Get
   * in the same {@link #batch} call, you are not necessarily guaranteed that the Get returns what
   * the Put had put.
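   * <p>
   * A minimal sketch of mixing actions in one call (row, family and qualifier names are
   * illustrative only):
   *
   * <pre>
   * <code>
   * List&lt;Row&gt; actions = new ArrayList&lt;&gt;();
   * actions.add(new Put(Bytes.toBytes("row1"))
   *   .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
   * actions.add(new Get(Bytes.toBytes("row2")));
   * Object[] results = new Object[actions.size()];
   * table.batch(actions, results);
   * </code>
   * </pre>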
   * @param actions list of Get, Put, Delete, Increment, Append, RowMutations.
   * @param results Empty Object[], same size as actions. Provides access to partial results, in
   *                case an exception is thrown. A null in the result array means that the call for
   *                that action failed, even after retries. The order of the objects in the results
   *                array corresponds to the order of actions in the request list.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.90.0
   */
  default void batch(final List<? extends Row> actions, final Object[] results)
    throws IOException, InterruptedException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Same as {@link #batch(List, Object[])}, but with a callback.
   * @since 0.96.0
   */
  default <R> void batchCallback(final List<? extends Row> actions, final Object[] results,
    final Batch.Callback<R> callback) throws IOException, InterruptedException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Extracts certain cells from a given row.
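   * <p>
   * A short sketch of reading a single value (family and qualifier names are illustrative only):
   *
   * <pre>
   * <code>
   * Get get = new Get(Bytes.toBytes("row1"));
   * get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
   * Result result = table.get(get);
   * byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
   * </code>
   * </pre>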
   * @param get The object that specifies what data to fetch and from which row.
   * @return The data coming from the specified row, if it exists. If the row specified doesn't
   *         exist, the {@link Result} instance returned won't contain any
   *         {@link org.apache.hadoop.hbase.KeyValue}, as indicated by {@link Result#isEmpty()}.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default Result get(Get get) throws IOException {
    return get(Collections.singletonList(get))[0];
  }

  /**
   * Extracts specified cells from the given rows, as a batch.
   * @param gets The objects that specify what data to fetch and from which rows.
   * @return The data coming from the specified rows, if it exists. If a specified row doesn't
   *         exist, the {@link Result} instance returned won't contain any
   *         {@link org.apache.hadoop.hbase.Cell}s, as indicated by {@link Result#isEmpty()}. If
   *         there are any failures even after retries, there will be a <code>null</code> in the
   *         results array for those Gets, AND an exception will be thrown. The ordering of the
   *         Result array corresponds to the order of the list of passed-in Gets.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.90.0
   * @apiNote {@link #put(List)} runs pre-flight validations on the input list on the client side.
   *          Currently {@link #get(List)} doesn't run any client-side validations because there is
   *          no need, but this may change in the future, in which case an
   *          {@link IllegalArgumentException} will be thrown.
   */
  default Result[] get(List<Get> gets) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Returns a scanner on the current table as specified by the {@link Scan} object. Note that the
   * passed {@link Scan}'s start row and caching properties may be changed.
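   * <p>
   * A minimal sketch of scanning a single family (the family name is illustrative only):
   *
   * <pre>
   * <code>
   * Scan scan = new Scan().addFamily(Bytes.toBytes("cf"));
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // process each Result
   *   }
   * }
   * </code>
   * </pre>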
   * @param scan A configured {@link Scan} object.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default ResultScanner getScanner(Scan scan) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Gets a scanner on the current table for the given family.
   * @param family The column family to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default ResultScanner getScanner(byte[] family) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Gets a scanner on the current table for the given family and qualifier.
   * @param family    The column family to scan.
   * @param qualifier The column qualifier to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Puts some data in the table.
   * @param put The data to put.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default void put(Put put) throws IOException {
    put(Collections.singletonList(put));
  }

  /**
   * Batch puts the specified data into the table.
   * <p>
   * This can be used for group commit, or for submitting user-defined batches. Before sending a
   * batch of mutations to the server, the client runs a few validations on the input list. If an
   * error is found, for example a mutation was supplied but was missing its column family, an
   * {@link IllegalArgumentException} will be thrown and no mutations will be applied. If there are
   * any failures even after retries, a {@link RetriesExhaustedWithDetailsException} will be thrown.
   * RetriesExhaustedWithDetailsException contains lists of failed mutations and corresponding
   * remote exceptions. The ordering of mutations and exceptions in the encapsulating exception
   * corresponds to the order of the input list of Put requests.
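   * <p>
   * A minimal sketch of a batch put (row, family and qualifier names are illustrative only):
   *
   * <pre>
   * <code>
   * List&lt;Put&gt; puts = new ArrayList&lt;&gt;();
   * for (int i = 0; i &lt; 10; i++) {
   *   puts.add(new Put(Bytes.toBytes("row" + i))
   *     .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v" + i)));
   * }
   * table.put(puts);
   * </code>
   * </pre>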
   * @param puts The list of mutations to apply.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default void put(List<Put> puts) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
   * adds the put. If the passed value is null, the check is for the lack of column (ie:
   * non-existence).
   * @param row       to check
   * @param family    column family to check
   * @param qualifier column qualifier to check
   * @param value     the expected value
   * @param put       data to put if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
    throws IOException {
    return checkAndPut(row, family, qualifier, CompareOperator.EQUAL, value, put);
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
   * adds the put. If the passed value is null, the check is for the lack of column (ie:
   * non-existence). The expected value argument of this call is on the left and the current value
   * of the cell is on the right side of the comparison operator, e.g. a GREATER operator means the
   * put is added only if the expected value is greater than the existing value.
   * @param row       to check
   * @param family    column family to check
   * @param qualifier column qualifier to check
   * @param compareOp comparison operator to use
   * @param value     the expected value
   * @param put       data to put if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
    CompareFilter.CompareOp compareOp, byte[] value, Put put) throws IOException {
    RowMutations mutations = new RowMutations(put.getRow(), 1);
    mutations.add(put);

    return checkAndMutate(row, family, qualifier, compareOp, value, mutations);
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
   * adds the put. If the passed value is null, the check is for the lack of column (ie:
   * non-existence). The expected value argument of this call is on the left and the current value
   * of the cell is on the right side of the comparison operator, e.g. a GREATER operator means the
   * put is added only if the expected value is greater than the existing value.
   * @param row       to check
   * @param family    column family to check
   * @param qualifier column qualifier to check
   * @param op        comparison operator to use
   * @param value     the expected value
   * @param put       data to put if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
    byte[] value, Put put) throws IOException {
    RowMutations mutations = new RowMutations(put.getRow(), 1);
    mutations.add(put);

    return checkAndMutate(row, family, qualifier, op, value, mutations);
  }

  /**
   * Deletes the specified cells/row.
   * @param delete The object that specifies what to delete.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default void delete(Delete delete) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Batch Deletes the specified cells/rows from the table.
   * <p>
   * If a specified row does not exist, the {@link Delete} will be reported as a successful delete;
   * no exception will be thrown. If there are any failures even after retries, a
   * {@link RetriesExhaustedWithDetailsException} will be thrown.
   * RetriesExhaustedWithDetailsException contains lists of failed {@link Delete}s and corresponding
   * remote exceptions.
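   * <p>
   * A minimal sketch of a batch delete (row names are illustrative only):
   *
   * <pre>
   * <code>
   * List&lt;Delete&gt; deletes = new ArrayList&lt;&gt;();
   * deletes.add(new Delete(Bytes.toBytes("row1")));
   * deletes.add(new Delete(Bytes.toBytes("row2")));
   * table.delete(deletes);
   * </code>
   * </pre>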
   * @param deletes List of things to delete. The input list gets modified by this method. All
   *                successfully applied {@link Delete}s in the list are removed (in particular it
   *                gets re-ordered, so the order in which the elements are inserted in the list
   *                gives no guarantee as to the order in which the {@link Delete}s are executed).
   * @throws IOException if a remote or network exception occurs. In that case the {@code deletes}
   *                     argument will contain the {@link Delete} instances that have not been
   *                     successfully applied.
   * @since 0.20.1
   * @apiNote In the 3.0.0 version, the input list {@code deletes} will no longer be modified. Also,
   *          {@link #put(List)} runs pre-flight validations on the input list on the client side.
   *          Currently {@link #delete(List)} doesn't run client-side validations because there is
   *          no need, but this may change in the future, in which case an
   *          {@link IllegalArgumentException} will be thrown.
   */
  default void delete(List<Delete> deletes) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
   * adds the delete. If the passed value is null, the check is for the lack of column (ie:
   * non-existence).
   * @param row       to check
   * @param family    column family to check
   * @param qualifier column qualifier to check
   * @param value     the expected value
   * @param delete    data to delete if check succeeds
   * @throws IOException e
   * @return true if the new delete was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value,
    Delete delete) throws IOException {
    return checkAndDelete(row, family, qualifier, CompareOperator.EQUAL, value, delete);
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
   * adds the delete. If the passed value is null, the check is for the lack of column (ie:
   * non-existence). The expected value argument of this call is on the left and the current value
   * of the cell is on the right side of the comparison operator, e.g. a GREATER operator means the
   * delete is applied only if the expected value is greater than the existing value.
   * @param row       to check
   * @param family    column family to check
   * @param qualifier column qualifier to check
   * @param compareOp comparison operator to use
   * @param value     the expected value
   * @param delete    data to delete if check succeeds
   * @throws IOException e
   * @return true if the new delete was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
    CompareFilter.CompareOp compareOp, byte[] value, Delete delete) throws IOException {
    RowMutations mutations = new RowMutations(delete.getRow(), 1);
    mutations.add(delete);

    return checkAndMutate(row, family, qualifier, compareOp, value, mutations);
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
   * adds the delete. If the passed value is null, the check is for the lack of column (ie:
   * non-existence). The expected value argument of this call is on the left and the current value
   * of the cell is on the right side of the comparison operator, e.g. a GREATER operator means the
   * delete is applied only if the expected value is greater than the existing value.
   * @param row       to check
   * @param family    column family to check
   * @param qualifier column qualifier to check
   * @param op        comparison operator to use
   * @param value     the expected value
   * @param delete    data to delete if check succeeds
   * @throws IOException e
   * @return true if the new delete was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
    byte[] value, Delete delete) throws IOException {
    RowMutations mutations = new RowMutations(delete.getRow(), 1);
    mutations.add(delete);

    return checkAndMutate(row, family, qualifier, op, value, mutations);
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
   * adds the Put/Delete/RowMutations.
   * <p>
   * Use the returned {@link CheckAndMutateBuilder} to construct your request and then execute it.
   * This is a fluent style API; the code looks like:
   *
   * <pre>
   * <code>
   * table.checkAndMutate(row, family).qualifier(qualifier).ifNotExists().thenPut(put);
   * </code>
   * </pre>
   *
   * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it
   *             any more.
   */
  @Deprecated
  default CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * A helper class for sending checkAndMutate request.
   * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it
   *             any more.
   */
  @Deprecated
  interface CheckAndMutateBuilder {

    /**
     * Specify a column qualifier
     * @param qualifier column qualifier to check.
     */
    CheckAndMutateBuilder qualifier(byte[] qualifier);

    /**
     * Specify a timerange.
     * @param timeRange timeRange to check
     */
    CheckAndMutateBuilder timeRange(TimeRange timeRange);

    /**
     * Check for lack of column.
     */
    CheckAndMutateBuilder ifNotExists();

    /**
     * Check for equality.
     * @param value the expected value
     */
    default CheckAndMutateBuilder ifEquals(byte[] value) {
      return ifMatches(CompareOperator.EQUAL, value);
    }

    /**
     * Check for match.
     * @param compareOp comparison operator to use
     * @param value     the expected value
     */
    CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value);

    /**
     * Specify a Put to commit if the check succeeds.
     * @param put data to put if check succeeds
     * @return {@code true} if the new put was executed, {@code false} otherwise.
     */
    boolean thenPut(Put put) throws IOException;

    /**
     * Specify a Delete to commit if the check succeeds.
     * @param delete data to delete if check succeeds
     * @return {@code true} if the new delete was executed, {@code false} otherwise.
     */
    boolean thenDelete(Delete delete) throws IOException;

    /**
     * Specify a RowMutations to commit if the check succeeds.
     * @param mutation mutations to perform if check succeeds
     * @return true if the new mutation was executed, false otherwise.
     */
    boolean thenMutate(RowMutations mutation) throws IOException;
  }

  /**
   * Atomically checks if a row matches the specified filter. If it does, it adds the
   * Put/Delete/RowMutations.
   * <p>
   * Use the returned {@link CheckAndMutateWithFilterBuilder} to construct your request and then
   * execute it. This is a fluent style API; the code looks like:
   *
   * <pre>
   * <code>
   * table.checkAndMutate(row, filter).thenPut(put);
   * </code>
   * </pre>
   *
   * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it
   *             any more.
   */
  @Deprecated
  default CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * A helper class for sending checkAndMutate request with a filter.
   * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it
   *             any more.
   */
  @Deprecated
  interface CheckAndMutateWithFilterBuilder {

    /**
     * Specify a timerange.
     * @param timeRange timeRange to check
     */
    CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange);

    /**
     * Specify a Put to commit if the check succeeds.
     * @param put data to put if check succeeds
     * @return {@code true} if the new put was executed, {@code false} otherwise.
     */
    boolean thenPut(Put put) throws IOException;

    /**
     * Specify a Delete to commit if the check succeeds.
     * @param delete data to delete if check succeeds
     * @return {@code true} if the new delete was executed, {@code false} otherwise.
     */
    boolean thenDelete(Delete delete) throws IOException;

    /**
     * Specify a RowMutations to commit if the check succeeds.
     * @param mutation mutations to perform if check succeeds
     * @return true if the new mutation was executed, false otherwise.
     */
    boolean thenMutate(RowMutations mutation) throws IOException;
  }

  /**
   * checkAndMutate that atomically checks if a row matches the specified condition. If it does, it
   * performs the specified action.
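   * <p>
   * A minimal sketch, assuming the {@link CheckAndMutate} builder API ({@code row},
   * {@code family}, {@code qualifier}, {@code expectedValue} and {@code put} are illustrative
   * variables):
   *
   * <pre>
   * <code>
   * CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
   *   .ifEquals(family, qualifier, expectedValue)
   *   .build(put);
   * CheckAndMutateResult result = table.checkAndMutate(checkAndMutate);
   * if (result.isSuccess()) {
   *   // the put was applied
   * }
   * </code>
   * </pre>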
   * @param checkAndMutate The CheckAndMutate object.
   * @return A CheckAndMutateResult object that represents the result for the CheckAndMutate.
   * @throws IOException if a remote or network exception occurs.
   */
  default CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws IOException {
    return checkAndMutate(Collections.singletonList(checkAndMutate)).get(0);
  }

  /**
   * Batch version of checkAndMutate. The specified CheckAndMutates are batched only in the sense
   * that they are sent to a RS in one RPC, but each CheckAndMutate operation is still executed
   * atomically (and thus, each may fail independently of others).
   * @param checkAndMutates The list of CheckAndMutate.
   * @return A list of CheckAndMutateResult objects that represents the result for each
   *         CheckAndMutate.
   * @throws IOException if a remote or network exception occurs.
   */
  default List<CheckAndMutateResult> checkAndMutate(List<CheckAndMutate> checkAndMutates)
    throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Performs multiple mutations atomically on a single row. Currently {@link Put}, {@link Delete},
   * {@link Increment} and {@link Append} are supported.
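   * <p>
   * A minimal sketch (row, family and qualifier names are illustrative only):
   *
   * <pre>
   * <code>
   * RowMutations rm = new RowMutations(Bytes.toBytes("row1"));
   * rm.add(new Put(Bytes.toBytes("row1"))
   *   .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v")));
   * rm.add(new Delete(Bytes.toBytes("row1")).addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q2")));
   * table.mutateRow(rm);
   * </code>
   * </pre>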
   * @param rm object that specifies the set of mutations to perform atomically
   * @return results of Increment/Append operations
   * @throws IOException if a remote or network exception occurs.
   */
  default Result mutateRow(final RowMutations rm) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Appends values to one or more columns within a single row.
   * <p>
   * This operation guarantees atomicity to readers. Appends are done under a single row lock, so
   * write operations to a row are synchronized, and readers are guaranteed to see this operation
   * fully completed.
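   * <p>
   * A minimal sketch (row, family and qualifier names are illustrative only):
   *
   * <pre>
   * <code>
   * Append append = new Append(Bytes.toBytes("row1"));
   * append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
   * Result result = table.append(append);
   * </code>
   * </pre>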
   * @param append object that specifies the columns and values to be appended
   * @throws IOException e
   * @return values of columns after the append operation (may be null)
   */
  default Result append(final Append append) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Increments one or more columns within a single row.
   * <p>
   * This operation ensures atomicity to readers. Increments are done under a single row lock, so
   * write operations to a row are synchronized, and readers are guaranteed to see this operation
   * fully completed.
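   * <p>
   * A minimal sketch (row, family and qualifier names are illustrative only):
   *
   * <pre>
   * <code>
   * Increment increment = new Increment(Bytes.toBytes("row1"));
   * increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
   * Result result = table.increment(increment);
   * long newValue = Bytes.toLong(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("counter")));
   * </code>
   * </pre>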
   * @param increment object that specifies the columns and amounts to be used for the increment
   *                  operations
   * @throws IOException e
   * @return values of columns after the increment
   */
  default Result increment(final Increment increment) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
   * <p>
   * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
   * @param row       The row that contains the cell to increment.
   * @param family    The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount    The amount to increment the cell with (or decrement, if the amount is
   *                  negative).
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount)
    throws IOException {
    Increment increment = new Increment(row).addColumn(family, qualifier, amount);
    Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
  }

  /**
   * Atomically increments a column value. If the column value already exists and is not a
   * big-endian long, this could throw an exception. If the column value does not yet exist it is
   * initialized to <code>amount</code> and written to the specified column.
   * <p>
   * Setting durability to {@link Durability#SKIP_WAL} means that in a fail scenario you will lose
   * any increments that have not been flushed.
   * @param row        The row that contains the cell to increment.
   * @param family     The column family of the cell to increment.
   * @param qualifier  The column qualifier of the cell to increment.
   * @param amount     The amount to increment the cell with (or decrement, if the amount is
   *                   negative).
   * @param durability The persistence guarantee for this increment.
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount,
    Durability durability) throws IOException {
    Increment increment =
      new Increment(row).addColumn(family, qualifier, amount).setDurability(durability);
    Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
  }

  /**
   * Releases any resources held or pending changes in internal buffers.
   * @throws IOException if a remote or network exception occurs.
   */
  @Override
  default void close() throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the table
   * region containing the specified row. The row given does not actually have to exist. Whichever
   * region would contain the row based on start and end keys will be used. Note that the
   * {@code row} parameter is also not passed to the coprocessor handler registered for this
   * protocol, unless the {@code row} is separately passed as an argument in the service request.
   * The parameter here is only used to locate the region used to handle the call.
   * <p>
   * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
   * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
   * </p>
   * <div style="background-color: #cccccc; padding: 2px"> <blockquote>
   *
   * <pre>
   * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
   * MyCallRequest request = MyCallRequest.newBuilder()
   *     ...
   *     .build();
   * MyCallResponse response = service.myCall(null, request);
   * </pre>
   *
   * </blockquote></div>
   * @param row The row key used to identify the remote region location
   * @return A CoprocessorRpcChannel instance
   */
  default CoprocessorRpcChannel coprocessorService(byte[] row) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
   * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * with each {@link com.google.protobuf.Service} instance.
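   * <p>
   * A minimal sketch, where {@code MyService} is a hypothetical coprocessor protobuf service and
   * the body of {@code call} is elided:
   *
   * <pre>
   * Map&lt;byte[], Long&gt; results = table.coprocessorService(MyService.class, null, null,
   *     new Batch.Call&lt;MyService, Long&gt;() {
   *       public Long call(MyService instance) throws IOException {
   *         // invoke the coprocessor method on this region's service instance and
   *         // return the per-region value
   *         ...
   *       }
   *     });
   * </pre>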
   * @param service  the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row. If {@code null}, the
   *                 selection will start with the first table region.
   * @param endKey   select regions up to and including the region containing this row. If
   *                 {@code null}, selection will continue through the last table region.
   * @param callable this instance's
   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will
   *                 be invoked once per table region, using the {@link com.google.protobuf.Service}
   *                 instance connected to that region.
   * @param <T>      the {@link com.google.protobuf.Service} subclass to connect to
   * @param <R>      Return type for the {@code callable} parameter's
   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * @return a map of result values keyed by region name
   */
  default <T extends Service, R> Map<byte[], R> coprocessorService(final Class<T> service,
    byte[] startKey, byte[] endKey, final Batch.Call<T, R> callable)
    throws ServiceException, Throwable {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
   * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * with each {@link Service} instance.
   * <p>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
   * method will be called with the return value from each region's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.
   * </p>
   * @param service  the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row. If {@code null}, the
   *                 selection will start with the first table region.
   * @param endKey   select regions up to and including the region containing this row. If
   *                 {@code null}, selection will continue through the last table region.
   * @param callable this instance's
   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will
   *                 be invoked once per table region, using the {@link Service} instance connected
   *                 to that region.
   * @param <T>      the {@link Service} subclass to connect to
   * @param <R>      Return type for the {@code callable} parameter's
   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   */
  default <T extends Service, R> void coprocessorService(final Class<T> service, byte[] startKey,
    byte[] endKey, final Batch.Call<T, R> callable, final Batch.Callback<R> callback)
    throws ServiceException, Throwable {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all
   * the invocations to the same region server will be batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   * @param methodDescriptor  the descriptor for the protobuf service method to call.
   * @param request           the method call parameters
   * @param startKey          start region selection with region containing this row. If
   *                          {@code null}, the selection will start with the first table region.
   * @param endKey            select regions up to and including the region containing this row. If
   *                          {@code null}, selection will continue through the last table region.
   * @param responsePrototype the proto type of the response of the method in Service.
   * @param <R>               the response type for the coprocessor Service method
   * @return a map of result values keyed by region name
   */
  default <R extends Message> Map<byte[], R> batchCoprocessorService(
    Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey,
    R responsePrototype) throws ServiceException, Throwable {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all
   * the invocations to the same region server will be batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   * <p>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
   * method will be called with the return value from each region's invocation.
   * </p>
   * @param methodDescriptor  the descriptor for the protobuf service method to call.
   * @param request           the method call parameters
   * @param startKey          start region selection with region containing this row. If
   *                          {@code null}, the selection will start with the first table region.
   * @param endKey            select regions up to and including the region containing this row. If
   *                          {@code null}, selection will continue through the last table region.
   * @param responsePrototype the proto type of the response of the method in Service.
   * @param callback          callback to invoke with the response for each region
   * @param <R>               the response type for the coprocessor Service method
   */
  default <R extends Message> void batchCoprocessorService(
    Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey,
    R responsePrototype, Batch.Callback<R> callback) throws ServiceException, Throwable {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
   * performs the row mutations. If the passed value is null, the check is for the lack of column
   * (ie: non-existence). The expected value argument of this call is on the left and the current
   * value of the cell is on the right side of the comparison operator, e.g. a GREATER operator
   * means the row mutations are performed only if the expected value is greater than the existing
   * value.
   * @param row       to check
   * @param family    column family to check
   * @param qualifier column qualifier to check
   * @param compareOp the comparison operator
   * @param value     the expected value
   * @param mutation  mutations to perform if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
    CompareFilter.CompareOp compareOp, byte[] value, RowMutations mutation) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
   * performs the row mutations. If the passed value is null, the check is for the lack of column
   * (ie: non-existence). The expected value argument of this call is on the left and the current
   * value of the cell is on the right side of the comparison operator, e.g. a GREATER operator
   * means the row mutations are performed only if the expected value is greater than the existing
   * value.
   * @param row       to check
   * @param family    column family to check
   * @param qualifier column qualifier to check
   * @param op        the comparison operator
   * @param value     the expected value
   * @param mutation  mutations to perform if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
    byte[] value, RowMutations mutation) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout of each rpc request in this Table instance. It will be overridden by a more
   * specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
   * @see #getReadRpcTimeout(TimeUnit)
   * @see #getWriteRpcTimeout(TimeUnit)
   * @param unit the unit of time the timeout to be represented in
   * @return rpc timeout in the specified time unit
   */
  default long getRpcTimeout(TimeUnit unit) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout (millisecond) of each rpc request in this Table instance.
   * @return Currently configured read timeout
   * @deprecated use {@link #getReadRpcTimeout(TimeUnit)} or {@link #getWriteRpcTimeout(TimeUnit)}
   *             instead
   */
  @Deprecated
  default int getRpcTimeout() {
    return (int) getRpcTimeout(TimeUnit.MILLISECONDS);
  }

  /**
   * Set timeout (millisecond) of each rpc request in operations of this Table instance. It will
   * override the value of hbase.rpc.timeout in configuration. If an rpc request waits too long, it
   * will stop waiting and send a new request to retry, until the retries are exhausted or the
   * operation timeout is reached.
   * <p>
   * NOTE: This will set both the read and write timeout settings to the provided value.
   * @param rpcTimeout the timeout of each rpc request in millisecond.
   * @deprecated Use setReadRpcTimeout or setWriteRpcTimeout instead
   */
  @Deprecated
  default void setRpcTimeout(int rpcTimeout) {
    setReadRpcTimeout(rpcTimeout);
    setWriteRpcTimeout(rpcTimeout);
  }

  /**
   * Get timeout of each rpc read request in this Table instance.
   * @param unit the unit of time the timeout to be represented in
   * @return read rpc timeout in the specified time unit
   */
  default long getReadRpcTimeout(TimeUnit unit) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout (millisecond) of each rpc read request in this Table instance.
   * @deprecated since 2.0 and will be removed in 3.0 version use
   *             {@link #getReadRpcTimeout(TimeUnit)} instead
   */
  @Deprecated
  default int getReadRpcTimeout() {
    return (int) getReadRpcTimeout(TimeUnit.MILLISECONDS);
  }

  /**
   * Set timeout (millisecond) of each rpc read request in operations of this Table instance. It
   * will override the value of hbase.rpc.read.timeout in configuration. If an rpc read request
   * waits too long, it will stop waiting and send a new request to retry, until the retries are
   * exhausted or the operation timeout is reached.
   * @param readRpcTimeout the timeout for read rpc request in milliseconds
   * @deprecated since 2.0.0, use {@link TableBuilder#setReadRpcTimeout} instead
   */
  @Deprecated
  default void setReadRpcTimeout(int readRpcTimeout) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout of each rpc write request in this Table instance.
   * @param unit the unit of time the timeout to be represented in
   * @return write rpc timeout in the specified time unit
   */
  default long getWriteRpcTimeout(TimeUnit unit) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout (millisecond) of each rpc write request in this Table instance.
   * @deprecated since 2.0 and will be removed in 3.0 version use
   *             {@link #getWriteRpcTimeout(TimeUnit)} instead
   */
  @Deprecated
  default int getWriteRpcTimeout() {
    return (int) getWriteRpcTimeout(TimeUnit.MILLISECONDS);
  }

  /**
   * Set timeout (millisecond) of each rpc write request in operations of this Table instance. It
   * will override the value of hbase.rpc.write.timeout in configuration. If an rpc write request
   * waits too long, it will stop waiting and send a new request to retry, until the retries are
   * exhausted or the operation timeout is reached.
   * @param writeRpcTimeout the timeout for write rpc request in milliseconds
   * @deprecated since 2.0.0, use {@link TableBuilder#setWriteRpcTimeout} instead
   */
  @Deprecated
  default void setWriteRpcTimeout(int writeRpcTimeout) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout of each operation in Table instance.
   * @param unit the unit of time the timeout to be represented in
   * @return operation rpc timeout in the specified time unit
   */
  default long getOperationTimeout(TimeUnit unit) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout (millisecond) of each operation in this Table instance.
   * @deprecated since 2.0 and will be removed in 3.0 version use
   *             {@link #getOperationTimeout(TimeUnit)} instead
   */
  @Deprecated
  default int getOperationTimeout() {
    return (int) getOperationTimeout(TimeUnit.MILLISECONDS);
  }

  /**
   * Set timeout (millisecond) of each operation in this Table instance. It will override the value
   * of hbase.client.operation.timeout in configuration. The operation timeout is a top-level
   * restriction that makes sure a blocking method will not be blocked for longer than this. In each
   * operation, if an rpc request fails because of a timeout or another reason, it will retry until
   * it succeeds or a RetriesExhaustedException is thrown. But if the total blocking time reaches
   * the operation timeout before the retries are exhausted, it will break early and throw a
   * SocketTimeoutException.
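   * <p>
   * A minimal sketch of the recommended replacement, assuming a {@link Connection} named
   * {@code connection} and an {@code ExecutorService} named {@code pool}:
   *
   * <pre>
   * <code>
   * Table table = connection.getTableBuilder(TableName.valueOf("mytable"), pool)
   *   .setOperationTimeout(60000)
   *   .build();
   * </code>
   * </pre>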
   * @param operationTimeout the total timeout of each operation in millisecond.
   * @deprecated since 2.0.0, use {@link TableBuilder#setOperationTimeout} instead
   */
  @Deprecated
  default void setOperationTimeout(int operationTimeout) {
    throw new NotImplementedException("Add an implementation!");
  }
}