/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.NotImplementedException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Used to communicate with a single HBase table.
 * Obtain an instance from a {@link Connection} and call {@link #close()} afterwards.
 *
 * <p><code>Table</code> can be used to get, put, delete or scan data from a table.
 * @see ConnectionFactory
 * @see Connection
 * @see Admin
 * @see RegionLocator
 * @since 0.99.0
 */
@InterfaceAudience.Public
public interface Table extends Closeable {
  /**
   * Gets the fully qualified table name instance of this table.
   */
  TableName getName();

  /**
   * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance.
   * <p>
   * The reference returned is not a copy, so any change made to it will
   * affect this instance.
   */
  Configuration getConfiguration();

  /**
   * Gets the {@link org.apache.hadoop.hbase.HTableDescriptor table descriptor} for this table.
   * @throws java.io.IOException if a remote or network exception occurs.
   * @deprecated since version 2.0 and will be removed in version 3.0.
   *             Use {@link #getDescriptor()} instead.
   */
  @Deprecated
  default HTableDescriptor getTableDescriptor() throws IOException {
    TableDescriptor descriptor = getDescriptor();

    if (descriptor instanceof HTableDescriptor) {
      return (HTableDescriptor)descriptor;
    } else {
      return new HTableDescriptor(descriptor);
    }
  }

  /**
   * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this table.
   * @throws java.io.IOException if a remote or network exception occurs.
   */
  TableDescriptor getDescriptor() throws IOException;

  /**
   * Gets the {@link RegionLocator} for this table.
   */
  RegionLocator getRegionLocator() throws IOException;

  /**
   * Test for the existence of columns in the table, as specified by the Get.
   * <p>
   *
   * This will return true if the Get matches one or more keys, false if not.
   * <p>
   *
   * This is a server-side call, so it prevents any data from being transferred to
   * the client.
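   *
   * <p>A minimal usage sketch (assuming {@code table} is an open Table; the row key, family and
   * qualifier below are illustrative):
   * <pre>
   * <code>
   * Get get = new Get(Bytes.toBytes("row1"));
   * get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
   * boolean present = table.exists(get);
   * </code>
   * </pre>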
   *
   * @param get the Get
   * @return true if the specified Get matches one or more keys, false if not
   * @throws IOException e
   */
  default boolean exists(Get get) throws IOException {
    return exists(Collections.singletonList(get))[0];
  }

  /**
   * Test for the existence of columns in the table, as specified by the Gets.
   * <p>
   *
   * This will return an array of booleans. Each value will be true if the related Get matches
   * one or more keys, false if not.
   * <p>
   *
   * This is a server-side call so it prevents any data from being transferred to
   * the client.
   *
   * @param gets the Gets
   * @return Array of boolean.  True if the specified Get matches one or more keys, false if not.
   * @throws IOException e
   */
  default boolean[] exists(List<Get> gets) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Test for the existence of columns in the table, as specified by the Gets.
   * This will return an array of booleans. Each value will be true if the related Get matches
   * one or more keys, false if not.
   * This is a server-side call so it prevents any data from being transferred to
   * the client.
   *
   * @param gets the Gets
   * @return Array of boolean.  True if the specified Get matches one or more keys, false if not.
   * @throws IOException e
   * @deprecated since version 2.0 and will be removed in version 3.0.
   *             Use {@link #exists(List)} instead.
   */
  @Deprecated
  default boolean[] existsAll(List<Get> gets) throws IOException {
    return exists(gets);
  }

  /**
   * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations.
   * The ordering of execution of the actions is not defined. This means that if you do a Put
   * and a Get in the same {@link #batch} call, the Get is not guaranteed to return what the
   * Put had put.
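   *
   * <p>A minimal usage sketch, assuming {@code put}, {@code get} and {@code delete} have
   * already been built for this table:
   * <pre>
   * <code>
   * List&lt;Row&gt; actions = Arrays.asList(put, get, delete);
   * Object[] results = new Object[actions.size()];
   * table.batch(actions, results);
   * </code>
   * </pre>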
   *
   * @param actions list of Get, Put, Delete, Increment, Append, RowMutations.
   * @param results Empty Object[], same size as actions. Provides access to partial
   *                results, in case an exception is thrown. A null in the result array means that
   *                the call for that action failed, even after retries. The order of the objects
   *                in the results array corresponds to the order of actions in the request list.
   * @throws IOException
   * @since 0.90.0
   */
  default void batch(final List<? extends Row> actions, final Object[] results) throws IOException,
    InterruptedException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Same as {@link #batch(List, Object[])}, but with a callback.
   * @since 0.96.0
   */
  default <R> void batchCallback(
    final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback)
      throws IOException, InterruptedException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Extracts certain cells from a given row.
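   * <p>A minimal usage sketch (the row key, family and qualifier are illustrative):
   * <pre>
   * <code>
   * Result result = table.get(new Get(Bytes.toBytes("row1")));
   * byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
   * </code>
   * </pre>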
   * @param get The object that specifies what data to fetch and from which row.
   * @return The data coming from the specified row, if it exists.  If the row
   *   specified doesn't exist, the {@link Result} instance returned won't
   *   contain any {@link org.apache.hadoop.hbase.KeyValue}, as indicated by
   *   {@link Result#isEmpty()}.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default Result get(Get get) throws IOException {
    return get(Collections.singletonList(get))[0];
  }

  /**
   * Extracts specified cells from the given rows, as a batch.
   *
   * @param gets The objects that specify what data to fetch and from which rows.
   * @return The data coming from the specified rows, if it exists. If the row specified doesn't
   *   exist, the {@link Result} instance returned won't contain any
   *   {@link org.apache.hadoop.hbase.Cell}s, as indicated by {@link Result#isEmpty()}. If there
   *   are any failures even after retries, there will be a <code>null</code> in the results array
   *   for those Gets, and an exception will be thrown. The ordering of the Result array
   *   corresponds to the order of the list of passed-in Gets.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.90.0
   * @apiNote {@link #put(List)} runs pre-flight validations on the input list on the client.
   *          Currently {@link #get(List)} doesn't run any validations on the client side; there
   *          is no need at the moment, but this may change in the future, in which case an
   *          {@link IllegalArgumentException} will be thrown.
   */
  default Result[] get(List<Get> gets) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Returns a scanner on the current table as specified by the {@link Scan}
   * object.
   * Note that the passed {@link Scan}'s start row and caching properties
   * may be changed.
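   *
   * <p>A minimal usage sketch (the scan settings are illustrative):
   * <pre>
   * <code>
   * Scan scan = new Scan().addFamily(Bytes.toBytes("cf"));
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // process each result
   *   }
   * }
   * </code>
   * </pre>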
   *
   * @param scan A configured {@link Scan} object.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default ResultScanner getScanner(Scan scan) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Gets a scanner on the current table for the given family.
   *
   * @param family The column family to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default ResultScanner getScanner(byte[] family) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Gets a scanner on the current table for the given family and qualifier.
   *
   * @param family The column family to scan.
   * @param qualifier The column qualifier to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Puts some data in the table.
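   *
   * <p>A minimal usage sketch (the row key, family, qualifier and value are illustrative):
   * <pre>
   * <code>
   * Put put = new Put(Bytes.toBytes("row1"));
   * put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
   * table.put(put);
   * </code>
   * </pre>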
   *
   * @param put The data to put.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default void put(Put put) throws IOException {
    put(Collections.singletonList(put));
  }

  /**
   * Batch puts the specified data into the table.
   * <p>
   * This can be used for group commit, or for submitting user defined batches. Before sending
   * a batch of mutations to the server, the client runs a few validations on the input list. If an
   * error is found, for example a mutation was supplied but was missing its column, an
   * {@link IllegalArgumentException} will be thrown and no mutations will be applied. If there
   * are any failures even after retries, a {@link RetriesExhaustedWithDetailsException} will be
   * thrown. RetriesExhaustedWithDetailsException contains lists of failed mutations and
   * corresponding remote exceptions. The ordering of mutations and exceptions in the
   * encapsulating exception corresponds to the order of the input list of Put requests.
   *
   * @param puts The list of mutations to apply.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default void put(List<Put> puts) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the put.  If the passed value is null, the check
   * is for the lack of column (ie: non-existence)
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param value the expected value
   * @param put data to put if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
      throws IOException {
    return checkAndPut(row, family, qualifier, CompareOperator.EQUAL, value, put);
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the put.  If the passed value is null, the check
   * is for the lack of column (ie: non-existence)
   *
   * The expected value argument of this call is on the left and the current
   * value of the cell is on the right side of the comparison operator.
   *
   * For example, a GREATER operator means the put is added only if the expected value is
   * greater than the existing value.
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param compareOp comparison operator to use
   * @param value the expected value
   * @param put data to put if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
      CompareFilter.CompareOp compareOp, byte[] value, Put put) throws IOException {
    RowMutations mutations = new RowMutations(put.getRow(), 1);
    mutations.add(put);

    return checkAndMutate(row, family, qualifier, compareOp, value, mutations);
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the put.  If the passed value is null, the check
   * is for the lack of column (ie: non-existence)
   *
   * The expected value argument of this call is on the left and the current
   * value of the cell is on the right side of the comparison operator.
   *
   * For example, a GREATER operator means the put is added only if the expected value is
   * greater than the existing value.
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param op comparison operator to use
   * @param value the expected value
   * @param put data to put if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
      byte[] value, Put put) throws IOException {
    RowMutations mutations = new RowMutations(put.getRow(), 1);
    mutations.add(put);

    return checkAndMutate(row, family, qualifier, op, value, mutations);
  }

  /**
   * Deletes the specified cells/row.
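   *
   * <p>A minimal usage sketch (the row key and family are illustrative):
   * <pre>
   * <code>
   * Delete delete = new Delete(Bytes.toBytes("row1"));
   * delete.addFamily(Bytes.toBytes("cf"));
   * table.delete(delete);
   * </code>
   * </pre>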
   *
   * @param delete The object that specifies what to delete.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default void delete(Delete delete) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Batch Deletes the specified cells/rows from the table.
   * <p>
   * If a specified row does not exist, the {@link Delete} will be reported as a successful
   * delete; no exception will be thrown. If there are any failures even after retries,
   * a {@link RetriesExhaustedWithDetailsException} will be thrown.
   * RetriesExhaustedWithDetailsException contains lists of failed {@link Delete}s and
   * corresponding remote exceptions.
   *
   * @param deletes List of things to delete. The input list gets modified by this
   * method. All successfully applied {@link Delete}s in the list are removed (in particular it
   * gets re-ordered, so the order in which the elements are inserted in the list gives no
   * guarantee as to the order in which the {@link Delete}s are executed).
   * @throws IOException if a remote or network exception occurs. In that case
   * the {@code deletes} argument will contain the {@link Delete} instances
   * that have not been successfully applied.
   * @since 0.20.1
   * @apiNote In version 3.0.0, the input list {@code deletes} will no longer be modified. Also,
   *          {@link #put(List)} runs pre-flight validations on the input list on the client.
   *          Currently {@link #delete(List)} doesn't run validations on the client; there is no
   *          need at the moment, but this may change in the future, in which case an
   *          {@link IllegalArgumentException} will be thrown.
   */
  default void delete(List<Delete> deletes) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the delete.  If the passed value is null, the
   * check is for the lack of column (ie: non-existence)
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param value the expected value
   * @param delete data to delete if check succeeds
   * @throws IOException e
   * @return true if the new delete was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
    byte[] value, Delete delete) throws IOException {
    return checkAndDelete(row, family, qualifier, CompareOperator.EQUAL, value, delete);
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the delete.  If the passed value is null, the
   * check is for the lack of column (ie: non-existence)
   *
   * The expected value argument of this call is on the left and the current
   * value of the cell is on the right side of the comparison operator.
   *
   * For example, a GREATER operator means the delete is added only if the expected value is
   * greater than the existing value.
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param compareOp comparison operator to use
   * @param value the expected value
   * @param delete data to delete if check succeeds
   * @throws IOException e
   * @return true if the new delete was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
    CompareFilter.CompareOp compareOp, byte[] value, Delete delete) throws IOException {
    RowMutations mutations = new RowMutations(delete.getRow(), 1);
    mutations.add(delete);

    return checkAndMutate(row, family, qualifier, compareOp, value, mutations);
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the delete.  If the passed value is null, the
   * check is for the lack of column (ie: non-existence)
   *
   * The expected value argument of this call is on the left and the current
   * value of the cell is on the right side of the comparison operator.
   *
   * For example, a GREATER operator means the delete is added only if the expected value is
   * greater than the existing value.
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param op comparison operator to use
   * @param value the expected value
   * @param delete data to delete if check succeeds
   * @throws IOException e
   * @return true if the new delete was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
                         CompareOperator op, byte[] value, Delete delete) throws IOException {
    RowMutations mutations = new RowMutations(delete.getRow(), 1);
    mutations.add(delete);

    return checkAndMutate(row, family, qualifier, op, value, mutations);
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
   * adds the Put/Delete/RowMutations.
   * <p>
   * Use the returned {@link CheckAndMutateBuilder} to construct your request and then execute it.
   * This is a fluent-style API; the code looks like:
   *
   * <pre>
   * <code>
   * table.checkAndMutate(row, family).qualifier(qualifier).ifNotExists().thenPut(put);
   * </code>
   * </pre>
   */
  default CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * A helper class for sending a checkAndMutate request.
   */
  interface CheckAndMutateBuilder {

    /**
     * @param qualifier column qualifier to check.
     */
    CheckAndMutateBuilder qualifier(byte[] qualifier);

    /**
     * @param timeRange timeRange to check
     */
    CheckAndMutateBuilder timeRange(TimeRange timeRange);

    /**
     * Check for lack of column.
     */
    CheckAndMutateBuilder ifNotExists();

    /**
     * Check for equality.
     * @param value the expected value
     */
    default CheckAndMutateBuilder ifEquals(byte[] value) {
      return ifMatches(CompareOperator.EQUAL, value);
    }

    /**
     * @param compareOp comparison operator to use
     * @param value the expected value
     */
    CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value);

    /**
     * @param put data to put if check succeeds
     * @return {@code true} if the new put was executed, {@code false} otherwise.
     */
    boolean thenPut(Put put) throws IOException;

    /**
     * @param delete data to delete if check succeeds
     * @return {@code true} if the new delete was executed, {@code false} otherwise.
     */
    boolean thenDelete(Delete delete) throws IOException;

    /**
     * @param mutation mutations to perform if check succeeds
     * @return true if the new mutation was executed, false otherwise.
     */
    boolean thenMutate(RowMutations mutation) throws IOException;
  }

  /**
   * Atomically checks if a row matches the specified filter. If it does, it adds the
   * Put/Delete/RowMutations.
   * <p>
   * Use the returned {@link CheckAndMutateWithFilterBuilder} to construct your request and then
   * execute it. This is a fluent-style API; the code looks like:
   *
   * <pre>
   * <code>
   * table.checkAndMutate(row, filter).thenPut(put);
   * </code>
   * </pre>
   */
  default CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * A helper class for sending a checkAndMutate request with a filter.
   */
  interface CheckAndMutateWithFilterBuilder {

    /**
     * @param timeRange timeRange to check
     */
    CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange);

    /**
     * @param put data to put if check succeeds
     * @return {@code true} if the new put was executed, {@code false} otherwise.
     */
    boolean thenPut(Put put) throws IOException;

    /**
     * @param delete data to delete if check succeeds
     * @return {@code true} if the new delete was executed, {@code false} otherwise.
     */
    boolean thenDelete(Delete delete) throws IOException;

    /**
     * @param mutation mutations to perform if check succeeds
     * @return true if the new mutation was executed, false otherwise.
     */
    boolean thenMutate(RowMutations mutation) throws IOException;
  }

  /**
   * Performs multiple mutations atomically on a single row. Currently
   * {@link Put} and {@link Delete} are supported.
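   *
   * <p>A minimal usage sketch (the row key, family and qualifiers are illustrative; both
   * mutations must target the same row):
   * <pre>
   * <code>
   * byte[] row = Bytes.toBytes("row1");
   * RowMutations rm = new RowMutations(row);
   * rm.add(new Put(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v")));
   * rm.add(new Delete(row).addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q2")));
   * table.mutateRow(rm);
   * </code>
   * </pre>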
   *
   * @param rm object that specifies the set of mutations to perform atomically
   * @throws IOException
   */
  default void mutateRow(final RowMutations rm) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Appends values to one or more columns within a single row.
   * <p>
   * This operation guarantees atomicity to readers. Appends are done
   * under a single row lock, so write operations to a row are synchronized, and
   * readers are guaranteed to see this operation fully completed.
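   *
   * <p>A minimal usage sketch (the row key, family, qualifier and value are illustrative):
   * <pre>
   * <code>
   * Append append = new Append(Bytes.toBytes("row1"));
   * append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
   * Result result = table.append(append);
   * </code>
   * </pre>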
   *
   * @param append object that specifies the columns and values to be appended
   * @throws IOException e
   * @return values of columns after the append operation (may be null)
   */
  default Result append(final Append append) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Increments one or more columns within a single row.
   * <p>
   * This operation ensures atomicity to readers. Increments are done
   * under a single row lock, so write operations to a row are synchronized, and
   * readers are guaranteed to see this operation fully completed.
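   *
   * <p>A minimal usage sketch (the row key, family and qualifier are illustrative):
   * <pre>
   * <code>
   * Increment increment = new Increment(Bytes.toBytes("row1"));
   * increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
   * Result result = table.increment(increment);
   * </code>
   * </pre>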
   *
   * @param increment object that specifies the columns and amounts to be used
   *                  for the increment operations
   * @throws IOException e
   * @return values of columns after the increment
   */
  default Result increment(final Increment increment) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
   * <p>
   * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
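   * <p>For example, to increment a counter column by one (the row key, family and qualifier
   * are illustrative):
   * <pre>
   * <code>
   * long newValue = table.incrementColumnValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
   *   Bytes.toBytes("counter"), 1L);
   * </code>
   * </pre>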
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the
   * amount is negative).
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount)
      throws IOException {
    Increment increment = new Increment(row).addColumn(family, qualifier, amount);
    Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
  }

  /**
   * Atomically increments a column value. If the column value already exists
   * and is not a big-endian long, this could throw an exception. If the column
   * value does not yet exist it is initialized to <code>amount</code> and
   * written to the specified column.
   *
   * <p>Setting durability to {@link Durability#SKIP_WAL} means that in a fail
   * scenario you will lose any increments that have not been flushed.
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the
   * amount is negative).
   * @param durability The persistence guarantee for this increment.
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
    long amount, Durability durability) throws IOException {
    Increment increment = new Increment(row)
        .addColumn(family, qualifier, amount)
        .setDurability(durability);
    Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
  }

  /**
   * Releases any resources held or pending changes in internal buffers.
   *
   * @throws IOException if a remote or network exception occurs.
   */
  @Override
  default void close() throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the
   * table region containing the specified row.  The row given does not actually have
   * to exist.  Whichever region would contain the row based on start and end keys will
   * be used.  Note that the {@code row} parameter is also not passed to the
   * coprocessor handler registered for this protocol, unless the {@code row}
   * is separately passed as an argument in the service request.  The parameter
   * here is only used to locate the region used to handle the call.
   *
   * <p>
   * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
   * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
   * </p>
   *
   * <div style="background-color: #cccccc; padding: 2px">
   * <blockquote><pre>
   * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
   * MyCallRequest request = MyCallRequest.newBuilder()
   *     ...
   *     .build();
   * MyCallResponse response = service.myCall(null, request);
   * </pre></blockquote></div>
   *
   * @param row The row key used to identify the remote region location
   * @return A CoprocessorRpcChannel instance
   */
  default CoprocessorRpcChannel coprocessorService(byte[] row) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
   * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * with each {@link com.google.protobuf.Service} instance.
   *
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row.  If {@code null}, the
   *   selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row. If
   *   {@code null}, selection will continue through the last table region.
   * @param callable this instance's
   *   {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
   *   method will be invoked once per table region, using the {@link com.google.protobuf.Service}
   *   instance connected to that region.
   * @param <T> the {@link com.google.protobuf.Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's {@link
   * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * @return a map of result values keyed by region name
   */
  default <T extends Service, R> Map<byte[],R> coprocessorService(final Class<T> service,
    byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable)
    throws ServiceException, Throwable {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
   * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * with each {@link Service} instance.
   *
   * <p> The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
   * method will be called with the return value from each region's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation. </p>
   *
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row.  If {@code null}, the
   *   selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row. If
   *   {@code null}, selection will continue through the last table region.
   * @param callable this instance's
   *   {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
   *   method will be invoked once per table region, using the {@link Service} instance connected to
   *   that region.
   * @param <T> the {@link Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's {@link
   * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   */
  default <T extends Service, R> void coprocessorService(final Class<T> service,
    byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable,
    final Batch.Callback<R> callback) throws ServiceException, Throwable {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive). All
   * the invocations to the same region server will be batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   *
   * @param methodDescriptor
   *          the descriptor for the protobuf service method to call.
   * @param request
   *          the method call parameters
   * @param startKey
   *          start region selection with region containing this row. If {@code null}, the
   *          selection will start with the first table region.
   * @param endKey
   *          select regions up to and including the region containing this row. If {@code null},
   *          selection will continue through the last table region.
   * @param responsePrototype
   *          the proto type of the response of the method in Service.
   * @param <R>
   *          the response type for the coprocessor Service method
   * @return a map of result values keyed by region name
   */
  default <R extends Message> Map<byte[], R> batchCoprocessorService(
    Descriptors.MethodDescriptor methodDescriptor, Message request,
    byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive). All
   * the invocations to the same region server will be batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   *
   * <p>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
   * method will be called with the return value from each region's invocation.
   * </p>
   *
   * @param methodDescriptor the descriptor for the protobuf service method to call.
   * @param request the method call parameters
   * @param startKey start region selection with region containing this row.
   *   If {@code null}, the selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row.
   *   If {@code null}, selection will continue through the last table region.
   * @param responsePrototype the proto type of the response of the method in Service.
   * @param callback callback to invoke with the response for each region
   * @param <R>
   *          the response type for the coprocessor Service method
   */
  default <R extends Message> void batchCoprocessorService(
      Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey,
      byte[] endKey, R responsePrototype, Batch.Callback<R> callback)
      throws ServiceException, Throwable {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value.
   * If it does, it performs the row mutations.  If the passed value is null, the check
   * is for the lack of column (ie: non-existence)
   *
   * The expected value argument of this call is on the left and the current
   * value of the cell is on the right side of the comparison operator.
   *
   * For example, a GREATER operator means the row mutations are performed only if the expected
   * value is greater than the existing value.
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param compareOp the comparison operator
   * @param value the expected value
   * @param mutation  mutations to perform if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
      CompareFilter.CompareOp compareOp, byte[] value, RowMutations mutation) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value.
   * If it does, it performs the row mutations.  If the passed value is null, the check
   * is for the lack of column (ie: non-existence)
   *
   * The expected value argument of this call is on the left and the current
   * value of the cell is on the right side of the comparison operator.
   *
   * For example, a GREATER operator means the row mutations are performed only if the expected
   * value is greater than the existing value.
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param op the comparison operator
   * @param value the expected value
   * @param mutation  mutations to perform if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
   */
  @Deprecated
  default boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
                         byte[] value, RowMutations mutation) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout of each rpc request in this Table instance. It will be overridden by a more
   * specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
   * @see #getReadRpcTimeout(TimeUnit)
   * @see #getWriteRpcTimeout(TimeUnit)
   * @param unit the unit of time the timeout is to be represented in
   * @return rpc timeout in the specified time unit
   */
  default long getRpcTimeout(TimeUnit unit) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout (millisecond) of each rpc request in this Table instance.
   *
   * @return Currently configured rpc timeout
   * @deprecated use {@link #getReadRpcTimeout(TimeUnit)} or
   *             {@link #getWriteRpcTimeout(TimeUnit)} instead
   */
  @Deprecated
  default int getRpcTimeout() {
    return (int)getRpcTimeout(TimeUnit.MILLISECONDS);
  }

  /**
   * Set timeout (millisecond) of each rpc request in operations of this Table instance. It will
   * override the value of hbase.rpc.timeout in configuration.
   * If an rpc request waits too long, it will stop waiting and send a new request to retry,
   * until the retries are exhausted or the operation timeout is reached.
   * <p>
   * NOTE: This will set both the read and write timeout settings to the provided value.
   *
   * @param rpcTimeout the timeout of each rpc request in millisecond.
   *
   * @deprecated Use setReadRpcTimeout or setWriteRpcTimeout instead
   */
  @Deprecated
  default void setRpcTimeout(int rpcTimeout) {
    setReadRpcTimeout(rpcTimeout);
    setWriteRpcTimeout(rpcTimeout);
  }

  /**
   * Get timeout of each rpc read request in this Table instance.
   * @param unit the unit of time the timeout is to be represented in
   * @return read rpc timeout in the specified time unit
   */
  default long getReadRpcTimeout(TimeUnit unit) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout (millisecond) of each rpc read request in this Table instance.
   * @deprecated since version 2.0 and will be removed in version 3.0.
   *             Use {@link #getReadRpcTimeout(TimeUnit)} instead.
   */
  @Deprecated
  default int getReadRpcTimeout() {
    return (int)getReadRpcTimeout(TimeUnit.MILLISECONDS);
  }

  /**
   * Set timeout (millisecond) of each rpc read request in operations of this Table instance. It
   * will override the value of hbase.rpc.read.timeout in configuration.
   * If an rpc read request waits too long, it will stop waiting and send a new request to retry,
   * until the retries are exhausted or the operation timeout is reached.
   *
   * @param readRpcTimeout the timeout for read rpc request in milliseconds
   * @deprecated since 2.0.0, use {@link TableBuilder#setReadRpcTimeout} instead
   */
  @Deprecated
  default void setReadRpcTimeout(int readRpcTimeout) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout of each rpc write request in this Table instance.
   * @param unit the unit of time the timeout is to be represented in
   * @return write rpc timeout in the specified time unit
   */
  default long getWriteRpcTimeout(TimeUnit unit) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout (millisecond) of each rpc write request in this Table instance.
   * @deprecated since version 2.0 and will be removed in version 3.0.
   *             Use {@link #getWriteRpcTimeout(TimeUnit)} instead.
   */
  @Deprecated
  default int getWriteRpcTimeout() {
    return (int)getWriteRpcTimeout(TimeUnit.MILLISECONDS);
  }

  /**
   * Set timeout (millisecond) of each rpc write request in operations of this Table instance. It
   * will override the value of hbase.rpc.write.timeout in configuration.
   * If an rpc write request waits too long, it will stop waiting and send a new request to retry,
   * until the retries are exhausted or the operation timeout is reached.
   *
   * @param writeRpcTimeout the timeout for write rpc request in milliseconds
   * @deprecated since 2.0.0, use {@link TableBuilder#setWriteRpcTimeout} instead
   */
  @Deprecated
  default void setWriteRpcTimeout(int writeRpcTimeout) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout of each operation in this Table instance.
   * @param unit the unit of time the timeout is to be represented in
   * @return operation rpc timeout in the specified time unit
   */
  default long getOperationTimeout(TimeUnit unit) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout (millisecond) of each operation in this Table instance.
   * @deprecated since version 2.0 and will be removed in version 3.0.
   *             Use {@link #getOperationTimeout(TimeUnit)} instead.
   */
  @Deprecated
  default int getOperationTimeout() {
    return (int)getOperationTimeout(TimeUnit.MILLISECONDS);
  }

  /**
   * Set timeout (millisecond) of each operation in this Table instance. It will override the value
   * of hbase.client.operation.timeout in configuration.
   * The operation timeout is a top-level restriction that makes sure a blocking method will not be
   * blocked for more than this. In each operation, if an rpc request fails because of a timeout or
   * another reason, it will retry until it succeeds or a RetriesExhaustedException is thrown. But
   * if the total blocked time reaches the operation timeout before the retries are exhausted, it
   * will break out early and throw a SocketTimeoutException.
   * @param operationTimeout the total timeout of each operation in millisecond.
   * @deprecated since 2.0.0, use {@link TableBuilder#setOperationTimeout} instead
   */
  @Deprecated
  default void setOperationTimeout(int operationTimeout) {
    throw new NotImplementedException("Add an implementation!");
  }
}