/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Map;

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
/**
 * Used to communicate with a single HBase table.
 * Obtain an instance from a {@link Connection} and call {@link #close()} afterwards.
 *
 * <p>Table can be used to get, put, delete or scan data from a table.
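 *
 * <p>A minimal usage sketch. The table name {@code "mytable"}, the column family {@code "cf"},
 * and the qualifier and value below are illustrative only; {@code Bytes} is
 * {@code org.apache.hadoop.hbase.util.Bytes}:
 *
 * <blockquote><pre>
 * Configuration conf = HBaseConfiguration.create();
 * try (Connection connection = ConnectionFactory.createConnection(conf);
 *      Table table = connection.getTable(TableName.valueOf("mytable"))) {
 *   Put put = new Put(Bytes.toBytes("row1"));
 *   put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
 *   table.put(put);
 * }
 * </pre></blockquote>
 *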
 * @see ConnectionFactory
 * @see Connection
 * @see Admin
 * @see RegionLocator
 * @since 0.99.0
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface Table extends Closeable {
  /**
   * Gets the fully qualified table name instance of this table.
   */
  TableName getName();

  /**
   * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance.
   * <p>
   * The reference returned is not a copy, so any change made to it will
   * affect this instance.
   */
  Configuration getConfiguration();

  /**
   * Gets the {@link org.apache.hadoop.hbase.HTableDescriptor table descriptor} for this table.
   * @throws java.io.IOException if a remote or network exception occurs.
   */
  HTableDescriptor getTableDescriptor() throws IOException;

  /**
   * Test for the existence of columns in the table, as specified by the Get.
   * <p>
   * This will return true if the Get matches one or more keys, false if not.
   * <p>
   * This is a server-side call so it prevents any data from being transferred to
   * the client.
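   *
   * <p>A minimal sketch (assumes a {@code Table} named {@code table}; the row and column names
   * are illustrative):
   *
   * <blockquote><pre>
   * Get get = new Get(Bytes.toBytes("row1"));
   * get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
   * boolean hasCell = table.exists(get);
   * </pre></blockquote>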
   *
   * @param get the Get
   * @return true if the specified Get matches one or more keys, false if not
   * @throws IOException if a remote or network exception occurs.
   */
  boolean exists(Get get) throws IOException;

  /**
   * Test for the existence of columns in the table, as specified by the Gets.
   * <p>
   * This will return an array of booleans. Each value will be true if the related Get matches
   * one or more keys, false if not.
   * <p>
   * This is a server-side call so it prevents any data from being transferred to
   * the client.
   *
   * @param gets the Gets
   * @return Array of boolean. True if the specified Get matches one or more keys, false if not.
   * @throws IOException if a remote or network exception occurs.
   */
  boolean[] existsAll(List<Get> gets) throws IOException;

  /**
   * Method that does a batch call on Deletes, Gets, Puts, Increments and Appends.
   * The ordering of execution of the actions is not defined. This means that if you do a Put
   * and a Get in the same {@link #batch} call, the Get is not guaranteed to return what the
   * Put wrote.
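   *
   * <p>A minimal sketch (assumes a {@code Table} named {@code table}; row, family and qualifier
   * names are illustrative):
   *
   * <blockquote><pre>
   * List&lt;Row&gt; actions = new ArrayList&lt;&gt;();
   * actions.add(new Put(Bytes.toBytes("row1"))
   *     .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
   * actions.add(new Get(Bytes.toBytes("row2")));
   * Object[] results = new Object[actions.size()];
   * try {
   *   table.batch(actions, results);
   * } catch (InterruptedException e) {
   *   Thread.currentThread().interrupt();
   * }
   * </pre></blockquote>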
   *
   * @param actions list of Get, Put, Delete, Increment, Append objects
   * @param results Empty Object[], same size as actions. Provides access to partial
   *     results, in case an exception is thrown. A null in the result array means that
   *     the call for that action failed, even after retries
   * @throws IOException if a remote or network exception occurs.
   * @since 0.90.0
   */
  void batch(final List<? extends Row> actions, final Object[] results)
      throws IOException, InterruptedException;

  /**
   * Same as {@link #batch(List, Object[])}, but returns an array of
   * results instead of using a results parameter reference.
   *
   * @param actions list of Get, Put, Delete, Increment, Append objects
   * @return the results from the actions. A null in the return array means that
   *     the call for that action failed, even after retries
   * @throws IOException if a remote or network exception occurs.
   * @since 0.90.0
   * @deprecated If any exception is thrown by one of the actions, there is no way to
   *     retrieve the partially executed results. Use {@link #batch(List, Object[])} instead.
   */
  @Deprecated
  Object[] batch(final List<? extends Row> actions) throws IOException, InterruptedException;

  /**
   * Same as {@link #batch(List, Object[])}, but with a callback.
   * @since 0.96.0
   */
  <R> void batchCallback(
      final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback
  ) throws IOException, InterruptedException;

  /**
   * Same as {@link #batch(List)}, but with a callback.
   *
   * @since 0.96.0
   * @deprecated If any exception is thrown by one of the actions, there is no way to retrieve the
   *     partially executed results. Use {@link #batchCallback(List, Object[],
   *     org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} instead.
   */
  @Deprecated
  <R> Object[] batchCallback(
      List<? extends Row> actions, Batch.Callback<R> callback
  ) throws IOException, InterruptedException;

  /**
   * Extracts certain cells from a given row.
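   *
   * <p>A minimal sketch (assumes a {@code Table} named {@code table}; the row, family and
   * qualifier names are illustrative):
   *
   * <blockquote><pre>
   * Get get = new Get(Bytes.toBytes("row1"));
   * get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
   * Result result = table.get(get);
   * byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
   * </pre></blockquote>
   *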
   * @param get The object that specifies what data to fetch and from which row.
   * @return The data coming from the specified row, if it exists. If the row
   *     specified doesn't exist, the {@link Result} instance returned won't
   *     contain any {@link org.apache.hadoop.hbase.KeyValue}, as indicated by
   *     {@link Result#isEmpty()}.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  Result get(Get get) throws IOException;

  /**
   * Extracts certain cells from the given rows, in batch.
   *
   * @param gets The objects that specify what data to fetch and from which rows.
   * @return The data coming from the specified rows, if it exists. If the row specified doesn't
   *     exist, the {@link Result} instance returned won't contain any
   *     {@link org.apache.hadoop.hbase.KeyValue}, as indicated by {@link Result#isEmpty()}. If
   *     there are any failures even after retries, there will be a null in the results array for
   *     those Gets, AND an exception will be thrown.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.90.0
   */
  Result[] get(List<Get> gets) throws IOException;

  /**
   * Returns a scanner on the current table as specified by the {@link Scan} object.
   * Note that the passed {@link Scan}'s start row and caching properties
   * may be changed.
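   *
   * <p>A minimal sketch (assumes a {@code Table} named {@code table}; the column family name is
   * illustrative):
   *
   * <blockquote><pre>
   * Scan scan = new Scan();
   * scan.addFamily(Bytes.toBytes("cf"));
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // process each row
   *   }
   * }
   * </pre></blockquote>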
   *
   * @param scan A configured {@link Scan} object.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(Scan scan) throws IOException;

  /**
   * Gets a scanner on the current table for the given family.
   *
   * @param family The column family to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(byte[] family) throws IOException;

  /**
   * Gets a scanner on the current table for the given family and qualifier.
   *
   * @param family The column family to scan.
   * @param qualifier The column qualifier to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;

  /**
   * Puts some data in the table.
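   *
   * <p>A minimal sketch (assumes a {@code Table} named {@code table}; the row, column and value
   * are illustrative):
   *
   * <blockquote><pre>
   * Put put = new Put(Bytes.toBytes("row1"));
   * put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
   * table.put(put);
   * </pre></blockquote>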
   *
   * @param put The data to put.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void put(Put put) throws IOException;

  /**
   * Puts some data in the table, in batch.
   * <p>
   * This can be used for group commit, or for submitting user defined batches. The writeBuffer
   * will be periodically inspected while the List is processed, so depending on the List size
   * the writeBuffer may not flush at all, or may flush more than once.
   * @param puts The list of mutations to apply. The batch put is done by
   *     aggregating the iteration of the Puts over the write buffer
   *     at the client-side for a single RPC call.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void put(List<Put> puts) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the put. If the passed value is null, the check
   * is for the lack of column (ie: non-existence).
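   *
   * <p>A minimal sketch (assumes a {@code Table} named {@code table}; the row, column and values
   * are illustrative):
   *
   * <blockquote><pre>
   * byte[] row = Bytes.toBytes("row1");
   * byte[] cf = Bytes.toBytes("cf");
   * byte[] q = Bytes.toBytes("q");
   * Put put = new Put(row).addColumn(cf, q, Bytes.toBytes("new"));
   * boolean applied = table.checkAndPut(row, cf, q, Bytes.toBytes("expected"), put);
   * </pre></blockquote>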
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param value the expected value
   * @param put data to put if check succeeds
   * @throws IOException if a remote or network exception occurs.
   * @return true if the new put was executed, false otherwise
   */
  boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
      byte[] value, Put put) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the put. If the passed value is null, the check
   * is for the lack of column (ie: non-existence).
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param compareOp comparison operator to use
   * @param value the expected value
   * @param put data to put if check succeeds
   * @throws IOException if a remote or network exception occurs.
   * @return true if the new put was executed, false otherwise
   */
  boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
      CompareFilter.CompareOp compareOp, byte[] value, Put put) throws IOException;

  /**
   * Deletes the specified cells/row.
   *
   * @param delete The object that specifies what to delete.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void delete(Delete delete) throws IOException;

  /**
   * Deletes the specified cells/rows in bulk.
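   *
   * <p>A minimal sketch (assumes a {@code Table} named {@code table}; the row names are
   * illustrative):
   *
   * <blockquote><pre>
   * List&lt;Delete&gt; deletes = new ArrayList&lt;&gt;();
   * deletes.add(new Delete(Bytes.toBytes("row1")));
   * deletes.add(new Delete(Bytes.toBytes("row2")));
   * table.delete(deletes);
   * </pre></blockquote>
   *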
   * @param deletes List of things to delete. The list is modified by this
   *     method (in particular it gets re-ordered, so the order in which the elements
   *     are inserted in the list gives no guarantee as to the order in which the
   *     {@link Delete}s are executed).
   * @throws IOException if a remote or network exception occurs. In that case
   *     the {@code deletes} argument will contain the {@link Delete} instances
   *     that have not been successfully applied.
   * @since 0.20.1
   */
  void delete(List<Delete> deletes) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the delete. If the passed value is null, the
   * check is for the lack of column (ie: non-existence).
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param value the expected value
   * @param delete data to delete if check succeeds
   * @throws IOException if a remote or network exception occurs.
   * @return true if the new delete was executed, false otherwise
   */
  boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
      byte[] value, Delete delete) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the delete. If the passed value is null, the
   * check is for the lack of column (ie: non-existence).
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param compareOp comparison operator to use
   * @param value the expected value
   * @param delete data to delete if check succeeds
   * @throws IOException if a remote or network exception occurs.
   * @return true if the new delete was executed, false otherwise
   */
  boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
      CompareFilter.CompareOp compareOp, byte[] value, Delete delete) throws IOException;

  /**
   * Performs multiple mutations atomically on a single row. Currently
   * {@link Put} and {@link Delete} are supported.
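   *
   * <p>A minimal sketch (assumes a {@code Table} named {@code table}; the row and column names
   * are illustrative):
   *
   * <blockquote><pre>
   * byte[] row = Bytes.toBytes("row1");
   * RowMutations mutations = new RowMutations(row);
   * mutations.add(new Put(row)
   *     .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("a"), Bytes.toBytes("v")));
   * mutations.add(new Delete(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("b")));
   * table.mutateRow(mutations);
   * </pre></blockquote>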
   *
   * @param rm object that specifies the set of mutations to perform atomically
   * @throws IOException if a remote or network exception occurs.
   */
  void mutateRow(final RowMutations rm) throws IOException;

  /**
   * Appends values to one or more columns within a single row.
   * <p>
   * This operation guarantees atomicity to readers. Appends are done
   * under a single row lock, so write operations to a row are synchronized, and
   * readers are guaranteed to see this operation fully completed.
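   *
   * <p>A minimal sketch (assumes a {@code Table} named {@code table}; the row, column and
   * appended value are illustrative):
   *
   * <blockquote><pre>
   * Append append = new Append(Bytes.toBytes("row1"));
   * append.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
   * Result result = table.append(append);
   * </pre></blockquote>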
   *
   * @param append object that specifies the columns and values to be appended
   * @throws IOException if a remote or network exception occurs.
   * @return values of columns after the append operation (may be null)
   */
  Result append(final Append append) throws IOException;

  /**
   * Increments one or more columns within a single row.
   * <p>
   * This operation ensures atomicity to readers. Increments are done
   * under a single row lock, so write operations to a row are synchronized, and
   * readers are guaranteed to see this operation fully completed.
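   *
   * <p>A minimal sketch (assumes a {@code Table} named {@code table}; the row and counter column
   * are illustrative):
   *
   * <blockquote><pre>
   * Increment increment = new Increment(Bytes.toBytes("row1"));
   * increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
   * Result result = table.increment(increment);
   * long newValue = Bytes.toLong(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("counter")));
   * </pre></blockquote>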
   *
   * @param increment object that specifies the columns and amounts to be used
   *     for the increment operations
   * @throws IOException if a remote or network exception occurs.
   * @return values of columns after the increment
   */
  Result increment(final Increment increment) throws IOException;

  /**
   * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
   * <p>
   * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the
   *     amount is negative).
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
      long amount) throws IOException;

  /**
   * Atomically increments a column value. If the column value already exists
   * and is not a big-endian long, this could throw an exception. If the column
   * value does not yet exist it is initialized to <code>amount</code> and
   * written to the specified column.
   *
   * <p>Setting durability to {@link Durability#SKIP_WAL} means that in a fail
   * scenario you will lose any increments that have not been flushed.
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the
   *     amount is negative).
   * @param durability The persistence guarantee for this increment.
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
      long amount, Durability durability) throws IOException;

  /**
   * Releases any resources held and flushes any pending changes in internal buffers.
   *
   * @throws IOException if a remote or network exception occurs.
   */
  @Override
  void close() throws IOException;

  /**
   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the
   * table region containing the specified row. The row given does not actually have
   * to exist. Whichever region would contain the row based on start and end keys will
   * be used. Note that the {@code row} parameter is also not passed to the
   * coprocessor handler registered for this protocol, unless the {@code row}
   * is separately passed as an argument in the service request. The parameter
   * here is only used to locate the region used to handle the call.
   *
   * <p>
   * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
   * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
   * </p>
   *
   * <div style="background-color: #cccccc; padding: 2px">
   * <blockquote><pre>
   * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
   * MyCallRequest request = MyCallRequest.newBuilder()
   *     ...
   *     .build();
   * MyCallResponse response = service.myCall(null, request);
   * </pre></blockquote></div>
   *
   * @param row The row key used to identify the remote region location
   * @return A CoprocessorRpcChannel instance
   */
  CoprocessorRpcChannel coprocessorService(byte[] row);

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
   * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * with each {@link com.google.protobuf.Service} instance.
   *
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row. If {@code null}, the
   *     selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row. If {@code
   *     null}, selection will continue through the last table region.
   * @param callable this instance's
   *     {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be invoked
   *     once per table region, using the {@link com.google.protobuf.Service} instance connected
   *     to that region.
   * @param <T> the {@link com.google.protobuf.Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's
   *     {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * @return a map of result values keyed by region name
   */
  <T extends Service, R> Map<byte[], R> coprocessorService(final Class<T> service,
      byte[] startKey, byte[] endKey, final Batch.Call<T, R> callable)
      throws ServiceException, Throwable;

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
   * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * with each {@link Service} instance.
   *
   * <p>The given {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],
   * byte[], Object)} method will be called with the return value from each region's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.</p>
   *
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row. If {@code null}, the
   *     selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row. If {@code
   *     null}, selection will continue through the last table region.
   * @param callable this instance's
   *     {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be invoked
   *     once per table region, using the {@link Service} instance connected to that region.
   * @param callback the callback to invoke with the result returned for each region
   * @param <T> the {@link Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's
   *     {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   */
  <T extends Service, R> void coprocessorService(final Class<T> service,
      byte[] startKey, byte[] endKey, final Batch.Call<T, R> callable,
      final Batch.Callback<R> callback) throws ServiceException, Throwable;

  /**
   * Returns the maximum size in bytes of the write buffer for this HTable.
   * <p>
   * The default value comes from the configuration parameter {@code hbase.client.write.buffer}.
   * @return The size of the write buffer in bytes.
   * @deprecated as of 1.0.1 (should not have been in 1.0.0). Replaced by
   *     {@link BufferedMutator#getWriteBufferSize()}
   */
  @Deprecated
  long getWriteBufferSize();

  /**
   * Sets the size of the write buffer in bytes.
   * <p>
   * If the new size is less than the current amount of data in the
   * write buffer, the buffer gets flushed.
   * @param writeBufferSize The new write buffer size, in bytes.
   * @throws IOException if a remote or network exception occurs.
   * @deprecated as of 1.0.1 (should not have been in 1.0.0). Replaced by {@link BufferedMutator}
   *     and {@link BufferedMutatorParams#writeBufferSize(long)}
   */
  @Deprecated
  void setWriteBufferSize(long writeBufferSize) throws IOException;

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive).
   * All invocations to the same region server are batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   *
   * @param methodDescriptor
   *          the descriptor for the protobuf service method to call.
   * @param request
   *          the method call parameters
   * @param startKey
   *          start region selection with region containing this row. If {@code null}, the
   *          selection will start with the first table region.
   * @param endKey
   *          select regions up to and including the region containing this row. If {@code null},
   *          selection will continue through the last table region.
   * @param responsePrototype
   *          the proto type of the response of the method in Service.
   * @param <R>
   *          the response type for the coprocessor Service method
   * @throws ServiceException
   * @throws Throwable
   * @return a map of result values keyed by region name
   */
  <R extends Message> Map<byte[], R> batchCoprocessorService(
      Descriptors.MethodDescriptor methodDescriptor, Message request,
      byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable;

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive).
   * All invocations to the same region server are batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   *
   * <p>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
   * method will be called with the return value from each region's invocation.
   * </p>
   *
   * @param methodDescriptor
   *          the descriptor for the protobuf service method to call.
   * @param request
   *          the method call parameters
   * @param startKey
   *          start region selection with region containing this row. If {@code null}, the
   *          selection will start with the first table region.
   * @param endKey
   *          select regions up to and including the region containing this row. If {@code null},
   *          selection will continue through the last table region.
   * @param responsePrototype
   *          the proto type of the response of the method in Service.
   * @param callback
   *          callback to invoke with the response for each region
   * @param <R>
   *          the response type for the coprocessor Service method
   * @throws ServiceException
   * @throws Throwable
   */
  <R extends Message> void batchCoprocessorService(Descriptors.MethodDescriptor methodDescriptor,
      Message request, byte[] startKey, byte[] endKey, R responsePrototype,
      Batch.Callback<R> callback) throws ServiceException, Throwable;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value.
   * If it does, it performs the row mutations. If the passed value is null, the check
   * is for the lack of column (ie: non-existence).
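   *
   * <p>A minimal sketch (assumes a {@code Table} named {@code table}; the row, columns and
   * expected value are illustrative):
   *
   * <blockquote><pre>
   * byte[] row = Bytes.toBytes("row1");
   * byte[] cf = Bytes.toBytes("cf");
   * RowMutations mutations = new RowMutations(row);
   * mutations.add(new Put(row).addColumn(cf, Bytes.toBytes("a"), Bytes.toBytes("v")));
   * mutations.add(new Delete(row).addColumn(cf, Bytes.toBytes("b")));
   * boolean applied = table.checkAndMutate(row, cf, Bytes.toBytes("status"),
   *     CompareFilter.CompareOp.EQUAL, Bytes.toBytes("expected"), mutations);
   * </pre></blockquote>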
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param compareOp the comparison operator
   * @param value the expected value
   * @param mutation mutations to perform if check succeeds
   * @throws IOException if a remote or network exception occurs.
   * @return true if the mutations were executed, false otherwise
   */
  boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
      CompareFilter.CompareOp compareOp, byte[] value, RowMutations mutation) throws IOException;

}