/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.NotImplementedException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;
import org.apache.hbase.thirdparty.com.google.protobuf.Service;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;

/**
 * Used to communicate with a single HBase table. Obtain an instance from a {@link Connection} and
 * call {@link #close()} afterwards.
 * <p>
 * <code>Table</code> can be used to get, put, delete or scan data from a table.
 * @see ConnectionFactory
 * @see Connection
 * @see Admin
 * @see RegionLocator
 * @since 0.99.0
 */
@InterfaceAudience.Public
public interface Table extends Closeable {
  /**
   * Gets the fully qualified table name instance of this table.
   */
  TableName getName();

  /**
   * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance.
   * <p>
   * The reference returned is not a copy, so any change made to it will affect this instance.
   */
  Configuration getConfiguration();

  /**
   * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this
   * table.
   * @throws java.io.IOException if a remote or network exception occurs.
   */
  TableDescriptor getDescriptor() throws IOException;

  /**
   * Gets the {@link RegionLocator} for this table.
   */
  RegionLocator getRegionLocator() throws IOException;

  /**
   * Test for the existence of columns in the table, as specified by the Get.
   * <p>
   * This will return true if the Get matches one or more keys, false if not.
   * <p>
   * This is a server-side call so it prevents any data from being transferred to the client.
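   * <p>
   * A minimal usage sketch (assuming an open {@code table} and hypothetical {@code row},
   * {@code family} and {@code qualifier} byte arrays):
   *
   * <pre>
   * Get get = new Get(row).addColumn(family, qualifier);
   * if (table.exists(get)) {
   *   // the cell is present; fetch it with table.get(get) if the value itself is needed
   * }
   * </pre>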
   * @param get the Get
   * @return true if the specified Get matches one or more keys, false if not
   * @throws IOException e
   */
  default boolean exists(Get get) throws IOException {
    return exists(Collections.singletonList(get))[0];
  }

  /**
   * Test for the existence of columns in the table, as specified by the Gets.
   * <p>
   * This will return an array of booleans. Each value will be true if the related Get matches one
   * or more keys, false if not.
   * <p>
   * This is a server-side call so it prevents any data from being transferred to the client.
   * @param gets the Gets
   * @return Array of boolean. True if the specified Get matches one or more keys, false if not.
   * @throws IOException e
   */
  default boolean[] exists(List<Get> gets) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations. The
   * ordering of execution of the actions is not defined. Meaning if you do a Put and a Get in the
   * same {@link #batch} call, you will not necessarily be guaranteed that the Get returns what the
   * Put had put.
   * @param actions list of Get, Put, Delete, Increment, Append, RowMutations.
   * @param results Empty Object[], same size as actions. Provides access to partial results, in
   *          case an exception is thrown. A null in the result array means that the call for that
   *          action failed, even after retries. The order of the objects in the results array
   *          corresponds to the order of actions in the request list.
   * @since 0.90.0
   */
  default void batch(final List<? extends Row> actions, final Object[] results)
    throws IOException, InterruptedException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Same as {@link #batch(List, Object[])}, but with a callback.
   * @since 0.96.0
   * @deprecated since 3.0.0, will be removed in 4.0.0. Please use the batch related methods in
   *             {@link AsyncTable} directly if you want to use a callback. We reuse the callback
   *             for coprocessor here, and the problem is that for batch operation, the
   *             {@link AsyncTable} does not tell us the region, so in this method we need an extra
   *             locating after we get the result, which is not good.
   */
  @Deprecated
  default <R> void batchCallback(final List<? extends Row> actions, final Object[] results,
    final Batch.Callback<R> callback) throws IOException, InterruptedException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Extracts certain cells from a given row.
   * @param get The object that specifies what data to fetch and from which row.
   * @return The data coming from the specified row, if it exists. If the row specified doesn't
   *         exist, the {@link Result} instance returned won't contain any
   *         {@link org.apache.hadoop.hbase.KeyValue}, as indicated by {@link Result#isEmpty()}.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default Result get(Get get) throws IOException {
    return get(Collections.singletonList(get))[0];
  }

  /**
   * Extracts specified cells from the given rows, as a batch.
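   * <p>
   * A minimal usage sketch (assuming an open {@code table} and hypothetical {@code row1},
   * {@code row2} and {@code family} byte arrays):
   *
   * <pre>
   * Get get1 = new Get(row1).addFamily(family);
   * Get get2 = new Get(row2).addFamily(family);
   * Result[] results = table.get(Arrays.asList(get1, get2));
   * // results[0] pairs with get1 and results[1] with get2; an empty Result means no match
   * </pre>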
   * @param gets The objects that specify what data to fetch and from which rows.
   * @return The data coming from the specified rows, if it exists. If the row specified doesn't
   *         exist, the {@link Result} instance returned won't contain any
   *         {@link org.apache.hadoop.hbase.Cell}s, as indicated by {@link Result#isEmpty()}. If
   *         there are any failures even after retries, there will be a <code>null</code> in the
   *         results' array for those Gets, AND an exception will be thrown. The ordering of the
   *         Result array corresponds to the order of the list of passed in Gets.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.90.0
   * @apiNote {@link #put(List)} runs pre-flight validations on the input list on the client side.
   *          Currently {@link #get(List)} does not run any client-side validations, as there is no
   *          need, but this may change in the future, in which case an
   *          {@link IllegalArgumentException} will be thrown.
   */
  default Result[] get(List<Get> gets) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Returns a scanner on the current table as specified by the {@link Scan} object. Note that the
   * passed {@link Scan}'s start row and caching properties may be changed.
   * @param scan A configured {@link Scan} object.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default ResultScanner getScanner(Scan scan) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Gets a scanner on the current table for the given family.
   * @param family The column family to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default ResultScanner getScanner(byte[] family) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Gets a scanner on the current table for the given family and qualifier.
   * @param family The column family to scan.
   * @param qualifier The column qualifier to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Puts some data in the table.
   * @param put The data to put.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default void put(Put put) throws IOException {
    put(Collections.singletonList(put));
  }

  /**
   * Batch puts the specified data into the table.
   * <p>
   * This can be used for group commit, or for submitting user defined batches. Before sending a
   * batch of mutations to the server, the client runs a few validations on the input list. If an
   * error is found, for example, a mutation was supplied but was missing its column, an
   * {@link IllegalArgumentException} will be thrown and no mutations will be applied. If there are
   * any failures even after retries, a {@link RetriesExhaustedWithDetailsException} will be thrown.
   * RetriesExhaustedWithDetailsException contains lists of failed mutations and corresponding
   * remote exceptions. The ordering of mutations and exceptions in the encapsulating exception
   * corresponds to the order of the input list of Put requests.
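   * <p>
   * A minimal usage sketch (assuming an open {@code table} and hypothetical {@code row1},
   * {@code row2}, {@code family} and {@code qualifier} byte arrays):
   *
   * <pre>
   * Put put1 = new Put(row1).addColumn(family, qualifier, Bytes.toBytes("value1"));
   * Put put2 = new Put(row2).addColumn(family, qualifier, Bytes.toBytes("value2"));
   * table.put(Arrays.asList(put1, put2));
   * </pre>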
   * @param puts The list of mutations to apply.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default void put(List<Put> puts) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Deletes the specified cells/row.
   * @param delete The object that specifies what to delete.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  default void delete(Delete delete) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Batch Deletes the specified cells/rows from the table.
   * <p>
   * If a specified row does not exist, the {@link Delete} will report as a successful delete; no
   * exception will be thrown. If there are any failures even after retries, a
   * {@link RetriesExhaustedWithDetailsException} will be thrown.
   * RetriesExhaustedWithDetailsException contains lists of failed {@link Delete}s and
   * corresponding remote exceptions.
   * @param deletes List of things to delete. The input list gets modified by this method. All
   *          successfully applied {@link Delete}s in the list are removed (in particular it gets
   *          re-ordered, so the order in which the elements are inserted in the list gives no
   *          guarantee as to the order in which the {@link Delete}s are executed).
   * @throws IOException if a remote or network exception occurs. In that case the {@code deletes}
   *           argument will contain the {@link Delete} instances that have not been successfully
   *           applied.
   * @since 0.20.1
   * @apiNote Since version 3.0.0, the input list {@code deletes} will no longer be modified. Also,
   *          {@link #put(List)} runs pre-flight validations on the input list on the client side.
   *          Currently {@link #delete(List)} does not run any client-side validations, as there is
   *          no need, but this may change in the future, in which case an
   *          {@link IllegalArgumentException} will be thrown.
   */
  default void delete(List<Delete> deletes) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
   * adds the Put/Delete/RowMutations.
   * <p>
   * Use the returned {@link CheckAndMutateBuilder} to construct your request and then execute it.
   * This is a fluent style API, the code looks like:
   *
   * <pre>
   * <code>
   * table.checkAndMutate(row, family).qualifier(qualifier).ifNotExists().thenPut(put);
   * </code>
   * </pre>
   *
   * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it
   *             any more.
   */
  @Deprecated
  default CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * A helper class for sending a checkAndMutate request.
   * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it
   *             any more.
   */
  @Deprecated
  interface CheckAndMutateBuilder {

    /**
     * Specify a column qualifier.
     * @param qualifier column qualifier to check.
     */
    CheckAndMutateBuilder qualifier(byte[] qualifier);

    /**
     * Specify a time range.
     * @param timeRange timeRange to check
     */
    CheckAndMutateBuilder timeRange(TimeRange timeRange);

    /**
     * Check for lack of column.
     */
    CheckAndMutateBuilder ifNotExists();

    /**
     * Check for equality.
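     * <p>
     * For example (a minimal sketch; {@code expected} stands for a hypothetical previously read
     * value):
     *
     * <pre>
     * table.checkAndMutate(row, family).qualifier(qualifier).ifEquals(expected).thenPut(put);
     * </pre>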
     * @param value the expected value
     */
    default CheckAndMutateBuilder ifEquals(byte[] value) {
      return ifMatches(CompareOperator.EQUAL, value);
    }

    /**
     * Check for match.
     * @param compareOp comparison operator to use
     * @param value the expected value
     */
    CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value);

    /**
     * Specify a Put to commit if the check succeeds.
     * @param put data to put if check succeeds
     * @return {@code true} if the new put was executed, {@code false} otherwise.
     */
    boolean thenPut(Put put) throws IOException;

    /**
     * Specify a Delete to commit if the check succeeds.
     * @param delete data to delete if check succeeds
     * @return {@code true} if the new delete was executed, {@code false} otherwise.
     */
    boolean thenDelete(Delete delete) throws IOException;

    /**
     * Specify a RowMutations to commit if the check succeeds.
     * @param mutation mutations to perform if check succeeds
     * @return {@code true} if the new mutation was executed, {@code false} otherwise.
     */
    boolean thenMutate(RowMutations mutation) throws IOException;
  }

  /**
   * Atomically checks if a row matches the specified filter. If it does, it adds the
   * Put/Delete/RowMutations.
   * <p>
   * Use the returned {@link CheckAndMutateWithFilterBuilder} to construct your request and then
   * execute it. This is a fluent style API, the code looks like:
   *
   * <pre>
   * <code>
   * table.checkAndMutate(row, filter).thenPut(put);
   * </code>
   * </pre>
   *
   * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it
   *             any more.
   */
  @Deprecated
  default CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * A helper class for sending a checkAndMutate request with a filter.
   * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it
   *             any more.
   */
  @Deprecated
  interface CheckAndMutateWithFilterBuilder {

    /**
     * Specify a time range.
     * @param timeRange timeRange to check
     */
    CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange);

    /**
     * Specify a Put to commit if the check succeeds.
     * @param put data to put if check succeeds
     * @return {@code true} if the new put was executed, {@code false} otherwise.
     */
    boolean thenPut(Put put) throws IOException;

    /**
     * Specify a Delete to commit if the check succeeds.
     * @param delete data to delete if check succeeds
     * @return {@code true} if the new delete was executed, {@code false} otherwise.
     */
    boolean thenDelete(Delete delete) throws IOException;

    /**
     * Specify a RowMutations to commit if the check succeeds.
     * @param mutation mutations to perform if check succeeds
     * @return {@code true} if the new mutation was executed, {@code false} otherwise.
     */
    boolean thenMutate(RowMutations mutation) throws IOException;
  }

  /**
   * checkAndMutate that atomically checks if a row matches the specified condition. If it does, it
   * performs the specified action.
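   * <p>
   * A minimal usage sketch using the {@link CheckAndMutate} builder (assuming an open
   * {@code table} and hypothetical {@code row}, {@code family}, {@code qualifier} and
   * {@code expected} byte arrays):
   *
   * <pre>
   * CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
   *   .ifEquals(family, qualifier, expected)
   *   .build(new Put(row).addColumn(family, qualifier, Bytes.toBytes("new-value")));
   * CheckAndMutateResult result = table.checkAndMutate(checkAndMutate);
   * if (result.isSuccess()) {
   *   // the condition matched and the Put was applied atomically
   * }
   * </pre>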
   * @param checkAndMutate The CheckAndMutate object.
   * @return A CheckAndMutateResult object that represents the result for the CheckAndMutate.
   * @throws IOException if a remote or network exception occurs.
   */
  default CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws IOException {
    return checkAndMutate(Collections.singletonList(checkAndMutate)).get(0);
  }

  /**
   * Batch version of checkAndMutate. The specified CheckAndMutates are batched only in the sense
   * that they are sent to a RS in one RPC, but each CheckAndMutate operation is still executed
   * atomically (and thus, each may fail independently of others).
   * @param checkAndMutates The list of CheckAndMutate.
   * @return A list of CheckAndMutateResult objects that represents the result for each
   *         CheckAndMutate.
   * @throws IOException if a remote or network exception occurs.
   */
  default List<CheckAndMutateResult> checkAndMutate(List<CheckAndMutate> checkAndMutates)
    throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Performs multiple mutations atomically on a single row. Currently {@link Put} and
   * {@link Delete} are supported.
   * @param rm object that specifies the set of mutations to perform atomically
   * @return results of Increment/Append operations
   * @throws IOException if a remote or network exception occurs.
   */
  default Result mutateRow(final RowMutations rm) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Appends values to one or more columns within a single row.
   * <p>
   * This operation guarantees atomicity to readers. Appends are done under a single row lock, so
   * write operations to a row are synchronized, and readers are guaranteed to see this operation
   * fully completed.
   * @param append object that specifies the columns and values to be appended
   * @throws IOException e
   * @return values of columns after the append operation (may be null)
   */
  default Result append(final Append append) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Increments one or more columns within a single row.
   * <p>
   * This operation ensures atomicity to readers. Increments are done under a single row lock, so
   * write operations to a row are synchronized, and readers are guaranteed to see this operation
   * fully completed.
   * @param increment object that specifies the columns and amounts to be used for the increment
   *          operations
   * @throws IOException e
   * @return values of columns after the increment
   */
  default Result increment(final Increment increment) throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
   * <p>
   * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
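   * <p>
   * A minimal usage sketch (assuming an open {@code table} and hypothetical {@code row},
   * {@code family} and {@code qualifier} byte arrays naming a counter column):
   *
   * <pre>
   * long newValue = table.incrementColumnValue(row, family, qualifier, 1L);
   * </pre>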
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the amount is
   *          negative).
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount)
    throws IOException {
    Increment increment = new Increment(row).addColumn(family, qualifier, amount);
    Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
  }

  /**
   * Atomically increments a column value. If the column value already exists and is not a
   * big-endian long, this could throw an exception. If the column value does not yet exist it is
   * initialized to <code>amount</code> and written to the specified column.
   * <p>
   * Setting durability to {@link Durability#SKIP_WAL} means that in a fail scenario you will lose
   * any increments that have not been flushed.
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the amount is
   *          negative).
   * @param durability The persistence guarantee for this increment.
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount,
    Durability durability) throws IOException {
    Increment increment =
      new Increment(row).addColumn(family, qualifier, amount).setDurability(durability);
    Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
  }

  /**
   * Releases any resources held or pending changes in internal buffers.
   * @throws IOException if a remote or network exception occurs.
   */
  @Override
  default void close() throws IOException {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel}
   * instance connected to the table region containing the specified row. The row given does not
   * actually have to exist. Whichever region would contain the row based on start and end keys
   * will be used. Note that the {@code row} parameter is also not passed to the coprocessor
   * handler registered for this protocol, unless the {@code row} is separately passed as an
   * argument in the service request. The parameter here is only used to locate the region used to
   * handle the call.
   * <p/>
   * The obtained {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} instance can
   * be used to access a published coprocessor {@link Service} using standard protobuf service
   * invocations:
   * <p/>
   * <div style="background-color: #cccccc; padding: 2px"> <blockquote>
   *
   * <pre>
   * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
   * MyCallRequest request = MyCallRequest.newBuilder()
   *     ...
   *     .build();
   * MyCallResponse response = service.myCall(null, request);
   * </pre>
   *
   * </blockquote> </div>
   * @param row The row key used to identify the remote region location
   * @return A CoprocessorRpcChannel instance
   * @deprecated since 3.0.0, will be removed in 4.0.0.
   *             This is too low level, please stop using it any more. Use the coprocessorService
   *             methods in {@link AsyncTable} instead.
   * @see Connection#toAsyncConnection()
   */
  @Deprecated
  default CoprocessorRpcChannel coprocessorService(byte[] row) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Creates an instance of the given {@link Service} subclass for each table region spanning the
   * range from the {@code startKey} row to {@code endKey} row (inclusive), and invokes the passed
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method with each
   * {@link Service} instance.
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row. If {@code null}, the
   *          selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row. If
   *          {@code null}, selection will continue through the last table region.
   * @param callable this instance's
   *          {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be
   *          invoked once per table region, using the {@link Service} instance connected to that
   *          region.
   * @param <T> the {@link Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's
   *          {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * @return a map of result values keyed by region name
   * @deprecated since 3.0.0, will be removed in 4.0.0. The batch call here references the blocking
   *             interface of a protobuf stub, so it is not possible to do it in an asynchronous
   *             way, even if now we are building the {@link Table} implementation based on the
   *             {@link AsyncTable}, which is not good. Use the coprocessorService methods in
   *             {@link AsyncTable} directly instead.
   * @see Connection#toAsyncConnection()
   */
  @Deprecated
  default <T extends Service, R> Map<byte[], R> coprocessorService(final Class<T> service,
    byte[] startKey, byte[] endKey, final Batch.Call<T, R> callable)
    throws ServiceException, Throwable {
    Map<byte[], R> results =
      Collections.synchronizedMap(new TreeMap<byte[], R>(Bytes.BYTES_COMPARATOR));
    coprocessorService(service, startKey, endKey, callable, new Batch.Callback<R>() {
      @Override
      public void update(byte[] region, byte[] row, R value) {
        if (region != null) {
          results.put(region, value);
        }
      }
    });
    return results;
  }

  /**
   * Creates an instance of the given {@link Service} subclass for each table region spanning the
   * range from the {@code startKey} row to {@code endKey} row (inclusive), and invokes the passed
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method with each
   * {@link Service} instance.
   * <p/>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
   * method will be called with the return value from each region's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row. If {@code null}, the
   *          selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row. If
   *          {@code null}, selection will continue through the last table region.
   * @param callable this instance's
   *          {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be
   *          invoked once per table region, using the {@link Service} instance connected to that
   *          region.
   * @param <T> the {@link Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's
   *          {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * @deprecated since 3.0.0, will be removed in 4.0.0. The batch call here references the blocking
   *             interface of a protobuf stub, so it is not possible to do it in an asynchronous
   *             way, even if now we are building the {@link Table} implementation based on the
   *             {@link AsyncTable}, which is not good. Use the coprocessorService methods in
   *             {@link AsyncTable} directly instead.
   * @see Connection#toAsyncConnection()
   */
  @Deprecated
  default <T extends Service, R> void coprocessorService(final Class<T> service, byte[] startKey,
    byte[] endKey, final Batch.Call<T, R> callable, final Batch.Callback<R> callback)
    throws ServiceException, Throwable {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Creates an instance of the given {@link Service} subclass for each table region spanning the
   * range from the {@code startKey} row to {@code endKey} row (inclusive); all the invocations to
   * the same region server will be batched into one call. The coprocessor service is invoked
   * according to the service instance, method name and parameters.
   * @param methodDescriptor the descriptor for the protobuf service method to call.
   * @param request the method call parameters
   * @param startKey start region selection with region containing this row. If
   *          {@code null}, the selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row. If
   *          {@code null}, selection will continue through the last table region.
   * @param responsePrototype the proto type of the response of the method in Service.
   * @param <R> the response type for the coprocessor Service method
   * @return a map of result values keyed by region name
   * @deprecated since 3.0.0, will be removed in 4.0.0. The batch call here references the blocking
   *             interface of a protobuf stub, so it is not possible to do it in an asynchronous
   *             way, even if now we are building the {@link Table} implementation based on the
   *             {@link AsyncTable}, which is not good. Use the coprocessorService methods in
   *             {@link AsyncTable} directly instead.
   * @see Connection#toAsyncConnection()
   */
  @Deprecated
  default <R extends Message> Map<byte[], R> batchCoprocessorService(
    Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey,
    byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
    final Map<byte[], R> results =
      Collections.synchronizedMap(new TreeMap<byte[], R>(Bytes.BYTES_COMPARATOR));
    batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype,
      new Batch.Callback<R>() {
        @Override
        public void update(byte[] region, byte[] row, R result) {
          if (region != null) {
            results.put(region, result);
          }
        }
      });
    return results;
  }

  /**
   * Creates an instance of the given {@link Service} subclass for each table region spanning the
   * range from the {@code startKey} row to {@code endKey} row (inclusive); all the invocations to
   * the same region server will be batched into one call. The coprocessor service is invoked
   * according to the service instance, method name and parameters.
   * <p/>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
   * method will be called with the return value from each region's invocation.
   * @param methodDescriptor the descriptor for the protobuf service method to call.
   * @param request the method call parameters
   * @param startKey start region selection with region containing this row. If
   *          {@code null}, the selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row. If
   *          {@code null}, selection will continue through the last table region.
   * @param responsePrototype the proto type of the response of the method in Service.
   * @param callback callback to invoke with the response for each region
   * @param <R> the response type for the coprocessor Service method
   * @deprecated since 3.0.0, will be removed in 4.0.0. The batch call here references the blocking
   *             interface of a protobuf stub, so it is not possible to do it in an asynchronous
   *             way, even if now we are building the {@link Table} implementation based on the
   *             {@link AsyncTable}, which is not good. Use the coprocessorService methods in
   *             {@link AsyncTable} directly instead.
   * @see Connection#toAsyncConnection()
   */
  @Deprecated
  default <R extends Message> void batchCoprocessorService(
    Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey,
    byte[] endKey, R responsePrototype, Batch.Callback<R> callback)
    throws ServiceException, Throwable {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout of each rpc request in this Table instance. It will be overridden by a more
   * specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
   * @see #getReadRpcTimeout(TimeUnit)
   * @see #getWriteRpcTimeout(TimeUnit)
   * @param unit the unit of time the timeout to be represented in
   * @return rpc timeout in the specified time unit
   */
  default long getRpcTimeout(TimeUnit unit) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout of each rpc read request in this Table instance.
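   * <p>
   * For example, a minimal sketch of reading the configured value in milliseconds (assuming an
   * open {@code table}):
   *
   * <pre>
   * long readRpcTimeoutMs = table.getReadRpcTimeout(TimeUnit.MILLISECONDS);
   * </pre>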
   * @param unit the unit of time the timeout to be represented in
   * @return read rpc timeout in the specified time unit
   */
  default long getReadRpcTimeout(TimeUnit unit) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout of each rpc write request in this Table instance.
   * @param unit the unit of time the timeout to be represented in
   * @return write rpc timeout in the specified time unit
   */
  default long getWriteRpcTimeout(TimeUnit unit) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get timeout of each operation in this Table instance.
   * @param unit the unit of time the timeout to be represented in
   * @return operation rpc timeout in the specified time unit
   */
  default long getOperationTimeout(TimeUnit unit) {
    throw new NotImplementedException("Add an implementation!");
  }

  /**
   * Get the attributes to be submitted with requests.
   * @return map of request attributes
   */
  default Map<String, byte[]> getRequestAttributes() {
    return Collections.emptyMap();
  }
}