@InterfaceAudience.Public @InterfaceStability.Stable public class RemoteHTable extends Object implements Table
Constructor and Description |
---|
RemoteHTable(Client client, org.apache.hadoop.conf.Configuration conf, byte[] name): Constructor |
RemoteHTable(Client client, org.apache.hadoop.conf.Configuration conf, String name): Constructor |
RemoteHTable(Client client, String name): Constructor |
Modifier and Type | Method and Description |
---|---|
Result | append(Append append): Appends values to one or more columns within a single row. |
Object[] | batch(List<? extends Row> actions): Same as Table.batch(List, Object[]), but returns an array of results instead of using a results parameter reference. |
void | batch(List<? extends Row> actions, Object[] results): Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations. The ordering of execution of the actions is not defined. |
<R> Object[] | batchCallback(List<? extends Row> actions, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback): Same as Table.batch(List), but with a callback. |
<R> void | batchCallback(List<? extends Row> actions, Object[] results, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback): Same as Table.batch(List, Object[]), but with a callback. |
<R extends com.google.protobuf.Message> Map<byte[],R> | batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor method, com.google.protobuf.Message request, byte[] startKey, byte[] endKey, R responsePrototype): Creates an instance of the given Service subclass for each table region spanning the range from the startKey row to endKey row (inclusive); all the invocations to the same region server will be batched into one call. |
<R extends com.google.protobuf.Message> void | batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor method, com.google.protobuf.Message request, byte[] startKey, byte[] endKey, R responsePrototype, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback): Creates an instance of the given Service subclass for each table region spanning the range from the startKey row to endKey row (inclusive); all the invocations to the same region server will be batched into one call. |
protected org.apache.hadoop.hbase.rest.model.CellSetModel | buildModelFromPut(Put put) |
protected String | buildMultiRowSpec(byte[][] rows, int maxVersions) |
protected Result[] | buildResultFromModel(org.apache.hadoop.hbase.rest.model.CellSetModel model) |
protected String | buildRowSpec(byte[] row, Map familyMap, long startTime, long endTime, int maxVersions) |
boolean | checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, Delete delete): Atomically checks if a row/family/qualifier value matches the expected value. |
boolean | checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, Delete delete): Atomically checks if a row/family/qualifier value matches the expected value. |
boolean | checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, RowMutations rm): Atomically checks if a row/family/qualifier value matches the expected value. |
boolean | checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put): Atomically checks if a row/family/qualifier value matches the expected value. |
boolean | checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, Put put): Atomically checks if a row/family/qualifier value matches the expected value. |
void | close(): Releases any resources held or pending changes in internal buffers. |
CoprocessorRpcChannel | coprocessorService(byte[] row): Creates and returns a RpcChannel instance connected to the table region containing the specified row. |
<T extends com.google.protobuf.Service,R> Map<byte[],R> | coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable): Creates an instance of the given Service subclass for each table region spanning the range from the startKey row to endKey row (inclusive), and invokes the passed Batch.Call.call(T) method with each Service instance. |
<T extends com.google.protobuf.Service,R> void | coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback): Creates an instance of the given Service subclass for each table region spanning the range from the startKey row to endKey row (inclusive), and invokes the passed Batch.Call.call(T) method with each Service instance. |
void | delete(Delete delete): Deletes the specified cells/row. |
void | delete(List<Delete> deletes): Deletes the specified cells/rows in bulk. |
boolean | exists(Get get): Test for the existence of columns in the table, as specified by the Get. |
Boolean[] | exists(List<Get> gets): Deprecated. |
boolean[] | existsAll(List<Get> gets): exists(List) is really a list of get() calls. |
void | flushCommits() |
Result | get(Get get): Extracts certain cells from a given row. |
Result[] | get(List<Get> gets): Extracts certain cells from the given rows, in batch. |
org.apache.hadoop.conf.Configuration | getConfiguration(): Returns the Configuration object used by this instance. |
TableName | getName(): Gets the fully qualified table name instance of this table. |
int | getOperationTimeout(): Get the timeout (in milliseconds) of each operation in this Table instance. |
int | getReadRpcTimeout(): Get the timeout (in milliseconds) of each rpc read request in this Table instance. |
Result | getRowOrBefore(byte[] row, byte[] family) |
int | getRpcTimeout(): Deprecated. |
ResultScanner | getScanner(byte[] family): Gets a scanner on the current table for the given family. |
ResultScanner | getScanner(byte[] family, byte[] qualifier): Gets a scanner on the current table for the given family and qualifier. |
ResultScanner | getScanner(Scan scan): Returns a scanner on the current table as specified by the Scan object. |
HTableDescriptor | getTableDescriptor(): Gets the table descriptor for this table. |
byte[] | getTableName() |
long | getWriteBufferSize(): Returns the maximum size in bytes of the write buffer for this HTable. |
int | getWriteRpcTimeout(): Get the timeout (in milliseconds) of each rpc write request in this Table instance. |
Result | increment(Increment increment): Increments one or more columns within a single row. |
long | incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) |
long | incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, Durability durability): Atomically increments a column value. |
boolean | isAutoFlush() |
void | mutateRow(RowMutations rm): Performs multiple mutations atomically on a single row. |
void | put(List<Put> puts): Puts some data in the table, in batch. |
void | put(Put put): Puts some data in the table. |
void | setOperationTimeout(int operationTimeout): Set the timeout (in milliseconds) of each operation in this Table instance; this overrides the value of hbase.client.operation.timeout in the configuration. |
void | setReadRpcTimeout(int readRpcTimeout): Set the timeout (in milliseconds) of each rpc read request for operations of this Table instance; this overrides the value of hbase.rpc.read.timeout in the configuration. |
void | setRpcTimeout(int rpcTimeout): Deprecated. |
void | setWriteBufferSize(long writeBufferSize): Sets the size of the buffer in bytes. |
void | setWriteRpcTimeout(int writeRpcTimeout): Set the timeout (in milliseconds) of each rpc write request for operations of this Table instance; this overrides the value of hbase.rpc.write.timeout in the configuration. |
public RemoteHTable(Client client, org.apache.hadoop.conf.Configuration conf, String name)
public RemoteHTable(Client client, org.apache.hadoop.conf.Configuration conf, byte[] name)
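A minimal construction sketch, assuming an HBase REST gateway is running; the host, port, and table name below are placeholders, and Client and Cluster are the REST client classes from org.apache.hadoop.hbase.rest.client:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;

public class RemoteHTableSetup {
  public static RemoteHTable open() {
    // Point the REST client at a running HBase REST gateway (host/port are placeholders).
    Cluster cluster = new Cluster();
    cluster.add("rest-gateway.example.com", 8080);
    Client client = new Client(cluster);

    Configuration conf = HBaseConfiguration.create();
    // "mytable" is a hypothetical table name.
    return new RemoteHTable(client, conf, "mytable");
  }
}
```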
protected String buildRowSpec(byte[] row, Map familyMap, long startTime, long endTime, int maxVersions)
protected String buildMultiRowSpec(byte[][] rows, int maxVersions)
protected Result[] buildResultFromModel(org.apache.hadoop.hbase.rest.model.CellSetModel model)
protected org.apache.hadoop.hbase.rest.model.CellSetModel buildModelFromPut(Put put)
public byte[] getTableName()
public TableName getName()
Description copied from interface: Table
Gets the fully qualified table name instance of this table.
Specified by: getName in interface Table
public org.apache.hadoop.conf.Configuration getConfiguration()
Description copied from interface: Table
Returns the Configuration object used by this instance. The reference returned is not a copy, so any change made to it will affect this instance.
Specified by: getConfiguration in interface Table
public HTableDescriptor getTableDescriptor() throws IOException
Description copied from interface: Table
Gets the table descriptor for this table.
Specified by: getTableDescriptor in interface Table
Throws: IOException - if a remote or network exception occurs.

public void close() throws IOException
Description copied from interface: Table
Releases any resources held or pending changes in internal buffers.
Specified by: close in interface Closeable
Specified by: close in interface AutoCloseable
Specified by: close in interface Table
Throws: IOException - if a remote or network exception occurs.

public Result get(Get get) throws IOException
Description copied from interface: Table
Extracts certain cells from a given row.
Specified by: get in interface Table
Parameters: get - The object that specifies what data to fetch and from which row.
Returns: The data coming from the specified row, if it exists. If the row specified doesn't exist, the Result instance returned won't contain any KeyValue, as indicated by Result.isEmpty().
Throws: IOException - if a remote or network exception occurs.
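A short usage sketch for get(Get); the row key, family, and qualifier are placeholders, and the RemoteHTable is assumed to have been constructed as shown earlier:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;

public class GetExample {
  static byte[] readCell(RemoteHTable table) throws IOException {
    Get get = new Get(Bytes.toBytes("row1"));                // placeholder row key
    get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));  // placeholder family/qualifier
    Result result = table.get(get);
    // getValue returns null when the cell is absent (Result.isEmpty() would then be true).
    return result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
  }
}
```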
public Result[] get(List<Get> gets) throws IOException
Description copied from interface: Table
Extracts certain cells from the given rows, in batch.
Specified by: get in interface Table
Parameters: gets - The objects that specify what data to fetch and from which rows.
Returns: The data coming from the specified rows, if it exists. If the row specified doesn't exist, the Result instance returned won't contain any KeyValue, as indicated by Result.isEmpty(). If there are any failures even after retries, there will be a null in the results array for those Gets, AND an exception will be thrown.
Throws: IOException - if a remote or network exception occurs.

public boolean exists(Get get) throws IOException
Description copied from interface: Table
Test for the existence of columns in the table, as specified by the Get.
This will return true if the Get matches one or more keys, false if not.
This is a server-side call so it prevents any data from being transferred to the client.
Specified by: exists in interface Table
Parameters: get - the Get
Throws: IOException - e

public boolean[] existsAll(List<Get> gets) throws IOException
exists(List) is really a list of get() calls.
Specified by: existsAll in interface Table
Parameters: gets - list of Get to test for the existence
Throws: IOException - e

@Deprecated public Boolean[] exists(List<Get> gets) throws IOException
Deprecated.
Throws: IOException
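A sketch of a bulk existence check with existsAll(List); the row keys are placeholders:

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;

public class ExistsExample {
  static boolean[] checkRows(RemoteHTable table) throws IOException {
    List<Get> gets = new ArrayList<>();
    gets.add(new Get(Bytes.toBytes("row1")));  // placeholder row keys
    gets.add(new Get(Bytes.toBytes("row2")));
    // Server-side existence check; no cell data is transferred to the client.
    return table.existsAll(gets);
  }
}
```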
public void put(Put put) throws IOException
Description copied from interface: Table
Puts some data in the table.
Specified by: put in interface Table
Parameters: put - The data to put.
Throws: IOException - if a remote or network exception occurs.
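A sketch of a single put(Put); the row key, family, qualifier, and value are placeholders:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;

public class PutExample {
  static void writeCell(RemoteHTable table) throws IOException {
    Put put = new Put(Bytes.toBytes("row1"));  // placeholder row key
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
    table.put(put);  // sent to the REST gateway as a single mutation
  }
}
```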
public void put(List<Put> puts) throws IOException
Description copied from interface: Table
Puts some data in the table, in batch.
This can be used for group commit, or for submitting user defined batches. The writeBuffer will be periodically inspected while the List is processed, so depending on the List size the writeBuffer may flush not at all, or more than once.
Specified by: put in interface Table
Parameters: puts - The list of mutations to apply. The batch put is done by aggregating the iteration of the Puts over the write buffer at the client-side for a single RPC call.
Throws: IOException - if a remote or network exception occurs.

public void delete(Delete delete) throws IOException
Description copied from interface: Table
Deletes the specified cells/row.
Specified by: delete in interface Table
Parameters: delete - The object that specifies what to delete.
Throws: IOException - if a remote or network exception occurs.

public void delete(List<Delete> deletes) throws IOException
Description copied from interface: Table
Deletes the specified cells/rows in bulk.
Specified by: delete in interface Table
Parameters: deletes - List of things to delete. The list gets modified by this method (in particular it gets re-ordered, so the order in which the elements are inserted in the list gives no guarantee as to the order in which the Deletes are executed).
Throws: IOException - if a remote or network exception occurs. In that case the deletes argument will contain the Delete instances that have not been successfully applied.
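A sketch of a bulk delete(List); the row keys and column names are placeholders, and a mutable list is used because the call may modify it:

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteExample {
  static void deleteRows(RemoteHTable table) throws IOException {
    // Delete a single qualifier from one row and a whole family from another (placeholders).
    Delete one = new Delete(Bytes.toBytes("row1"));
    one.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    Delete two = new Delete(Bytes.toBytes("row2"));
    two.addFamily(Bytes.toBytes("cf"));

    // Use a mutable list: successfully applied Deletes may be removed by the call.
    List<Delete> deletes = new ArrayList<>();
    deletes.add(one);
    deletes.add(two);
    table.delete(deletes);
  }
}
```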
public void flushCommits() throws IOException
Throws: IOException
public ResultScanner getScanner(Scan scan) throws IOException
Description copied from interface: Table
Returns a scanner on the current table as specified by the Scan object. Note that the passed Scan's start row and caching properties may be changed.
Specified by: getScanner in interface Table
Parameters: scan - A configured Scan object.
Throws: IOException - if a remote or network exception occurs.
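A sketch of scanning with getScanner(Scan); the start/stop rows and family are placeholders, and the scanner is closed in a finally block to release the server-side scanner:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanExample {
  static void scanFamily(RemoteHTable table) throws IOException {
    // Placeholder row range and column family.
    Scan scan = new Scan(Bytes.toBytes("row-start"), Bytes.toBytes("row-stop"));
    scan.addFamily(Bytes.toBytes("cf"));

    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result result : scanner) {
        System.out.println(Bytes.toString(result.getRow()));
      }
    } finally {
      scanner.close();  // always release the scanner
    }
  }
}
```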
public ResultScanner getScanner(byte[] family) throws IOException
Description copied from interface: Table
Gets a scanner on the current table for the given family.
Specified by: getScanner in interface Table
Parameters: family - The column family to scan.
Throws: IOException - if a remote or network exception occurs.

public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException
Description copied from interface: Table
Gets a scanner on the current table for the given family and qualifier.
Specified by: getScanner in interface Table
Parameters:
family - The column family to scan.
qualifier - The column qualifier to scan.
Throws: IOException - if a remote or network exception occurs.

public boolean isAutoFlush()
public Result getRowOrBefore(byte[] row, byte[] family) throws IOException
Throws: IOException
public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) throws IOException
Description copied from interface: Table
Atomically checks if a row/family/qualifier value matches the expected value.
Specified by: checkAndPut in interface Table
Parameters:
row - to check
family - column family to check
qualifier - column qualifier to check
value - the expected value
put - data to put if check succeeds
Throws: IOException - e
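A sketch of an atomic compare-and-set with checkAndPut; the row, column, and the expected and new values are placeholders:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndPutExample {
  static boolean updateIfUnchanged(RemoteHTable table) throws IOException {
    byte[] row = Bytes.toBytes("row1");        // placeholder row key
    byte[] family = Bytes.toBytes("cf");
    byte[] qualifier = Bytes.toBytes("status");

    Put put = new Put(row);
    put.addColumn(family, qualifier, Bytes.toBytes("DONE"));

    // Only applies the Put if the current cell value equals "PENDING".
    return table.checkAndPut(row, family, qualifier, Bytes.toBytes("PENDING"), put);
  }
}
```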
public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, Put put) throws IOException
Description copied from interface: Table
Atomically checks if a row/family/qualifier value matches the expected value.
Specified by: checkAndPut in interface Table
Parameters:
row - to check
family - column family to check
qualifier - column qualifier to check
compareOp - comparison operator to use
value - the expected value
put - data to put if check succeeds
Throws: IOException - e

public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, Delete delete) throws IOException
Description copied from interface: Table
Atomically checks if a row/family/qualifier value matches the expected value.
Specified by: checkAndDelete in interface Table
Parameters:
row - to check
family - column family to check
qualifier - column qualifier to check
value - the expected value
delete - data to delete if check succeeds
Throws: IOException - e

public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, Delete delete) throws IOException
Description copied from interface: Table
Atomically checks if a row/family/qualifier value matches the expected value.
Specified by: checkAndDelete in interface Table
Parameters:
row - to check
family - column family to check
qualifier - column qualifier to check
compareOp - comparison operator to use
value - the expected value
delete - data to delete if check succeeds
Throws: IOException - e

public Result increment(Increment increment) throws IOException
Description copied from interface: Table
Increments one or more columns within a single row.
This operation ensures atomicity to readers. Increments are done under a single row lock, so write operations to a row are synchronized, and readers are guaranteed to see this operation fully completed.
Specified by: increment in interface Table
Parameters: increment - object that specifies the columns and amounts to be used for the increment operations
Throws: IOException - e
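A sketch of a counter update with increment(Increment); the row and column names are placeholders:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementExample {
  static long bumpCounter(RemoteHTable table) throws IOException {
    byte[] row = Bytes.toBytes("row1");   // placeholder row key
    byte[] family = Bytes.toBytes("cf");
    byte[] qualifier = Bytes.toBytes("hits");

    Increment increment = new Increment(row);
    increment.addColumn(family, qualifier, 1L);  // add 1 to the counter cell
    Result result = table.increment(increment);

    long newValue = Bytes.toLong(result.getValue(family, qualifier));
    // For this single-cell case, table.incrementColumnValue(row, family, qualifier, 1L)
    // is an equivalent shortcut.
    return newValue;
  }
}
```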
public Result append(Append append) throws IOException
Description copied from interface: Table
Appends values to one or more columns within a single row.
This operation guarantees atomicity to readers. Appends are done under a single row lock, so write operations to a row are synchronized, and readers are guaranteed to see this operation fully completed.
Specified by: append in interface Table
Parameters: append - object that specifies the columns and values to be appended
Throws: IOException - e

public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) throws IOException
Description copied from interface: Table
See Table.incrementColumnValue(byte[], byte[], byte[], long, Durability); the Durability is defaulted to Durability.SYNC_WAL.
Specified by: incrementColumnValue in interface Table
Parameters:
row - The row that contains the cell to increment.
family - The column family of the cell to increment.
qualifier - The column qualifier of the cell to increment.
amount - The amount to increment the cell with (or decrement, if the amount is negative).
Throws: IOException - if a remote or network exception occurs.

public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, Durability durability) throws IOException
Description copied from interface: Table
Atomically increments a column value. If the column value does not yet exist it is initialized to amount and written to the specified column.
Setting durability to Durability.SKIP_WAL means that in a fail scenario you will lose any increments that have not been flushed.
Specified by: incrementColumnValue in interface Table
Parameters:
row - The row that contains the cell to increment.
family - The column family of the cell to increment.
qualifier - The column qualifier of the cell to increment.
amount - The amount to increment the cell with (or decrement, if the amount is negative).
durability - The persistence guarantee for this increment.
Throws: IOException - if a remote or network exception occurs.

public void batch(List<? extends Row> actions, Object[] results) throws IOException
Description copied from interface: Table
Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations. The ordering of execution of the actions is not defined. If you do a Put and a Get in the same Table.batch(java.util.List<? extends org.apache.hadoop.hbase.client.Row>, java.lang.Object[]) call, you will not necessarily be guaranteed that the Get returns what the Put had put.
Specified by: batch in interface Table
Parameters:
actions - list of Get, Put, Delete, Increment, Append, RowMutations
results - Empty Object[], same size as actions. Provides access to partial results, in case an exception is thrown. A null in the result array means that the call for that action failed, even after retries.
Throws: IOException
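A sketch of a mixed batch(List, Object[]) call combining a Put and a Get; keys and values are placeholders, and a null slot in results marks an action that failed even after retries:

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchExample {
  static void mixedBatch(RemoteHTable table) throws IOException {
    List<Row> actions = new ArrayList<>();
    Put put = new Put(Bytes.toBytes("row1"));   // placeholder keys and values
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    actions.add(put);
    actions.add(new Get(Bytes.toBytes("row2")));

    // One slot per action; a null slot means that action failed even after retries.
    Object[] results = new Object[actions.size()];
    table.batch(actions, results);
  }
}
```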
public Object[] batch(List<? extends Row> actions) throws IOException
Description copied from interface: Table
Same as Table.batch(List, Object[]), but returns an array of results instead of using a results parameter reference.
Specified by: batch in interface Table
Parameters: actions - list of Get, Put, Delete, Increment, Append, RowMutations
Throws: IOException
public <R> void batchCallback(List<? extends Row> actions, Object[] results, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws IOException, InterruptedException
Description copied from interface: Table
Same as Table.batch(List, Object[]), but with a callback.
Specified by: batchCallback in interface Table
Throws: IOException, InterruptedException
public <R> Object[] batchCallback(List<? extends Row> actions, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws IOException, InterruptedException
Description copied from interface: Table
Same as Table.batch(List), but with a callback.
Specified by: batchCallback in interface Table
Throws: IOException, InterruptedException
public CoprocessorRpcChannel coprocessorService(byte[] row)
Description copied from interface: Table
Creates and returns a RpcChannel instance connected to the table region containing the specified row. The row given does not actually have to exist. Whichever region would contain the row based on start and end keys will be used. Note that the row parameter is also not passed to the coprocessor handler registered for this protocol, unless the row is separately passed as an argument in the service request. The parameter here is only used to locate the region used to handle the call.
The obtained RpcChannel instance can be used to access a published coprocessor Service using standard protobuf service invocations:

  CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
  MyService.BlockingInterface service = MyService.newBlockingStub(channel);
  MyCallRequest request = MyCallRequest.newBuilder()
      ...
      .build();
  MyCallResponse response = service.myCall(null, request);

Specified by: coprocessorService in interface Table
Parameters: row - The row key used to identify the remote region location

public <T extends com.google.protobuf.Service,R> Map<byte[],R> coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable) throws com.google.protobuf.ServiceException, Throwable
Description copied from interface: Table
Creates an instance of the given Service subclass for each table region spanning the range from the startKey row to endKey row (inclusive), and invokes the passed Batch.Call.call(T) method with each Service instance.
Specified by: coprocessorService in interface Table
Type Parameters:
T - the Service subclass to connect to
R - Return type for the callable parameter's Batch.Call.call(T) method
Parameters:
service - the protocol buffer Service implementation to call
startKey - start region selection with region containing this row. If null, the selection will start with the first table region.
endKey - select regions up to and including the region containing this row. If null, selection will continue through the last table region.
callable - this instance's Batch.Call.call(T) method will be invoked once per table region, using the Service instance connected to that region.
Throws: com.google.protobuf.ServiceException, Throwable
public <T extends com.google.protobuf.Service,R> void coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws com.google.protobuf.ServiceException, Throwable
Description copied from interface: Table
Creates an instance of the given Service subclass for each table region spanning the range from the startKey row to endKey row (inclusive), and invokes the passed Batch.Call.call(T) method with each Service instance.
The given Batch.Callback.update(byte[], byte[], Object) method will be called with the return value from each region's Batch.Call.call(T) invocation.
Specified by: coprocessorService in interface Table
Type Parameters:
T - the Service subclass to connect to
R - Return type for the callable parameter's Batch.Call.call(T) method
Parameters:
service - the protocol buffer Service implementation to call
startKey - start region selection with region containing this row. If null, the selection will start with the first table region.
endKey - select regions up to and including the region containing this row. If null, selection will continue through the last table region.
callable - this instance's Batch.Call.call(T) method will be invoked once per table region, using the Service instance connected to that region.
Throws: com.google.protobuf.ServiceException, Throwable
public void mutateRow(RowMutations rm) throws IOException
Description copied from interface: Table
Performs multiple mutations atomically on a single row.
Specified by: mutateRow in interface Table
Parameters: rm - object that specifies the set of mutations to perform atomically
Throws: IOException
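A sketch of an atomic per-row update with mutateRow(RowMutations), combining a Put and a Delete on the same row; keys and column names are placeholders:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;

public class MutateRowExample {
  static void swapColumns(RemoteHTable table) throws IOException {
    byte[] row = Bytes.toBytes("row1");  // placeholder row key

    Put put = new Put(row);
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("new"), Bytes.toBytes("v"));
    Delete delete = new Delete(row);
    delete.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("old"));

    // Both mutations target the same row and are applied atomically.
    RowMutations rm = new RowMutations(row);
    rm.add(put);
    rm.add(delete);
    table.mutateRow(rm);
  }
}
```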
public long getWriteBufferSize()
Description copied from interface: Table
Returns the maximum size in bytes of the write buffer for this HTable.
The default value comes from the configuration parameter hbase.client.write.buffer.
Specified by: getWriteBufferSize in interface Table
public void setWriteBufferSize(long writeBufferSize) throws IOException
Description copied from interface: Table
Sets the size of the buffer in bytes. If the new size is less than the current amount of data in the write buffer, the buffer gets flushed.
Specified by: setWriteBufferSize in interface Table
Parameters: writeBufferSize - The new write buffer size, in bytes.
Throws: IOException - if a remote or network exception occurs.

public <R extends com.google.protobuf.Message> Map<byte[],R> batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor method, com.google.protobuf.Message request, byte[] startKey, byte[] endKey, R responsePrototype) throws com.google.protobuf.ServiceException, Throwable
Description copied from interface: Table
Creates an instance of the given Service subclass for each table region spanning the range from the startKey row to endKey row (inclusive); all the invocations to the same region server will be batched into one call. The coprocessor service is invoked according to the service instance, method name and parameters.
Specified by: batchCoprocessorService in interface Table
Type Parameters:
R - the response type for the coprocessor Service method
Parameters:
method - the descriptor for the protobuf service method to call.
request - the method call parameters
startKey - start region selection with region containing this row. If null, the selection will start with the first table region.
endKey - select regions up to and including the region containing this row. If null, selection will continue through the last table region.
responsePrototype - the proto type of the response of the method in Service.
Throws: com.google.protobuf.ServiceException, Throwable
public <R extends com.google.protobuf.Message> void batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor method, com.google.protobuf.Message request, byte[] startKey, byte[] endKey, R responsePrototype, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws com.google.protobuf.ServiceException, Throwable
Description copied from interface: Table
Creates an instance of the given Service subclass for each table region spanning the range from the startKey row to endKey row (inclusive); all the invocations to the same region server will be batched into one call. The coprocessor service is invoked according to the service instance, method name and parameters.
The given Batch.Callback.update(byte[], byte[], Object) method will be called with the return value from each region's invocation.
Specified by: batchCoprocessorService in interface Table
Type Parameters:
R - the response type for the coprocessor Service method
Parameters:
method - the descriptor for the protobuf service method to call.
request - the method call parameters
startKey - start region selection with region containing this row. If null, the selection will start with the first table region.
endKey - select regions up to and including the region containing this row. If null, selection will continue through the last table region.
responsePrototype - the proto type of the response of the method in Service.
callback - callback to invoke with the response for each region
Throws: com.google.protobuf.ServiceException, Throwable
public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, RowMutations rm) throws IOException
Description copied from interface: Table
Atomically checks if a row/family/qualifier value matches the expected value.
Specified by: checkAndMutate in interface Table
Parameters:
row - to check
family - column family to check
qualifier - column qualifier to check
compareOp - the comparison operator
value - the expected value
rm - mutations to perform if check succeeds
Throws: IOException - e

public void setOperationTimeout(int operationTimeout)
Description copied from interface: Table
Set the timeout (in milliseconds) of each operation in this Table instance; this overrides the value of hbase.client.operation.timeout in the configuration.
Specified by: setOperationTimeout in interface Table
Parameters: operationTimeout - the total timeout of each operation in milliseconds.

public int getOperationTimeout()
Description copied from interface: Table
Get the timeout (in milliseconds) of each operation in this Table instance.
Specified by: getOperationTimeout in interface Table
@Deprecated public void setRpcTimeout(int rpcTimeout)
Description copied from interface: Table
NOTE: This will set both the read and write timeout settings to the provided value.
Specified by: setRpcTimeout in interface Table
Parameters: rpcTimeout - the timeout of each rpc request in milliseconds.

@Deprecated public int getRpcTimeout()
Specified by: getRpcTimeout in interface Table
public int getReadRpcTimeout()
Description copied from interface: Table
Get the timeout (in milliseconds) of each rpc read request in this Table instance.
Specified by: getReadRpcTimeout in interface Table

public void setReadRpcTimeout(int readRpcTimeout)
Description copied from interface: Table
Set the timeout (in milliseconds) of each rpc read request for operations of this Table instance; this overrides the value of hbase.rpc.read.timeout in the configuration.
Specified by: setReadRpcTimeout in interface Table

public int getWriteRpcTimeout()
Description copied from interface: Table
Get the timeout (in milliseconds) of each rpc write request in this Table instance.
Specified by: getWriteRpcTimeout in interface Table

public void setWriteRpcTimeout(int writeRpcTimeout)
Description copied from interface: Table
Set the timeout (in milliseconds) of each rpc write request for operations of this Table instance; this overrides the value of hbase.rpc.write.timeout in the configuration.
Specified by: setWriteRpcTimeout in interface Table
Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.