org.apache.hadoop.hbase.client.Put.add(byte[], byte[], byte[])
|
org.apache.hadoop.hbase.client.Put.add(byte[], byte[], long, byte[])
|
org.apache.hadoop.hbase.client.Put.add(byte[], ByteBuffer, long, ByteBuffer)
|
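
The three Put.add overloads above were superseded by Put.addColumn in the 1.0 client API (per the Put Javadoc; not stated in this list). A minimal migration sketch; fam, qual, ts, and table are placeholders:

    // uses org.apache.hadoop.hbase.client.Put and org.apache.hadoop.hbase.util.Bytes
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(fam, qual, Bytes.toBytes("value"));      // was put.add(fam, qual, value)
    put.addColumn(fam, qual, ts, Bytes.toBytes("value"));  // was put.add(fam, qual, ts, value)
    table.put(put);
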
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(Configuration, Class<?>...)
|
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.addPeer(String, String)
Use addPeer(String, ReplicationPeerConfig, Map<TableName, ? extends Collection<String>>) instead.
|
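
A sketch of the replacement addPeer call named above, assuming the 1.x ReplicationPeerConfig API; the peer id and cluster key are placeholders:

    // uses org.apache.hadoop.hbase.client.replication.ReplicationAdmin
    // and org.apache.hadoop.hbase.replication.ReplicationPeerConfig
    ReplicationAdmin repAdmin = new ReplicationAdmin(conf);
    ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
    peerConfig.setClusterKey("zk1.example.com:2181:/hbase");  // placeholder cluster key
    repAdmin.addPeer("1", peerConfig, null);                  // null table-CFs map = replicate everything
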
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.addPeer(String, String, String) |
org.apache.hadoop.hbase.client.Result.addResults(ClientProtos.RegionLoadStats)
|
org.apache.hadoop.hbase.client.Table.batch(List<? extends Row>)
If any exception is thrown by one of the actions, there is no way to
retrieve the partially executed results. Use Table.batch(List<? extends Row>, Object[]) instead.
|
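
With the two-argument overload, the results array is filled in per action even when the call throws, so partial outcomes stay inspectable. A minimal sketch; table, fam, and qual are placeholders:

    List<Row> actions = new ArrayList<>();
    actions.add(new Get(Bytes.toBytes("row1")));
    actions.add(new Put(Bytes.toBytes("row2")).addColumn(fam, qual, Bytes.toBytes("v")));
    Object[] results = new Object[actions.size()];
    try {
      table.batch(actions, results);
    } catch (IOException | InterruptedException e) {
      // results[i] still holds the outcome (Result or exception) for each action that ran
    }
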
org.apache.hadoop.hbase.client.Table.batchCallback(List<? extends Row>, Batch.Callback)
|
org.apache.hadoop.hbase.quotas.QuotaSettings.buildSetQuotaRequestProto(QuotaSettings)
Removed in HBase 2.0+ as part of removing protobuf from our API
|
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.calculateRebalancedSplits(List<InputSplit>, JobContext, long) |
org.apache.hadoop.hbase.client.HConnection.clearCaches(ServerName)
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.client.HConnection.clearRegionCache()
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.client.HConnection.clearRegionCache(byte[]) |
org.apache.hadoop.hbase.client.HConnection.clearRegionCache(TableName)
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.configureIncrementalLoad(Job, HTable)
|
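
The HTable overload gave way to one taking a Table plus a RegionLocator; a sketch, assuming the (Job, Table, RegionLocator) overload and an enclosing method that throws IOException:

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tableName);
         RegionLocator locator = conn.getRegionLocator(tableName)) {
      HFileOutputFormat2.configureIncrementalLoad(job, table, locator);
    }
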
org.apache.hadoop.hbase.HTableDescriptor.convert() |
org.apache.hadoop.hbase.HColumnDescriptor.convert() |
org.apache.hadoop.hbase.ClusterStatus.convert() |
org.apache.hadoop.hbase.ClusterStatus.convert(ClusterStatusProtos.ClusterStatus) |
org.apache.hadoop.hbase.HColumnDescriptor.convert(HBaseProtos.ColumnFamilySchema) |
org.apache.hadoop.hbase.HTableDescriptor.convert(HBaseProtos.TableSchema) |
org.apache.hadoop.hbase.ProcedureInfo.convert(ProcedureProtos.Procedure) |
org.apache.hadoop.hbase.ProcedureInfo.convertToProcedureProto(ProcedureInfo) |
org.apache.hadoop.hbase.mapreduce.CellCreator.create(byte[], int, int, byte[], int, int, byte[], int, int, long, byte[], int, int, String) |
org.apache.hadoop.hbase.client.Result.createCompleteResult(List<Result>) |
org.apache.hadoop.hbase.client.HConnectionManager.createConnection(Configuration) |
org.apache.hadoop.hbase.client.HConnectionManager.createConnection(Configuration, ExecutorService) |
org.apache.hadoop.hbase.client.HConnectionManager.createConnection(Configuration, ExecutorService, User) |
org.apache.hadoop.hbase.client.HConnectionManager.createConnection(Configuration, User) |
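
These HConnectionManager factory methods were superseded by ConnectionFactory in the 1.0 client API (an inference from the Javadoc, not stated in this list). A minimal sketch:

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("t1"))) {
      // use the table; both close automatically. Connections are heavyweight,
      // so share one per application rather than one per request.
    }
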
org.apache.hadoop.hbase.client.HConnectionManager.deleteAllConnections()
kept for backward compatibility, but the behavior is broken (HBASE-8983)
|
org.apache.hadoop.hbase.client.HConnectionManager.deleteAllConnections(boolean) |
org.apache.hadoop.hbase.client.HConnection.deleteCachedRegionLocation(HRegionLocation)
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.client.Delete.deleteColumn(byte[], byte[])
|
org.apache.hadoop.hbase.client.Delete.deleteColumn(byte[], byte[], long)
|
org.apache.hadoop.hbase.client.Delete.deleteColumns(byte[], byte[])
|
org.apache.hadoop.hbase.client.Delete.deleteColumns(byte[], byte[], long)
|
org.apache.hadoop.hbase.client.HConnectionManager.deleteConnection(Configuration) |
org.apache.hadoop.hbase.client.Delete.deleteFamily(byte[])
|
org.apache.hadoop.hbase.client.Delete.deleteFamily(byte[], long)
|
org.apache.hadoop.hbase.client.Delete.deleteFamilyVersion(byte[], long)
|
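
The delete* methods above map onto add* equivalents in the 1.0 client API (per the Delete Javadoc). A sketch with placeholder fam, qual, ts, and table:

    Delete d = new Delete(Bytes.toBytes("row1"));
    d.addColumn(fam, qual);       // was deleteColumn: latest version of one cell
    d.addColumns(fam, qual, ts);  // was deleteColumns: all versions up to ts
    d.addFamily(fam);             // was deleteFamily: the whole column family
    d.addFamilyVersion(fam, ts);  // was deleteFamilyVersion: all family cells at exactly ts
    table.delete(d);
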
org.apache.hadoop.hbase.client.HConnectionManager.deleteStaleConnection(HConnection) |
org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.doBulkLoad(Path, HTable)
|
org.apache.hadoop.hbase.CellUtil.estimatedHeapSizeOfWithoutTags(Cell) |
org.apache.hadoop.hbase.CellUtil.estimatedSizeOf(Cell)
please use estimatedSerializedSizeOf(Cell)
|
org.apache.hadoop.hbase.client.HConnectionManager.execute(HConnectable)
Internal method, do not use through HConnectionManager.
|
org.apache.hadoop.hbase.rest.client.RemoteHTable.exists(List<Get>) |
org.apache.hadoop.hbase.client.HConnection.getAdmin(ServerName)
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.client.HConnection.getAdmin(ServerName, boolean)
You can pass the master flag, but nothing special is done.
|
org.apache.hadoop.hbase.client.HConnection.getClient(ServerName)
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.client.Result.getColumn(byte[], byte[])
|
org.apache.hadoop.hbase.client.Result.getColumnLatest(byte[], byte[])
|
org.apache.hadoop.hbase.client.Result.getColumnLatest(byte[], int, int, byte[], int, int)
|
org.apache.hadoop.hbase.client.Admin.getCompactionStateForRegion(byte[]) |
org.apache.hadoop.hbase.client.HConnectionManager.getConnection(Configuration) |
org.apache.hadoop.hbase.client.HConnection.getCurrentNrHRS()
This method will be changed from public to package-private.
|
org.apache.hadoop.hbase.HColumnDescriptor.getDataBlockEncodingOnDisk()
|
org.apache.hadoop.hbase.HRegionInfo.getDaughterRegions(Result)
use MetaTableAccessor methods for interacting with meta layouts
|
org.apache.hadoop.hbase.Cell.getFamily()
|
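
Cell.getFamily() and its siblings further down (getQualifier, getRow, getValue) are conventionally replaced by the CellUtil.clone* helpers, which copy the requested slice out of the cell's backing array:

    byte[] family    = CellUtil.cloneFamily(cell);
    byte[] qualifier = CellUtil.cloneQualifier(cell);
    byte[] row       = CellUtil.cloneRow(cell);
    byte[] value     = CellUtil.cloneValue(cell);
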
org.apache.hadoop.hbase.client.Mutation.getFamilyMap()
|
org.apache.hadoop.hbase.ProcedureInfo.getForeignExceptionMessage() |
org.apache.hadoop.hbase.HRegionInfo.getHRegionInfo(Result)
use MetaTableAccessor methods for interacting with meta layouts
|
org.apache.hadoop.hbase.HRegionInfo.getHRegionInfo(Result, byte[])
use MetaTableAccessor methods for interacting with meta layouts
|
org.apache.hadoop.hbase.HRegionInfo.getHRegionInfoAndServerName(Result)
use MetaTableAccessor methods for interacting with meta layouts
|
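
For the HRegionInfo helpers that parse hbase:meta rows, MetaTableAccessor exposes equivalents; a sketch assuming its static Result parsers:

    // metaRow is a Result read from hbase:meta
    HRegionInfo info = MetaTableAccessor.getHRegionInfo(metaRow);
    RegionLocations locations = MetaTableAccessor.getRegionLocations(metaRow);
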
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.getHTable()
|
org.apache.hadoop.hbase.mapred.TableInputFormatBase.getHTable()
|
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptor(byte[]) |
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptor(TableName) |
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptors(List<String>) |
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptorsByTableName(List<TableName>)
|
org.apache.hadoop.hbase.client.HConnection.getKeepAliveMasterService()
Since 0.96.0
|
org.apache.hadoop.hbase.client.HConnection.getMaster()
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.HRegionInfo.getMergeRegions(Result)
use MetaTableAccessor methods for interacting with meta layouts
|
org.apache.hadoop.hbase.Cell.getMvccVersion()
as of 1.0, use Cell.getSequenceId() instead.
Internal use only. A region-specific sequence ID given to each operation. It always exists for
cells in the memstore but is not retained forever. It may survive several flushes, but
generally becomes irrelevant after the cell's row is no longer involved in any operations that
require strict consistency.
|
org.apache.hadoop.hbase.HTableDescriptor.getName()
|
org.apache.hadoop.hbase.filter.FilterList.getNextKeyHint(KeyValue) |
org.apache.hadoop.hbase.filter.Filter.getNextKeyHint(KeyValue) |
org.apache.hadoop.hbase.client.HConnection.getNonceGenerator()
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.HTableDescriptor.getOwnerString()
since 0.94.1
|
org.apache.hadoop.hbase.Cell.getQualifier()
|
org.apache.hadoop.hbase.client.HConnection.getRegionCachePrefetch(byte[])
always returns false since 0.99
|
org.apache.hadoop.hbase.client.HConnection.getRegionCachePrefetch(TableName)
always returns false since 0.99
|
org.apache.hadoop.hbase.client.HConnection.getRegionLocation(byte[], byte[], boolean) |
org.apache.hadoop.hbase.client.HConnection.getRegionLocation(TableName, byte[], boolean)
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.Cell.getRow()
|
org.apache.hadoop.hbase.TableName.getRowComparator()
The comparator is an internal property of the table and should not have been exposed here.
|
org.apache.hadoop.hbase.client.Table.getRpcTimeout()
Use getReadRpcTimeout or getWriteRpcTimeout instead
|
org.apache.hadoop.hbase.rest.client.RemoteHTable.getRpcTimeout() |
org.apache.hadoop.hbase.client.Scan.getScanMetrics()
|
org.apache.hadoop.hbase.HRegionInfo.getSeqNumDuringOpen(Result)
use MetaTableAccessor methods for interacting with meta layouts
|
org.apache.hadoop.hbase.ClusterStatus.getServerInfo()
|
org.apache.hadoop.hbase.HRegionInfo.getServerName(Result)
use MetaTableAccessor methods for interacting with meta layouts
|
org.apache.hadoop.hbase.io.ImmutableBytesWritable.getSize()
|
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException.getSnapshotDescription()
since 1.3.0, will be removed in 3.0.0
|
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.getSplitKey(byte[], byte[], boolean) |
org.apache.hadoop.hbase.client.Result.getStats() |
org.apache.hadoop.hbase.HTableDescriptor.getTableDir(Path, byte[]) |
org.apache.hadoop.hbase.HRegionInfo.getTableName()
|
org.apache.hadoop.hbase.HRegionInfo.getTableName(byte[])
|
org.apache.hadoop.hbase.client.HConnection.getTableNames()
|
org.apache.hadoop.hbase.Cell.getValue()
|
org.apache.hadoop.hbase.HRegionInfo.getVersion()
HRI is no longer a VersionedWritable
|
org.apache.hadoop.hbase.client.Table.getWriteBufferSize()
|
org.apache.hadoop.hbase.client.Mutation.getWriteToWAL()
|
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.initCredentialsForCluster(Job, String)
|
org.apache.hadoop.hbase.client.HConnection.isDeadServer(ServerName)
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.client.HConnection.isMasterRunning()
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.client.Result.isPartial()
|
org.apache.hadoop.hbase.client.Admin.isSnapshotFinished(HBaseProtos.SnapshotDescription) |
org.apache.hadoop.hbase.client.HConnection.isTableAvailable(byte[]) |
org.apache.hadoop.hbase.client.HConnection.isTableAvailable(byte[], byte[][]) |
org.apache.hadoop.hbase.client.HConnection.isTableAvailable(TableName, byte[][])
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.client.HConnection.isTableDisabled(byte[]) |
org.apache.hadoop.hbase.client.HConnection.isTableEnabled(byte[]) |
org.apache.hadoop.hbase.client.Result.list()
|
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.listPeers()
|
org.apache.hadoop.hbase.client.Admin.listSnapshots() |
org.apache.hadoop.hbase.client.Admin.listSnapshots(Pattern) |
org.apache.hadoop.hbase.client.Admin.listSnapshots(String) |
org.apache.hadoop.hbase.client.HConnection.listTableNames()
|
org.apache.hadoop.hbase.client.HConnection.listTables()
|
org.apache.hadoop.hbase.client.Admin.listTableSnapshots(Pattern, Pattern) |
org.apache.hadoop.hbase.client.Admin.listTableSnapshots(String, String) |
org.apache.hadoop.hbase.client.HConnection.locateRegion(byte[])
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.client.HConnection.locateRegion(byte[], byte[]) |
org.apache.hadoop.hbase.client.HConnection.locateRegion(TableName, byte[])
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.client.HConnection.locateRegions(byte[]) |
org.apache.hadoop.hbase.client.HConnection.locateRegions(byte[], boolean, boolean) |
org.apache.hadoop.hbase.client.HConnection.locateRegions(TableName)
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.client.HConnection.locateRegions(TableName, boolean, boolean)
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.security.User.obtainAuthTokenForJob(Configuration, Job)
Use TokenUtil.obtainAuthTokenForJob(Connection, User, Job) instead.
|
org.apache.hadoop.hbase.security.User.obtainAuthTokenForJob(JobConf)
Use TokenUtil.obtainAuthTokenForJob(Connection, JobConf, User) instead.
|
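
A sketch of the TokenUtil replacement named above, assuming org.apache.hadoop.hbase.security.token.TokenUtil and the mapreduce Job variant:

    User user = User.getCurrent();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      TokenUtil.obtainAuthTokenForJob(conn, user, job);  // throws IOException/InterruptedException
    }
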
org.apache.hadoop.hbase.client.HConnection.processBatch(List<? extends Row>, byte[], ExecutorService, Object[]) |
org.apache.hadoop.hbase.client.HConnection.processBatch(List<? extends Row>, TableName, ExecutorService, Object[])
|
org.apache.hadoop.hbase.client.HConnection.processBatchCallback(List<? extends Row>, byte[], ExecutorService, Object[], Batch.Callback) |
org.apache.hadoop.hbase.client.HConnection.processBatchCallback(List<? extends Row>, TableName, ExecutorService, Object[], Batch.Callback)
|
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], List<Put>) |
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], Put) |
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], Put, int) |
org.apache.hadoop.hbase.client.Result.raw()
|
org.apache.hadoop.hbase.HRegionInfo.readFields(DataInput)
Use protobuf deserialization instead.
|
org.apache.hadoop.hbase.HTableDescriptor.readFields(DataInput)
|
org.apache.hadoop.hbase.HColumnDescriptor.readFields(DataInput)
|
org.apache.hadoop.hbase.util.Bytes.readVLong(byte[], int)
|
org.apache.hadoop.hbase.client.HConnection.relocateRegion(byte[], byte[]) |
org.apache.hadoop.hbase.client.HConnection.relocateRegion(TableName, byte[])
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.reverseDNS(InetAddress)
mistakenly made public in 0.98.7; scope will change to package-private
|
org.apache.hadoop.hbase.HColumnDescriptor.setEncodeOnDisk(boolean)
As of release 0.98 (HBASE-9870). This will be removed in HBase 2.0.0. This method does nothing now.
|
org.apache.hadoop.hbase.client.Mutation.setFamilyMap(NavigableMap<byte[], List<KeyValue>>)
|
org.apache.hadoop.hbase.client.Put.setFamilyMap(NavigableMap<byte[], List<KeyValue>>) |
org.apache.hadoop.hbase.client.Delete.setFamilyMap(NavigableMap<byte[], List<KeyValue>>) |
org.apache.hadoop.hbase.client.Increment.setFamilyMap(NavigableMap<byte[], List<KeyValue>>) |
org.apache.hadoop.hbase.client.Append.setFamilyMap(NavigableMap<byte[], List<KeyValue>>) |
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.setHTable(HTable)
|
org.apache.hadoop.hbase.mapred.TableInputFormatBase.setHTable(HTable)
|
org.apache.hadoop.hbase.mapreduce.TableRecordReader.setHTable(Table)
Use setTable() instead.
|
org.apache.hadoop.hbase.HColumnDescriptor.setKeepDeletedCells(boolean)
|
org.apache.hadoop.hbase.HTableDescriptor.setName(byte[]) |
org.apache.hadoop.hbase.HTableDescriptor.setName(TableName) |
org.apache.hadoop.hbase.HTableDescriptor.setOwner(User)
since 0.94.1
|
org.apache.hadoop.hbase.HTableDescriptor.setOwnerString(String)
since 0.94.1
|
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.setPeerTableCFs(String, String)
|
org.apache.hadoop.hbase.client.HConnection.setRegionCachePrefetch(byte[], boolean)
does nothing since 0.99
|
org.apache.hadoop.hbase.client.HConnection.setRegionCachePrefetch(TableName, boolean)
does nothing since 0.99
|
org.apache.hadoop.hbase.client.Table.setRpcTimeout(int)
Use setReadRpcTimeout or setWriteRpcTimeout instead
|
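
A sketch of the split read/write timeout setters named above; the millisecond values are placeholders:

    table.setReadRpcTimeout(30000);   // applies to gets and scans
    table.setWriteRpcTimeout(60000);  // applies to puts, deletes, increments, appends
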
org.apache.hadoop.hbase.rest.client.RemoteHTable.setRpcTimeout(int) |
org.apache.hadoop.hbase.client.HConnectionManager.setServerSideHConnectionRetries(Configuration, String, Log)
Internal method, do not use.
|
org.apache.hadoop.hbase.client.Scan.setStartRow(byte[])
use Scan.withStartRow(byte[]) instead. This method may change the inclusiveness of
the stop row to remain compatible with the old behavior.
|
org.apache.hadoop.hbase.client.Result.setStatistics(ClientProtos.RegionLoadStats) |
org.apache.hadoop.hbase.client.Scan.setStopRow(byte[])
use Scan.withStopRow(byte[]) instead. This method may change the inclusiveness of
the stop row to remain compatible with the old behavior.
|
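
A sketch of the withStartRow/withStopRow replacements, which make the row-bound semantics explicit:

    Scan scan = new Scan()
        .withStartRow(Bytes.toBytes("row-000"))  // inclusive by default
        .withStopRow(Bytes.toBytes("row-999"));  // exclusive by default
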
org.apache.hadoop.hbase.quotas.QuotaSettings.setupSetQuotaRequest(MasterProtos.SetQuotaRequest.Builder)
Removed in HBase 2.0+ as part of removing protobuf from our API
|
org.apache.hadoop.hbase.client.Table.setWriteBufferSize(long)
|
org.apache.hadoop.hbase.client.Mutation.setWriteToWAL(boolean)
|
org.apache.hadoop.hbase.client.Put.setWriteToWAL(boolean) |
org.apache.hadoop.hbase.client.Delete.setWriteToWAL(boolean) |
org.apache.hadoop.hbase.client.Increment.setWriteToWAL(boolean) |
org.apache.hadoop.hbase.client.Append.setWriteToWAL(boolean) |
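
setWriteToWAL(boolean) is conventionally replaced by Mutation.setDurability (an inference from the client Javadoc, not stated in this list):

    Put put = new Put(Bytes.toBytes("row1"));
    put.setDurability(Durability.SKIP_WAL);  // was setWriteToWAL(false); default is USE_DEFAULT
    table.put(put);
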
org.apache.hadoop.hbase.HColumnDescriptor.shouldCacheBloomsOnWrite()
|
org.apache.hadoop.hbase.HColumnDescriptor.shouldCacheDataInL1()
|
org.apache.hadoop.hbase.HColumnDescriptor.shouldCacheDataOnWrite()
|
org.apache.hadoop.hbase.HColumnDescriptor.shouldCacheIndexesOnWrite()
|
org.apache.hadoop.hbase.HColumnDescriptor.shouldCompressTags()
|
org.apache.hadoop.hbase.HColumnDescriptor.shouldEvictBlocksOnClose()
|
org.apache.hadoop.hbase.HColumnDescriptor.shouldPrefetchBlocksOnOpen()
|
org.apache.hadoop.hbase.client.Admin.snapshot(HBaseProtos.SnapshotDescription) |
org.apache.hadoop.hbase.client.Admin.snapshot(String, TableName, HBaseProtos.SnapshotDescription.Type) |
org.apache.hadoop.hbase.client.Admin.takeSnapshotAsync(HBaseProtos.SnapshotDescription) |
org.apache.hadoop.hbase.filter.FilterList.toString(int) |
org.apache.hadoop.hbase.filter.FilterList.transform(KeyValue) |
org.apache.hadoop.hbase.filter.Filter.transform(KeyValue) |
org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.tryAtomicRegionLoad(HConnection, byte[], byte[], Collection<LoadQueueItem>)
|
org.apache.hadoop.hbase.client.HConnection.updateCachedLocations(byte[], byte[], Object, HRegionLocation) |
org.apache.hadoop.hbase.client.HConnection.updateCachedLocations(TableName, byte[], byte[], Object, ServerName)
internal method, do not use through HConnection
|
org.apache.hadoop.hbase.client.HConnection.updateCachedLocations(TableName, byte[], Object, HRegionLocation) |
org.apache.hadoop.hbase.HRegionInfo.write(DataOutput)
|
org.apache.hadoop.hbase.HTableDescriptor.write(DataOutput)
Writables are going away.
Use MessageLite.toByteArray() instead.
|
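
A sketch of the protobuf round trip that replaces the Writable methods, assuming HTableDescriptor.toByteArray() and parseFrom(byte[]):

    byte[] bytes = tableDescriptor.toByteArray();  // protobuf-serialized form
    // parseFrom throws DeserializationException/IOException
    HTableDescriptor roundTrip = HTableDescriptor.parseFrom(bytes);
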
org.apache.hadoop.hbase.HColumnDescriptor.write(DataOutput)
|