Modifier and Type | Field and Description |
---|---|
static TableName |
TableName.META_TABLE_NAME
The hbase:meta table's name.
|
private TableName |
HTableDescriptor.name |
static TableName |
TableName.NAMESPACE_TABLE_NAME
The Namespace table's name.
|
static TableName |
TableName.OLD_META_TABLE_NAME
TableName for old .META.
|
static TableName |
TableName.OLD_ROOT_TABLE_NAME
TableName for old -ROOT- table.
|
private TableName |
HRegionInfo.tableName |
Modifier and Type | Field and Description |
---|---|
private static Set<TableName> |
TableName.tableCache
|
Modifier and Type | Method and Description |
---|---|
private static TableName |
TableName.createTableNameIfNecessary(ByteBuffer bns,
ByteBuffer qns)
Check that the object does not exist already.
|
private static TableName |
TableName.getADummyTableName(String qualifier)
Used to create table names for the old META and ROOT tables.
|
TableName |
HRegionInfo.getTable()
Get the current table name of the region
|
static TableName |
HRegionInfo.getTable(byte[] regionName)
Gets the table name from the specified region name.
|
TableName |
HTableDescriptor.getTableName()
Get the name of the table
|
static TableName |
TableName.valueOf(byte[] fullName) |
static TableName |
TableName.valueOf(byte[] namespace,
byte[] qualifier) |
static TableName |
TableName.valueOf(ByteBuffer namespace,
ByteBuffer qualifier) |
static TableName |
TableName.valueOf(String name) |
static TableName |
TableName.valueOf(String namespaceAsString,
String qualifierAsString) |
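The valueOf overloads above are the supported way to obtain TableName instances. A minimal sketch (the namespace and table names are placeholders, not part of this index):

```java
import org.apache.hadoop.hbase.TableName;

public class TableNameExamples {
  public static void main(String[] args) {
    // A fully qualified name uses "namespace:qualifier"; no prefix means the default namespace.
    TableName t1 = TableName.valueOf("my_ns:web_logs");      // hypothetical table
    TableName t2 = TableName.valueOf("my_ns", "web_logs");   // namespace + qualifier
    TableName t3 = TableName.valueOf("users");               // default namespace

    System.out.println(t1.getNamespaceAsString());  // my_ns
    System.out.println(t1.getQualifierAsString());  // web_logs
    System.out.println(t3.getNameAsString());       // users
  }
}
```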
Modifier and Type | Method and Description |
---|---|
Set<TableName> |
TableStateManager.getTablesInStates(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State... states) |
Modifier and Type | Method and Description |
---|---|
void |
TableStateManager.checkAndRemoveTableState(TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State states,
boolean deletePermanentState)
If the table is found in the given state, the in-memory state is removed.
|
int |
TableName.compareTo(TableName tableName)
For performance reasons, the ordering is not lexicographic.
|
static byte[] |
HRegionInfo.createRegionName(TableName tableName,
byte[] startKey,
byte[] id,
boolean newFormat)
Make a region name of passed parameters.
|
static byte[] |
HRegionInfo.createRegionName(TableName tableName,
byte[] startKey,
byte[] id,
int replicaId,
boolean newFormat)
Make a region name of passed parameters.
|
static byte[] |
HRegionInfo.createRegionName(TableName tableName,
byte[] startKey,
long regionid,
boolean newFormat)
Make a region name of passed parameters.
|
static byte[] |
HRegionInfo.createRegionName(TableName tableName,
byte[] startKey,
long regionid,
int replicaId,
boolean newFormat)
Make a region name of passed parameters.
|
static byte[] |
HRegionInfo.createRegionName(TableName tableName,
byte[] startKey,
String id,
boolean newFormat)
Make a region name of passed parameters.
|
HTableDescriptor |
TableDescriptors.get(TableName tableName) |
long |
ClusterStatus.getLastMajorCompactionTsForTable(TableName table) |
static int |
MetaTableAccessor.getRegionCount(org.apache.hadoop.conf.Configuration c,
TableName tableName)
Count regions in
hbase:meta for passed table. |
static int |
MetaTableAccessor.getRegionCount(Connection connection,
TableName tableName)
Count regions in
hbase:meta for passed table. |
static Scan |
MetaTableAccessor.getScanForTableName(TableName tableName)
This method creates a Scan object that will only scan catalog rows that
belong to the specified table.
|
HTableInterface |
CoprocessorEnvironment.getTable(TableName tableName) |
HTableInterface |
CoprocessorEnvironment.getTable(TableName tableName,
ExecutorService service) |
static List<HRegionInfo> |
MetaTableAccessor.getTableRegions(ZooKeeperWatcher zkw,
Connection connection,
TableName tableName)
Gets all of the regions of the specified table.
|
static List<HRegionInfo> |
MetaTableAccessor.getTableRegions(ZooKeeperWatcher zkw,
Connection connection,
TableName tableName,
boolean excludeOfflinedSplitParents)
Gets all of the regions of the specified table.
|
static List<Pair<HRegionInfo,ServerName>> |
MetaTableAccessor.getTableRegionsAndLocations(ZooKeeperWatcher zkw,
Connection connection,
TableName tableName) |
static List<Pair<HRegionInfo,ServerName>> |
MetaTableAccessor.getTableRegionsAndLocations(ZooKeeperWatcher zkw,
Connection connection,
TableName tableName,
boolean excludeOfflinedSplitParents) |
(package private) static byte[] |
MetaTableAccessor.getTableStartRowForMeta(TableName tableName) |
(package private) static boolean |
MetaTableAccessor.isInsideTable(HRegionInfo current,
TableName tableName) |
boolean |
TableStateManager.isTablePresent(TableName tableName)
Checks if table is present.
|
boolean |
TableStateManager.isTableState(TableName tableName,
boolean checkSource,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State... states) |
boolean |
TableStateManager.isTableState(TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State... states) |
HTableDescriptor |
TableDescriptors.remove(TableName tablename) |
void |
TableStateManager.setDeletedTable(TableName tableName)
Mark table as deleted.
|
private void |
HTableDescriptor.setMetaFlags(TableName name) |
HTableDescriptor |
HTableDescriptor.setName(TableName name)
Deprecated.
|
void |
TableStateManager.setTableState(TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state)
Sets the table into desired state.
|
boolean |
TableStateManager.setTableStateIfInStates(TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State newState,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State... states)
Sets the specified table into the newState, but only if the table is already in
one of the possibleCurrentStates (otherwise no operation is performed).
|
boolean |
TableStateManager.setTableStateIfNotInStates(TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State newState,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State... states)
Sets the specified table into the newState, but only if the table is NOT in
one of the possibleCurrentStates (otherwise no operation is performed).
|
static boolean |
MetaTableAccessor.tableExists(Connection connection,
TableName tableName)
Checks if the specified table exists.
|
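MetaTableAccessor is an internal helper and client code normally goes through Admin, but as a sketch of the two calls listed above (table name is a placeholder):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MetaLookupSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      TableName tn = TableName.valueOf("web_logs");  // hypothetical table
      if (MetaTableAccessor.tableExists(conn, tn)) {
        // Count the regions registered for this table in hbase:meta.
        int regions = MetaTableAccessor.getRegionCount(conn.getConfiguration(), tn);
        System.out.println(tn + " has " + regions + " region(s) in hbase:meta");
      }
    }
  }
}
```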
Constructor and Description |
---|
HRegionInfo(long regionId,
TableName tableName)
Private constructor used when constructing HRegionInfo for the
first meta regions
|
HRegionInfo(long regionId,
TableName tableName,
int replicaId) |
HRegionInfo(TableName tableName) |
HRegionInfo(TableName tableName,
byte[] startKey,
byte[] endKey)
Construct HRegionInfo with explicit parameters
|
HRegionInfo(TableName tableName,
byte[] startKey,
byte[] endKey,
boolean split)
Construct HRegionInfo with explicit parameters
|
HRegionInfo(TableName tableName,
byte[] startKey,
byte[] endKey,
boolean split,
long regionid)
Construct HRegionInfo with explicit parameters
|
HRegionInfo(TableName tableName,
byte[] startKey,
byte[] endKey,
boolean split,
long regionid,
int replicaId)
Construct HRegionInfo with explicit parameters
|
HTableDescriptor(TableName name)
Construct a table descriptor specifying a TableName object
|
HTableDescriptor(TableName name,
HColumnDescriptor[] families)
INTERNAL Private constructor used internally to create table descriptors for
catalog tables, hbase:meta and -ROOT-. |
HTableDescriptor(TableName name,
HColumnDescriptor[] families,
Map<ImmutableBytesWritable,ImmutableBytesWritable> values)
INTERNAL Private constructor used internally to create table descriptors for
catalog tables, hbase:meta and -ROOT-. |
HTableDescriptor(TableName name,
HTableDescriptor desc)
Construct a table descriptor by cloning the descriptor passed as a parameter
but using a different table name.
|
TableExistsException(TableName t) |
TableNotDisabledException(TableName tableName) |
TableNotEnabledException(TableName tableName) |
TableNotFoundException(TableName tableName) |
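The HTableDescriptor(TableName) constructor above is the usual starting point for defining a table. A minimal, hedged sketch (the table, namespace, and family names are placeholders; Admin.createTable is a standard Admin method not part of this index):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("my_ns", "web_logs");  // hypothetical table
    HTableDescriptor desc = new HTableDescriptor(tn);
    desc.addFamily(new HColumnDescriptor("d"));             // one column family named "d"

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (!admin.tableExists(tn)) {
        admin.createTable(desc);
      }
    }
  }
}
```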
Modifier and Type | Field and Description |
---|---|
private TableName |
HTable.tableName |
protected TableName |
RegionServerCallable.tableName |
private TableName |
HRegionLocator.tableName |
private TableName |
AsyncProcess.AsyncRequestFutureImpl.tableName |
private TableName |
HBaseAdmin.DeleteTableFuture.tableName |
private TableName |
HBaseAdmin.EnableTableFuture.tableName |
private TableName |
HBaseAdmin.DisableTableFuture.tableName |
private TableName |
ClientScanner.tableName |
private TableName |
MetaScanner.TableMetaScannerVisitor.tableName |
protected TableName |
RpcRetryingCallerWithReadReplicas.tableName |
protected TableName |
RegionAdminServiceCallable.tableName |
private TableName |
ScannerCallableWithReplicas.tableName |
private TableName |
BufferedMutatorParams.tableName |
private TableName |
BufferedMutatorImpl.tableName |
Modifier and Type | Field and Description |
---|---|
private ConcurrentMap<TableName,ConcurrentNavigableMap<byte[],RegionLocations>> |
MetaCache.cachedRegionLocations
Map of table to table
HRegionLocation s. |
Modifier and Type | Method and Description |
---|---|
private TableName |
HBaseAdmin.checkTableExists(TableName tableName)
Check if the table exists.
|
TableName |
HTable.getName() |
TableName |
Table.getName()
Gets the fully qualified table name instance of this table.
|
TableName |
HTablePool.PooledHTable.getName() |
TableName |
HRegionLocator.getName() |
TableName |
RegionLocator.getName()
Gets the fully qualified table name instance of this table.
|
TableName |
BufferedMutator.getName()
Gets the fully qualified table name instance of the table that this BufferedMutator writes to.
|
TableName |
BufferedMutatorImpl.getName() |
TableName |
HTableWrapper.getName() |
protected TableName |
ClientScanner.getTable() |
TableName |
RegionServerCallable.getTableName() |
TableName |
BufferedMutatorParams.getTableName() |
TableName[] |
ConnectionManager.HConnectionImplementation.listTableNames()
Deprecated.
Use
Admin.listTableNames() instead |
TableName[] |
ConnectionAdapter.listTableNames()
Deprecated.
|
TableName[] |
HBaseAdmin.listTableNames() |
TableName[] |
HConnection.listTableNames()
Deprecated.
Use
Admin.listTables() instead. |
TableName[] |
Admin.listTableNames()
List all of the names of userspace tables.
|
TableName[] |
HBaseAdmin.listTableNames(Pattern pattern) |
TableName[] |
Admin.listTableNames(Pattern pattern)
List all of the names of userspace tables.
|
TableName[] |
HBaseAdmin.listTableNames(Pattern pattern,
boolean includeSysTables) |
TableName[] |
Admin.listTableNames(Pattern pattern,
boolean includeSysTables)
List all of the names of userspace tables.
|
TableName[] |
HBaseAdmin.listTableNames(String regex) |
TableName[] |
Admin.listTableNames(String regex)
List all of the names of userspace tables.
|
TableName[] |
HBaseAdmin.listTableNames(String regex,
boolean includeSysTables) |
TableName[] |
Admin.listTableNames(String regex,
boolean includeSysTables)
List all of the names of userspace tables.
|
TableName[] |
HBaseAdmin.listTableNamesByNamespace(String name)
Get the list of table names in the given namespace.
|
TableName[] |
Admin.listTableNamesByNamespace(String name)
Get the list of table names in the given namespace.
|
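A short sketch of the listTableNames overloads above (the pattern and namespace name are placeholders):

```java
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListTablesSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // All userspace tables.
      for (TableName tn : admin.listTableNames()) {
        System.out.println(tn.getNameAsString());
      }
      // Tables matching a pattern, including system tables.
      TableName[] matched = admin.listTableNames(Pattern.compile(".*logs.*"), true);
      // Tables in one namespace (namespace name is a placeholder).
      TableName[] inNs = admin.listTableNamesByNamespace("my_ns");
      System.out.println(matched.length + " matched, " + inNs.length + " in my_ns");
    }
  }
}
```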
Modifier and Type | Method and Description |
---|---|
void |
HBaseAdmin.addColumn(TableName tableName,
HColumnDescriptor column)
Add a column to an existing table.
|
void |
Admin.addColumn(TableName tableName,
HColumnDescriptor column)
Add a column to an existing table.
|
static NavigableMap<HRegionInfo,ServerName> |
MetaScanner.allTableRegions(org.apache.hadoop.conf.Configuration conf,
Connection connection,
TableName tableName,
boolean offlined)
Deprecated.
|
static NavigableMap<HRegionInfo,ServerName> |
MetaScanner.allTableRegions(Connection connection,
TableName tableName)
Lists all of the table regions currently in META.
|
void |
ConnectionManager.HConnectionImplementation.cacheLocation(TableName tableName,
RegionLocations location)
Put a newly discovered HRegionLocation into the cache.
|
void |
ConnectionAdapter.cacheLocation(TableName tableName,
RegionLocations location)
Deprecated.
|
void |
ClusterConnection.cacheLocation(TableName tableName,
RegionLocations location) |
void |
MetaCache.cacheLocation(TableName tableName,
RegionLocations locations)
Put a newly discovered HRegionLocation into the cache.
|
private void |
ConnectionManager.HConnectionImplementation.cacheLocation(TableName tableName,
ServerName source,
HRegionLocation location)
Put a newly discovered HRegionLocation into the cache.
|
void |
MetaCache.cacheLocation(TableName tableName,
ServerName source,
HRegionLocation location)
Put a newly discovered HRegionLocation into the cache.
|
private void |
HBaseAdmin.checkTableExistence(TableName tableName) |
private TableName |
HBaseAdmin.checkTableExists(TableName tableName)
Check if the table exists.
|
void |
MetaCache.clearCache(TableName tableName)
Delete all cached entries of a table.
|
void |
MetaCache.clearCache(TableName tableName,
byte[] row)
Delete a cached location, no matter what it is.
|
void |
MetaCache.clearCache(TableName tableName,
byte[] row,
int replicaId)
Delete a cached location, no matter what it is.
|
void |
MetaCache.clearCache(TableName tableName,
byte[] row,
ServerName serverName)
Delete a cached location for a table, row and server
|
void |
ConnectionManager.HConnectionImplementation.clearRegionCache(TableName tableName) |
void |
ConnectionAdapter.clearRegionCache(TableName tableName)
Deprecated.
|
void |
HConnection.clearRegionCache(TableName tableName)
Deprecated.
internal method, do not use thru HConnection
|
void |
ClusterConnection.clearRegionCache(TableName tableName)
Allows flushing the region cache of all locations that pertain to
tableName |
void |
ConnectionManager.HConnectionImplementation.clearRegionCache(TableName tableName,
byte[] row) |
void |
HBaseAdmin.cloneSnapshot(byte[] snapshotName,
TableName tableName)
Create a new table by cloning the snapshot content.
|
void |
Admin.cloneSnapshot(byte[] snapshotName,
TableName tableName)
Create a new table by cloning the snapshot content.
|
void |
HBaseAdmin.cloneSnapshot(String snapshotName,
TableName tableName)
Create a new table by cloning the snapshot content.
|
void |
Admin.cloneSnapshot(String snapshotName,
TableName tableName)
Create a new table by cloning the snapshot content.
|
void |
HBaseAdmin.compact(TableName tableName)
Compact a table.
|
void |
Admin.compact(TableName tableName)
Compact a table.
|
void |
HBaseAdmin.compact(TableName tableName,
byte[] columnFamily)
Compact a column family within a table.
|
void |
Admin.compact(TableName tableName,
byte[] columnFamily)
Compact a column family within a table.
|
private void |
HBaseAdmin.compact(TableName tableName,
byte[] columnFamily,
boolean major)
Compact a table.
|
protected <CResult> AsyncProcess.AsyncRequestFutureImpl<CResult> |
AsyncProcess.createAsyncRequestFuture(TableName tableName,
List<Action<Row>> actions,
long nonceGroup,
ExecutorService pool,
Batch.Callback<CResult> callback,
Object[] results,
boolean needResults) |
protected MultiServerCallable<Row> |
AsyncProcess.createCallable(ServerName server,
TableName tableName,
MultiAction<Row> multi)
Create a callable.
|
static HTableInterface |
HTableWrapper.createWrapper(List<HTableInterface> openTables,
TableName tableName,
CoprocessorHost.Environment env,
ExecutorService pool) |
void |
HBaseAdmin.deleteColumn(TableName tableName,
byte[] columnName)
Delete a column from a table.
|
void |
Admin.deleteColumn(TableName tableName,
byte[] columnName)
Delete a column from a table.
|
void |
HBaseAdmin.deleteTable(TableName tableName)
Deletes a table.
|
void |
Admin.deleteTable(TableName tableName)
Deletes a table.
|
private Future<Void> |
HBaseAdmin.deleteTableAsyncV2(TableName tableName)
Deletes the table but does not block and wait for it to be completely removed.
|
void |
HBaseAdmin.disableTable(TableName tableName)
Disable table and wait on completion.
|
void |
Admin.disableTable(TableName tableName)
Disable table and wait on completion.
|
void |
HBaseAdmin.disableTableAsync(TableName tableName)
Starts the disable of a table.
|
void |
Admin.disableTableAsync(TableName tableName)
Starts the disable of a table.
|
private Future<Void> |
HBaseAdmin.disableTableAsyncV2(TableName tableName)
Disables the table but does not block and wait for it to be completely disabled.
|
void |
HBaseAdmin.enableTable(TableName tableName)
Enable a table.
|
void |
Admin.enableTable(TableName tableName)
Enable a table.
|
void |
HBaseAdmin.enableTableAsync(TableName tableName)
Brings a table on-line (enables it).
|
void |
Admin.enableTableAsync(TableName tableName)
Brings a table on-line (enables it).
|
private Future<Void> |
HBaseAdmin.enableTableAsyncV2(TableName tableName)
Enables the table but does not block and wait for it to be completely enabled.
|
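The disable/delete/enable methods above follow the usual table lifecycle: a table must be disabled before it can be deleted. A minimal sketch (table name is a placeholder):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TableLifecycleSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("web_logs");  // hypothetical table
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);  // blocks until the table is disabled
        }
        admin.deleteTable(tn);     // only a disabled table can be deleted
      }
    }
  }
}
```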
void |
HBaseAdmin.flush(TableName tableName)
Flush a table.
|
void |
Admin.flush(TableName tableName)
Flush a table.
|
Pair<Integer,Integer> |
HBaseAdmin.getAlterStatus(TableName tableName)
Get the status of the alter command, which indicates how many regions have received
the updated schema. Asynchronous operation.
|
Pair<Integer,Integer> |
Admin.getAlterStatus(TableName tableName)
Get the status of the alter command, which indicates how many regions have received the updated schema.
Asynchronous operation.
|
BufferedMutator |
ConnectionManager.HConnectionImplementation.getBufferedMutator(TableName tableName) |
BufferedMutator |
ConnectionAdapter.getBufferedMutator(TableName tableName)
Deprecated.
|
BufferedMutator |
Connection.getBufferedMutator(TableName tableName)
Retrieve a
BufferedMutator for performing client-side buffering of writes. |
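A sketch of client-side write buffering through a BufferedMutator. The BufferedMutatorParams overload of getBufferedMutator is used here to set a buffer size; the getBufferedMutator(TableName) form listed above behaves the same with defaults (table, family, and size values are placeholders):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("web_logs");  // hypothetical table with family "d"
    BufferedMutatorParams params = new BufferedMutatorParams(tn).writeBufferSize(4 * 1024 * 1024);
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         BufferedMutator mutator = conn.getBufferedMutator(params)) {
      for (int i = 0; i < 1000; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("d"), Bytes.toBytes("count"), Bytes.toBytes(i));
        mutator.mutate(put);  // buffered client-side; sent to the server in batches
      }
      mutator.flush();        // push any remaining buffered mutations
    }
  }
}
```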
(package private) RegionLocations |
ConnectionManager.HConnectionImplementation.getCachedLocation(TableName tableName,
byte[] row)
Search the cache for a location that fits our table and row key.
|
RegionLocations |
MetaCache.getCachedLocation(TableName tableName,
byte[] row)
Search the cache for a location that fits our table and row key.
|
ScannerCallableWithReplicas |
ClientSmallScanner.SmallScannerCallableFactory.getCallable(ClusterConnection connection,
TableName table,
Scan scan,
ScanMetrics scanMetrics,
byte[] localStartKey,
int cacheNum,
RpcControllerFactory controllerFactory,
ExecutorService pool,
int primaryOperationTimeout,
int retries,
int scannerTimeout,
org.apache.hadoop.conf.Configuration conf,
RpcRetryingCaller<Result[]> caller) |
ScannerCallableWithReplicas |
ClientSmallReversedScanner.SmallReversedScannerCallableFactory.getCallable(ClusterConnection connection,
TableName table,
Scan scan,
ScanMetrics scanMetrics,
byte[] localStartKey,
int cacheNum,
RpcControllerFactory controllerFactory,
ExecutorService pool,
int primaryOperationTimeout,
int retries,
int scannerTimeout,
org.apache.hadoop.conf.Configuration conf,
RpcRetryingCaller<Result[]> caller,
boolean isFirstRegionToLocate) |
private static Result |
MetaScanner.getClosestRowOrBefore(Table metaTable,
TableName userTableName,
byte[] row,
boolean useMetaReplicas) |
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState |
HBaseAdmin.getCompactionState(TableName tableName)
Get the current compaction state of a table.
|
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState |
Admin.getCompactionState(TableName tableName)
Get the current compaction state of a table.
|
HTableDescriptor |
ConnectionManager.HConnectionImplementation.getHTableDescriptor(TableName tableName)
Deprecated.
Use
Admin.getTableDescriptor(TableName) instead |
HTableDescriptor |
ConnectionAdapter.getHTableDescriptor(TableName tableName)
Deprecated.
|
HTableDescriptor |
HConnection.getHTableDescriptor(TableName tableName)
Deprecated.
|
long |
HBaseAdmin.getLastMajorCompactionTimestamp(TableName tableName) |
long |
Admin.getLastMajorCompactionTimestamp(TableName tableName)
Get the timestamp of the last major compaction for the passed table.
The timestamp of the oldest HFile resulting from a major compaction of that table,
or 0 if no such HFile could be found.
|
(package private) int |
ConnectionManager.HConnectionImplementation.getNumberOfCachedRegionLocations(TableName tableName) |
int |
MetaCache.getNumberOfCachedRegionLocations(TableName tableName)
Return the number of cached region for a table.
|
static boolean |
HTable.getRegionCachePrefetch(org.apache.hadoop.conf.Configuration conf,
TableName tableName)
Deprecated.
always returns false since 0.99
|
static boolean |
HTable.getRegionCachePrefetch(TableName tableName)
Deprecated.
always returns false since 0.99
|
boolean |
ConnectionManager.HConnectionImplementation.getRegionCachePrefetch(TableName tableName)
Deprecated.
|
boolean |
ConnectionAdapter.getRegionCachePrefetch(TableName tableName)
Deprecated.
|
boolean |
HConnection.getRegionCachePrefetch(TableName tableName)
Deprecated.
always returns false since 0.99
|
HRegionLocation |
ConnectionManager.HConnectionImplementation.getRegionLocation(TableName tableName,
byte[] row,
boolean reload) |
HRegionLocation |
ConnectionAdapter.getRegionLocation(TableName tableName,
byte[] row,
boolean reload)
Deprecated.
|
HRegionLocation |
HConnection.getRegionLocation(TableName tableName,
byte[] row,
boolean reload)
Deprecated.
internal method, do not use thru HConnection
|
HRegionLocation |
ClusterConnection.getRegionLocation(TableName tableName,
byte[] row,
boolean reload)
Find region location hosting passed row
|
(package private) static RegionLocations |
RpcRetryingCallerWithReadReplicas.getRegionLocations(boolean useCache,
int replicaId,
ClusterConnection cConnection,
TableName tableName,
byte[] row) |
static RegionLocations |
RegionAdminServiceCallable.getRegionLocations(ClusterConnection connection,
TableName tableName,
byte[] row,
boolean useCache,
int replicaId) |
RegionLocator |
ConnectionManager.HConnectionImplementation.getRegionLocator(TableName tableName) |
RegionLocator |
ConnectionAdapter.getRegionLocator(TableName tableName)
Deprecated.
|
RegionLocator |
HConnection.getRegionLocator(TableName tableName)
Deprecated.
Retrieve a RegionLocator implementation to inspect region information on a table.
|
RegionLocator |
Connection.getRegionLocator(TableName tableName)
Retrieve a RegionLocator implementation to inspect region information on a table.
|
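A sketch of inspecting region information through the RegionLocator obtained above (table name and row key are placeholders):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocatorSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("web_logs");  // hypothetical table
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // Which region (and server) hosts a particular row?
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row-42"));
      System.out.println(loc.getRegionInfo().getRegionNameAsString()
          + " on " + loc.getServerName());
      // All regions of the table.
      System.out.println(locator.getAllRegionLocations().size() + " region(s) total");
    }
  }
}
```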
private KeyValue.KVComparator |
MetaCache.getRowComparator(TableName tableName) |
HTableInterface |
ConnectionManager.HConnectionImplementation.getTable(TableName tableName) |
HTableInterface |
ConnectionAdapter.getTable(TableName tableName)
Deprecated.
|
HTableInterface |
HConnection.getTable(TableName tableName)
Deprecated.
Retrieve an HTableInterface implementation for access to a table.
|
Table |
Connection.getTable(TableName tableName)
Retrieve a Table implementation for accessing a table.
|
HTableInterface |
ConnectionManager.HConnectionImplementation.getTable(TableName tableName,
ExecutorService pool) |
HTableInterface |
ConnectionAdapter.getTable(TableName tableName,
ExecutorService pool)
Deprecated.
|
HTableInterface |
HConnection.getTable(TableName tableName,
ExecutorService pool)
Deprecated.
Retrieve an HTableInterface implementation for access to a table.
|
Table |
Connection.getTable(TableName tableName,
ExecutorService pool)
Retrieve a Table implementation for accessing a table.
|
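A minimal sketch of the recommended access path: obtain a Table from a Connection by TableName, then read and write (table, family, and row values are placeholders):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetTableSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("web_logs");  // hypothetical table with family "d"
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn)) {
      Put put = new Put(Bytes.toBytes("row-1"));
      put.addColumn(Bytes.toBytes("d"), Bytes.toBytes("url"), Bytes.toBytes("/index"));
      table.put(put);

      Result r = table.get(new Get(Bytes.toBytes("row-1")));
      System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("d"), Bytes.toBytes("url"))));
    }
  }
}
```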
HTableDescriptor |
HBaseAdmin.getTableDescriptor(TableName tableName)
Method for getting the tableDescriptor
|
HTableDescriptor |
Admin.getTableDescriptor(TableName tableName)
Method for getting the tableDescriptor
|
(package private) static HTableDescriptor |
HBaseAdmin.getTableDescriptor(TableName tableName,
HConnection connection,
RpcRetryingCallerFactory rpcCallerFactory,
RpcControllerFactory rpcControllerFactory,
int operationTimeout,
int rpcTimeout) |
private HTableDescriptor |
HBaseAdmin.getTableDescriptorByTableName(TableName tableName)
Get tableDescriptor
|
private ConcurrentNavigableMap<byte[],RegionLocations> |
MetaCache.getTableLocations(TableName tableName) |
List<HRegionInfo> |
HBaseAdmin.getTableRegions(TableName tableName)
Get the regions of a given table.
|
List<HRegionInfo> |
Admin.getTableRegions(TableName tableName)
Get the regions of a given table.
|
private void |
HBaseAdmin.internalRestoreSnapshot(String snapshotName,
TableName tableName)
Execute Restore/Clone snapshot and wait for the server to complete (blocking).
|
boolean |
MetaCache.isRegionCached(TableName tableName,
byte[] row)
Check the region cache to see whether a region is cached yet or not.
|
boolean |
ConnectionManager.HConnectionImplementation.isTableAvailable(TableName tableName) |
boolean |
ConnectionAdapter.isTableAvailable(TableName tableName)
Deprecated.
|
boolean |
HBaseAdmin.isTableAvailable(TableName tableName) |
boolean |
HConnection.isTableAvailable(TableName tableName)
Deprecated.
|
boolean |
Admin.isTableAvailable(TableName tableName) |
boolean |
ConnectionManager.HConnectionImplementation.isTableAvailable(TableName tableName,
byte[][] splitKeys) |
boolean |
ConnectionAdapter.isTableAvailable(TableName tableName,
byte[][] splitKeys)
Deprecated.
|
boolean |
HBaseAdmin.isTableAvailable(TableName tableName,
byte[][] splitKeys)
Use this API to check whether the table has been created with the specified number of
split keys that were used when creating the given table.
|
boolean |
HConnection.isTableAvailable(TableName tableName,
byte[][] splitKeys)
Deprecated.
internal method, do not use thru HConnection
|
boolean |
Admin.isTableAvailable(TableName tableName,
byte[][] splitKeys)
Use this API to check whether the table has been created with the specified number of split keys
that were used when creating the given table.
|
boolean |
ClusterConnection.isTableAvailable(TableName tableName,
byte[][] splitKeys)
Use this API to check whether the table has been created with the specified number of
split keys that were used when creating the given table.
|
boolean |
ConnectionManager.HConnectionImplementation.isTableDisabled(TableName tableName) |
boolean |
ConnectionUtils.MasterlessConnection.isTableDisabled(TableName tableName) |
boolean |
ConnectionAdapter.isTableDisabled(TableName tableName)
Deprecated.
|
boolean |
HBaseAdmin.isTableDisabled(TableName tableName) |
boolean |
HConnection.isTableDisabled(TableName tableName)
Deprecated.
|
boolean |
Admin.isTableDisabled(TableName tableName) |
static boolean |
HTable.isTableEnabled(org.apache.hadoop.conf.Configuration conf,
TableName tableName)
|
static boolean |
HTable.isTableEnabled(TableName tableName)
Deprecated.
|
boolean |
ConnectionManager.HConnectionImplementation.isTableEnabled(TableName tableName) |
boolean |
ConnectionAdapter.isTableEnabled(TableName tableName)
Deprecated.
|
boolean |
HBaseAdmin.isTableEnabled(TableName tableName) |
boolean |
HConnection.isTableEnabled(TableName tableName)
Deprecated.
A table for which isTableEnabled == false and isTableDisabled == false
is possible.
|
boolean |
Admin.isTableEnabled(TableName tableName) |
boolean |
ZooKeeperRegistry.isTableOnlineState(TableName tableName,
boolean enabled) |
boolean |
Registry.isTableOnlineState(TableName tableName,
boolean enabled) |
static List<RegionLocations> |
MetaScanner.listTableRegionLocations(org.apache.hadoop.conf.Configuration conf,
Connection connection,
TableName tableName)
Lists table regions and locations grouped by region range from META.
|
private RegionLocations |
ConnectionManager.HConnectionImplementation.locateMeta(TableName tableName,
boolean useCache,
int replicaId) |
HRegionLocation |
ConnectionManager.HConnectionImplementation.locateRegion(TableName tableName,
byte[] row) |
HRegionLocation |
ConnectionAdapter.locateRegion(TableName tableName,
byte[] row)
Deprecated.
|
HRegionLocation |
HConnection.locateRegion(TableName tableName,
byte[] row)
Deprecated.
internal method, do not use thru HConnection
|
HRegionLocation |
ClusterConnection.locateRegion(TableName tableName,
byte[] row)
Find the location of the region of tableName that row
lives in.
|
RegionLocations |
ConnectionManager.HConnectionImplementation.locateRegion(TableName tableName,
byte[] row,
boolean useCache,
boolean retry) |
RegionLocations |
ConnectionAdapter.locateRegion(TableName tableName,
byte[] row,
boolean useCache,
boolean retry)
Deprecated.
|
RegionLocations |
ClusterConnection.locateRegion(TableName tableName,
byte[] row,
boolean useCache,
boolean retry) |
RegionLocations |
ConnectionManager.HConnectionImplementation.locateRegion(TableName tableName,
byte[] row,
boolean useCache,
boolean retry,
int replicaId) |
RegionLocations |
ConnectionAdapter.locateRegion(TableName tableName,
byte[] row,
boolean useCache,
boolean retry,
int replicaId)
Deprecated.
|
RegionLocations |
ClusterConnection.locateRegion(TableName tableName,
byte[] row,
boolean useCache,
boolean retry,
int replicaId) |
private RegionLocations |
ConnectionManager.HConnectionImplementation.locateRegionInMeta(TableName tableName,
byte[] row,
boolean useCache,
boolean retry,
int replicaId) |
List<HRegionLocation> |
ConnectionManager.HConnectionImplementation.locateRegions(TableName tableName) |
List<HRegionLocation> |
ConnectionAdapter.locateRegions(TableName tableName)
Deprecated.
|
List<HRegionLocation> |
HConnection.locateRegions(TableName tableName)
Deprecated.
internal method, do not use thru HConnection
|
List<HRegionLocation> |
ClusterConnection.locateRegions(TableName tableName)
Gets the locations of all regions in the specified table, tableName.
|
List<HRegionLocation> |
ConnectionManager.HConnectionImplementation.locateRegions(TableName tableName,
boolean useCache,
boolean offlined) |
List<HRegionLocation> |
ConnectionAdapter.locateRegions(TableName tableName,
boolean useCache,
boolean offlined)
Deprecated.
|
List<HRegionLocation> |
HConnection.locateRegions(TableName tableName,
boolean useCache,
boolean offlined)
Deprecated.
internal method, do not use thru HConnection
|
List<HRegionLocation> |
ClusterConnection.locateRegions(TableName tableName,
boolean useCache,
boolean offlined)
Gets the locations of all regions in the specified table, tableName.
|
void |
HBaseAdmin.majorCompact(TableName tableName)
Major compact a table.
|
void |
Admin.majorCompact(TableName tableName)
Major compact a table.
|
void |
HBaseAdmin.majorCompact(TableName tableName,
byte[] columnFamily)
Major compact a column family within a table.
|
void |
Admin.majorCompact(TableName tableName,
byte[] columnFamily)
Major compact a column family within a table.
|
static void |
MetaScanner.metaScan(Connection connection,
MetaScanner.MetaScannerVisitor visitor,
TableName userTableName)
Scans the meta table and calls a visitor on each RowResult.
|
static void |
MetaScanner.metaScan(Connection connection,
MetaScanner.MetaScannerVisitor visitor,
TableName userTableName,
byte[] row,
int rowLimit)
Scans the meta table and calls a visitor on each RowResult.
|
(package private) static void |
MetaScanner.metaScan(Connection connection,
MetaScanner.MetaScannerVisitor visitor,
TableName tableName,
byte[] row,
int rowLimit,
TableName metaTableName)
Scans the meta table and calls a visitor on each RowResult.
|
void |
HBaseAdmin.modifyColumn(TableName tableName,
HColumnDescriptor descriptor)
Modify an existing column family on a table.
|
void |
Admin.modifyColumn(TableName tableName,
HColumnDescriptor descriptor)
Modify an existing column family on a table.
|
void |
HBaseAdmin.modifyTable(TableName tableName,
HTableDescriptor htd)
Modify an existing table, more IRB friendly version.
|
void |
Admin.modifyTable(TableName tableName,
HTableDescriptor htd)
Modify an existing table, more IRB friendly version.
|
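A sketch combining the schema-change methods above: add a column family, then modify a table-level setting by pushing the full descriptor back (table and family names, and the max file size, are placeholders):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SchemaChangeSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("web_logs");  // hypothetical table
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Add a new column family to an existing table.
      admin.addColumn(tn, new HColumnDescriptor("meta"));

      // Change a table-level setting and apply the updated descriptor.
      HTableDescriptor desc = admin.getTableDescriptor(tn);
      desc.setMaxFileSize(1024L * 1024L * 1024L);  // 1 GB max region file size
      admin.modifyTable(tn, desc);
    }
  }
}
```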
void |
ConnectionManager.HConnectionImplementation.processBatch(List<? extends Row> list,
TableName tableName,
ExecutorService pool,
Object[] results)
Deprecated.
|
void |
ConnectionAdapter.processBatch(List<? extends Row> actions,
TableName tableName,
ExecutorService pool,
Object[] results)
Deprecated.
|
void |
HConnection.processBatch(List<? extends Row> actions,
TableName tableName,
ExecutorService pool,
Object[] results)
Deprecated.
since 0.96 - Use
Table.batch(java.util.List<? extends org.apache.hadoop.hbase.client.Row>, java.lang.Object[]) instead |
<R> void |
ConnectionManager.HConnectionImplementation.processBatchCallback(List<? extends Row> list,
TableName tableName,
ExecutorService pool,
Object[] results,
Batch.Callback<R> callback)
|
<R> void |
ConnectionAdapter.processBatchCallback(List<? extends Row> list,
TableName tableName,
ExecutorService pool,
Object[] results,
Batch.Callback<R> callback)
Deprecated.
|
<R> void |
HConnection.processBatchCallback(List<? extends Row> list,
TableName tableName,
ExecutorService pool,
Object[] results,
Batch.Callback<R> callback)
|
List<Put> |
HTableMultiplexer.put(TableName tableName,
List<Put> puts)
The put requests will be buffered by their corresponding buffer queues.
|
boolean |
HTableMultiplexer.put(TableName tableName,
Put put)
The put request will be buffered by its corresponding buffer queue.
|
boolean |
HTableMultiplexer.put(TableName tableName,
Put put,
int maxAttempts)
The put request will be buffered by its corresponding buffer queue.
|
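A sketch of the non-blocking put path above, assuming the HTableMultiplexer(Configuration, int) constructor available in this release (table, family, and queue size are placeholders):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTableMultiplexer;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiplexerSketch {
  public static void main(String[] args) throws Exception {
    // Second argument is the per-region-server buffer queue size.
    HTableMultiplexer multiplexer =
        new HTableMultiplexer(HBaseConfiguration.create(), 10000);
    TableName tn = TableName.valueOf("web_logs");  // hypothetical table with family "d"

    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(Bytes.toBytes("d"), Bytes.toBytes("hits"), Bytes.toBytes(1L));

    // Returns false if the queue for the target region server is full;
    // the put is then not buffered, rather than blocking the caller.
    boolean accepted = multiplexer.put(tn, put);
    System.out.println("buffered: " + accepted);
  }
}
```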
HRegionLocation |
ConnectionManager.HConnectionImplementation.relocateRegion(TableName tableName,
byte[] row) |
HRegionLocation |
ConnectionAdapter.relocateRegion(TableName tableName,
byte[] row)
Deprecated.
|
HRegionLocation |
HConnection.relocateRegion(TableName tableName,
byte[] row)
Deprecated.
internal method, do not use thru HConnection
|
HRegionLocation |
ClusterConnection.relocateRegion(TableName tableName,
byte[] row)
Find the location of the region of tableName that row
lives in, ignoring any value that might be in the cache.
|
RegionLocations |
ConnectionManager.HConnectionImplementation.relocateRegion(TableName tableName,
byte[] row,
int replicaId) |
RegionLocations |
ConnectionAdapter.relocateRegion(TableName tableName,
byte[] row,
int replicaId)
Deprecated.
|
RegionLocations |
ClusterConnection.relocateRegion(TableName tableName,
byte[] row,
int replicaId)
Find the location of the region of tableName that row
lives in, ignoring any value that might be in the cache.
|
static void |
HTable.setRegionCachePrefetch(org.apache.hadoop.conf.Configuration conf,
TableName tableName,
boolean enable)
Deprecated.
does nothing since 0.99
|
static void |
HTable.setRegionCachePrefetch(TableName tableName,
boolean enable)
Deprecated.
does nothing since 0.99
|
void |
ConnectionManager.HConnectionImplementation.setRegionCachePrefetch(TableName tableName,
boolean enable)
Deprecated.
|
void |
ConnectionAdapter.setRegionCachePrefetch(TableName tableName,
boolean enable)
Deprecated.
|
void |
HConnection.setRegionCachePrefetch(TableName tableName,
boolean enable)
Deprecated.
does nothing since 0.99
|
void |
HBaseAdmin.snapshot(byte[] snapshotName,
TableName tableName)
Create a timestamp consistent snapshot for the given table.
|
void |
Admin.snapshot(byte[] snapshotName,
TableName tableName)
Create a timestamp consistent snapshot for the given table.
|
void |
HBaseAdmin.snapshot(String snapshotName,
TableName tableName)
Take a snapshot for the given table.
|
void |
Admin.snapshot(String snapshotName,
TableName tableName)
Take a snapshot for the given table.
|
void |
HBaseAdmin.snapshot(String snapshotName,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type)
Create typed snapshot of the table.
|
void |
Admin.snapshot(String snapshotName,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type)
Create typed snapshot of the table.
|
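The snapshot and cloneSnapshot methods above are often used together: take a snapshot of a live table, then materialize it as a new table. A minimal sketch (snapshot and table names are placeholders):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    TableName source = TableName.valueOf("web_logs");       // hypothetical table
    TableName clone  = TableName.valueOf("web_logs_copy");  // hypothetical clone target
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Take a snapshot of the source table (the table stays online).
      admin.snapshot("web_logs_snap_1", source);
      // Materialize the snapshot as a new table.
      admin.cloneSnapshot("web_logs_snap_1", clone);
    }
  }
}
```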
void |
HBaseAdmin.split(TableName tableName)
Split a table.
|
void |
Admin.split(TableName tableName)
Split a table.
|
void |
HBaseAdmin.split(TableName tableName,
byte[] splitPoint)
Split a table.
|
void |
Admin.split(TableName tableName,
byte[] splitPoint)
Split a table.
|
<CResult> AsyncProcess.AsyncRequestFuture |
AsyncProcess.submit(ExecutorService pool,
TableName tableName,
List<? extends Row> rows,
boolean atLeastOne,
Batch.Callback<CResult> callback,
boolean needResults)
Extract from the rows list what we can submit.
|
<CResult> AsyncProcess.AsyncRequestFuture |
AsyncProcess.submit(TableName tableName,
List<? extends Row> rows,
boolean atLeastOne,
Batch.Callback<CResult> callback,
boolean needResults)
|
<CResult> AsyncProcess.AsyncRequestFuture |
AsyncProcess.submitAll(ExecutorService pool,
TableName tableName,
List<? extends Row> rows,
Batch.Callback<CResult> callback,
Object[] results)
Immediately submit the list of rows, regardless of the server status.
|
<CResult> AsyncProcess.AsyncRequestFuture |
AsyncProcess.submitAll(TableName tableName,
List<? extends Row> rows,
Batch.Callback<CResult> callback,
Object[] results)
|
(package private) <CResult> AsyncProcess.AsyncRequestFuture |
AsyncProcess.submitMultiActions(TableName tableName,
List<Action<Row>> retainedActions,
long nonceGroup,
Batch.Callback<CResult> callback,
Object[] results,
boolean needResults,
List<Exception> locationErrors,
List<Integer> locationErrorRows,
Map<ServerName,MultiAction<Row>> actionsByServer,
ExecutorService pool) |
boolean |
HBaseAdmin.tableExists(TableName tableName) |
boolean |
Admin.tableExists(TableName tableName) |
void |
HBaseAdmin.truncateTable(TableName tableName,
boolean preserveSplits)
Truncate a table.
|
void |
Admin.truncateTable(TableName tableName,
boolean preserveSplits)
Truncate a table.
|
void |
ConnectionManager.HConnectionImplementation.updateCachedLocations(TableName tableName,
byte[] regionName,
byte[] rowkey,
Object exception,
ServerName source)
Update the location with the new value (if the exception is a RegionMovedException)
or delete it from the cache.
|
void |
ConnectionAdapter.updateCachedLocations(TableName tableName,
byte[] regionName,
byte[] rowkey,
Object exception,
ServerName source)
Deprecated.
|
void |
HConnection.updateCachedLocations(TableName tableName,
byte[] regionName,
byte[] rowkey,
Object exception,
ServerName source)
Deprecated.
internal method, do not use thru HConnection
|
void |
ClusterConnection.updateCachedLocations(TableName tableName,
byte[] regionName,
byte[] rowkey,
Object exception,
ServerName source)
Update the location cache.
|
void |
ConnectionManager.HConnectionImplementation.updateCachedLocations(TableName tableName,
byte[] rowkey,
Object exception,
HRegionLocation source) |
void |
ConnectionAdapter.updateCachedLocations(TableName tableName,
byte[] rowkey,
Object exception,
HRegionLocation source)
Deprecated.
|
void |
HConnection.updateCachedLocations(TableName tableName,
byte[] rowkey,
Object exception,
HRegionLocation source)
Deprecated.
|
private void |
HBaseAdmin.waitUntilTableIsEnabled(TableName tableName)
Wait for the table to be enabled and available.
If enabling the table exceeds the retry period, an exception is thrown.
|
Modifier and Type | Method and Description |
---|---|
HTableDescriptor[] |
ConnectionManager.HConnectionImplementation.getHTableDescriptorsByTableName(List<TableName> tableNames)
Deprecated.
Use
Admin.getTableDescriptorsByTableName(List) instead |
HTableDescriptor[] |
ConnectionAdapter.getHTableDescriptorsByTableName(List<TableName> tableNames)
Deprecated.
|
HTableDescriptor[] |
HConnection.getHTableDescriptorsByTableName(List<TableName> tableNames)
Deprecated.
Use
Admin.getTableDescriptor(TableName) instead. |
HTableDescriptor[] |
HBaseAdmin.getTableDescriptorsByTableName(List<TableName> tableNames)
Get tableDescriptors
|
HTableDescriptor[] |
Admin.getTableDescriptorsByTableName(List<TableName> tableNames)
Get tableDescriptors
|
Constructor and Description |
---|
AsyncProcess.AsyncRequestFutureImpl(TableName tableName,
List<Action<Row>> actions,
long nonceGroup,
ExecutorService pool,
boolean needResults,
Object[] results,
Batch.Callback<CResult> callback) |
BufferedMutatorParams(TableName tableName) |
ClientScanner(org.apache.hadoop.conf.Configuration conf,
Scan scan,
TableName tableName,
ClusterConnection connection,
RpcRetryingCallerFactory rpcFactory,
RpcControllerFactory controllerFactory,
ExecutorService pool,
int primaryOperationTimeout)
Create a new ClientScanner for the specified table. Note that the passed
Scan's start row may be changed. |
ClientSmallReversedScanner.SmallReversedScannerCallable(ClusterConnection connection,
TableName table,
Scan scan,
ScanMetrics scanMetrics,
byte[] locateStartRow,
RpcControllerFactory controllerFactory,
int caching,
int replicaId) |
ClientSmallReversedScanner(org.apache.hadoop.conf.Configuration conf,
Scan scan,
TableName tableName,
ClusterConnection connection,
RpcRetryingCallerFactory rpcFactory,
RpcControllerFactory controllerFactory,
ExecutorService pool,
int primaryOperationTimeout)
Create a new ReversibleClientScanner for the specified table.
|
ClientSmallReversedScanner(org.apache.hadoop.conf.Configuration conf,
Scan scan,
TableName tableName,
ClusterConnection connection,
RpcRetryingCallerFactory rpcFactory,
RpcControllerFactory controllerFactory,
ExecutorService pool,
int primaryOperationTimeout,
ClientSmallReversedScanner.SmallReversedScannerCallableFactory callableFactory)
Create a new ReversibleClientScanner for the specified table.
|
ClientSmallScanner.SmallScannerCallable(ClusterConnection connection,
TableName table,
Scan scan,
ScanMetrics scanMetrics,
RpcControllerFactory controllerFactory,
int caching,
int id) |
ClientSmallScanner(org.apache.hadoop.conf.Configuration conf,
Scan scan,
TableName tableName,
ClusterConnection connection,
RpcRetryingCallerFactory rpcFactory,
RpcControllerFactory controllerFactory,
ExecutorService pool,
int primaryOperationTimeout)
Create a new ShortClientScanner for the specified table.
|
ClientSmallScanner(org.apache.hadoop.conf.Configuration conf,
Scan scan,
TableName tableName,
ClusterConnection connection,
RpcRetryingCallerFactory rpcFactory,
RpcControllerFactory controllerFactory,
ExecutorService pool,
int primaryOperationTimeout,
ClientSmallScanner.SmallScannerCallableFactory callableFactory)
Create a new ShortClientScanner for the specified table.
|
FlushRegionCallable(ClusterConnection connection,
RpcControllerFactory rpcControllerFactory,
TableName tableName,
byte[] regionName,
byte[] regionStartKey,
boolean writeFlushWalMarker) |
HBaseAdmin.DeleteTableFuture(HBaseAdmin admin,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse response) |
HBaseAdmin.DisableTableFuture(HBaseAdmin admin,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse response) |
HBaseAdmin.EnableTableFuture(HBaseAdmin admin,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse response) |
HRegionLocator(TableName tableName,
ClusterConnection connection) |
HTable(org.apache.hadoop.conf.Configuration conf,
TableName tableName)
Deprecated.
Constructing HTable objects manually has been deprecated. Please use
Connection to instantiate a Table instead. |
HTable(org.apache.hadoop.conf.Configuration conf,
TableName tableName,
ExecutorService pool)
Deprecated.
Constructing HTable objects manually has been deprecated. Please use
Connection to instantiate a Table instead. |
HTable(TableName tableName,
ClusterConnection connection,
ConnectionConfiguration tableConfig,
RpcRetryingCallerFactory rpcCallerFactory,
RpcControllerFactory rpcControllerFactory,
ExecutorService pool)
Creates an object to access an HBase table.
|
HTable(TableName tableName,
Connection connection)
Deprecated.
Do not use.
|
HTable(TableName tableName,
Connection connection,
ExecutorService pool)
Deprecated.
Do not use, internal ctor.
|
HTableWrapper(List<HTableInterface> openTables,
TableName tableName,
ClusterConnection connection,
ExecutorService pool) |
MetaScanner.TableMetaScannerVisitor(TableName tableName) |
MultiServerCallable(ClusterConnection connection,
TableName tableName,
ServerName location,
RpcControllerFactory rpcFactory,
MultiAction<R> multi) |
RegionAdminServiceCallable(ClusterConnection connection,
RpcControllerFactory rpcControllerFactory,
HRegionLocation location,
TableName tableName,
byte[] row) |
RegionAdminServiceCallable(ClusterConnection connection,
RpcControllerFactory rpcControllerFactory,
HRegionLocation location,
TableName tableName,
byte[] row,
int replicaId) |
RegionAdminServiceCallable(ClusterConnection connection,
RpcControllerFactory rpcControllerFactory,
TableName tableName,
byte[] row) |
RegionServerCallable(Connection connection,
TableName tableName,
byte[] row) |
ReversedClientScanner(org.apache.hadoop.conf.Configuration conf,
Scan scan,
TableName tableName,
ClusterConnection connection,
RpcRetryingCallerFactory rpcFactory,
RpcControllerFactory controllerFactory,
ExecutorService pool,
int primaryOperationTimeout)
Create a new ReversibleClientScanner for the specified table. Note that the
passed Scan's start row may be changed. |
ReversedScannerCallable(ClusterConnection connection,
TableName tableName,
Scan scan,
ScanMetrics scanMetrics,
byte[] locateStartRow)
|
ReversedScannerCallable(ClusterConnection connection,
TableName tableName,
Scan scan,
ScanMetrics scanMetrics,
byte[] locateStartRow,
RpcControllerFactory rpcFactory) |
ReversedScannerCallable(ClusterConnection connection,
TableName tableName,
Scan scan,
ScanMetrics scanMetrics,
byte[] locateStartRow,
RpcControllerFactory rpcFactory,
int replicaId) |
RpcRetryingCallerWithReadReplicas(RpcControllerFactory rpcControllerFactory,
TableName tableName,
ClusterConnection cConnection,
Get get,
ExecutorService pool,
int retries,
int callTimeout,
int timeBeforeReplicas) |
ScannerCallable(ClusterConnection connection,
TableName tableName,
Scan scan,
ScanMetrics scanMetrics,
RpcControllerFactory rpcControllerFactory) |
ScannerCallable(ClusterConnection connection,
TableName tableName,
Scan scan,
ScanMetrics scanMetrics,
RpcControllerFactory rpcControllerFactory,
int id) |
ScannerCallableWithReplicas(TableName tableName,
ClusterConnection cConnection,
ScannerCallable baseCallable,
ExecutorService pool,
int timeBeforeReplicas,
Scan scan,
int retries,
int scannerTimeout,
int caching,
org.apache.hadoop.conf.Configuration conf,
RpcRetryingCaller<Result[]> caller) |
Modifier and Type | Method and Description |
---|---|
<R,S,P extends com.google.protobuf.Message,Q extends com.google.protobuf.Message,T extends com.google.protobuf.Message> |
AggregationClient.avg(TableName tableName,
ColumnInterpreter<R,S,P,Q,T> ci,
Scan scan)
This is the client side interface/handle for calling the average method for
a given cf-cq combination.
|
private <R,S,P extends com.google.protobuf.Message,Q extends com.google.protobuf.Message,T extends com.google.protobuf.Message> |
AggregationClient.getAvgArgs(TableName tableName,
ColumnInterpreter<R,S,P,Q,T> ci,
Scan scan)
It computes average while fetching sum and row count from all the
corresponding regions.
|
<R,S,P extends com.google.protobuf.Message,Q extends com.google.protobuf.Message,T extends com.google.protobuf.Message> |
AggregationClient.max(TableName tableName,
ColumnInterpreter<R,S,P,Q,T> ci,
Scan scan)
It gives the maximum value of a column for a given column family for the
given range.
|
<R,S,P extends com.google.protobuf.Message,Q extends com.google.protobuf.Message,T extends com.google.protobuf.Message> |
AggregationClient.median(TableName tableName,
ColumnInterpreter<R,S,P,Q,T> ci,
Scan scan)
This is the client side interface/handler for calling the median method for a
given cf-cq combination.
|
<R,S,P extends com.google.protobuf.Message,Q extends com.google.protobuf.Message,T extends com.google.protobuf.Message> |
AggregationClient.min(TableName tableName,
ColumnInterpreter<R,S,P,Q,T> ci,
Scan scan)
It gives the minimum value of a column for a given column family for the
given range.
|
String |
SecureBulkLoadClient.prepareBulkLoad(TableName tableName) |
<R,S,P extends com.google.protobuf.Message,Q extends com.google.protobuf.Message,T extends com.google.protobuf.Message> |
AggregationClient.rowCount(TableName tableName,
ColumnInterpreter<R,S,P,Q,T> ci,
Scan scan)
It gives the row count, by summing up the individual results obtained from
regions.
|
<R,S,P extends com.google.protobuf.Message,Q extends com.google.protobuf.Message,T extends com.google.protobuf.Message> |
AggregationClient.std(TableName tableName,
ColumnInterpreter<R,S,P,Q,T> ci,
Scan scan)
This is the client side interface/handle for calling the std method for a
given cf-cq combination.
|
<R,S,P extends com.google.protobuf.Message,Q extends com.google.protobuf.Message,T extends com.google.protobuf.Message> |
AggregationClient.sum(TableName tableName,
ColumnInterpreter<R,S,P,Q,T> ci,
Scan scan)
It sums up the values returned from the various regions.
|
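A sketch of the AggregationClient calls above, assuming the AggregateImplementation coprocessor is enabled on the table and the aggregated column holds 8-byte longs (table, family, and qualifier names are placeholders):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class AggregationSketch {
  public static void main(String[] args) throws Throwable {
    TableName tn = TableName.valueOf("web_logs");  // hypothetical table with family "d"
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("d"), Bytes.toBytes("hits"));

    AggregationClient client = new AggregationClient(HBaseConfiguration.create());
    long rows = client.rowCount(tn, new LongColumnInterpreter(), scan);
    long total = client.sum(tn, new LongColumnInterpreter(), scan);
    System.out.println(rows + " rows, sum=" + total);
  }
}
```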
Modifier and Type | Field and Description |
---|---|
private static TableName |
BufferedMutatorExample.TABLE |
private TableName |
MultiThreadedClientExample.WriteExampleCallable.tableName |
private TableName |
MultiThreadedClientExample.SingleWriteExampleCallable.tableName |
private TableName |
MultiThreadedClientExample.ReadExampleCallable.tableName |
Modifier and Type | Method and Description |
---|---|
private void |
MultiThreadedClientExample.warmUpConnectionCache(Connection connection,
TableName tn) |
Constructor and Description |
---|
MultiThreadedClientExample.ReadExampleCallable(Connection connection,
TableName tableName) |
MultiThreadedClientExample.SingleWriteExampleCallable(Connection connection,
TableName tableName) |
MultiThreadedClientExample.WriteExampleCallable(Connection connection,
TableName tableName) |
Modifier and Type | Method and Description |
---|---|
static Map<TableName,List<String>> |
ReplicationAdmin.parseTableCFsFromConfig(String tableCFsConfig) |
Modifier and Type | Method and Description |
---|---|
private void |
ReplicationAdmin.checkAndSyncTableDescToPeers(TableName tableName,
byte[][] splits)
Connect to the peer and check the table descriptor on the peer:
create the same table on the peer if it does not exist;
throw an exception if the table exists on the peer cluster but the descriptors are not the same.
|
void |
ReplicationAdmin.disableTableRep(TableName tableName)
Disable a table's replication switch.
|
void |
ReplicationAdmin.enableTableRep(TableName tableName)
Enable a table's replication switch.
|
private byte[][] |
ReplicationAdmin.getTableSplitRowKeys(TableName tableName)
Get the split row keys of the table.
|
private void |
ReplicationAdmin.setTableRep(TableName tableName,
boolean isRepEnabled)
Set the table's replication switch if it is not already set.
|
Modifier and Type | Method and Description |
---|---|
void |
ReplicationAdmin.addPeer(String id,
ReplicationPeerConfig peerConfig,
Map<TableName,? extends Collection<String>> tableCfs)
Add a new remote slave cluster for replication.
|
void |
ReplicationAdmin.appendPeerTableCFs(String id,
Map<TableName,? extends Collection<String>> tableCfs)
Append the replicable table-cf config of the specified peer
|
(package private) static String |
ReplicationAdmin.getTableCfsStr(Map<TableName,? extends Collection<String>> tableCfs) |
void |
ReplicationAdmin.removePeerTableCFs(String id,
Map<TableName,? extends Collection<String>> tableCfs)
Remove some table-cfs from the config of the specified peer
|
void |
ReplicationAdmin.setPeerTableCFs(String id,
Map<TableName,? extends Collection<String>> tableCfs)
Set the replicable table-cf config of the specified peer
|
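A sketch of the peer-management methods above: register a peer with a table/column-family selection, then enable replication for a table (the peer id, cluster key, table, and family are placeholders):

```java
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class ReplicationSketch {
  public static void main(String[] args) throws Exception {
    ReplicationAdmin repAdmin = new ReplicationAdmin(HBaseConfiguration.create());
    try {
      ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
      peerConfig.setClusterKey("peer-zk:2181:/hbase");       // placeholder cluster key

      Map<TableName, Collection<String>> tableCfs = new HashMap<>();
      Collection<String> cfs = new ArrayList<>();
      cfs.add("d");                                          // replicate only family "d"
      tableCfs.put(TableName.valueOf("web_logs"), cfs);      // hypothetical table

      repAdmin.addPeer("1", peerConfig, tableCfs);
      repAdmin.enableTableRep(TableName.valueOf("web_logs"));
    } finally {
      repAdmin.close();
    }
  }
}
```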
Modifier and Type | Method and Description |
---|---|
HTableInterface |
CoprocessorHost.Environment.getTable(TableName tableName)
Open a table from within the Coprocessor environment
|
HTableInterface |
CoprocessorHost.Environment.getTable(TableName tableName,
ExecutorService pool)
Open a table from within the Coprocessor environment
|
void |
MasterObserver.postAddColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor column)
Called after the new column family has been created.
|
void |
BaseMasterObserver.postAddColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor column) |
void |
BaseMasterAndRegionObserver.postAddColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor column) |
void |
MasterObserver.postAddColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor column)
Called after the new column family has been created.
|
void |
BaseMasterObserver.postAddColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor column) |
void |
BaseMasterAndRegionObserver.postAddColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor column) |
void |
MasterObserver.postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
byte[] c)
Called after the column family has been deleted.
|
void |
BaseMasterObserver.postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
byte[] c) |
void |
BaseMasterAndRegionObserver.postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
byte[] c) |
void |
MasterObserver.postDeleteColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
byte[] c)
Called after the column family has been deleted.
|
void |
BaseMasterObserver.postDeleteColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
byte[] c) |
void |
BaseMasterAndRegionObserver.postDeleteColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
byte[] c) |
void |
MasterObserver.postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called after the deleteTable operation has been requested.
|
void |
BaseMasterObserver.postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.postDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called after
HMaster deletes a
table. |
void |
BaseMasterObserver.postDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.postDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.postDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called after the disableTable operation has been requested.
|
void |
BaseMasterObserver.postDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.postDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.postDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called after the disableTable operation has been requested.
|
void |
BaseMasterObserver.postDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.postDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.postEnableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called after the enableTable operation has been requested.
|
void |
BaseMasterObserver.postEnableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.postEnableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.postEnableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called after the enableTable operation has been requested.
|
void |
BaseMasterObserver.postEnableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.postEnableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.postModifyColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor descriptor)
Called after the column family has been updated.
|
void |
BaseMasterObserver.postModifyColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor descriptor) |
void |
BaseMasterAndRegionObserver.postModifyColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor descriptor) |
void |
MasterObserver.postModifyColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor descriptor)
Called after the column family has been updated.
|
void |
BaseMasterObserver.postModifyColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor descriptor) |
void |
BaseMasterAndRegionObserver.postModifyColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor descriptor) |
void |
MasterObserver.postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd)
Called after the modifyTable operation has been requested.
|
void |
BaseMasterObserver.postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd) |
void |
BaseMasterAndRegionObserver.postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd) |
void |
MasterObserver.postModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd)
Called after modifying a table's properties.
|
void |
BaseMasterObserver.postModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd) |
void |
BaseMasterAndRegionObserver.postModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd) |
void |
MasterObserver.postSetTableQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas)
Called after the quota for the table is stored.
|
void |
BaseMasterObserver.postSetTableQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
BaseMasterAndRegionObserver.postSetTableQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
MasterObserver.postSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
String userName,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas)
Called after the quota for the user on the specified table is stored.
|
void |
BaseMasterObserver.postSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
String userName,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
BaseMasterAndRegionObserver.postSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
String userName,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
MasterObserver.postTableFlush(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called after the table memstore is flushed to disk.
|
void |
BaseMasterObserver.postTableFlush(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.postTableFlush(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called after the truncateTable operation has been requested.
|
void |
BaseMasterObserver.postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.postTruncateTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called after HMaster truncates a table.
|
void |
BaseMasterObserver.postTruncateTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.postTruncateTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.preAddColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor column)
Called prior to adding a new column family to the table.
|
void |
BaseMasterObserver.preAddColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor column) |
void |
BaseMasterAndRegionObserver.preAddColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor column) |
void |
MasterObserver.preAddColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor column)
Called prior to adding a new column family to the table.
|
void |
BaseMasterObserver.preAddColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor column) |
void |
BaseMasterAndRegionObserver.preAddColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor column) |
void |
MasterObserver.preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
byte[] c)
Called prior to deleting the entire column family.
|
void |
BaseMasterObserver.preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
byte[] c) |
void |
BaseMasterAndRegionObserver.preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
byte[] c) |
void |
MasterObserver.preDeleteColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
byte[] c)
Called prior to deleting the entire column family.
|
void |
BaseMasterObserver.preDeleteColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
byte[] c) |
void |
BaseMasterAndRegionObserver.preDeleteColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
byte[] c) |
void |
MasterObserver.preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called before HMaster deletes a table.
|
void |
BaseMasterObserver.preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.preDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called before HMaster deletes a table.
|
void |
BaseMasterObserver.preDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.preDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.preDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called prior to disabling a table.
|
void |
BaseMasterObserver.preDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.preDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.preDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called prior to disabling a table.
|
void |
BaseMasterObserver.preDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.preDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.preEnableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called prior to enabling a table.
|
void |
BaseMasterObserver.preEnableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.preEnableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.preEnableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called prior to enabling a table.
|
void |
BaseMasterObserver.preEnableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.preEnableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor descriptor)
Called prior to modifying a column family's attributes.
|
void |
BaseMasterObserver.preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor descriptor) |
void |
BaseMasterAndRegionObserver.preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor descriptor) |
void |
MasterObserver.preModifyColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor descriptor)
Called prior to modifying a column family's attributes.
|
void |
BaseMasterObserver.preModifyColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor descriptor) |
void |
BaseMasterAndRegionObserver.preModifyColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor descriptor) |
void |
MasterObserver.preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd)
Called prior to modifying a table's properties.
|
void |
BaseMasterObserver.preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd) |
void |
BaseMasterAndRegionObserver.preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd) |
void |
MasterObserver.preModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd)
Called prior to modifying a table's properties.
|
void |
BaseMasterObserver.preModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd) |
void |
BaseMasterAndRegionObserver.preModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd) |
void |
MasterObserver.preSetTableQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas)
Called before the quota for the table is stored.
|
void |
BaseMasterObserver.preSetTableQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
BaseMasterAndRegionObserver.preSetTableQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
MasterObserver.preSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
String userName,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas)
Called before the quota for the user on the specified table is stored.
|
void |
BaseMasterObserver.preSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
String userName,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
BaseMasterAndRegionObserver.preSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
String userName,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
MasterObserver.preTableFlush(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called before the table memstore is flushed to disk.
|
void |
BaseMasterObserver.preTableFlush(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.preTableFlush(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called before HMaster truncates a table.
|
void |
BaseMasterObserver.preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
MasterObserver.preTruncateTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName)
Called before HMaster truncates a table.
|
void |
BaseMasterObserver.preTruncateTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
BaseMasterAndRegionObserver.preTruncateTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
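The MasterObserver/BaseMasterObserver hooks listed above all receive the affected TableName, so a master coprocessor can act on, or veto, DDL operations for specific tables. Below is a minimal sketch of a pre-hook override; the class name and the protected table `ns:critical` are hypothetical placeholders, not part of the listed API.

```java
import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

/** Hypothetical master observer that vetoes deletion of one protected table. */
public class ProtectTableObserver extends BaseMasterObserver {
  private static final TableName PROTECTED = TableName.valueOf("ns", "critical");

  @Override
  public void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableName tableName) throws IOException {
    // Throwing from a pre-hook aborts the deleteTable request before HMaster acts on it.
    if (PROTECTED.equals(tableName)) {
      throw new IOException("Deletion of " + tableName + " is blocked by policy");
    }
  }
}
```

Such an observer would typically be registered through the `hbase.coprocessor.master.classes` property; BaseMasterObserver supplies no-op implementations for the other hooks listed above, so only the methods of interest need to be overridden.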
Modifier and Type | Method and Description |
---|---|
static TableName |
HFileLink.getReferencedTableName(String fileName)
Get the Table name of the referenced link
|
Modifier and Type | Method and Description |
---|---|
(package private) static Pair<TableName,String> |
HFileLink.parseBackReferenceName(String name) |
Modifier and Type | Method and Description |
---|---|
static HFileLink |
HFileLink.build(org.apache.hadoop.conf.Configuration conf,
TableName table,
String region,
String family,
String hfile)
Create an HFileLink instance from table/region/family/hfile location
|
static boolean |
HFileLink.create(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path dstFamilyPath,
TableName linkedTable,
String linkedRegion,
String hfileName)
Create a new HFileLink
|
static boolean |
HFileLink.create(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path dstFamilyPath,
TableName linkedTable,
String linkedRegion,
String hfileName,
boolean createBackRef)
Create a new HFileLink
|
static String |
HFileLink.createHFileLinkName(TableName tableName,
String regionName,
String hfileName)
Create a new HFileLink name
|
static org.apache.hadoop.fs.Path |
HFileLink.createPath(TableName table,
String region,
String family,
String hfile)
Create an HFileLink relative path for the table/region/family/hfile location
|
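As a rough illustration of the HFileLink helpers above, the sketch below builds a link-file name and the relative path of the referenced store file; the region, family, and hfile names are made-up placeholders.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.HFileLink;

public class HFileLinkNameSketch {
  public static void main(String[] args) {
    TableName table = TableName.valueOf("ns", "source_table");
    String region = "4711d4d2a459c4c2b1ca9d70ed7ce0fa"; // encoded region name (placeholder)
    String family = "cf";
    String hfile  = "9d5c2f0a1b3e4d6f8a7b6c5d4e3f2a1b"; // store file name (placeholder)

    // Name of the link file that points back at the source hfile.
    String linkName = HFileLink.createHFileLinkName(table, region, hfile);

    // Relative path of the referenced hfile under the source table's layout.
    Path linkedPath = HFileLink.createPath(table, region, family, hfile);

    System.out.println(linkName + " -> " + linkedPath);
  }
}
```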
Modifier and Type | Field and Description |
---|---|
private TableName |
RegionCoprocessorRpcChannel.table |
Modifier and Type | Method and Description |
---|---|
void |
PayloadCarryingRpcController.setPriority(TableName tn) |
void |
DelegatingPayloadCarryingRpcController.setPriority(TableName tn) |
Constructor and Description |
---|
RegionCoprocessorRpcChannel(ClusterConnection conn,
TableName table,
byte[] row) |
Modifier and Type | Field and Description |
---|---|
private TableName |
TableSplit.m_tableName |
Modifier and Type | Method and Description |
---|---|
TableName |
TableSplit.getTable() |
Modifier and Type | Method and Description |
---|---|
protected void |
TableInputFormatBase.initializeTable(Connection connection,
TableName tableName)
Allows subclasses to initialize the table information.
|
Constructor and Description |
---|
TableSplit(TableName tableName,
byte[] startRow,
byte[] endRow,
String location)
Constructor
|
Modifier and Type | Field and Description |
---|---|
private TableName |
TableSplit.tableName |
Modifier and Type | Field and Description |
---|---|
private Map<TableName,TableName> |
WALPlayer.WALMapper.tables |
Modifier and Type | Method and Description |
---|---|
TableName |
TableSplit.getTable()
Returns the table name.
|
Modifier and Type | Method and Description |
---|---|
static void |
TableInputFormat.configureSplitTable(org.apache.hadoop.mapreduce.Job job,
TableName tableName)
Sets split table in map-reduce job.
|
private static void |
ImportTsv.createTable(Admin admin,
TableName tableName,
String[] columns) |
private void |
LoadIncrementalHFiles.createTable(TableName tableName,
String dirPath) |
private boolean |
LoadIncrementalHFiles.doesTableExist(TableName tableName) |
protected void |
TableInputFormatBase.initializeTable(Connection connection,
TableName tableName)
Allows subclasses to initialize the table information.
|
static void |
TableMapReduceUtil.initTableMapperJob(TableName table,
Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass,
org.apache.hadoop.mapreduce.Job job)
Use this before submitting a TableMap job.
|
protected List<LoadIncrementalHFiles.LoadQueueItem> |
LoadIncrementalHFiles.tryAtomicRegionLoad(Connection conn,
TableName tableName,
byte[] first,
Collection<LoadIncrementalHFiles.LoadQueueItem> lqis)
Attempts to do an atomic load of many hfiles into a region.
|
Constructor and Description |
---|
TableSplit(TableName tableName,
byte[] startRow,
byte[] endRow,
String location)
Creates a new instance without a scanner.
|
TableSplit(TableName tableName,
byte[] startRow,
byte[] endRow,
String location,
long length)
Creates a new instance without a scanner.
|
TableSplit(TableName tableName,
Scan scan,
byte[] startRow,
byte[] endRow,
String location)
Creates a new instance while assigning all variables.
|
TableSplit(TableName tableName,
Scan scan,
byte[] startRow,
byte[] endRow,
String location,
long length)
Creates a new instance while assigning all variables.
|
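For the mapreduce entries above, TableMapReduceUtil.initTableMapperJob wires a Scan over the given TableName into a job before submission. A minimal sketch follows, assuming a hypothetical table `ns:events` and counting rows via a job counter (no reducer, no output files); the class and table names are illustrative only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class RowCountSketch {
  /** Hypothetical mapper: bumps a counter for every row the scan hands it. */
  static class CountMapper extends TableMapper<NullWritable, NullWritable> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context) {
      context.getCounter("sketch", "rows").increment(1);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "row-count-sketch");
    job.setJarByClass(RowCountSketch.class);

    Scan scan = new Scan();
    scan.setCaching(500);        // larger scanner caching suits batch scans
    scan.setCacheBlocks(false);  // MR scans should not churn the block cache

    // "Use this before submitting a TableMap job."
    TableMapReduceUtil.initTableMapperJob(
        TableName.valueOf("ns", "events"), scan, CountMapper.class,
        NullWritable.class, NullWritable.class, job);

    job.setNumReduceTasks(0);
    job.setOutputFormatClass(NullOutputFormat.class);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
```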
Modifier and Type | Field and Description |
---|---|
(package private) TableName |
TableLockManager.ZKTableLockManager.TableLockImpl.tableName |
private TableName |
AssignmentVerificationReport.tableName |
Modifier and Type | Field and Description |
---|---|
private Set<TableName> |
SnapshotOfRegionAssignmentFromMeta.disabledTables |
private Map<TableName,Map<String,RegionState>> |
RegionStates.regionStatesTableIndex
Holds mapping of table -> region state
|
private Map<TableName,List<HRegionInfo>> |
SnapshotOfRegionAssignmentFromMeta.tableToRegionMap
the table name to region map
|
private Set<TableName> |
RegionPlacementMaintainer.targetTableSet |
Modifier and Type | Method and Description |
---|---|
protected Map<TableName,Map<ServerName,List<HRegionInfo>>> |
RegionStates.getAssignmentsByTable()
This is an EXPENSIVE clone.
|
Map<TableName,Integer> |
RegionPlacementMaintainer.getRegionsMovement(FavoredNodesPlan newPlan)
Return how many regions will move per table since their primary RS will
change
|
Set<TableName> |
SnapshotOfRegionAssignmentFromMeta.getTableSet()
Get the table set
|
Map<TableName,List<HRegionInfo>> |
SnapshotOfRegionAssignmentFromMeta.getTableToRegionMap()
Get regions for tables
|
List<TableName> |
HMaster.listTableNames(String namespace,
String regex,
boolean includeSysTables)
Returns the list of table names that match the specified request
|
List<TableName> |
HMaster.listTableNamesByNamespace(String name) |
List<TableName> |
MasterServices.listTableNamesByNamespace(String name)
Get list of table names by namespace
|
Modifier and Type | Method and Description |
---|---|
HTableDescriptor |
MasterFileSystem.addColumn(TableName tableName,
HColumnDescriptor hcd)
Add column to a table
|
void |
HMaster.addColumn(TableName tableName,
HColumnDescriptor columnDescriptor,
long nonceGroup,
long nonce) |
void |
MasterServices.addColumn(TableName tableName,
HColumnDescriptor column,
long nonceGroup,
long nonce)
Add a new column to an existing table
|
void |
HMaster.checkTableModifiable(TableName tableName) |
void |
MasterServices.checkTableModifiable(TableName tableName)
Check table is modifiable; i.e.
|
HTableDescriptor |
MasterFileSystem.deleteColumn(TableName tableName,
byte[] familyName)
Delete column of a table
|
void |
HMaster.deleteColumn(TableName tableName,
byte[] columnName,
long nonceGroup,
long nonce) |
void |
MasterServices.deleteColumn(TableName tableName,
byte[] columnName,
long nonceGroup,
long nonce)
Delete a column from an existing table
|
void |
MasterFileSystem.deleteTable(TableName tableName) |
long |
HMaster.deleteTable(TableName tableName,
long nonceGroup,
long nonce) |
long |
MasterServices.deleteTable(TableName tableName,
long nonceGroup,
long nonce)
Delete a table
|
long |
HMaster.disableTable(TableName tableName,
long nonceGroup,
long nonce) |
long |
MasterServices.disableTable(TableName tableName,
long nonceGroup,
long nonce)
Disable an existing table
|
private void |
HMaster.enableMeta(TableName metaTableName) |
long |
HMaster.enableTable(TableName tableName,
long nonceGroup,
long nonce) |
long |
MasterServices.enableTable(TableName tableName,
long nonceGroup,
long nonce)
Enable an existing table
|
void |
AssignmentVerificationReport.fillUp(TableName tableName,
SnapshotOfRegionAssignmentFromMeta snapshot,
Map<String,Map<String,Float>> regionLocalityMap) |
void |
AssignmentVerificationReport.fillUpDispersion(TableName tableName,
SnapshotOfRegionAssignmentFromMeta snapshot,
FavoredNodesPlan newPlan)
Use this to project the dispersion scores
|
private void |
RegionPlacementMaintainer.genAssignmentPlan(TableName tableName,
SnapshotOfRegionAssignmentFromMeta assignmentSnapshot,
Map<String,Map<String,Float>> regionLocalityMap,
FavoredNodesPlan plan,
boolean munkresForSecondaryAndTertiary)
Generate the assignment plan for the existing table
|
long |
HMaster.getLastMajorCompactionTimestamp(TableName table) |
long |
MasterServices.getLastMajorCompactionTimestamp(TableName table) |
(package private) Triple<Integer,Map<HRegionInfo,Result>,Map<HRegionInfo,Result>> |
CatalogJanitor.getMergedRegionsAndSplitParents(TableName tableName)
Scans hbase:meta and returns the number of scanned rows, a map of merged regions,
and an ordered map of split parents.
|
Map<RegionState.State,List<HRegionInfo>> |
RegionStates.getRegionByStateOfTable(TableName tableName)
Gets current state of all regions of the table.
|
List<HRegionInfo> |
RegionStates.getRegionsOfTable(TableName tableName)
Gets the online regions of the specified table.
|
Pair<Integer,Integer> |
AssignmentManager.getReopenStatus(TableName tableName)
Used by the client to identify if all regions have the schema updates
|
private HTableDescriptor |
CatalogJanitor.getTableDescriptor(TableName tableName) |
(package private) Pair<HRegionInfo,ServerName> |
HMaster.getTableRegionForRow(TableName tableName,
byte[] rowKey)
Return the region and current deployment for the region containing
the given row.
|
private static boolean |
HMaster.isCatalogTable(TableName tableName) |
HTableDescriptor |
MasterFileSystem.modifyColumn(TableName tableName,
HColumnDescriptor hcd)
Modify column of a table
|
void |
HMaster.modifyColumn(TableName tableName,
HColumnDescriptor descriptor,
long nonceGroup,
long nonce) |
void |
MasterServices.modifyColumn(TableName tableName,
HColumnDescriptor descriptor,
long nonceGroup,
long nonce)
Modify the column descriptor of an existing column in an existing table
|
void |
HMaster.modifyTable(TableName tableName,
HTableDescriptor descriptor,
long nonceGroup,
long nonce) |
void |
MasterServices.modifyTable(TableName tableName,
HTableDescriptor descriptor,
long nonceGroup,
long nonce)
Modify the descriptor of an existing table
|
org.apache.hadoop.fs.Path |
MasterFileSystem.moveTableToTemp(TableName tableName)
Move the specified table to the hbase temp directory
|
void |
MasterCoprocessorHost.postAddColumn(TableName tableName,
HColumnDescriptor column) |
void |
MasterCoprocessorHost.postAddColumnHandler(TableName tableName,
HColumnDescriptor column) |
void |
MasterCoprocessorHost.postDeleteColumn(TableName tableName,
byte[] c) |
void |
MasterCoprocessorHost.postDeleteColumnHandler(TableName tableName,
byte[] c) |
void |
MasterCoprocessorHost.postDeleteTable(TableName tableName) |
void |
MasterCoprocessorHost.postDeleteTableHandler(TableName tableName) |
void |
MasterCoprocessorHost.postDisableTable(TableName tableName) |
void |
MasterCoprocessorHost.postDisableTableHandler(TableName tableName) |
void |
MasterCoprocessorHost.postEnableTable(TableName tableName) |
void |
MasterCoprocessorHost.postEnableTableHandler(TableName tableName) |
void |
MasterCoprocessorHost.postModifyColumn(TableName tableName,
HColumnDescriptor descriptor) |
void |
MasterCoprocessorHost.postModifyColumnHandler(TableName tableName,
HColumnDescriptor descriptor) |
void |
MasterCoprocessorHost.postModifyTable(TableName tableName,
HTableDescriptor htd) |
void |
MasterCoprocessorHost.postModifyTableHandler(TableName tableName,
HTableDescriptor htd) |
void |
MasterCoprocessorHost.postSetTableQuota(TableName table,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
MasterCoprocessorHost.postSetUserQuota(String user,
TableName table,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
MasterCoprocessorHost.postTableFlush(TableName tableName) |
void |
MasterCoprocessorHost.postTruncateTable(TableName tableName) |
void |
MasterCoprocessorHost.postTruncateTableHandler(TableName tableName) |
boolean |
MasterCoprocessorHost.preAddColumn(TableName tableName,
HColumnDescriptor column) |
boolean |
MasterCoprocessorHost.preAddColumnHandler(TableName tableName,
HColumnDescriptor column) |
boolean |
MasterCoprocessorHost.preDeleteColumn(TableName tableName,
byte[] c) |
boolean |
MasterCoprocessorHost.preDeleteColumnHandler(TableName tableName,
byte[] c) |
void |
MasterCoprocessorHost.preDeleteTable(TableName tableName) |
void |
MasterCoprocessorHost.preDeleteTableHandler(TableName tableName) |
void |
MasterCoprocessorHost.preDisableTable(TableName tableName) |
void |
MasterCoprocessorHost.preDisableTableHandler(TableName tableName) |
void |
MasterCoprocessorHost.preEnableTable(TableName tableName) |
void |
MasterCoprocessorHost.preEnableTableHandler(TableName tableName) |
boolean |
MasterCoprocessorHost.preModifyColumn(TableName tableName,
HColumnDescriptor descriptor) |
boolean |
MasterCoprocessorHost.preModifyColumnHandler(TableName tableName,
HColumnDescriptor descriptor) |
void |
MasterCoprocessorHost.preModifyTable(TableName tableName,
HTableDescriptor htd) |
void |
MasterCoprocessorHost.preModifyTableHandler(TableName tableName,
HTableDescriptor htd) |
void |
MasterCoprocessorHost.preSetTableQuota(TableName table,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
MasterCoprocessorHost.preSetUserQuota(String user,
TableName table,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
MasterCoprocessorHost.preTableFlush(TableName tableName) |
void |
MasterCoprocessorHost.preTruncateTable(TableName tableName) |
void |
MasterCoprocessorHost.preTruncateTableHandler(TableName tableName) |
void |
RegionPlacementMaintainer.printDispersionScores(TableName table,
SnapshotOfRegionAssignmentFromMeta snapshot,
int numRegions,
FavoredNodesPlan newPlan,
boolean simplePrint) |
abstract TableLockManager.TableLock |
TableLockManager.readLock(TableName tableName,
String purpose)
Returns a TableLock for locking the table for shared access among read-lock holders
|
TableLockManager.TableLock |
TableLockManager.NullTableLockManager.readLock(TableName tableName,
String purpose) |
TableLockManager.TableLock |
TableLockManager.ZKTableLockManager.readLock(TableName tableName,
String purpose) |
protected void |
AssignmentManager.setEnabledTable(TableName tableName) |
void |
RegionStates.tableDeleted(TableName tableName)
A table is deleted.
|
abstract void |
TableLockManager.tableDeleted(TableName tableName)
Called after a table has been deleted, and after the table lock is released.
|
void |
TableLockManager.NullTableLockManager.tableDeleted(TableName tableName) |
void |
TableLockManager.ZKTableLockManager.tableDeleted(TableName tableName) |
void |
HMaster.truncateTable(TableName tableName,
boolean preserveSplits,
long nonceGroup,
long nonce) |
void |
MasterServices.truncateTable(TableName tableName,
boolean preserveSplits,
long nonceGroup,
long nonce)
Truncate a table
|
abstract TableLockManager.TableLock |
TableLockManager.writeLock(TableName tableName,
String purpose)
Returns a TableLock for locking the table for exclusive access
|
TableLockManager.TableLock |
TableLockManager.NullTableLockManager.writeLock(TableName tableName,
String purpose) |
TableLockManager.TableLock |
TableLockManager.ZKTableLockManager.writeLock(TableName tableName,
String purpose) |
Modifier and Type | Method and Description |
---|---|
void |
RegionPlacementMaintainer.checkDifferencesWithOldPlan(Map<TableName,Integer> movesPerTable,
Map<String,Map<String,Float>> regionLocalityMap,
FavoredNodesPlan newPlan)
Compares two plans and checks whether the locality dropped or increased
(prints the information as a string); also prints the baseline locality
|
(package private) Map<HRegionInfo,ServerName> |
RegionStates.closeAllUserRegions(Set<TableName> excludedTables)
At cluster clean re/start, mark all user regions closed except those of tables
that are excluded, such as disabled/disabling/enabling tables.
|
List<HTableDescriptor> |
HMaster.listTableDescriptors(String namespace,
String regex,
List<TableName> tableNameList,
boolean includeSysTables)
Returns the list of table descriptors that match the specified request
|
void |
MasterCoprocessorHost.postGetTableDescriptors(List<TableName> tableNamesList,
List<HTableDescriptor> descriptors,
String regex) |
boolean |
MasterCoprocessorHost.preGetTableDescriptors(List<TableName> tableNamesList,
List<HTableDescriptor> descriptors)
Deprecated.
|
boolean |
MasterCoprocessorHost.preGetTableDescriptors(List<TableName> tableNamesList,
List<HTableDescriptor> descriptors,
String regex) |
Constructor and Description |
---|
TableLockManager.ZKTableLockManager.TableLockImpl(TableName tableName,
ZooKeeperWatcher zkWatcher,
ServerName serverName,
long lockTimeoutMs,
boolean isShared,
String purpose) |
Constructor and Description |
---|
SnapshotOfRegionAssignmentFromMeta(Connection connection,
Set<TableName> disabledTables,
boolean excludeOfflinedSplitParents) |
Modifier and Type | Method and Description |
---|---|
protected HTableDescriptor |
RegionLocationFinder.getTableDescriptor(TableName tableName)
Return the HTableDescriptor for a given tableName
|
Modifier and Type | Field and Description |
---|---|
private TableName |
EnableTableHandler.tableName |
private TableName |
DisableTableHandler.tableName |
protected TableName |
TableEventHandler.tableName |
Modifier and Type | Method and Description |
---|---|
(package private) static void |
CreateTableHandler.checkAndSetEnablingTable(AssignmentManager assignmentManager,
TableName tableName,
boolean skipTableStateCheck) |
protected List<HRegionInfo> |
CreateTableHandler.handleCreateHdfsRegions(org.apache.hadoop.fs.Path tableRootDir,
TableName tableName)
Create the on-disk structure for the table and return the region info.
|
private void |
CreateTableHandler.handleCreateTable(TableName tableName)
Responsible for table creation (on-disk and META) and assignment.
|
(package private) static void |
CreateTableHandler.removeEnablingTable(AssignmentManager assignmentManager,
TableName tableName) |
Constructor and Description |
---|
DisableTableHandler(Server server,
TableName tableName,
AssignmentManager assignmentManager,
TableLockManager tableLockManager,
boolean skipTableStateCheck) |
EnableTableHandler(MasterServices services,
TableName tableName,
AssignmentManager assignmentManager,
TableLockManager tableLockManager,
boolean skipTableStateCheck) |
EnableTableHandler(Server server,
TableName tableName,
AssignmentManager assignmentManager,
TableLockManager tableLockManager,
boolean skipTableStateCheck) |
TableEventHandler(EventType eventType,
TableName tableName,
Server server,
MasterServices masterServices) |
Modifier and Type | Method and Description |
---|---|
List<NormalizationPlan> |
SimpleRegionNormalizer.computePlanForTable(TableName table)
Computes the next most "urgent" normalization action on the table.
|
List<NormalizationPlan> |
RegionNormalizer.computePlanForTable(TableName table)
Computes the next optimal normalization plan.
|
Modifier and Type | Field and Description |
---|---|
private TableName |
TruncateTableProcedure.tableName |
private TableName |
DeleteTableProcedure.tableName |
private TableName |
DisableTableProcedure.tableName |
private TableName |
DisableTableProcedure.BulkDisabler.tableName |
private TableName |
DeleteColumnFamilyProcedure.tableName |
private TableName |
EnableTableProcedure.tableName |
private TableName |
AddColumnFamilyProcedure.tableName |
private TableName |
ModifyColumnFamilyProcedure.tableName |
Modifier and Type | Field and Description |
---|---|
private MasterProcedureScheduler.Queue<TableName> |
MasterProcedureScheduler.tableMap |
private MasterProcedureScheduler.FairQueue<TableName> |
MasterProcedureScheduler.tableRunQueue |
private MasterProcedureScheduler.Queue<TableName> |
MasterProcedureScheduler.ProcedureEvent.waitingTables |
Modifier and Type | Method and Description |
---|---|
TableName |
TruncateTableProcedure.getTableName() |
TableName |
ModifyTableProcedure.getTableName() |
TableName |
DeleteTableProcedure.getTableName() |
TableName |
DisableTableProcedure.getTableName() |
TableName |
TableProcedureInterface.getTableName() |
TableName |
CreateTableProcedure.getTableName() |
TableName |
DeleteColumnFamilyProcedure.getTableName() |
TableName |
EnableTableProcedure.getTableName() |
TableName |
AddColumnFamilyProcedure.getTableName() |
TableName |
ModifyColumnFamilyProcedure.getTableName() |
private static TableName |
MasterProcedureScheduler.getTableName(Procedure proc) |
Modifier and Type | Method and Description |
---|---|
private MasterProcedureScheduler.Queue<TableName> |
MasterProcedureScheduler.ProcedureEvent.popWaitingTable() |
Modifier and Type | Method and Description |
---|---|
protected static void |
CreateTableProcedure.assignRegions(MasterProcedureEnv env,
TableName tableName,
List<HRegionInfo> regions) |
static void |
MasterDDLOperationHelper.checkTableModifiable(MasterProcedureEnv env,
TableName tableName)
Check whether a table is modifiable - exists and either offline or online with config set
|
private static void |
DeleteTableProcedure.cleanAnyRemainingRows(MasterProcedureEnv env,
TableName tableName)
There may be items for this table still up in hbase:meta in the case where the
info:regioninfo column was empty because of some write error.
|
List<HRegionInfo> |
CreateTableProcedure.CreateHdfsRegions.createHdfsRegions(MasterProcedureEnv env,
org.apache.hadoop.fs.Path tableRootDir,
TableName tableName,
List<HRegionInfo> newRegions) |
protected static void |
DeleteTableProcedure.deleteAssignmentState(MasterProcedureEnv env,
TableName tableName) |
static void |
MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(MasterProcedureEnv env,
TableName tableName,
List<HRegionInfo> regionInfoList,
byte[] familyName)
Remove the column family from the file system
|
protected static void |
DeleteTableProcedure.deleteFromFs(MasterProcedureEnv env,
TableName tableName,
List<HRegionInfo> regions,
boolean archive) |
protected static void |
DeleteTableProcedure.deleteFromMeta(MasterProcedureEnv env,
TableName tableName,
List<HRegionInfo> regions) |
protected static void |
DeleteTableProcedure.deleteTableDescriptorCache(MasterProcedureEnv env,
TableName tableName) |
protected static void |
DeleteTableProcedure.deleteTableStates(MasterProcedureEnv env,
TableName tableName) |
protected static List<HRegionInfo> |
ProcedureSyncWait.getRegionsFromMeta(MasterProcedureEnv env,
TableName tableName) |
private int |
MasterProcedureScheduler.getTablePriority(TableName tableName) |
private MasterProcedureScheduler.TableQueue |
MasterProcedureScheduler.getTableQueue(TableName tableName) |
private MasterProcedureScheduler.TableQueue |
MasterProcedureScheduler.getTableQueueWithLock(TableName tableName) |
private static DisableTableProcedure.MarkRegionOfflineOpResult |
DisableTableProcedure.markRegionsOffline(MasterProcedureEnv env,
TableName tableName)
Mark regions of the table offline
|
protected static DisableTableProcedure.MarkRegionOfflineOpResult |
DisableTableProcedure.markRegionsOffline(MasterProcedureEnv env,
TableName tableName,
Boolean retryRequired)
Mark regions of the table offline with retries
|
private static boolean |
EnableTableProcedure.markRegionsOnline(MasterProcedureEnv env,
TableName tableName)
Mark offline regions of the table online
|
protected static void |
EnableTableProcedure.markRegionsOnline(MasterProcedureEnv env,
TableName tableName,
Boolean retryRequired)
Mark offline regions of the table online with retry
|
protected boolean |
MasterProcedureScheduler.markTableAsDeleted(TableName table)
Tries to remove the queue and the table-lock of the specified table.
|
void |
MasterProcedureScheduler.releaseTableExclusiveLock(Procedure procedure,
TableName table)
Release the exclusive lock taken with tryAcquireTableExclusiveLock()
|
void |
MasterProcedureScheduler.releaseTableSharedLock(Procedure procedure,
TableName table)
Release the shared lock taken with tryAcquireTableSharedLock()
|
private void |
MasterProcedureScheduler.removeTableQueue(TableName tableName) |
static boolean |
MasterDDLOperationHelper.reOpenAllRegions(MasterProcedureEnv env,
TableName tableName,
List<HRegionInfo> regionInfoList)
Reopen all regions from a table after a schema change operation.
|
protected static void |
DisableTableProcedure.setTableStateToDisabled(MasterProcedureEnv env,
TableName tableName)
Mark the table state as Disabled
|
protected static void |
DisableTableProcedure.setTableStateToDisabling(MasterProcedureEnv env,
TableName tableName)
Mark the table state as Disabling
|
protected static void |
EnableTableProcedure.setTableStateToEnabled(MasterProcedureEnv env,
TableName tableName)
Mark the table state as Enabled
|
protected static void |
EnableTableProcedure.setTableStateToEnabling(MasterProcedureEnv env,
TableName tableName)
Mark the table state as Enabling
|
private void |
MasterProcedureScheduler.suspendTableQueue(MasterProcedureScheduler.ProcedureEvent event,
TableName tableName) |
boolean |
MasterProcedureScheduler.tryAcquireTableExclusiveLock(Procedure procedure,
TableName table)
Try to acquire the exclusive lock on the specified table.
|
private MasterProcedureScheduler.TableQueue |
MasterProcedureScheduler.tryAcquireTableQueueSharedLock(Procedure procedure,
TableName table) |
boolean |
MasterProcedureScheduler.tryAcquireTableSharedLock(Procedure procedure,
TableName table)
Try to acquire the shared lock on the specified table.
|
protected static void |
CreateTableProcedure.updateTableDescCache(MasterProcedureEnv env,
TableName tableName) |
Modifier and Type | Method and Description |
---|---|
private void |
MasterProcedureScheduler.ProcedureEvent.suspendTableQueue(MasterProcedureScheduler.Queue<TableName> queue) |
Modifier and Type | Field and Description |
---|---|
protected TableName |
TakeSnapshotHandler.snapshotTable |
private TableName |
MasterSnapshotVerifier.tableName |
Modifier and Type | Field and Description |
---|---|
private Map<TableName,SnapshotSentinel> |
SnapshotManager.restoreHandlers |
private Map<TableName,SnapshotSentinel> |
SnapshotManager.snapshotHandlers |
Modifier and Type | Method and Description |
---|---|
private void |
SnapshotManager.checkAndUpdateNamespaceQuota(SnapshotManifest manifest,
TableName tableName) |
private void |
SnapshotManager.checkAndUpdateNamespaceRegionQuota(int updatedRegionCount,
TableName tableName) |
private int |
SnapshotManager.getRegionCountOfTable(TableName tableName) |
protected List<HRegionInfo> |
CloneSnapshotHandler.handleCreateHdfsRegions(org.apache.hadoop.fs.Path tableRootDir,
TableName tableName)
Create the on-disk regions, using the tableRootDir provided by the CreateTableHandler.
|
private boolean |
SnapshotManager.isRestoringTable(TableName tableName)
Verify if the restore of the specified table is in progress.
|
(package private) boolean |
SnapshotManager.isTakingSnapshot(TableName tableName)
Check to see if the specified table has a snapshot in progress.
|
void |
SnapshotManager.setSnapshotHandlerForTesting(TableName tableName,
SnapshotSentinel handler)
Set the handler for the current snapshot
|
Modifier and Type | Method and Description |
---|---|
private void |
SnapshotManager.cleanupSentinels(Map<TableName,SnapshotSentinel> sentinels)
Remove the sentinels that are marked as finished and whose completion time
has exceeded the removal timeout.
|
private SnapshotSentinel |
SnapshotManager.removeSentinelIfFinished(Map<TableName,SnapshotSentinel> sentinels,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshot)
Return the handler if it is currently live and has the same snapshot target name.
|
Modifier and Type | Method and Description |
---|---|
private void |
NamespaceUpgrade.removeTableInfoInPre96Format(TableName tableName)
Removes .tableinfo files that are laid out in the pre-96 format (i.e., the tableinfo files are under
the table directory).
|
Modifier and Type | Field and Description |
---|---|
private Map<TableName,AtomicInteger> |
NamespaceTableAndRegionInfo.tableAndRegionInfo |
Modifier and Type | Method and Description |
---|---|
(package private) Set<TableName> |
NamespaceTableAndRegionInfo.getTables()
Gets the set of table names belonging to namespace.
|
Modifier and Type | Method and Description |
---|---|
(package private) void |
NamespaceTableAndRegionInfo.addTable(TableName tableName,
int regionCount) |
private void |
NamespaceStateManager.addTable(TableName tableName,
int regionCount) |
(package private) boolean |
NamespaceStateManager.checkAndUpdateNamespaceRegionCount(TableName name,
byte[] regionName,
int incr)
Check if adding a region violates the namespace quota; if not, update the namespace cache.
|
(package private) void |
NamespaceStateManager.checkAndUpdateNamespaceRegionCount(TableName name,
int incr)
Check and update region count for an existing table.
|
(package private) void |
NamespaceStateManager.checkAndUpdateNamespaceTableCount(TableName table,
int numRegions) |
void |
NamespaceAuditor.checkQuotaToCreateTable(TableName tName,
int regions)
Check quota to create table.
|
void |
NamespaceAuditor.checkQuotaToUpdateRegion(TableName tName,
int regions)
Check and update region count quota for an existing table.
|
private void |
NamespaceAuditor.checkTableTypeAndThrowException(TableName name) |
(package private) boolean |
NamespaceTableAndRegionInfo.containsTable(TableName tableName) |
(package private) int |
NamespaceTableAndRegionInfo.decrementRegionCountForTable(TableName tableName,
int count) |
(package private) int |
NamespaceTableAndRegionInfo.getRegionCountOfTable(TableName tableName) |
int |
NamespaceAuditor.getRegionCountOfTable(TableName tName)
Get region count for table
|
(package private) int |
NamespaceTableAndRegionInfo.incRegionCountForTable(TableName tableName,
int count) |
void |
NamespaceAuditor.removeFromNamespaceUsage(TableName tableName) |
(package private) void |
NamespaceTableAndRegionInfo.removeTable(TableName tableName) |
(package private) void |
NamespaceStateManager.removeTable(TableName tableName) |
Modifier and Type | Field and Description |
---|---|
private Map<TableName,Procedure> |
MasterFlushTableProcedureManager.procMap |
Modifier and Type | Field and Description |
---|---|
static TableName |
QuotaTableUtil.QUOTA_TABLE_NAME
System table for quotas
|
private TableName |
QuotaSettings.tableName |
Modifier and Type | Field and Description |
---|---|
private Map<TableName,QuotaLimiter> |
UserQuotaState.tableLimiters |
private MasterQuotaManager.NamedLock<TableName> |
MasterQuotaManager.tableLocks |
private ConcurrentHashMap<TableName,QuotaState> |
QuotaCache.tableQuotaCache |
Modifier and Type | Method and Description |
---|---|
protected static TableName |
QuotaTableUtil.getTableFromRowKey(byte[] key) |
TableName |
QuotaSettings.getTableName() |
Modifier and Type | Method and Description |
---|---|
static Map<TableName,QuotaState> |
QuotaUtil.fetchTableQuotas(Connection connection,
List<Get> gets) |
(package private) Map<TableName,QuotaState> |
QuotaCache.getTableQuotaCache() |
Modifier and Type | Method and Description |
---|---|
static void |
QuotaUtil.addTableQuota(Connection connection,
TableName table,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas data) |
static void |
QuotaUtil.addUserQuota(Connection connection,
String user,
TableName table,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas data) |
void |
MasterQuotaManager.checkAndUpdateNamespaceRegionQuota(TableName tName,
int regions) |
void |
MasterQuotaManager.checkNamespaceTableAndRegionQuota(TableName tName,
int regions) |
static void |
QuotaUtil.deleteTableQuota(Connection connection,
TableName table) |
static void |
QuotaUtil.deleteUserQuota(Connection connection,
String user,
TableName table) |
private static List<QuotaSettings> |
QuotaSettingsFactory.fromQuotas(String userName,
TableName tableName,
String namespace,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
(package private) static List<QuotaSettings> |
QuotaSettingsFactory.fromTableQuotas(TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
private static List<QuotaSettings> |
QuotaSettingsFactory.fromThrottle(String userName,
TableName tableName,
String namespace,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle throttle) |
(package private) static ThrottleSettings |
ThrottleSettings.fromTimedQuota(String userName,
TableName tableName,
String namespace,
ThrottleType type,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota timedQuota) |
(package private) static List<QuotaSettings> |
QuotaSettingsFactory.fromUserQuotas(String userName,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
OperationQuota |
RegionServerQuotaManager.getQuota(org.apache.hadoop.security.UserGroupInformation ugi,
TableName table)
Returns the quota for an operation.
|
int |
MasterQuotaManager.getRegionCountOfTable(TableName tName) |
protected static byte[] |
QuotaTableUtil.getSettingsQualifierForUserTable(TableName tableName) |
QuotaLimiter |
UserQuotaState.getTableLimiter(TableName table)
Return the limiter for the specified table associated with this quota.
|
QuotaLimiter |
QuotaCache.getTableLimiter(TableName table)
Returns the limiter associated with the specified table.
|
static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas |
QuotaTableUtil.getTableQuota(Connection connection,
TableName table) |
protected static byte[] |
QuotaTableUtil.getTableRowKey(TableName table) |
QuotaLimiter |
QuotaCache.getUserLimiter(org.apache.hadoop.security.UserGroupInformation ugi,
TableName table)
Returns the limiter associated with the specified user/table.
|
static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas |
QuotaTableUtil.getUserQuota(Connection connection,
String user,
TableName table) |
static Get |
QuotaTableUtil.makeGetForTableQuotas(TableName table) |
protected static void |
QuotaTableUtil.parseTableResult(TableName table,
Result result,
QuotaTableUtil.TableQuotasVisitor visitor) |
void |
MasterQuotaManager.removeTableFromNamespaceQuota(TableName tName)
Remove table from namespace quota.
|
void |
UserQuotaState.setQuotas(TableName table,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas)
Add the quota information of the specified table.
|
void |
MasterQuotaManager.setTableQuota(TableName table,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest req) |
void |
MasterQuotaManager.setUserQuota(String userName,
TableName table,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest req) |
private static QuotaSettings |
QuotaSettingsFactory.throttle(String userName,
TableName tableName,
String namespace,
ThrottleType type,
long limit,
TimeUnit timeUnit) |
static QuotaSettings |
QuotaSettingsFactory.throttleTable(TableName tableName,
ThrottleType type,
long limit,
TimeUnit timeUnit)
Throttle the specified table.
|
static QuotaSettings |
QuotaSettingsFactory.throttleUser(String userName,
TableName tableName,
ThrottleType type,
long limit,
TimeUnit timeUnit)
Throttle the specified user on the specified table.
|
static QuotaSettings |
QuotaSettingsFactory.unthrottleTable(TableName tableName)
Remove the throttling for the specified table.
|
static QuotaSettings |
QuotaSettingsFactory.unthrottleUser(String userName,
TableName tableName)
Remove the throttling for the specified user on the specified table.
|
void |
QuotaTableUtil.TableQuotasVisitor.visitTableQuotas(TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
QuotaTableUtil.UserQuotasVisitor.visitUserQuotas(String userName,
TableName table,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
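The QuotaSettingsFactory entries above are the client-facing way to build the per-table and per-user quota settings that MasterQuotaManager stores. Below is a minimal sketch, assuming a cluster with quotas enabled (typically `hbase.quota.enabled=true`), a hypothetical table `ns:orders`, and an arbitrary 1000 requests/second throttle.

```java
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class TableThrottleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("ns", "orders");

      // Throttle the table to 1000 requests per second.
      QuotaSettings throttle = QuotaSettingsFactory.throttleTable(
          table, ThrottleType.REQUEST_NUMBER, 1000, TimeUnit.SECONDS);
      admin.setQuota(throttle);

      // Later, the same factory can drop the throttle again.
      admin.setQuota(QuotaSettingsFactory.unthrottleTable(table));
    }
  }
}
```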
Modifier and Type | Method and Description |
---|---|
static Get |
QuotaTableUtil.makeGetForUserQuotas(String user,
Iterable<TableName> tables,
Iterable<String> namespaces) |
Constructor and Description |
---|
QuotaSettings(String userName,
TableName tableName,
String namespace) |
QuotaSettingsFactory.QuotaGlobalsSettingsBypass(String userName,
TableName tableName,
String namespace,
boolean bypassGlobals) |
ThrottleSettings(String userName,
TableName tableName,
String namespace,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest proto) |
Modifier and Type | Method and Description |
---|---|
TableName |
Store.getTableName() |
TableName |
HStore.getTableName() |
Modifier and Type | Method and Description |
---|---|
Set<TableName> |
RegionServerServices.getOnlineTables() |
Set<TableName> |
HRegionServer.getOnlineTables()
Gets the online tables in this RS.
|
Modifier and Type | Method and Description |
---|---|
List<Region> |
OnlineRegions.getOnlineRegions(TableName tableName)
Get all online regions of a table in this RS.
|
List<Region> |
HRegionServer.getOnlineRegions(TableName tableName)
Gets the online regions of the specified table.
|
Modifier and Type | Field and Description |
---|---|
private TableName |
WALEditsReplaySink.tableName |
Constructor and Description |
---|
HLogKey(byte[] encodedRegionName,
TableName tablename)
Deprecated.
|
HLogKey(byte[] encodedRegionName,
TableName tablename,
long now)
Deprecated.
|
HLogKey(byte[] encodedRegionName,
TableName tablename,
long now,
List<UUID> clusterIds,
long nonceGroup,
long nonce,
MultiVersionConcurrencyControl mvcc)
Deprecated.
Create the log key for writing to somewhere.
|
HLogKey(byte[] encodedRegionName,
TableName tablename,
long logSeqNum,
long now,
List<UUID> clusterIds,
long nonceGroup,
long nonce,
MultiVersionConcurrencyControl mvcc)
Deprecated.
Create the log key for writing to somewhere.
|
HLogKey(byte[] encodedRegionName,
TableName tablename,
long logSeqNum,
long nonceGroup,
long nonce,
MultiVersionConcurrencyControl mvcc)
Deprecated.
Create the log key for writing to somewhere.
|
HLogKey(byte[] encodedRegionName,
TableName tablename,
long logSeqNum,
long now,
UUID clusterId)
Deprecated.
|
HLogKey(byte[] encodedRegionName,
TableName tablename,
long now,
MultiVersionConcurrencyControl mvcc)
Deprecated.
|
ReplayHLogKey(byte[] encodedRegionName,
TableName tablename,
long now,
List<UUID> clusterIds,
long nonceGroup,
long nonce,
MultiVersionConcurrencyControl mvcc) |
ReplayHLogKey(byte[] encodedRegionName,
TableName tablename,
long logSeqNum,
long now,
List<UUID> clusterIds,
long nonceGroup,
long nonce,
MultiVersionConcurrencyControl mvcc) |
WALEditsReplaySink.ReplayServerCallable(HConnection connection,
TableName tableName,
HRegionLocation regionLoc,
HRegionInfo regionInfo,
List<WAL.Entry> entries) |
WALEditsReplaySink(org.apache.hadoop.conf.Configuration conf,
TableName tableName,
HConnection conn)
Create a sink for replaying WAL log entries
|
Modifier and Type | Field and Description |
---|---|
private Map<TableName,List<String>> |
ReplicationPeerZKImpl.tableCFs |
Modifier and Type | Method and Description |
---|---|
Map<TableName,List<String>> |
ReplicationPeerZKImpl.getTableCFs()
Get replicable (table, cf-list) map of this peer
|
Map<TableName,List<String>> |
ReplicationPeer.getTableCFs()
Get replicable (table, cf-list) map of this peer
|
Map<TableName,List<String>> |
ReplicationPeers.getTableCFs(String peerId)
Get the table and column-family-list map of the peer.
|
Map<TableName,List<String>> |
ReplicationPeersZKImpl.getTableCFs(String id) |
Constructor and Description |
---|
ReplicationPeerZKImpl(org.apache.hadoop.conf.Configuration conf,
String id,
ReplicationPeerConfig peerConfig,
Map<TableName,List<String>> tableCFs)
Constructor that takes all the objects required to communicate with the specified peer, except
for the region server addresses.
|
Modifier and Type | Field and Description |
---|---|
(package private) com.google.common.cache.Cache<TableName,Boolean> |
RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.disabledAndDroppedTables |
private com.google.common.cache.Cache<TableName,Boolean> |
RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.memstoreReplicationEnabled |
Modifier and Type | Method and Description |
---|---|
void |
RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.append(TableName tableName,
byte[] encodedRegionName,
byte[] row,
List<WAL.Entry> entries) |
protected void |
ReplicationSink.batch(TableName tableName,
Collection<List<Row>> allRows)
Do the changes and handle the pool
|
private boolean |
RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.requiresReplication(TableName tableName,
List<WAL.Entry> entries)
Returns true if the specified entry must be replicated.
|
Constructor and Description |
---|
RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable(ClusterConnection connection,
RpcControllerFactory rpcControllerFactory,
TableName tableName,
HRegionLocation location,
HRegionInfo regionInfo,
byte[] row,
List<WAL.Entry> entries,
AtomicLong skippedEntries) |
Modifier and Type | Method and Description |
---|---|
private javax.ws.rs.core.Response |
SchemaResource.replace(TableName name,
TableSchemaModel model,
javax.ws.rs.core.UriInfo uriInfo,
Admin admin) |
private javax.ws.rs.core.Response |
SchemaResource.update(TableName name,
TableSchemaModel model,
javax.ws.rs.core.UriInfo uriInfo,
Admin admin) |
Modifier and Type | Method and Description |
---|---|
TableName |
RemoteHTable.getName() |
Modifier and Type | Field and Description |
---|---|
static TableName |
AccessControlClient.ACL_TABLE_NAME |
static TableName |
AccessControlLists.ACL_TABLE_NAME
Internal storage table for access control lists
|
private TableName |
TablePermission.table |
private TableName |
AccessControlFilter.table |
private TableName |
AuthResult.table |
private TableName |
AuthResult.Params.tableName |
Modifier and Type | Field and Description |
---|---|
private Map<TableName,List<UserPermission>> |
AccessController.tableAcls |
private ConcurrentSkipListMap<TableName,TableAuthManager.PermissionCache<TablePermission>> |
TableAuthManager.tableCache |
Modifier and Type | Method and Description |
---|---|
TableName |
TablePermission.getTableName() |
TableName |
AuthResult.getTableName() |
private TableName |
AccessController.getTableName(Region region) |
private TableName |
AccessController.getTableName(RegionCoprocessorEnvironment e) |
Modifier and Type | Method and Description |
---|---|
static AuthResult |
AuthResult.allow(String request,
String reason,
User user,
Permission.Action action,
TableName table,
byte[] family,
byte[] qualifier) |
static AuthResult |
AuthResult.allow(String request,
String reason,
User user,
Permission.Action action,
TableName table,
Map<byte[],? extends Collection<?>> families) |
private boolean |
TableAuthManager.authorize(List<TablePermission> perms,
TableName table,
byte[] family,
byte[] qualifier,
Permission.Action action) |
boolean |
TableAuthManager.authorize(User user,
TableName table,
byte[] family,
byte[] qualifier,
Permission.Action action) |
boolean |
TableAuthManager.authorize(User user,
TableName table,
byte[] family,
Permission.Action action) |
boolean |
TableAuthManager.authorize(User user,
TableName table,
Cell cell,
Permission.Action action)
Authorize a user for a given KV.
|
boolean |
TableAuthManager.authorizeGroup(String groupName,
TableName table,
byte[] family,
byte[] qualifier,
Permission.Action action)
Checks authorization to a given table, column family and column for a group, based
on the stored permissions.
|
boolean |
TableAuthManager.authorizeUser(User user,
TableName table,
byte[] family,
byte[] qualifier,
Permission.Action action) |
boolean |
TableAuthManager.authorizeUser(User user,
TableName table,
byte[] family,
Permission.Action action)
Checks authorization to a given table and column family for a user, based on the
stored user permissions.
|
private org.apache.hadoop.fs.Path |
SecureBulkLoadEndpoint.createStagingDir(org.apache.hadoop.fs.Path baseDir,
User user,
TableName tableName) |
void |
ZKPermissionWatcher.deleteTableACLNode(TableName tableName)
Delete the ACL notify node of the table
|
static AuthResult |
AuthResult.deny(String request,
String reason,
User user,
Permission.Action action,
TableName table,
byte[] family,
byte[] qualifier) |
static AuthResult |
AuthResult.deny(String request,
String reason,
User user,
Permission.Action action,
TableName table,
Map<byte[],? extends Collection<?>> families) |
(package private) static com.google.common.collect.ListMultimap<String,TablePermission> |
AccessControlLists.getTablePermissions(org.apache.hadoop.conf.Configuration conf,
TableName tableName) |
private TableAuthManager.PermissionCache<TablePermission> |
TableAuthManager.getTablePermissions(TableName table) |
(package private) static List<UserPermission> |
AccessControlLists.getUserTablePermissions(org.apache.hadoop.conf.Configuration conf,
TableName tableName)
Returns the currently granted permissions for a given table as a list of
user plus associated permissions.
|
static void |
AccessControlClient.grant(org.apache.hadoop.conf.Configuration conf,
TableName tableName,
String userName,
byte[] family,
byte[] qual,
Permission.Action... actions)
Deprecated.
|
static void |
AccessControlClient.grant(Connection connection,
TableName tableName,
String userName,
byte[] family,
byte[] qual,
Permission.Action... actions)
Grants permission on the specified table for the specified user
|
boolean |
TableAuthManager.groupHasAccess(String groupName,
TableName table,
Permission.Action action)
Checks if the user has access to the full table or at least a family/qualifier
for the specified action.
|
private boolean |
TableAuthManager.hasAccess(List<TablePermission> perms,
TableName table,
Permission.Action action) |
boolean |
TableAuthManager.hasAccess(User user,
TableName table,
Permission.Action action) |
boolean |
TablePermission.implies(TableName table,
byte[] family,
byte[] qualifier,
Permission.Action action)
Checks that a given table operation is authorized by this permission
instance.
|
boolean |
TablePermission.implies(TableName table,
KeyValue kv,
Permission.Action action)
Checks if this permission grants access to perform the given action on
the given table and key value.
|
boolean |
TablePermission.matchesFamily(TableName table,
byte[] family,
Permission.Action action)
Returns true if this permission matches at least the given column family. |
boolean |
TablePermission.matchesFamilyQualifier(TableName table,
byte[] family,
byte[] qualifier,
Permission.Action action)
Returns true if this permission matches the given qualifier.
|
boolean |
TableAuthManager.matchPermission(User user,
TableName table,
byte[] family,
byte[] qualifier,
Permission.Action action) |
boolean |
TableAuthManager.matchPermission(User user,
TableName table,
byte[] family,
Permission.Action action)
Returns true if the given user has a
TablePermission matching up
to the column family portion of a permission. |
void |
AccessController.postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName,
byte[] col) |
void |
AccessController.postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName) |
void |
AccessController.postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName,
HTableDescriptor htd) |
void |
AccessController.postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
AccessController.preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName,
HColumnDescriptor column) |
void |
AccessController.preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName,
byte[] col) |
void |
AccessController.preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName) |
void |
AccessController.preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName) |
void |
AccessController.preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName) |
void |
AccessController.preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName,
HColumnDescriptor descriptor) |
void |
AccessController.preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName,
HTableDescriptor htd) |
void |
AccessController.preSetTableQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
AccessController.preSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
String userName,
TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas quotas) |
void |
AccessController.preTableFlush(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
AccessController.preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName) |
void |
TableAuthManager.refreshTableCacheFromWritable(TableName table,
byte[] data) |
void |
TableAuthManager.removeTable(TableName table) |
(package private) static void |
AccessControlLists.removeTablePermissions(org.apache.hadoop.conf.Configuration conf,
TableName tableName)
Remove specified table from the _acl_ table.
|
(package private) static void |
AccessControlLists.removeTablePermissions(org.apache.hadoop.conf.Configuration conf,
TableName tableName,
byte[] column)
Remove specified table column from the _acl_ table.
|
private void |
AccessController.requireAccess(String request,
TableName tableName,
Permission.Action... permissions)
Authorizes that the current user has any of the given permissions to access the table.
|
private void |
AccessController.requireGlobalPermission(String request,
Permission.Action perm,
TableName tableName,
Map<byte[],? extends Collection<byte[]>> familyMap)
Checks that the user has the given global permission.
|
void |
AccessController.requireNamespacePermission(String request,
String namespace,
TableName tableName,
Map<byte[],? extends Collection<byte[]>> familyMap,
Permission.Action... permissions)
Checks that the user has the given global or namespace permission.
|
private void |
AccessController.requirePermission(String request,
TableName tableName,
byte[] family,
byte[] qualifier,
Permission.Action... permissions)
Authorizes that the current user has any of the given permissions for the
given table, column family and column qualifier.
|
private void |
AccessController.requireTablePermission(String request,
TableName tableName,
byte[] family,
byte[] qualifier,
Permission.Action... permissions)
Authorizes that the current user has any of the given permissions for the
given table, column family and column qualifier.
|
static void |
AccessControlClient.revoke(org.apache.hadoop.conf.Configuration conf,
TableName tableName,
String username,
byte[] family,
byte[] qualifier,
Permission.Action... actions)
Deprecated.
|
static void |
AccessControlClient.revoke(Connection connection,
TableName tableName,
String username,
byte[] family,
byte[] qualifier,
Permission.Action... actions)
Revokes the permission on the table
|
void |
TableAuthManager.setTableGroupPermissions(String group,
TableName table,
List<TablePermission> perms)
Overwrites the existing permission set for a group and triggers an update
for zookeeper synchronization.
|
AuthResult.Params |
AuthResult.Params.setTableName(TableName table) |
void |
TableAuthManager.setTableUserPermissions(String username,
TableName table,
List<TablePermission> perms)
Overwrites the existing permission set for a given user for a table, and
triggers an update for zookeeper synchronization.
|
private void |
TableAuthManager.updateTableCache(TableName table,
com.google.common.collect.ListMultimap<String,TablePermission> tablePerms)
Updates the internal permissions cache for a single table, splitting
the permissions listed into separate caches for users and groups to optimize
group lookups.
|
boolean |
TableAuthManager.userHasAccess(User user,
TableName table,
Permission.Action action)
Checks if the user has access to the full table or at least a family/qualifier
for the specified action.
|
void |
TableAuthManager.writeTableToZooKeeper(TableName table,
TableAuthManager.PermissionCache<TablePermission> tablePerms) |
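The Connection-based AccessControlClient.grant and AccessControlClient.revoke overloads listed above take a TableName plus an optional family and qualifier. A minimal sketch of granting and then revoking READ on one column family; it assumes the AccessController coprocessor is enabled on the cluster, and the table name, user name, and family are placeholders.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class GrantRevokeExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("demo_table"); // placeholder table name
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Grant READ on family "cf" (any qualifier) to user "alice" using the
      // non-deprecated Connection-based overload.
      AccessControlClient.grant(connection, table, "alice",
          Bytes.toBytes("cf"), null, Permission.Action.READ);
      // Later, revoke the same permission again.
      AccessControlClient.revoke(connection, table, "alice",
          Bytes.toBytes("cf"), null, Permission.Action.READ);
    }
  }
}
```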
Modifier and Type | Method and Description |
---|---|
void |
AccessController.postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList,
List<HTableDescriptor> descriptors,
String regex) |
void |
AccessController.preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList,
List<HTableDescriptor> descriptors,
String regex) |
Constructor and Description |
---|
AccessControlFilter(TableAuthManager mgr,
User ugi,
TableName tableName,
AccessControlFilter.Strategy strategy,
Map<ByteRange,Integer> cfVsMaxVersions) |
AuthResult(boolean allowed,
String request,
String reason,
User user,
Permission.Action action,
TableName table,
byte[] family,
byte[] qualifier) |
AuthResult(boolean allowed,
String request,
String reason,
User user,
Permission.Action action,
TableName table,
Map<byte[],? extends Collection<?>> families) |
TablePermission(String namespace,
TableName table,
byte[] family,
byte[] qualifier,
byte[] actionCodes)
Creates a new permission for the given namespace or table, family and column qualifier,
allowing the actions matching the provided byte codes to be performed.
|
TablePermission(String namespace,
TableName table,
byte[] family,
byte[] qualifier,
Permission.Action... assigned)
Creates a new permission for the given namespace or table, restricted to the given
column family and qualifier, allowing the assigned actions to be performed.
|
TablePermission(TableName table,
byte[] family,
byte[] qualifier,
byte[] actionCodes)
Creates a new permission for the given table, family and column qualifier,
allowing the actions matching the provided byte codes to be performed.
|
TablePermission(TableName table,
byte[] family,
byte[] qualifier,
Permission.Action... assigned)
Creates a new permission for the given table, restricted to the given
column family and qualifier, allowing the assigned actions to be performed.
|
TablePermission(TableName table,
byte[] family,
Permission.Action... assigned)
Create a new permission for the given table and (optionally) column family,
allowing the given actions.
|
UserPermission(byte[] user,
TableName table,
byte[] family,
byte[] qualifier,
byte[] actionCodes)
Creates a new instance for the given user, table, column family and
qualifier, matching the actions with the given codes.
|
UserPermission(byte[] user,
TableName table,
byte[] family,
byte[] qualifier,
Permission.Action... assigned)
Creates a new permission for the given user, table, column family and
column qualifier.
|
UserPermission(byte[] user,
TableName table,
byte[] family,
Permission.Action... assigned)
Creates a new instance for the given user, table and column family.
|
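The TablePermission and UserPermission constructors above pair a TableName with an optional family/qualifier and a set of actions, and TablePermission.implies(...) answers whether a concrete access is covered. A small sketch using placeholder table, family, and user names.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.TablePermission;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.util.Bytes;

public class TablePermissionExample {
  public static void main(String[] args) {
    TableName table = TableName.valueOf("demo_table"); // placeholder name
    byte[] family = Bytes.toBytes("cf");
    byte[] qualifier = Bytes.toBytes("col");

    // READ on table "demo_table", family "cf", any qualifier.
    TablePermission perm = new TablePermission(table, family, Permission.Action.READ);

    // implies() checks whether this permission authorizes a concrete access.
    boolean canRead  = perm.implies(table, family, qualifier, Permission.Action.READ);  // true
    boolean canWrite = perm.implies(table, family, qualifier, Permission.Action.WRITE); // false

    // A UserPermission ties the same data to a specific user.
    UserPermission userPerm =
        new UserPermission(Bytes.toBytes("alice"), table, family, Permission.Action.READ);

    System.out.println("read=" + canRead + " write=" + canWrite + " user=" + userPerm);
  }
}
```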
Modifier and Type | Field and Description |
---|---|
static TableName |
VisibilityConstants.LABELS_TABLE_NAME
Internal storage table for visibility labels
|
Modifier and Type | Method and Description |
---|---|
void |
VisibilityController.preAddColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor column) |
void |
VisibilityController.preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
byte[] c) |
void |
VisibilityController.preDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) |
void |
VisibilityController.preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor descriptor) |
void |
VisibilityController.preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd) |
Modifier and Type | Field and Description |
---|---|
private TableName |
SnapshotInfo.SnapshotStats.snapshotTable |
private TableName |
RestoreSnapshotHelper.snapshotTable |
Modifier and Type | Method and Description |
---|---|
static HRegionInfo |
RestoreSnapshotHelper.cloneRegionInfo(TableName tableName,
HRegionInfo snapshotRegionInfo) |
Constructor and Description |
---|
TablePartiallyOpenException(TableName tableName) |
Modifier and Type | Method and Description |
---|---|
private static TableName |
ThriftServerRunner.HBaseHandler.getTableName(ByteBuffer buffer) |
Modifier and Type | Field and Description |
---|---|
static TableName |
Canary.DEFAULT_WRITE_TABLE_NAME |
private TableName |
Canary.writeTableName |
private TableName |
Canary.RegionMonitor.writeTableName |
Modifier and Type | Method and Description |
---|---|
static void |
Canary.sniff(Admin admin,
TableName tableName)
Canary entry point for specified table.
|
static void |
Canary.sniff(Admin admin,
TableName tableName,
Canary.RegionTask.TaskType taskType)
Canary entry point for the specified table with a task type (read/write).
|
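Canary.sniff(Admin, TableName) runs the canary checks against a single table. A minimal sketch, assuming default client configuration; the table name "demo_table" is a placeholder.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.tool.Canary;

public class CanarySniffExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Probe every region of the table with the canary's read check.
      Canary.sniff(admin, TableName.valueOf("demo_table"));
    }
  }
}
```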
Constructor and Description |
---|
Canary.RegionMonitor(Connection connection,
String[] monitorTargets,
boolean useRegExp,
Canary.Sink sink,
ExecutorService executor,
boolean writeSniffing,
TableName writeTableName,
boolean treatFailureAsError) |
Modifier and Type | Field and Description |
---|---|
private TableName |
HMerge.OnlineMerger.tableName |
(package private) TableName |
HBaseFsck.TableInfo.tableName |
private TableName |
Merge.tableName |
Modifier and Type | Field and Description |
---|---|
private Map<TableName,HTableDescriptor> |
FSTableDescriptors.cache |
private TreeSet<TableName> |
HBaseFsck.disabledTables |
private Set<TableName> |
HBaseFsck.orphanedTableZNodes
List of orphaned table ZNodes
|
private Map<TableName,Set<String>> |
HBaseFsck.orphanTableDirs |
private Map<TableName,Set<String>> |
HBaseFsck.skippedRegions |
private Set<TableName> |
HBaseFsck.tablesIncluded |
private SortedMap<TableName,HBaseFsck.TableInfo> |
HBaseFsck.tablesInfo
This map from TableName -> TableInfo contains the structures necessary to
detect table consistency problems (holes, dupes, overlaps).
|
Modifier and Type | Method and Description |
---|---|
TableName |
HBaseFsck.TableInfo.getName() |
TableName |
HBaseFsck.HbckInfo.getTableName() |
static TableName |
FSUtils.getTableName(org.apache.hadoop.fs.Path tablePath)
Returns the TableName object representing the table directory under path rootdir. |
Modifier and Type | Method and Description |
---|---|
private SortedMap<TableName,HBaseFsck.TableInfo> |
HBaseFsck.checkHdfsIntegrity(boolean fixHoles,
boolean fixOverlaps) |
(package private) SortedMap<TableName,HBaseFsck.TableInfo> |
HBaseFsck.checkIntegrity()
Checks tables integrity.
|
(package private) Set<TableName> |
HBaseFsck.getIncludedTables() |
private SortedMap<TableName,HBaseFsck.TableInfo> |
HBaseFsck.loadHdfsRegionInfos()
Populate hbi's from regionInfos loaded from file system.
|
Modifier and Type | Method and Description |
---|---|
(package private) static void |
RegionSplitter.createPresplitTable(TableName tableName,
RegionSplitter.SplitAlgorithm splitAlgo,
String[] columnFamilies,
org.apache.hadoop.conf.Configuration conf) |
void |
FSTableDescriptors.deleteTableDescriptorIfExists(TableName tableName)
Deletes all the table descriptor files from the file system.
|
private boolean |
HBaseFsck.fabricateTableInfo(FSTableDescriptors fstd,
TableName tableName,
Set<String> columns)
To fabricate a .tableinfo file with the following contents
1. |
HTableDescriptor |
FSTableDescriptors.get(TableName tablename)
Get the current table descriptor for the given table, or null if none exists.
|
com.google.common.collect.Multimap<byte[],HBaseFsck.HbckInfo> |
HBaseFsck.getOverlapGroups(TableName table) |
static org.apache.hadoop.fs.Path |
HFileArchiveUtil.getRegionArchiveDir(org.apache.hadoop.fs.Path rootDir,
TableName tableName,
org.apache.hadoop.fs.Path regiondir)
Get the archive directory for a given region under the specified table
|
static org.apache.hadoop.fs.Path |
HFileArchiveUtil.getRegionArchiveDir(org.apache.hadoop.fs.Path rootDir,
TableName tableName,
String encodedRegionName)
Get the archive directory for a given region under the specified table
|
(package private) static LinkedList<Pair<byte[],byte[]>> |
RegionSplitter.getSplits(Connection connection,
TableName tableName,
RegionSplitter.SplitAlgorithm splitAlgo) |
static org.apache.hadoop.fs.Path |
HFileArchiveUtil.getStoreArchivePath(org.apache.hadoop.conf.Configuration conf,
TableName tableName,
String regionName,
String familyName)
Get the directory to archive a store directory
|
static org.apache.hadoop.fs.Path |
HFileArchiveUtil.getTableArchivePath(org.apache.hadoop.conf.Configuration conf,
TableName tableName)
Get the path to the table archive directory based on the configured archive directory.
|
static org.apache.hadoop.fs.Path |
HFileArchiveUtil.getTableArchivePath(org.apache.hadoop.fs.Path rootdir,
TableName tableName)
Get the path to the table archive directory based on the configured archive directory.
|
static HTableDescriptor |
FSTableDescriptors.getTableDescriptorFromFs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir,
TableName tableName)
Returns the latest table descriptor for the given table directly from the file system
if it exists, bypassing the local cache.
|
static HTableDescriptor |
FSTableDescriptors.getTableDescriptorFromFs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir,
TableName tableName,
boolean rewritePb)
Returns the latest table descriptor for the table located at the given directory
directly from the file system if it exists.
|
static org.apache.hadoop.fs.Path |
FSUtils.getTableDir(org.apache.hadoop.fs.Path rootdir,
TableName tableName)
Returns the Path object representing the table directory under path rootdir. |
(package private) org.apache.hadoop.fs.Path |
FSTableDescriptors.getTableDir(TableName tableName)
Return the table directory in HDFS
|
private static Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path> |
RegionSplitter.getTableDirAndSplitFile(org.apache.hadoop.conf.Configuration conf,
TableName tableName) |
private org.apache.hadoop.fs.FileStatus |
FSTableDescriptors.getTableInfoPath(TableName tableName)
Find the most current table info file for the given table in the hbase root directory.
|
static Map<String,org.apache.hadoop.fs.Path> |
FSUtils.getTableStoreFilePathMap(Map<String,org.apache.hadoop.fs.Path> map,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir,
TableName tableName)
Runs through the HBase rootdir/tablename and creates a reverse lookup map for
table StoreFile names to the full Path.
|
static Map<String,org.apache.hadoop.fs.Path> |
FSUtils.getTableStoreFilePathMap(Map<String,org.apache.hadoop.fs.Path> map,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir,
TableName tableName,
HBaseFsck.ErrorReporter errors)
Runs through the HBase rootdir/tablename and creates a reverse lookup map for
table StoreFile names to the full Path.
|
void |
HBaseFsck.includeTable(TableName table) |
(package private) boolean |
HBaseFsck.isTableIncluded(TableName table)
Only check/fix tables specified by the list; an empty list means all tables are included.
|
boolean |
FSTableDescriptors.isTableInfoExists(TableName tableName)
Checks if a current table info file exists for the given table
|
static void |
HMerge.merge(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
TableName tableName)
Scans the table and merges two adjacent regions if they are small.
|
static void |
HMerge.merge(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
TableName tableName,
boolean testMasterRunning)
Scans the table and merges two adjacent regions if they are small.
|
private static void |
FSTableDescriptorMigrationToSubdir.migrateTableIfExists(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootDir,
TableName tableName)
Deprecated.
|
private boolean |
Merge.notInTable(TableName tn,
byte[] rn) |
<R> void |
MultiHConnection.processBatchCallback(List<? extends Row> actions,
TableName tableName,
Object[] results,
Batch.Callback<R> callback)
Randomly pick a connection and process the batch of actions for a given table
|
HTableDescriptor |
FSTableDescriptors.remove(TableName tablename)
Removes the table descriptor from the local cache and returns it.
|
(package private) static void |
RegionSplitter.rollingSplit(TableName tableName,
RegionSplitter.SplitAlgorithm splitAlgo,
org.apache.hadoop.conf.Configuration conf) |
(package private) void |
HBaseFsck.sidelineTable(org.apache.hadoop.fs.FileSystem fs,
TableName tableName,
org.apache.hadoop.fs.Path hbaseDir,
org.apache.hadoop.fs.Path backupHbaseDir)
Sideline an entire table.
|
(package private) static LinkedList<Pair<byte[],byte[]>> |
RegionSplitter.splitScan(LinkedList<Pair<byte[],byte[]>> regionList,
Connection connection,
TableName tableName,
RegionSplitter.SplitAlgorithm splitAlgo) |
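FSUtils.getTableDir(...) and FSTableDescriptors.getTableDescriptorFromFs(...) above both resolve per-table locations under the HBase root directory. A minimal sketch that prints a table's directory and reads its descriptor straight from the file system; the table name is a placeholder and the root directory is taken from the client configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;

public class TableDirExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("demo_table"); // placeholder name

    // Resolve the table's directory under the configured HBase root directory.
    Path rootDir = FSUtils.getRootDir(conf);
    Path tableDir = FSUtils.getTableDir(rootDir, table);
    System.out.println("table dir: " + tableDir);

    // Read the table descriptor directly from the file system, bypassing caches.
    FileSystem fs = rootDir.getFileSystem(conf);
    HTableDescriptor htd =
        FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
    System.out.println("descriptor: " + htd);
  }
}
```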
Modifier and Type | Method and Description |
---|---|
private ArrayList<Put> |
HBaseFsck.generatePuts(SortedMap<TableName,HBaseFsck.TableInfo> tablesInfo)
Generate set of puts to add to new meta.
|
(package private) HTableDescriptor[] |
HBaseFsck.getHTableDescriptors(List<TableName> tableNames) |
private void |
HBaseFsck.printTableSummary(SortedMap<TableName,HBaseFsck.TableInfo> tablesInfo)
Prints summary of all tables found on the system.
|
private void |
HBaseFsck.suggestFixes(SortedMap<TableName,HBaseFsck.TableInfo> tablesInfo)
Suggest fixes for each table.
|
Constructor and Description |
---|
HBaseFsck.TableInfo(TableName name) |
HMerge.Merger(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
TableName tableName) |
HMerge.OnlineMerger(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
TableName tableName) |
Modifier and Type | Field and Description |
---|---|
protected TableName |
WALKey.tablename |
(package private) TableName |
WALSplitter.RegionEntryBuffer.tableName |
Modifier and Type | Field and Description |
---|---|
private Set<TableName> |
WALSplitter.disablingOrDisabledTables |
private Map<TableName,HConnection> |
WALSplitter.LogReplayOutputSink.tableNameToHConnectionMap |
Modifier and Type | Method and Description |
---|---|
private TableName |
WALSplitter.LogReplayOutputSink.getTableFromLocationStr(String loc) |
TableName |
WALKey.getTablename() |
TableName |
WALSplitter.RegionEntryBuffer.getTableName() |
Modifier and Type | Method and Description |
---|---|
private HConnection |
WALSplitter.LogReplayOutputSink.getConnectionByTableName(TableName tableName) |
protected void |
WALKey.init(byte[] encodedRegionName,
TableName tablename,
long logSeqNum,
long now,
List<UUID> clusterIds,
long nonceGroup,
long nonce,
MultiVersionConcurrencyControl mvcc) |
(package private) void |
WALKey.internTableName(TableName tablename)
Drop this instance's tablename byte array and instead
hold a reference to the provided tablename.
|
private HRegionLocation |
WALSplitter.LogReplayOutputSink.locateRegionAndRefreshLastFlushedSequenceId(HConnection hconn,
TableName table,
byte[] row,
String originalEncodedRegionName)
Locate destination region based on table name & row.
|
Constructor and Description |
---|
WALKey(byte[] encodedRegionName,
TableName tablename) |
WALKey(byte[] encodedRegionName,
TableName tablename,
long now) |
WALKey(byte[] encodedRegionName,
TableName tablename,
long now,
List<UUID> clusterIds,
long nonceGroup,
long nonce,
MultiVersionConcurrencyControl mvcc)
Create the log key for writing to somewhere.
|
WALKey(byte[] encodedRegionName,
TableName tablename,
long logSeqNum,
long now,
List<UUID> clusterIds,
long nonceGroup,
long nonce,
MultiVersionConcurrencyControl mvcc)
Create the log key for writing to somewhere.
|
WALKey(byte[] encodedRegionName,
TableName tablename,
long logSeqNum,
long nonceGroup,
long nonce,
MultiVersionConcurrencyControl mvcc)
Create the log key for writing to somewhere.
|
WALKey(byte[] encodedRegionName,
TableName tablename,
long logSeqNum,
long now,
UUID clusterId) |
WALKey(byte[] encodedRegionName,
TableName tablename,
long now,
MultiVersionConcurrencyControl mvcc) |
WALSplitter.RegionEntryBuffer(TableName tableName,
byte[] region) |
WALSplitter.RegionServerWriter(org.apache.hadoop.conf.Configuration conf,
TableName tableName,
HConnection conn) |
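The WALKey constructors above all key a WAL entry by an encoded region name plus the owning TableName. A small sketch using the (encodedRegionName, tablename, now) form; the encoded region name literal is only a stand-in for what HRegionInfo.getEncodedNameAsBytes() would supply.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALKey;

public class WALKeyExample {
  public static void main(String[] args) {
    TableName table = TableName.valueOf("demo_table"); // placeholder name
    // Placeholder; real code would use HRegionInfo.getEncodedNameAsBytes().
    byte[] encodedRegionName = Bytes.toBytes("0123456789abcdef");

    // Key for a WAL entry on that region/table, stamped with the current time.
    WALKey key = new WALKey(encodedRegionName, table, System.currentTimeMillis());

    System.out.println("WAL key for table " + key.getTablename());
  }
}
```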
Modifier and Type | Field and Description |
---|---|
private Map<TableName,org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State> |
ZKTableStateManager.cache
Cache of what we found in zookeeper so we don't have to go to zk ensemble
for every query.
|
Modifier and Type | Method and Description |
---|---|
(package private) Set<TableName> |
ZKTableStateManager.getAllTables(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State... states)
Gets a list of all the tables in the specified states in zookeeper.
|
static Set<TableName> |
ZKTableStateClientSideReader.getDisabledOrDisablingTables(ZooKeeperWatcher zkw)
Gets a list of all the tables set as disabled or disabling in zookeeper.
|
static Set<TableName> |
ZKTableStateClientSideReader.getDisabledTables(ZooKeeperWatcher zkw)
Gets a list of all the tables set as disabled in zookeeper.
|
static Set<TableName> |
ZKTableStateClientSideReader.getEnablingTables(ZooKeeperWatcher zkw)
Gets a list of all the tables set as enabling in zookeeper.
|
Set<TableName> |
ZKTableStateManager.getTablesInStates(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State... states)
Gets a list of all the tables in the specified states in zookeeper.
|
private static Set<TableName> |
ZKTableStateClientSideReader.getTablesInStates(ZooKeeperWatcher zkw,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State... states)
Gets a list of tables that are set as one of the passing in states in zookeeper.
|
Modifier and Type | Method and Description |
---|---|
void |
ZKTableStateManager.checkAndRemoveTableState(TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State states,
boolean deletePermanentState)
If the table is found in the given state the in-memory state is removed.
|
(package private) static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State |
ZKTableStateClientSideReader.getTableState(ZooKeeperWatcher zkw,
TableName tableName) |
private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State |
ZKTableStateManager.getTableState(ZooKeeperWatcher zkw,
TableName tableName)
Gets table state from ZK.
|
static boolean |
ZKTableStateClientSideReader.isDisabledTable(ZooKeeperWatcher zkw,
TableName tableName)
Go to zookeeper and see if the state of the table is ZooKeeperProtos.Table.State#DISABLED. |
static boolean |
ZKTableStateClientSideReader.isDisablingOrDisabledTable(ZooKeeperWatcher zkw,
TableName tableName)
Go to zookeeper and see if the state of the table is ZooKeeperProtos.Table.State#DISABLING or ZooKeeperProtos.Table.State#DISABLED. |
static boolean |
ZKTableStateClientSideReader.isEnabledTable(ZooKeeperWatcher zkw,
TableName tableName)
Go to zookeeper and see if the state of the table is ZooKeeperProtos.Table.State#ENABLED. |
boolean |
ZKTableStateManager.isTablePresent(TableName tableName)
Checks if the table is present.
|
boolean |
ZKTableStateManager.isTableState(TableName tableName,
boolean checkSource,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State... states)
Checks if table is marked in specified state in ZK.
|
boolean |
ZKTableStateManager.isTableState(TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State... states)
Checks if table is marked in specified state in ZK (using cache only).
|
void |
ZKTableStateManager.setDeletedTable(TableName tableName)
Deletes the table in zookeeper.
|
void |
ZKTableStateManager.setTableState(TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state)
Sets table state in ZK.
|
boolean |
ZKTableStateManager.setTableStateIfInStates(TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State newState,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State... states)
Checks and sets table state in ZK.
|
boolean |
ZKTableStateManager.setTableStateIfNotInStates(TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State newState,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State... states)
Checks and sets table state in ZK.
|
private void |
ZKTableStateManager.setTableStateInZK(TableName tableName,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state) |
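ZKTableStateClientSideReader reads table state directly from zookeeper given a ZooKeeperWatcher and a TableName. A minimal sketch of checking whether a table is disabled; the watcher identifier and table name are placeholders, and a throwaway Abortable is supplied because the watcher constructor requires one.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class TableStateExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Abortable abortable = new Abortable() {
      @Override public void abort(String why, Throwable e) {
        throw new RuntimeException(why, e); // fail fast in this throwaway client
      }
      @Override public boolean isAborted() { return false; }
    };
    // "table-state-example" is just a client identifier for this sketch.
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-state-example", abortable);
    try {
      TableName table = TableName.valueOf("demo_table"); // placeholder name
      boolean disabled = ZKTableStateClientSideReader.isDisabledTable(zkw, table);
      System.out.println(table + " disabled in ZK: " + disabled);
    } finally {
      zkw.close();
    }
  }
}
```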
Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.