@InterfaceAudience.Public
public class HBaseTestingUtility extends HBaseZKTestingUtility
Facility for testing HBase. Manages one cluster at a time only. The managed cluster can be an in-process
MiniHBaseCluster
, or a deployed cluster of type
DistributedHBaseCluster
. Not all methods work with the real cluster. Depends on log4j
being on classpath and hbase-site.xml for logging and test-run configuration. It does not set
logging levels. In the configuration properties, default values for master-info-port and
region-server-port are overridden such that a random port will be assigned (thus avoiding port
contention if another local HBase instance is already running).
To preserve test data directories, pass the system property "hbase.testing.preserve.testdir" setting it to true.
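For orientation, here is a minimal sketch of typical usage in a test. This is an illustration, not prescribed API usage; the table name "test" and the family "cf" are invented for the example.

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(); // hbase, dfs and zookeeper, all with default options
    try {
      // Create a table with one column family and write a single cell.
      Table table = util.createTable(TableName.valueOf("test"), Bytes.toBytes("cf"));
      table.put(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      // ... assertions against the table ...
    } finally {
      util.shutdownMiniCluster(); // stops mini hbase, zk and hdfs clusters
    }
  }
}
```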
Modifier and Type | Class and Description |
---|---|
private static class |
HBaseTestingUtility.FsDatasetAsyncDiskServiceFixer |
static class |
HBaseTestingUtility.SeenRowTracker
A tracker for tracking and validating table rows generated with
loadTable(Table, byte[]) |
Nested classes/interfaces inherited from class HBaseCommonTestingUtility: HBaseCommonTestingUtility.PortAllocator
Modifier and Type | Field and Description |
---|---|
static Collection<Object[]> |
BLOOM_AND_COMPRESSION_COMBINATIONS |
static byte[][] |
COLUMNS |
private AtomicReference<org.apache.hadoop.hbase.client.Connection> |
connection |
private org.apache.hadoop.fs.Path |
dataTestDirOnTestFS
Directory on test filesystem where we put the data for this instance of HBaseTestingUtility
|
static int |
DEFAULT_REGIONS_PER_SERVER
The default number of regions per regionserver when creating a pre-split table.
|
private org.apache.hadoop.hdfs.MiniDFSCluster |
dfsCluster |
private HBaseTestingUtility.FsDatasetAsyncDiskServiceFixer |
dfsClusterFixer |
static byte[] |
fam1 |
static byte[] |
fam2 |
static byte[] |
fam3 |
static char |
FIRST_CHAR |
private static String |
FS_URI
Filesystem URI used for map-reduce mini-cluster setup
|
private String |
hadoopLogDir |
private org.apache.hadoop.hbase.client.HBaseAdmin |
hbaseAdmin |
private HBaseCluster |
hbaseCluster |
static byte[][] |
KEYS |
static byte[][] |
KEYS_FOR_HBA_CREATE_TABLE |
static char |
LAST_CHAR |
private static int |
MAXVERSIONS |
static List<Object[]> |
MEMSTORETS_TAGS_PARAMETRIZED
This is for unit tests parameterized with a single boolean.
|
private boolean |
miniClusterRunning
If there is a mini cluster running for this testing utility instance.
|
private org.apache.hadoop.mapred.MiniMRCluster |
mrCluster |
static boolean |
PRESPLIT_TEST_TABLE |
static String |
PRESPLIT_TEST_TABLE_KEY |
static String |
REGIONS_PER_SERVER_KEY |
static byte[][] |
ROWS
All the row values for the data loaded by
loadTable(Table, byte[]) |
static String |
START_KEY |
static byte[] |
START_KEY_BYTES |
private static String |
TEST_DIRECTORY_KEY
Deprecated.
since 2.0.0 and will be removed in 3.0.0. Can be used only with mini dfs.
|
Fields inherited from class HBaseZKTestingUtility: clusterTestDir, zooKeeperWatcher
Fields inherited from class HBaseCommonTestingUtility: BASE_TEST_DIRECTORY_KEY, BOOLEAN_PARAMETERIZED, COMPRESSION_ALGORITHMS, COMPRESSION_ALGORITHMS_PARAMETERIZED, conf, DEFAULT_BASE_TEST_DIRECTORY, LOG
Constructor and Description |
---|
HBaseTestingUtility()
Create an HBaseTestingUtility using a default configuration.
|
HBaseTestingUtility(org.apache.hadoop.conf.Configuration conf)
Create an HBaseTestingUtility using a given configuration.
|
Modifier and Type | Method and Description |
---|---|
static void |
assertKVListsEqual(String additionalMsg,
List<? extends org.apache.hadoop.hbase.Cell> expected,
List<? extends org.apache.hadoop.hbase.Cell> actual) |
void |
assertRegionOnlyOnServer(org.apache.hadoop.hbase.client.RegionInfo hri,
org.apache.hadoop.hbase.ServerName server,
long timeout)
Check to make sure the region is open on the specified region server, but not on any other one.
|
void |
assertRegionOnServer(org.apache.hadoop.hbase.client.RegionInfo hri,
org.apache.hadoop.hbase.ServerName server,
long timeout)
Due to an async racing issue, a region may not yet be in a region server's online region list
even after the assignment znode is deleted and the new assignment is recorded in the master.
|
boolean |
assignRegion(org.apache.hadoop.hbase.client.RegionInfo regionInfo)
Directly uses the assignment manager to assign the region.
|
static boolean |
available(int port)
Checks to see if a specific port is available.
|
static void |
await(long sleepMillis,
BooleanSupplier condition)
Await the successful return of
condition, sleeping sleepMillis between
invocations. |
private static List<Object[]> |
bloomAndCompressionCombinations()
Create all combinations of Bloom filters and compression algorithms for testing.
|
String |
checksumRows(org.apache.hadoop.hbase.client.Table table)
Return an md5 digest of the entire contents of a table.
|
private void |
cleanup() |
boolean |
cleanupDataTestDirOnTestFS()
Cleans the test data directory on the test filesystem.
|
boolean |
cleanupDataTestDirOnTestFS(String subdirName)
Cleans a subdirectory under the test data directory on the test filesystem.
|
void |
closeConnection() |
static void |
closeRegionAndWAL(org.apache.hadoop.hbase.regionserver.HRegion r)
Close both the HRegion
r and its underlying WAL. |
static void |
closeRegionAndWAL(org.apache.hadoop.hbase.regionserver.Region r)
Close both the region
r and its underlying WAL. |
void |
compact(boolean major)
Compact all regions in the mini hbase cluster
|
void |
compact(org.apache.hadoop.hbase.TableName tableName,
boolean major)
Compact all of a table's regions in the mini hbase cluster
|
int |
countRows(org.apache.hadoop.hbase.regionserver.InternalScanner scanner) |
int |
countRows(org.apache.hadoop.hbase.regionserver.Region region) |
int |
countRows(org.apache.hadoop.hbase.regionserver.Region region,
org.apache.hadoop.hbase.client.Scan scan) |
int |
countRows(org.apache.hadoop.hbase.client.Table table)
Return the number of rows in the given table.
|
int |
countRows(org.apache.hadoop.hbase.client.Table table,
byte[]... families) |
int |
countRows(org.apache.hadoop.hbase.TableName tableName)
Return the number of rows in the given table.
|
int |
countRows(org.apache.hadoop.hbase.client.Table table,
org.apache.hadoop.hbase.client.Scan scan) |
private String |
createDirAndSetProperty(String property) |
private String |
createDirAndSetProperty(String relPath,
String property) |
private void |
createDirsAndSetProperties()
This is used before starting HDFS and map-reduce mini-clusters. Run something like the below to
check for the likes of '/tmp' references -- i.e.
|
org.apache.hadoop.hbase.regionserver.HRegion |
createLocalHRegion(byte[] tableName,
byte[] startKey,
byte[] stopKey,
String callingMethod,
org.apache.hadoop.conf.Configuration conf,
boolean isReadOnly,
org.apache.hadoop.hbase.client.Durability durability,
org.apache.hadoop.hbase.wal.WAL wal,
byte[]... families)
Deprecated.
since 2.0.0 and will be removed in 3.0.0. Use
#createLocalHRegion(TableName,
byte[], byte[], boolean, Durability, WAL, byte[]...) instead. |
org.apache.hadoop.hbase.regionserver.HRegion |
createLocalHRegion(org.apache.hadoop.hbase.HRegionInfo info,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.HTableDescriptor desc,
org.apache.hadoop.hbase.wal.WAL wal)
Create an HRegion that writes to the local tmp dirs with specified wal
|
org.apache.hadoop.hbase.regionserver.HRegion |
createLocalHRegion(org.apache.hadoop.hbase.client.RegionInfo info,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.TableDescriptor desc,
org.apache.hadoop.hbase.wal.WAL wal)
Create an HRegion that writes to the local tmp dirs with specified wal
|
org.apache.hadoop.hbase.regionserver.HRegion |
createLocalHRegion(org.apache.hadoop.hbase.client.RegionInfo info,
org.apache.hadoop.hbase.client.TableDescriptor desc)
Create an HRegion that writes to the local tmp dirs.
|
org.apache.hadoop.hbase.regionserver.HRegion |
createLocalHRegion(org.apache.hadoop.hbase.client.TableDescriptor desc,
byte[] startKey,
byte[] endKey)
Create an HRegion that writes to the local tmp dirs
|
org.apache.hadoop.hbase.regionserver.HRegion |
createLocalHRegion(org.apache.hadoop.hbase.TableName tableName,
byte[] startKey,
byte[] stopKey,
org.apache.hadoop.conf.Configuration conf,
boolean isReadOnly,
org.apache.hadoop.hbase.client.Durability durability,
org.apache.hadoop.hbase.wal.WAL wal,
byte[]... families)
Returns a region on which you must call
closeRegionAndWAL(HRegion) when done. |
org.apache.hadoop.hbase.regionserver.HRegion |
createLocalHRegionWithInMemoryFlags(org.apache.hadoop.hbase.TableName tableName,
byte[] startKey,
byte[] stopKey,
org.apache.hadoop.conf.Configuration conf,
boolean isReadOnly,
org.apache.hadoop.hbase.client.Durability durability,
org.apache.hadoop.hbase.wal.WAL wal,
boolean[] compactedMemStore,
byte[]... families) |
static HBaseTestingUtility |
createLocalHTU()
Deprecated.
since 2.0.0 and will be removed in 3.0.0. Use
HBaseTestingUtility()
instead. |
static HBaseTestingUtility |
createLocalHTU(org.apache.hadoop.conf.Configuration c)
Deprecated.
since 2.0.0 and will be removed in 3.0.0. Use
HBaseTestingUtility(Configuration) instead. |
org.apache.hadoop.hbase.regionserver.RegionServerServices |
createMockRegionServerService()
Create a stubbed out RegionServerService, mainly for getting FS.
|
org.apache.hadoop.hbase.regionserver.RegionServerServices |
createMockRegionServerService(org.apache.hadoop.hbase.ipc.RpcServerInterface rpc)
Create a stubbed out RegionServerService, mainly for getting FS.
|
org.apache.hadoop.hbase.regionserver.RegionServerServices |
createMockRegionServerService(org.apache.hadoop.hbase.ServerName name)
Create a stubbed out RegionServerService, mainly for getting FS.
|
org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor |
createModifyableTableDescriptor(String name) |
org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor |
createModifyableTableDescriptor(org.apache.hadoop.hbase.TableName name,
int minVersions,
int versions,
int ttl,
org.apache.hadoop.hbase.KeepDeletedCells keepDeleted) |
List<org.apache.hadoop.hbase.HRegionInfo> |
createMultiRegionsInMeta(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.HTableDescriptor htd,
byte[][] startKeys)
Deprecated.
|
List<org.apache.hadoop.hbase.client.RegionInfo> |
createMultiRegionsInMeta(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.TableDescriptor htd,
byte[][] startKeys)
Create rows in hbase:meta for regions of the specified table with the specified start keys.
|
org.apache.hadoop.hbase.client.Table |
createMultiRegionTable(org.apache.hadoop.hbase.TableName tableName,
byte[] family)
Create a table with multiple regions.
|
org.apache.hadoop.hbase.client.Table |
createMultiRegionTable(org.apache.hadoop.hbase.TableName tableName,
byte[][] families)
Create a table with multiple regions.
|
org.apache.hadoop.hbase.client.Table |
createMultiRegionTable(org.apache.hadoop.hbase.TableName tableName,
byte[][] families,
int numVersions)
Create a table with multiple regions.
|
org.apache.hadoop.hbase.client.Table |
createMultiRegionTable(org.apache.hadoop.hbase.TableName tableName,
byte[] family,
int numRegions)
Create a table with multiple regions.
|
org.apache.hadoop.hbase.client.Table |
createMultiRegionTable(org.apache.hadoop.hbase.TableName tableName,
int replicaCount,
byte[][] families)
Create a table with multiple regions.
|
static int |
createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.TableDescriptor desc,
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor hcd)
Creates a pre-split table for load testing.
|
static int |
createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.TableDescriptor desc,
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor[] hcds,
int numRegionsPerServer)
Creates a pre-split table for load testing.
|
static int |
createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.TableDescriptor td,
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor[] cds,
org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm splitter,
int numRegionsPerServer)
Creates a pre-split table for load testing.
|
static int |
createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.TableDescriptor desc,
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor hcd,
int numRegionsPerServer)
Creates a pre-split table for load testing.
|
static int |
createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.TableName tableName,
byte[][] columnFamilies,
org.apache.hadoop.hbase.io.compress.Compression.Algorithm compression,
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dataBlockEncoding,
int numRegionsPerServer,
int regionReplication,
org.apache.hadoop.hbase.client.Durability durability)
Creates a pre-split table for load testing.
|
static int |
createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.TableName tableName,
byte[] columnFamily,
org.apache.hadoop.hbase.io.compress.Compression.Algorithm compression,
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dataBlockEncoding)
Creates a pre-split table for load testing.
|
static int |
createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.TableName tableName,
byte[] columnFamily,
org.apache.hadoop.hbase.io.compress.Compression.Algorithm compression,
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dataBlockEncoding,
int numRegionsPerServer,
int regionReplication,
org.apache.hadoop.hbase.client.Durability durability)
Creates a pre-split table for load testing.
|
org.apache.hadoop.hbase.client.Table |
createRandomTable(org.apache.hadoop.hbase.TableName tableName,
Collection<String> families,
int maxVersions,
int numColsPerRow,
int numFlushes,
int numRegions,
int numRowsPerFlush)
Creates a random table with the given parameters
|
static org.apache.hadoop.hbase.regionserver.HRegion |
createRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfo info,
org.apache.hadoop.fs.Path rootDir,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.TableDescriptor htd)
Create a region with its own WAL.
|
static org.apache.hadoop.hbase.regionserver.HRegion |
createRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfo info,
org.apache.hadoop.fs.Path rootDir,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.TableDescriptor htd,
org.apache.hadoop.hbase.io.hfile.BlockCache blockCache)
Create a region with its own WAL.
|
static org.apache.hadoop.hbase.regionserver.HRegion |
createRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfo info,
org.apache.hadoop.fs.Path rootDir,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.TableDescriptor htd,
boolean initialize)
Create a region with its own WAL.
|
static org.apache.hadoop.hbase.regionserver.HRegion |
createRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfo info,
org.apache.hadoop.fs.Path rootDir,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.TableDescriptor htd,
org.apache.hadoop.hbase.mob.MobFileCache mobFileCache)
Create a region with its own WAL.
|
org.apache.hadoop.fs.Path |
createRootDir()
Same as
createRootDir(boolean create) except that the
create flag is false. |
org.apache.hadoop.fs.Path |
createRootDir(boolean create)
Creates an hbase rootdir in user home directory.
|
private void |
createSubDirAndSystemProperty(String propertyName,
org.apache.hadoop.fs.Path parent,
String subDirName) |
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.client.TableDescriptor htd,
byte[][] splitRows)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.client.TableDescriptor htd,
byte[][] families,
byte[][] splitKeys,
org.apache.hadoop.hbase.regionserver.BloomType type,
int blockSize,
org.apache.hadoop.conf.Configuration c)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.client.TableDescriptor htd,
byte[][] families,
byte[][] splitKeys,
org.apache.hadoop.conf.Configuration c)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.client.TableDescriptor htd,
byte[][] families,
org.apache.hadoop.conf.Configuration c)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
byte[] family)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
byte[][] families)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
byte[][] families,
byte[][] splitKeys)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
byte[][] families,
byte[][] splitKeys,
int replicaCount)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
byte[][] families,
byte[][] splitKeys,
int replicaCount,
org.apache.hadoop.conf.Configuration c)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
byte[][] families,
int numVersions)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
byte[][] families,
int[] numVersions)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
byte[][] families,
int numVersions,
byte[][] splitKeys)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
byte[][] families,
int numVersions,
byte[] startKey,
byte[] endKey,
int numRegions) |
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
byte[][] families,
int numVersions,
int blockSize)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
byte[][] families,
int numVersions,
int blockSize,
String cpName) |
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
byte[] family,
byte[][] splitRows)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
byte[] family,
int numVersions)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
String family)
Create a table.
|
org.apache.hadoop.hbase.client.Table |
createTable(org.apache.hadoop.hbase.TableName tableName,
String[] families)
Create a table.
|
org.apache.hadoop.hbase.HTableDescriptor |
createTableDescriptor(String name)
Deprecated.
since 2.0.0 and will be removed in 3.0.0. Use
createTableDescriptor(TableName, int, int, int, KeepDeletedCells) instead. |
org.apache.hadoop.hbase.HTableDescriptor |
createTableDescriptor(String name,
int minVersions,
int versions,
int ttl,
org.apache.hadoop.hbase.KeepDeletedCells keepDeleted)
Deprecated.
since 2.0.0 and will be removed in 3.0.0. Use
createTableDescriptor(TableName, int, int, int, KeepDeletedCells) instead. |
org.apache.hadoop.hbase.HTableDescriptor |
createTableDescriptor(org.apache.hadoop.hbase.TableName name)
Create a table of name
name . |
org.apache.hadoop.hbase.HTableDescriptor |
createTableDescriptor(org.apache.hadoop.hbase.TableName tableName,
byte[] family) |
org.apache.hadoop.hbase.HTableDescriptor |
createTableDescriptor(org.apache.hadoop.hbase.TableName tableName,
byte[][] families,
int maxVersions) |
org.apache.hadoop.hbase.HTableDescriptor |
createTableDescriptor(org.apache.hadoop.hbase.TableName name,
int minVersions,
int versions,
int ttl,
org.apache.hadoop.hbase.KeepDeletedCells keepDeleted) |
org.apache.hadoop.hbase.regionserver.HRegion |
createTestRegion(String tableName,
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor cd) |
org.apache.hadoop.hbase.regionserver.HRegion |
createTestRegion(String tableName,
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor cd,
org.apache.hadoop.hbase.io.hfile.BlockCache blockCache) |
static org.apache.hadoop.hbase.wal.WAL |
createWal(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.Path rootDir,
org.apache.hadoop.hbase.client.RegionInfo hri)
Create an unmanaged WAL.
|
org.apache.hadoop.fs.Path |
createWALRootDir()
Creates a hbase walDir in the user's home directory.
|
private void |
decrementMinRegionServerCount() |
private void |
decrementMinRegionServerCount(org.apache.hadoop.conf.Configuration conf) |
void |
deleteNumericRows(org.apache.hadoop.hbase.client.Table t,
byte[] f,
int startRow,
int endRow) |
void |
deleteTable(org.apache.hadoop.hbase.TableName tableName)
Drop an existing table
|
org.apache.hadoop.hbase.client.Table |
deleteTableData(org.apache.hadoop.hbase.TableName tableName)
Provide an existing table name to truncate.
|
void |
deleteTableIfAny(org.apache.hadoop.hbase.TableName tableName)
Drop an existing table
|
void |
enableDebug(Class<?> clazz)
Deprecated.
In 2.3.0, will be removed in 4.0.0. HBase only uses log4j, so only changing the log level on
log4j is supported. Do this yourself: since you know which logging framework you are
using, setting the log level to debug is easy.
|
private void |
enableShortCircuit()
Enable the short circuit read, unless configured differently.
|
boolean |
ensureSomeNonStoppedRegionServersAvailable(int num)
Make sure that at least the specified number of region servers are running.
|
boolean |
ensureSomeRegionServersAvailable(int num)
Make sure that at least the specified number of region servers are running
|
void |
expireMasterSession()
Expire the Master's session.
|
void |
expireRegionServerSession(int index)
Expire a region server's session
|
void |
expireSession(org.apache.hadoop.hbase.zookeeper.ZKWatcher nodeZK) |
void |
expireSession(org.apache.hadoop.hbase.zookeeper.ZKWatcher nodeZK,
boolean checkStatus)
Expire a ZooKeeper session as recommended in the ZooKeeper documentation
(http://hbase.apache.org/book.html#trouble.zookeeper). There are known issues when doing this: [1]
http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html [2]
https://issues.apache.org/jira/browse/ZOOKEEPER-1105
|
String |
explainTableAvailability(org.apache.hadoop.hbase.TableName tableName) |
String |
explainTableState(org.apache.hadoop.hbase.TableName table,
org.apache.hadoop.hbase.client.TableState.State state) |
org.apache.hadoop.hbase.client.TableState |
findLastTableState(org.apache.hadoop.hbase.TableName table) |
void |
flush()
Flushes all caches in the mini hbase cluster
|
void |
flush(org.apache.hadoop.hbase.TableName tableName)
Flushes all caches in the mini hbase cluster
|
private void |
forceChangeTaskLogDir()
Tasktracker has a bug where changing the hadoop.log.dir system property will not change its
internal static LOG_DIR variable.
|
static List<org.apache.hadoop.hbase.HColumnDescriptor> |
generateColumnDescriptors()
Create a set of column descriptors with the combination of compression, encoding, bloom codecs
available.
|
static List<org.apache.hadoop.hbase.HColumnDescriptor> |
generateColumnDescriptors(String prefix)
Create a set of column descriptors with the combination of compression, encoding, bloom codecs
available.
|
org.apache.hadoop.hbase.client.Admin |
getAdmin()
Returns an Admin instance which is shared between HBaseTestingUtility instance users.
|
static NavigableSet<String> |
getAllOnlineRegions(MiniHBaseCluster cluster) |
private org.apache.hadoop.fs.Path |
getBaseTestDirOnTestFS() |
org.apache.hadoop.hbase.client.Result |
getClosestRowBefore(org.apache.hadoop.hbase.regionserver.Region r,
byte[] row,
byte[] family) |
String |
getClusterKey() |
org.apache.hadoop.conf.Configuration |
getConfiguration()
Returns this class's instance of
Configuration. |
org.apache.hadoop.hbase.client.Connection |
getConnection()
Get a shared Connection to the cluster.
|
org.apache.hadoop.fs.Path |
getDataTestDirOnTestFS()
Returns a Path in the test filesystem, obtained from
getTestFileSystem() to write
temporary test data. |
org.apache.hadoop.fs.Path |
getDataTestDirOnTestFS(String subdirName)
Returns a Path in the test filesystem, obtained from
getTestFileSystem() to write
temporary test data. |
org.apache.hadoop.fs.Path |
getDefaultRootDirPath()
Same as
getDefaultRootDirPath(boolean create) except that the
create flag is false. |
org.apache.hadoop.fs.Path |
getDefaultRootDirPath(boolean create)
Returns the path to the default root dir the minicluster uses.
|
org.apache.hadoop.hdfs.MiniDFSCluster |
getDFSCluster() |
static org.apache.hadoop.hbase.security.User |
getDifferentUser(org.apache.hadoop.conf.Configuration c,
String differentiatingSuffix)
This method clones the passed configuration
c, setting a new user into the clone. |
static List<org.apache.hadoop.hbase.Cell> |
getFromStoreFile(org.apache.hadoop.hbase.regionserver.HStore store,
byte[] row,
NavigableSet<byte[]> columns)
Do a small get/scan against one store.
|
static List<org.apache.hadoop.hbase.Cell> |
getFromStoreFile(org.apache.hadoop.hbase.regionserver.HStore store,
org.apache.hadoop.hbase.client.Get get)
Do a small get/scan against one store.
|
org.apache.hadoop.hbase.client.HBaseAdmin |
getHBaseAdmin()
Deprecated.
Since 2.0. Will be removed in 3.0. Use
getAdmin() instead. |
MiniHBaseCluster |
getHBaseCluster()
Get the Mini HBase cluster.
|
HBaseCluster |
getHBaseClusterInterface()
Returns the HBaseCluster instance.
|
org.apache.hadoop.hbase.client.Hbck |
getHbck()
Returns an
Hbck instance. |
static int |
getMetaRSPort(org.apache.hadoop.hbase.client.Connection connection) |
org.apache.hadoop.hbase.HTableDescriptor |
getMetaTableDescriptor()
Deprecated.
since 2.0 version and will be removed in 3.0 version. Currently for test only. Use
getMetaTableDescriptorBuilder() |
org.apache.hadoop.hbase.client.TableDescriptorBuilder |
getMetaTableDescriptorBuilder()
Deprecated.
Since 2.3.0. No one should be using this internal. Used in testing only.
|
List<byte[]> |
getMetaTableRows()
Returns all rows from the hbase:meta table.
|
List<byte[]> |
getMetaTableRows(org.apache.hadoop.hbase.TableName tableName)
Returns all rows from the hbase:meta table for a given user table
|
MiniHBaseCluster |
getMiniHBaseCluster() |
private org.apache.hadoop.fs.Path |
getNewDataTestDirOnTestFS()
Sets up a new path in test filesystem to be used by tests.
|
int |
getNumHFiles(org.apache.hadoop.hbase.TableName tableName,
byte[] family) |
int |
getNumHFilesForRS(org.apache.hadoop.hbase.regionserver.HRegionServer rs,
org.apache.hadoop.hbase.TableName tableName,
byte[] family) |
org.apache.hadoop.hbase.regionserver.HRegionServer |
getOtherRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer rs) |
private List<org.apache.hadoop.hbase.client.RegionInfo> |
getRegions(org.apache.hadoop.hbase.TableName tableName)
Returns all regions of the specified table
|
byte[][] |
getRegionSplitStartKeys(byte[] startKey,
byte[] endKey,
int numRegions)
Create region split keys between startKey and endKey; numRegions is the number of
regions to be created.
|
org.apache.hadoop.hbase.regionserver.HRegionServer |
getRSForFirstRegionInTable(org.apache.hadoop.hbase.TableName tableName)
Tool to get the reference to the region server object that holds the region of the specified
user table.
|
org.apache.hadoop.hbase.regionserver.HRegion |
getSplittableRegion(org.apache.hadoop.hbase.TableName tableName,
int maxAttempts)
Retrieves a splittable region randomly from tableName
|
static org.apache.hadoop.hbase.io.compress.Compression.Algorithm[] |
getSupportedCompressionAlgorithms()
Get supported compression algorithms.
|
org.apache.hadoop.fs.FileSystem |
getTestFileSystem() |
void |
invalidateConnection()
Resets the connections so that the next time getConnection() is called, a new connection is
created.
|
boolean |
isNewVersionBehaviorEnabled()
Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating new column families.
|
boolean |
isReadShortCircuitOn()
Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
|
private boolean |
isTargetTable(byte[] inRow,
org.apache.hadoop.hbase.Cell c) |
void |
killMiniHBaseCluster()
Abruptly shut down the HBase mini cluster.
|
void |
loadNumericRows(org.apache.hadoop.hbase.client.Table t,
byte[] f,
int startRow,
int endRow) |
void |
loadRandomRows(org.apache.hadoop.hbase.client.Table t,
byte[] f,
int rowSize,
int totalRows) |
int |
loadRegion(org.apache.hadoop.hbase.regionserver.HRegion r,
byte[] f) |
int |
loadRegion(org.apache.hadoop.hbase.regionserver.HRegion r,
byte[] f,
boolean flush)
Load region with rows from 'aaa' to 'zzz'.
|
int |
loadRegion(org.apache.hadoop.hbase.regionserver.Region r,
byte[] f) |
int |
loadTable(org.apache.hadoop.hbase.client.Table t,
byte[] f)
Load table with rows from 'aaa' to 'zzz' (see the sketch after this table).
|
int |
loadTable(org.apache.hadoop.hbase.client.Table t,
byte[][] f)
Load table of multiple column families with rows from 'aaa' to 'zzz'.
|
int |
loadTable(org.apache.hadoop.hbase.client.Table t,
byte[][] f,
byte[] value)
Load table of multiple column families with rows from 'aaa' to 'zzz'.
|
int |
loadTable(org.apache.hadoop.hbase.client.Table t,
byte[][] f,
byte[] value,
boolean writeToWAL)
Load table of multiple column families with rows from 'aaa' to 'zzz'.
|
int |
loadTable(org.apache.hadoop.hbase.client.Table t,
byte[] f,
boolean writeToWAL)
Load table with rows from 'aaa' to 'zzz'.
|
private static List<Object[]> |
memStoreTSAndTagsCombination()
Create combination of memstoreTS and tags
|
static List<Object[]> |
memStoreTSTagsAndOffheapCombination() |
static void |
modifyTableSync(org.apache.hadoop.hbase.client.Admin admin,
org.apache.hadoop.hbase.client.TableDescriptor desc)
Modify a table, synchronously.
|
void |
moveRegionAndWait(org.apache.hadoop.hbase.client.RegionInfo destRegion,
org.apache.hadoop.hbase.ServerName destServer)
Move region to destination server and wait till region is completely moved and online
|
Waiter.ExplainingPredicate<IOException> |
predicateNoRegionsInTransition()
Returns a
Waiter.Predicate for checking that there are no regions in transition in master |
Waiter.Predicate<IOException> |
predicateTableAvailable(org.apache.hadoop.hbase.TableName tableName)
Returns a
Waiter.Predicate for checking that a table is available |
Waiter.Predicate<IOException> |
predicateTableDisabled(org.apache.hadoop.hbase.TableName tableName)
Returns a
Waiter.Predicate for checking that a table is disabled |
Waiter.Predicate<IOException> |
predicateTableEnabled(org.apache.hadoop.hbase.TableName tableName)
Returns a
Waiter.Predicate for checking that a table is enabled |
static int |
randomFreePort() |
static String |
randomMultiCastAddress() |
void |
restartHBaseCluster(int servers)
Starts the hbase cluster up again after shutting it down previously in a test.
|
void |
restartHBaseCluster(int servers,
List<Integer> ports) |
void |
restartHBaseCluster(StartMiniClusterOption option) |
static <T> String |
safeGetAsStr(List<T> lst,
int i) |
void |
setDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster cluster) |
void |
setDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster cluster,
boolean requireDown)
Set the MiniDFSCluster
|
void |
setFileSystemURI(String fsURI) |
private void |
setFs() |
void |
setHBaseCluster(HBaseCluster hbaseCluster) |
private void |
setHBaseFsTmpDir() |
static void |
setMaxRecoveryErrorCount(OutputStream stream,
int max)
Set maxRecoveryErrorCount in DFSClient.
|
static void |
setReplicas(org.apache.hadoop.hbase.client.Admin admin,
org.apache.hadoop.hbase.TableName table,
int replicaCount)
Set the number of Region replicas.
|
static void |
setReplicas(org.apache.hadoop.hbase.client.AsyncAdmin admin,
org.apache.hadoop.hbase.TableName table,
int replicaCount)
Set the number of Region replicas.
|
protected org.apache.hadoop.fs.Path |
setupDataTestDir()
Home our data in a dir under
HBaseCommonTestingUtility.DEFAULT_BASE_TEST_DIRECTORY . |
private void |
setupDataTestDirOnTestFS()
Sets up a path in test filesystem to be used by tests.
|
org.apache.hadoop.minikdc.MiniKdc |
setupMiniKdc(File keytabFile)
Sets up
MiniKdc for testing security. |
void |
shutdownMiniCluster()
Stops mini hbase, zk, and hdfs clusters.
|
void |
shutdownMiniDFSCluster()
Shuts down instance created by call to
startMiniDFSCluster(int) or does nothing. |
void |
shutdownMiniHBaseCluster()
Shut down the HBase mini cluster. Does not shut down zk or dfs if running.
|
void |
shutdownMiniMapReduceCluster()
Stops the previously started
MiniMRCluster . |
MiniHBaseCluster |
startMiniCluster()
Start up a minicluster of hbase, dfs and zookeeper all using default options.
|
MiniHBaseCluster |
startMiniCluster(boolean createWALDir)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniCluster(int numSlaves)
Start up a minicluster of hbase, dfs and zookeeper clusters with given slave node number.
|
MiniHBaseCluster |
startMiniCluster(int numSlaves,
boolean createRootDir)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniCluster(int numSlaves,
boolean createRootDir,
boolean createWALDir)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniCluster(int numMasters,
int numSlaves)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniCluster(int numMasters,
int numSlaves,
boolean createRootDir)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniCluster(int numMasters,
int numRegionServers,
int numDataNodes)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniCluster(int numMasters,
int numRegionServers,
int numDataNodes,
String[] dataNodeHosts,
Class<? extends org.apache.hadoop.hbase.master.HMaster> masterClass,
Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniCluster(int numMasters,
int numRegionServers,
int numDataNodes,
String[] dataNodeHosts,
Class<? extends org.apache.hadoop.hbase.master.HMaster> masterClass,
Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
boolean createRootDir,
boolean createWALDir)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniCluster(int numMasters,
int numSlaves,
String[] dataNodeHosts)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniCluster(int numMasters,
int numSlaves,
String[] dataNodeHosts,
boolean createRootDir)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniCluster(int numMasters,
int numSlaves,
String[] dataNodeHosts,
Class<? extends org.apache.hadoop.hbase.master.HMaster> masterClass,
Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniCluster(StartMiniClusterOption option)
Start up a mini cluster of hbase, optionally dfs and zookeeper if needed.
|
org.apache.hadoop.hdfs.MiniDFSCluster |
startMiniDFSCluster(int servers)
Start a minidfscluster.
|
org.apache.hadoop.hdfs.MiniDFSCluster |
startMiniDFSCluster(int servers,
String[] hosts)
Start a minidfscluster.
|
org.apache.hadoop.hdfs.MiniDFSCluster |
startMiniDFSCluster(int servers,
String[] racks,
String[] hosts) |
org.apache.hadoop.hdfs.MiniDFSCluster |
startMiniDFSCluster(String[] hosts)
Start a minidfscluster.
|
org.apache.hadoop.hdfs.MiniDFSCluster |
startMiniDFSClusterForTestWAL(int namenodePort) |
MiniHBaseCluster |
startMiniHBaseCluster()
Starts up mini hbase cluster using default options.
|
MiniHBaseCluster |
startMiniHBaseCluster(int numMasters,
int numRegionServers)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniHBaseCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniHBaseCluster(int numMasters,
int numRegionServers,
List<Integer> rsPorts)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniHBaseCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniHBaseCluster(int numMasters,
int numRegionServers,
List<Integer> rsPorts,
Class<? extends org.apache.hadoop.hbase.master.HMaster> masterClass,
Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
boolean createRootDir,
boolean createWALDir)
Deprecated.
since 2.2.0 and will be removed in 4.0.0. Use
startMiniHBaseCluster(StartMiniClusterOption) instead. |
MiniHBaseCluster |
startMiniHBaseCluster(StartMiniClusterOption option)
Starts up mini hbase cluster.
|
org.apache.hadoop.mapred.MiniMRCluster |
startMiniMapReduceCluster()
Starts a
MiniMRCluster with a default number of TaskTrackers. |
private void |
startMiniMapReduceCluster(int servers)
Starts a
MiniMRCluster . |
org.apache.hadoop.hbase.client.Table |
truncateTable(org.apache.hadoop.hbase.TableName tableName)
Truncate a table using the admin command.
|
org.apache.hadoop.hbase.client.Table |
truncateTable(org.apache.hadoop.hbase.TableName tableName,
boolean preserveRegions)
Truncate a table using the admin command.
|
void |
unassignRegion(byte[] regionName)
Unassign the named region.
|
void |
unassignRegion(String regionName)
Unassign the named region.
|
void |
unassignRegionByRow(byte[] row,
org.apache.hadoop.hbase.client.RegionLocator table)
Closes the region containing the given row.
|
void |
unassignRegionByRow(String row,
org.apache.hadoop.hbase.client.RegionLocator table)
Closes the region containing the given row.
|
void |
verifyNumericRows(org.apache.hadoop.hbase.regionserver.HRegion region,
byte[] f,
int startRow,
int endRow) |
void |
verifyNumericRows(org.apache.hadoop.hbase.regionserver.HRegion region,
byte[] f,
int startRow,
int endRow,
boolean present) |
void |
verifyNumericRows(org.apache.hadoop.hbase.regionserver.Region region,
byte[] f,
int startRow,
int endRow) |
void |
verifyNumericRows(org.apache.hadoop.hbase.regionserver.Region region,
byte[] f,
int startRow,
int endRow,
boolean present) |
void |
verifyNumericRows(org.apache.hadoop.hbase.client.Table table,
byte[] f,
int startRow,
int endRow,
int replicaId) |
void |
verifyTableDescriptorIgnoreTableName(org.apache.hadoop.hbase.client.TableDescriptor ltd,
org.apache.hadoop.hbase.client.TableDescriptor rtd) |
static void |
waitForHostPort(String host,
int port) |
void |
waitLabelAvailable(long timeoutMillis,
String... labels)
Wait until the labels are ready in VisibilityLabelsCache.
|
void |
waitTableAvailable(byte[] table,
long timeoutMillis)
Wait until all regions in a table have been assigned
|
void |
waitTableAvailable(org.apache.hadoop.hbase.TableName table)
Wait until all regions in a table have been assigned.
|
void |
waitTableAvailable(org.apache.hadoop.hbase.TableName table,
long timeoutMillis) |
void |
waitTableDisabled(byte[] table)
Waits for a table to be 'disabled'.
|
void |
waitTableDisabled(byte[] table,
long timeoutMillis)
Waits for a table to be 'disabled'.
|
void |
waitTableDisabled(org.apache.hadoop.hbase.TableName table,
long millisTimeout) |
void |
waitTableEnabled(byte[] table,
long timeoutMillis)
Waits for a table to be 'enabled'.
|
void |
waitTableEnabled(org.apache.hadoop.hbase.TableName table)
Waits for a table to be 'enabled'.
|
void |
waitTableEnabled(org.apache.hadoop.hbase.TableName table,
long timeoutMillis) |
void |
waitUntilAllRegionsAssigned(org.apache.hadoop.hbase.TableName tableName)
Wait until all regions for a table in hbase:meta have a non-empty info:server, up to a
configurable timeout value (default is 60 seconds). This means all regions have been deployed,
the master has been informed, and hbase:meta has been updated with each region's deployed server.
|
void |
waitUntilAllRegionsAssigned(org.apache.hadoop.hbase.TableName tableName,
long timeout)
Wait until all regions for a table in hbase:meta have a non-empty info:server, or until
timeout.
|
void |
waitUntilAllSystemRegionsAssigned()
Wait until all system tables' regions get assigned.
|
void |
waitUntilNoRegionsInTransition()
Wait until no regions in transition.
|
void |
waitUntilNoRegionsInTransition(long timeout)
Wait until no regions in transition.
|
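The loadTable summary entry above references the following sketch pairing loadTable(Table, byte[]) with countRows(Table). It assumes a running mini cluster; the table name "load" and family "cf" are illustrative.

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadAndCountExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      byte[] family = Bytes.toBytes("cf");
      Table table = util.createTable(TableName.valueOf("load"), family);
      int loaded = util.loadTable(table, family); // writes rows 'aaa' to 'zzz'
      int counted = util.countRows(table);        // scan-based row count
      System.out.println(loaded + " rows loaded, " + counted + " rows counted");
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```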
Methods inherited from class HBaseZKTestingUtility: cleanupTestDir, getClusterTestDir, getZkCluster, getZooKeeperWatcher, getZooKeeperWatcher, setupClusterTestDir, setZkCluster, shutdownMiniZKCluster, startMiniZKCluster, startMiniZKCluster
Methods inherited from class HBaseCommonTestingUtility: cleanupTestDir, createSubDir, deleteDir, deleteOnExit, getDataTestDir, getDataTestDir, getRandomDir, getRandomUUID, waitFor, waitFor, waitFor
@Deprecated private static final String TEST_DIRECTORY_KEY
public static final String REGIONS_PER_SERVER_KEY
public static final int DEFAULT_REGIONS_PER_SERVER
public static final String PRESPLIT_TEST_TABLE_KEY
public static final boolean PRESPLIT_TEST_TABLE
private org.apache.hadoop.hdfs.MiniDFSCluster dfsCluster
private HBaseTestingUtility.FsDatasetAsyncDiskServiceFixer dfsClusterFixer
private volatile HBaseCluster hbaseCluster
private org.apache.hadoop.mapred.MiniMRCluster mrCluster
private volatile boolean miniClusterRunning
private String hadoopLogDir
private org.apache.hadoop.fs.Path dataTestDirOnTestFS
private final AtomicReference<org.apache.hadoop.hbase.client.Connection> connection
public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED
public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS
public static final byte[] fam1
public static final byte[] fam2
public static final byte[] fam3
public static final byte[][] COLUMNS
private static final int MAXVERSIONS
public static final char FIRST_CHAR
public static final char LAST_CHAR
public static final byte[] START_KEY_BYTES
public static final byte[][] ROWS
All the row values for the data loaded by loadTable(Table, byte[])
public static final byte[][] KEYS
public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE
private org.apache.hadoop.hbase.client.HBaseAdmin hbaseAdmin
public HBaseTestingUtility()
Create an HBaseTestingUtility using a default configuration.
Initially, all tmp files are written to a local test data directory. Once
startMiniDFSCluster(int)
is called, either directly or via startMiniCluster()
, tmp
data will be written to the DFS directory instead.
Previously, there was a distinction between the type of utility returned by
createLocalHTU()
and this constructor; this is no longer the case. All
HBaseTestingUtility objects will behave as local until a DFS cluster is started, at which point
they will switch to using mini DFS for storage.
public HBaseTestingUtility(@Nullable org.apache.hadoop.conf.Configuration conf)
Create an HBaseTestingUtility using a given configuration.
Initially, all tmp files are written to a local test data directory. Once
startMiniDFSCluster(int)
is called, either directly or via startMiniCluster()
, tmp
data will be written to the DFS directory instead.
Previously, there was a distinction between the type of utility returned by
createLocalHTU()
and this constructor; this is no longer the case. All
HBaseTestingUtility objects will behave as local until a DFS cluster is started, at which point
they will switch to using mini DFS for storage.
conf
- The configuration to use for further operations.

public static boolean available(int port)
port
- the port number to check for availability

private static List<Object[]> bloomAndCompressionCombinations()
private static List<Object[]> memStoreTSAndTagsCombination()
public static List<Object[]> memStoreTSTagsAndOffheapCombination()
@Deprecated public static HBaseTestingUtility createLocalHTU()
Deprecated. since 2.0.0 and will be removed in 3.0.0. Use HBaseTestingUtility() instead.
See also: HBaseTestingUtility(), HBASE-19841

@Deprecated public static HBaseTestingUtility createLocalHTU(org.apache.hadoop.conf.Configuration c)
Deprecated. since 2.0.0 and will be removed in 3.0.0. Use HBaseTestingUtility(Configuration) instead.
See also: HBaseTestingUtility(Configuration), HBASE-19841

public static void closeRegionAndWAL(org.apache.hadoop.hbase.regionserver.Region r) throws IOException
Close both the region r and its underlying WAL. For use in tests.
Throws: IOException
public static void closeRegionAndWAL(org.apache.hadoop.hbase.regionserver.HRegion r) throws IOException
Close both the HRegion r and its underlying WAL. For use in tests.
Throws: IOException
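A minimal sketch pairing the static createRegionAndWAL(RegionInfo, Path, Configuration, TableDescriptor) with closeRegionAndWAL(HRegion), per the note that callers must close the region and its WAL when done. The table name and family are illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;

public class RegionAndWalExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    TableName tn = TableName.valueOf("test");
    TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    RegionInfo ri = RegionInfoBuilder.newBuilder(tn).build();
    Path rootDir = util.getDataTestDir(); // local test data dir
    HRegion region = HBaseTestingUtility.createRegionAndWAL(ri, rootDir, conf, td);
    try {
      // ... exercise the region ...
    } finally {
      HBaseTestingUtility.closeRegionAndWAL(region); // closes region and its WAL
    }
  }
}
```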
public org.apache.hadoop.conf.Configuration getConfiguration()
Returns this class's instance of Configuration. Be careful how you use the returned Configuration, since Connection instances can be shared. The Map of Connections is keyed by the Configuration. If, say, a Connection was being used against a cluster that had been shut down (see shutdownMiniCluster()), then the Connection will no longer be wholesome. Rather than using the returned instance directly, it is usually best to make a copy and use that. Do:
Configuration c = new Configuration(INSTANCE.getConfiguration());
Overrides: getConfiguration in class HBaseCommonTestingUtility
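Following that advice, a short sketch of copying the shared Configuration before modifying it; the property tweaked here is only an example.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class ConfigCopySketch {
  public static void main(String[] args) {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Copy the shared Configuration so cached Connections keyed by the
    // original instance are unaffected by local tweaks.
    Configuration copy = new Configuration(util.getConfiguration());
    copy.setInt("hbase.client.retries.number", 3); // example tweak only
  }
}
```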
public void setHBaseCluster(HBaseCluster hbaseCluster)
protected org.apache.hadoop.fs.Path setupDataTestDir()
Home our data in a dir under HBaseCommonTestingUtility.DEFAULT_BASE_TEST_DIRECTORY. Give it a random name so we can
have many concurrent tests running if we need to. It needs to amend the
TEST_DIRECTORY_KEY
System property, as that is what minidfscluster bases its data dir on.
Modifying a System property is not the way to support concurrent instances -- another instance could
grab the temporary value unintentionally -- but nothing can be done about it at the moment; the
minidfscluster only works as a single instance. We also create the underlying directory names
for hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values in the
conf, and as a system property for hadoop.tmp.dir (we do not create the directories themselves!).
Overrides: setupDataTestDir in class HBaseCommonTestingUtility
private void createSubDirAndSystemProperty(String propertyName, org.apache.hadoop.fs.Path parent, String subDirName)
private org.apache.hadoop.fs.Path getBaseTestDirOnTestFS() throws IOException
Throws: IOException
See also: setupDataTestDirOnTestFS(), getTestFileSystem()
@Deprecated public org.apache.hadoop.hbase.HTableDescriptor getMetaTableDescriptor()
getMetaTableDescriptorBuilder()
@Deprecated @InterfaceAudience.Private public org.apache.hadoop.hbase.client.TableDescriptorBuilder getMetaTableDescriptorBuilder()
public org.apache.hadoop.fs.Path getDataTestDirOnTestFS() throws IOException
Returns a Path in the test filesystem, obtained from getTestFileSystem(), to write
temporary test data. Call this method after setting up the mini dfs cluster if the test relies
on it.
Throws: IOException
public org.apache.hadoop.fs.Path getDataTestDirOnTestFS(String subdirName) throws IOException
Returns a Path in the test filesystem, obtained from getTestFileSystem(), to write
temporary test data. Call this method after setting up the mini dfs cluster if the test relies
on it.
subdirName
- name of the subdir to create under the base test dir
Throws: IOException
private void setupDataTestDirOnTestFS() throws IOException
IOException
private org.apache.hadoop.fs.Path getNewDataTestDirOnTestFS() throws IOException
IOException
public boolean cleanupDataTestDirOnTestFS() throws IOException
IOException
public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException
IOException
public org.apache.hadoop.hdfs.MiniDFSCluster startMiniDFSCluster(int servers) throws Exception
servers
- How many DNs to start.
Throws: Exception
See also: shutdownMiniDFSCluster()

public org.apache.hadoop.hdfs.MiniDFSCluster startMiniDFSCluster(String[] hosts) throws Exception
hosts
- hostnames DNs to run on.
Throws: Exception
See also: shutdownMiniDFSCluster()

public org.apache.hadoop.hdfs.MiniDFSCluster startMiniDFSCluster(int servers, String[] hosts) throws Exception
servers
- How many DNs to start.
hosts
- hostnames DNs to run on.
Throws: Exception
See also: shutdownMiniDFSCluster()
private void setFs() throws IOException
IOException
public org.apache.hadoop.hdfs.MiniDFSCluster startMiniDFSCluster(int servers, String[] racks, String[] hosts) throws Exception
Exception
public org.apache.hadoop.hdfs.MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException
IOException
private void createDirsAndSetProperties() throws IOException
Configuration conf = TEST_UTIL.getConfiguration();
for (Iterator<Map.Entry<String, String>> i = conf.iterator(); i.hasNext();) {
  Map.Entry<String, String> e = i.next();
  assertFalse(e.getKey() + " " + e.getValue(), e.getValue().contains("/tmp"));
}
IOException
public boolean isNewVersionBehaviorEnabled()
public boolean isReadShortCircuitOn()
private void enableShortCircuit()
private String createDirAndSetProperty(String property)
private String createDirAndSetProperty(String relPath, String property)
public void shutdownMiniDFSCluster() throws IOException
startMiniDFSCluster(int)
or does nothing.
Throws: IOException
@Deprecated public MiniHBaseCluster startMiniCluster(boolean createWALDir) throws Exception
Deprecated. since 2.2.0 and will be removed in 4.0.0. Use startMiniCluster(StartMiniClusterOption) instead. Option defaults are defined in StartMiniClusterOption.Builder.
createWALDir
- Whether to create a new WAL directory.
Throws: Exception
See also: shutdownMiniCluster(), startMiniCluster(StartMiniClusterOption), HBASE-21071

@Deprecated public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir) throws Exception
Deprecated. since 2.2.0 and will be removed in 4.0.0. Use startMiniCluster(StartMiniClusterOption) instead. Option defaults are defined in StartMiniClusterOption.Builder.
numSlaves
- Slave node number, for both HBase region server and HDFS data node.
createRootDir
- Whether to create a new root or data directory path.
Throws: Exception
See also: shutdownMiniCluster(), startMiniCluster(StartMiniClusterOption), HBASE-21071

@Deprecated public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir, boolean createWALDir) throws Exception
Deprecated. since 2.2.0 and will be removed in 4.0.0. Use startMiniCluster(StartMiniClusterOption) instead. Option defaults are defined in StartMiniClusterOption.Builder.
numSlaves
- Slave node number, for both HBase region server and HDFS data node.
createRootDir
- Whether to create a new root or data directory path.
createWALDir
- Whether to create a new WAL directory.
Throws: Exception
See also: shutdownMiniCluster(), startMiniCluster(StartMiniClusterOption), HBASE-21071

@Deprecated public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, boolean createRootDir) throws Exception
Deprecated. since 2.2.0 and will be removed in 4.0.0. Use startMiniCluster(StartMiniClusterOption) instead. Option defaults are defined in StartMiniClusterOption.Builder.
numMasters
- Master node number.
numSlaves
- Slave node number, for both HBase region server and HDFS data node.
createRootDir
- Whether to create a new root or data directory path.
Throws: Exception
See also: shutdownMiniCluster(), startMiniCluster(StartMiniClusterOption), HBASE-21071

@Deprecated public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves) throws Exception
Deprecated. since 2.2.0 and will be removed in 4.0.0. Use startMiniCluster(StartMiniClusterOption) instead. Option defaults are defined in StartMiniClusterOption.Builder.
numMasters
- Master node number.
numSlaves
- Slave node number, for both HBase region server and HDFS data node.
Throws: Exception
See also: shutdownMiniCluster(), startMiniCluster(StartMiniClusterOption), HBASE-21071

@Deprecated public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts, boolean createRootDir) throws Exception
Deprecated. since 2.2.0 and will be removed in 4.0.0. Use startMiniCluster(StartMiniClusterOption) instead. Option defaults are defined in StartMiniClusterOption.Builder.
numMasters
- Master node number.
numSlaves
- Slave node number, for both HBase region server and HDFS data node.
dataNodeHosts
- The hostnames of DataNodes to run on. If not null, its size will overwrite HDFS data node number.
createRootDir
- Whether to create a new root or data directory path.
Throws: Exception
See also: shutdownMiniCluster(), startMiniCluster(StartMiniClusterOption), HBASE-21071

@Deprecated public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts) throws Exception
Deprecated. since 2.2.0 and will be removed in 4.0.0. Use startMiniCluster(StartMiniClusterOption) instead. Option defaults are defined in StartMiniClusterOption.Builder.
numMasters
- Master node number.
numSlaves
- Slave node number, for both HBase region server and HDFS data node.
dataNodeHosts
- The hostnames of DataNodes to run on. If not null, its size will overwrite HDFS data node number.
Throws: Exception
See also: shutdownMiniCluster(), startMiniCluster(StartMiniClusterOption), HBASE-21071

@Deprecated public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes) throws Exception
Deprecated. since 2.2.0 and will be removed in 4.0.0. Use startMiniCluster(StartMiniClusterOption) instead. Option defaults are defined in StartMiniClusterOption.Builder.
numMasters
- Master node number.
numRegionServers
- Number of region servers.
numDataNodes
- Number of datanodes.
Throws: Exception
See also: shutdownMiniCluster(), startMiniCluster(StartMiniClusterOption), HBASE-21071

@Deprecated public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts, Class<? extends org.apache.hadoop.hbase.master.HMaster> masterClass, Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass) throws Exception
Deprecated. since 2.2.0 and will be removed in 4.0.0. Use startMiniCluster(StartMiniClusterOption) instead. Option defaults are defined in StartMiniClusterOption.Builder.
numMasters
- Master node number.
numSlaves
- Slave node number, for both HBase region server and HDFS data node.
dataNodeHosts
- The hostnames of DataNodes to run on. If not null, its size will overwrite HDFS data node number.
masterClass
- The class to use as HMaster, or null for default.
rsClass
- The class to use as HRegionServer, or null for default.
Throws: Exception
See also: shutdownMiniCluster(), startMiniCluster(StartMiniClusterOption), HBASE-21071

@Deprecated public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes, String[] dataNodeHosts, Class<? extends org.apache.hadoop.hbase.master.HMaster> masterClass, Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass) throws Exception
Deprecated. since 2.2.0 and will be removed in 4.0.0. Use startMiniCluster(StartMiniClusterOption) instead. Option defaults are defined in StartMiniClusterOption.Builder.
numMasters
- Master node number.
numRegionServers
- Number of region servers.
numDataNodes
- Number of datanodes.
dataNodeHosts
- The hostnames of DataNodes to run on. If not null, its size will overwrite HDFS data node number.
masterClass
- The class to use as HMaster, or null for default.
rsClass
- The class to use as HRegionServer, or null for default.
Throws: Exception
See also: shutdownMiniCluster(), startMiniCluster(StartMiniClusterOption), HBASE-21071

@Deprecated public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes, String[] dataNodeHosts, Class<? extends org.apache.hadoop.hbase.master.HMaster> masterClass, Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir, boolean createWALDir) throws Exception
Deprecated. since 2.2.0 and will be removed in 4.0.0. Use startMiniCluster(StartMiniClusterOption) instead. Option defaults are defined in StartMiniClusterOption.Builder.
numMasters
- Master node number.
numRegionServers
- Number of region servers.
numDataNodes
- Number of datanodes.
dataNodeHosts
- The hostnames of DataNodes to run on. If not null, its size will overwrite HDFS data node number.
masterClass
- The class to use as HMaster, or null for default.
rsClass
- The class to use as HRegionServer, or null for default.
createRootDir
- Whether to create a new root or data directory path.
createWALDir
- Whether to create a new WAL directory.
Throws: Exception
See also: shutdownMiniCluster(), startMiniCluster(StartMiniClusterOption), HBASE-21071

public MiniHBaseCluster startMiniCluster(int numSlaves) throws Exception
Start up a minicluster of hbase, dfs and zookeeper clusters with the given slave node number. Other option defaults are defined in StartMiniClusterOption.Builder.
numSlaves
- slave node number, for both HBase region server and HDFS data node.
Throws: Exception
See also: startMiniCluster(StartMiniClusterOption option), shutdownMiniDFSCluster()

public MiniHBaseCluster startMiniCluster() throws Exception
Start up a minicluster of hbase, dfs and zookeeper all using default options. Defaults are defined in StartMiniClusterOption.Builder.
Throws: Exception
See also: startMiniCluster(StartMiniClusterOption option), shutdownMiniDFSCluster()

public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws Exception
Start up a mini cluster of hbase, optionally dfs and zookeeper if needed.
Throws: Exception
See also: shutdownMiniDFSCluster()
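A sketch of the StartMiniClusterOption-based startup recommended above. The builder methods shown (numMasters, numRegionServers, numDataNodes) follow the option names used throughout this page; the counts are illustrative.

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class OptionStartupExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .build();
    util.startMiniCluster(option);
    try {
      // ... tests against util.getMiniHBaseCluster() ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```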
public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option) throws IOException, InterruptedException
Starts up mini hbase cluster; you will usually want startMiniCluster() instead. This is useful when doing stepped startup of clusters.
Throws: IOException, InterruptedException
See also: startMiniCluster(StartMiniClusterOption), shutdownMiniHBaseCluster()
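A sketch of the "stepped startup" mentioned above: bring up ZooKeeper and DFS first, then HBase on top of them. startMiniZKCluster() is inherited from HBaseZKTestingUtility; the datanode count is illustrative.

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class SteppedStartupExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniZKCluster();   // step 1: zookeeper
    util.startMiniDFSCluster(1); // step 2: dfs with one DataNode
    util.startMiniHBaseCluster(StartMiniClusterOption.builder().build()); // step 3: hbase
    try {
      // ... tests ...
    } finally {
      util.shutdownMiniCluster(); // stops hbase, zk and hdfs
    }
  }
}
```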
public MiniHBaseCluster startMiniHBaseCluster() throws IOException, InterruptedException
StartMiniClusterOption.Builder
.@Deprecated public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers) throws IOException, InterruptedException
startMiniHBaseCluster(StartMiniClusterOption)
instead.startMiniCluster()
. All other options will use default values, defined in
StartMiniClusterOption.Builder
.numMasters
- Master node number.numRegionServers
- Number of region servers.IOException
InterruptedException
shutdownMiniHBaseCluster()
,
startMiniHBaseCluster(StartMiniClusterOption)
,
@Deprecated public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers, List<Integer> rsPorts) throws IOException, InterruptedException
Deprecated. Use startMiniHBaseCluster(StartMiniClusterOption) instead.
Starts up a mini hbase cluster; usually you'll want startMiniCluster(). All other options will use default values, defined in StartMiniClusterOption.Builder.
Parameters:
numMasters - number of master nodes.
numRegionServers - number of region servers.
rsPorts - ports that the RegionServers should use.
Throws:
IOException
InterruptedException
See Also:
shutdownMiniHBaseCluster(), startMiniHBaseCluster(StartMiniClusterOption), HBASE-21071
@Deprecated public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers, List<Integer> rsPorts, Class<? extends org.apache.hadoop.hbase.master.HMaster> masterClass, Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException
Deprecated. Use startMiniHBaseCluster(StartMiniClusterOption) instead.
Starts up a mini hbase cluster; usually you'll want startMiniCluster(). All other options will use default values, defined in StartMiniClusterOption.Builder.
Parameters:
numMasters - number of master nodes.
numRegionServers - number of region servers.
rsPorts - ports that the RegionServers should use.
masterClass - the class to use as HMaster, or null for default.
rsClass - the class to use as HRegionServer, or null for default.
createRootDir - whether to create a new root or data directory path.
createWALDir - whether to create a new WAL directory.
Throws:
IOException
InterruptedException
See Also:
shutdownMiniHBaseCluster(), startMiniHBaseCluster(StartMiniClusterOption), HBASE-21071
public void restartHBaseCluster(int servers) throws IOException, InterruptedException
Parameters:
servers - number of region servers
Throws:
IOException
InterruptedException
public void restartHBaseCluster(int servers, List<Integer> ports) throws IOException, InterruptedException
IOException
InterruptedException
public void restartHBaseCluster(StartMiniClusterOption option) throws IOException, InterruptedException
IOException
InterruptedException
public MiniHBaseCluster getMiniHBaseCluster()
Returns the mini HBase cluster created by startMiniCluster().
See Also:
startMiniCluster()
public void shutdownMiniCluster() throws IOException
IOException
startMiniCluster(int)
public void shutdownMiniHBaseCluster() throws IOException
Throws:
IOException - in case the command is unsuccessful
public void killMiniHBaseCluster() throws IOException
Throws:
IOException - in case the command is unsuccessful
private void cleanup() throws IOException
Throws:
IOException
public org.apache.hadoop.fs.Path getDefaultRootDirPath(boolean create) throws IOException
If create is true, a new root directory path is fetched irrespective of whether it has been fetched before or not. If false, the previous path is used. Note: this does not cause the root dir to be created.
Throws:
IOException
public org.apache.hadoop.fs.Path getDefaultRootDirPath() throws IOException
Same as getDefaultRootDirPath(boolean create) except that the create flag is false. Note: this does not cause the root dir to be created.
Throws:
IOException
public org.apache.hadoop.fs.Path createRootDir(boolean create) throws IOException
Parameters:
create - this flag decides whether to get a new root or data directory path or not, if it has been fetched already. Note: the directory will be made irrespective of whether the path has been fetched or not. If the directory already exists, it will be overwritten.
Throws:
IOException
public org.apache.hadoop.fs.Path createRootDir() throws IOException
Same as createRootDir(boolean create) except that the create flag is false.
Throws:
IOException
public org.apache.hadoop.fs.Path createWALRootDir() throws IOException
IOException
private void setHBaseFsTmpDir() throws IOException
IOException
public void flush() throws IOException
IOException
public void flush(org.apache.hadoop.hbase.TableName tableName) throws IOException
IOException
public void compact(boolean major) throws IOException
IOException
public void compact(org.apache.hadoop.hbase.TableName tableName, boolean major) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, String family) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, String[] families) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, byte[] family) throws IOException
IOException
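A minimal usage sketch for the single-family overload, assuming the TEST_UTIL field from the earlier sketch and a running mini cluster (table and family names are illustrative; TableName, Table, Put and Bytes come from the usual org.apache.hadoop.hbase packages):

    TableName tn = TableName.valueOf("testCreateTable");
    Table table = TEST_UTIL.createTable(tn, Bytes.toBytes("cf"));
    table.put(new Put(Bytes.toBytes("row1"))
        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1")));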
public org.apache.hadoop.hbase.client.Table createMultiRegionTable(org.apache.hadoop.hbase.TableName tableName, byte[] family, int numRegions) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, byte[][] families) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createMultiRegionTable(org.apache.hadoop.hbase.TableName tableName, byte[][] families) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createMultiRegionTable(org.apache.hadoop.hbase.TableName tableName, int replicaCount, byte[][] families) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, byte[][] families, byte[][] splitKeys) throws IOException
IOException
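A sketch of the split-keys overload above; with N split keys the table comes up with N+1 regions (the keys here are illustrative):

    byte[][] splitKeys = { Bytes.toBytes("d"), Bytes.toBytes("m"), Bytes.toBytes("t") };
    Table presplit = TEST_UTIL.createTable(TableName.valueOf("presplit"),
        new byte[][] { Bytes.toBytes("cf") }, splitKeys); // 3 split keys -> 4 regions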
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, byte[][] families, byte[][] splitKeys, int replicaCount) throws IOException
Parameters:
tableName - the table name
families - the families
splitKeys - the split keys
replicaCount - the region replica count
Throws:
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, byte[][] families, int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.client.TableDescriptor htd, byte[][] families, org.apache.hadoop.conf.Configuration c) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.client.TableDescriptor htd, byte[][] families, byte[][] splitKeys, org.apache.hadoop.conf.Configuration c) throws IOException
Parameters:
htd - table descriptor
families - array of column families
splitKeys - array of split keys
c - Configuration to use
Throws:
IOException - if getAdmin or createTable fails
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.client.TableDescriptor htd, byte[][] families, byte[][] splitKeys, org.apache.hadoop.hbase.regionserver.BloomType type, int blockSize, org.apache.hadoop.conf.Configuration c) throws IOException
Parameters:
htd - table descriptor
families - array of column families
splitKeys - array of split keys
type - Bloom type
blockSize - block size
c - Configuration to use
Throws:
IOException - if getAdmin or createTable fails
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.client.TableDescriptor htd, byte[][] splitRows) throws IOException
Parameters:
htd - table descriptor
splitRows - array of split keys
Throws:
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, byte[][] families, byte[][] splitKeys, int replicaCount, org.apache.hadoop.conf.Configuration c) throws IOException
Parameters:
tableName - the table name
families - the families
splitKeys - the split keys
replicaCount - the replica count
c - Configuration to use
Throws:
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, byte[] family, int numVersions) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, byte[][] families, int numVersions) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, byte[][] families, int numVersions, byte[][] splitKeys) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createMultiRegionTable(org.apache.hadoop.hbase.TableName tableName, byte[][] families, int numVersions) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, byte[][] families, int numVersions, int blockSize) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, byte[][] families, int numVersions, int blockSize, String cpName) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, byte[][] families, int[] numVersions) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createTable(org.apache.hadoop.hbase.TableName tableName, byte[] family, byte[][] splitRows) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table createMultiRegionTable(org.apache.hadoop.hbase.TableName tableName, byte[] family) throws IOException
IOException
public static void modifyTableSync(org.apache.hadoop.hbase.client.Admin admin, org.apache.hadoop.hbase.client.TableDescriptor desc) throws IOException, InterruptedException
Modify a table, synchronously. Waiting logic mirrors that of admin.rb#alter_status.
Throws:
IOException
InterruptedException
public static void setReplicas(org.apache.hadoop.hbase.client.Admin admin, org.apache.hadoop.hbase.TableName table, int replicaCount) throws IOException, InterruptedException
IOException
InterruptedException
public static void setReplicas(org.apache.hadoop.hbase.client.AsyncAdmin admin, org.apache.hadoop.hbase.TableName table, int replicaCount) throws ExecutionException, IOException, InterruptedException
public void deleteTable(org.apache.hadoop.hbase.TableName tableName) throws IOException
Parameters:
tableName - existing table
Throws:
IOException
public void deleteTableIfAny(org.apache.hadoop.hbase.TableName tableName) throws IOException
Parameters:
tableName - existing table
Throws:
IOException
public org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor createModifyableTableDescriptor(String name)
public org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor createModifyableTableDescriptor(org.apache.hadoop.hbase.TableName name, int minVersions, int versions, int ttl, org.apache.hadoop.hbase.KeepDeletedCells keepDeleted)
@Deprecated public org.apache.hadoop.hbase.HTableDescriptor createTableDescriptor(String name, int minVersions, int versions, int ttl, org.apache.hadoop.hbase.KeepDeletedCells keepDeleted)
Deprecated. Use createTableDescriptor(TableName, int, int, int, KeepDeletedCells) instead.
@Deprecated public org.apache.hadoop.hbase.HTableDescriptor createTableDescriptor(String name)
Deprecated. Use createTableDescriptor(TableName, int, int, int, KeepDeletedCells) instead.
Create a table of name name.
Parameters:
name - Name to give table.
See Also:
createTableDescriptor(TableName, int, int, int, KeepDeletedCells), HBASE-13893
public org.apache.hadoop.hbase.HTableDescriptor createTableDescriptor(org.apache.hadoop.hbase.TableName name, int minVersions, int versions, int ttl, org.apache.hadoop.hbase.KeepDeletedCells keepDeleted)
public org.apache.hadoop.hbase.HTableDescriptor createTableDescriptor(org.apache.hadoop.hbase.TableName name)
Create a table of name name.
Parameters:
name - Name to give table.
public org.apache.hadoop.hbase.HTableDescriptor createTableDescriptor(org.apache.hadoop.hbase.TableName tableName, byte[] family)
public org.apache.hadoop.hbase.HTableDescriptor createTableDescriptor(org.apache.hadoop.hbase.TableName tableName, byte[][] families, int maxVersions)
public org.apache.hadoop.hbase.regionserver.HRegion createLocalHRegion(org.apache.hadoop.hbase.client.TableDescriptor desc, byte[] startKey, byte[] endKey) throws IOException
Parameters:
desc - a table descriptor indicating which table the region belongs to
startKey - the start boundary of the region
endKey - the end boundary of the region
Throws:
IOException
public org.apache.hadoop.hbase.regionserver.HRegion createLocalHRegion(org.apache.hadoop.hbase.client.RegionInfo info, org.apache.hadoop.hbase.client.TableDescriptor desc) throws IOException
Call closeRegionAndWAL(HRegion) when you're finished with it.
Throws:
IOException
public org.apache.hadoop.hbase.regionserver.HRegion createLocalHRegion(org.apache.hadoop.hbase.client.RegionInfo info, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.TableDescriptor desc, org.apache.hadoop.hbase.wal.WAL wal) throws IOException
Parameters:
info - regioninfo
conf - configuration
desc - table descriptor
wal - wal for this region.
Throws:
IOException
public org.apache.hadoop.hbase.regionserver.HRegion createLocalHRegion(org.apache.hadoop.hbase.HRegionInfo info, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.HTableDescriptor desc, org.apache.hadoop.hbase.wal.WAL wal) throws IOException
Parameters:
info - regioninfo
conf - configuration
desc - table descriptor
wal - wal for this region.
Throws:
IOException
@Deprecated public org.apache.hadoop.hbase.regionserver.HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey, String callingMethod, org.apache.hadoop.conf.Configuration conf, boolean isReadOnly, org.apache.hadoop.hbase.client.Durability durability, org.apache.hadoop.hbase.wal.WAL wal, byte[]... families) throws IOException
Deprecated. Use createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...) instead.
Call closeRegionAndWAL(HRegion) when done.
Parameters:
tableName - the name of the table
startKey - the start key of the region
stopKey - the stop key of the region
callingMethod - the name of the calling method, probably a test method
conf - the configuration to use
isReadOnly - true if the table is read only, false otherwise
families - the column families to use
Throws:
IOException - if an IO problem is encountered
See Also:
createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...), HBASE-13893
public org.apache.hadoop.hbase.regionserver.HRegion createLocalHRegion(org.apache.hadoop.hbase.TableName tableName, byte[] startKey, byte[] stopKey, org.apache.hadoop.conf.Configuration conf, boolean isReadOnly, org.apache.hadoop.hbase.client.Durability durability, org.apache.hadoop.hbase.wal.WAL wal, byte[]... families) throws IOException
Call closeRegionAndWAL(HRegion) when done.
Throws:
IOException
public org.apache.hadoop.hbase.regionserver.HRegion createLocalHRegionWithInMemoryFlags(org.apache.hadoop.hbase.TableName tableName, byte[] startKey, byte[] stopKey, org.apache.hadoop.conf.Configuration conf, boolean isReadOnly, org.apache.hadoop.hbase.client.Durability durability, org.apache.hadoop.hbase.wal.WAL wal, boolean[] compactedMemStore, byte[]... families) throws IOException
IOException
public org.apache.hadoop.hbase.client.Table deleteTableData(org.apache.hadoop.hbase.TableName tableName) throws IOException
Parameters:
tableName - existing table
Throws:
IOException
public org.apache.hadoop.hbase.client.Table truncateTable(org.apache.hadoop.hbase.TableName tableName, boolean preserveRegions) throws IOException
Parameters:
tableName - table which must exist.
preserveRegions - keep the existing split points
Throws:
IOException
public org.apache.hadoop.hbase.client.Table truncateTable(org.apache.hadoop.hbase.TableName tableName) throws IOException
Parameters:
tableName - table which must exist.
Throws:
IOException
public int loadTable(org.apache.hadoop.hbase.client.Table t, byte[] f) throws IOException
Parameters:
t - Table
f - Family
Throws:
IOException
public int loadTable(org.apache.hadoop.hbase.client.Table t, byte[] f, boolean writeToWAL) throws IOException
Parameters:
t - Table
f - Family
Throws:
IOException
public int loadTable(org.apache.hadoop.hbase.client.Table t, byte[][] f) throws IOException
Parameters:
t - Table
f - Array of Families to load
Throws:
IOException
public int loadTable(org.apache.hadoop.hbase.client.Table t, byte[][] f, byte[] value) throws IOException
Parameters:
t - Table
f - Array of Families to load
value - the values of the cells. If null is passed, the row key is used as value
Throws:
IOException
public int loadTable(org.apache.hadoop.hbase.client.Table t, byte[][] f, byte[] value, boolean writeToWAL) throws IOException
Parameters:
t - Table
f - Array of Families to load
value - the values of the cells. If null is passed, the row key is used as value
Throws:
IOException
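A sketch tying loadTable to the row-counting helpers further below, assuming the table created in the earlier createTable sketch and JUnit's assertEquals:

    int written = TEST_UTIL.loadTable(table, Bytes.toBytes("cf"));
    TEST_UTIL.flush(tn);                 // flush memstores to hfiles
    int read = TEST_UTIL.countRows(table);
    assertEquals(written, read);         // every loaded row should be readable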
public int loadRegion(org.apache.hadoop.hbase.regionserver.HRegion r, byte[] f) throws IOException
IOException
public int loadRegion(org.apache.hadoop.hbase.regionserver.Region r, byte[] f) throws IOException
IOException
public int loadRegion(org.apache.hadoop.hbase.regionserver.HRegion r, byte[] f, boolean flush) throws IOException
Parameters:
r - Region
f - Family
flush - flush the cache if true
Throws:
IOException
public void loadNumericRows(org.apache.hadoop.hbase.client.Table t, byte[] f, int startRow, int endRow) throws IOException
IOException
public void loadRandomRows(org.apache.hadoop.hbase.client.Table t, byte[] f, int rowSize, int totalRows) throws IOException
IOException
public void verifyNumericRows(org.apache.hadoop.hbase.client.Table table, byte[] f, int startRow, int endRow, int replicaId) throws IOException
IOException
public void verifyNumericRows(org.apache.hadoop.hbase.regionserver.Region region, byte[] f, int startRow, int endRow) throws IOException
IOException
public void verifyNumericRows(org.apache.hadoop.hbase.regionserver.HRegion region, byte[] f, int startRow, int endRow) throws IOException
IOException
public void verifyNumericRows(org.apache.hadoop.hbase.regionserver.Region region, byte[] f, int startRow, int endRow, boolean present) throws IOException
IOException
public void verifyNumericRows(org.apache.hadoop.hbase.regionserver.HRegion region, byte[] f, int startRow, int endRow, boolean present) throws IOException
IOException
public void deleteNumericRows(org.apache.hadoop.hbase.client.Table t, byte[] f, int startRow, int endRow) throws IOException
IOException
public int countRows(org.apache.hadoop.hbase.client.Table table) throws IOException
Parameters:
table - to count rows
Throws:
IOException
public int countRows(org.apache.hadoop.hbase.client.Table table, org.apache.hadoop.hbase.client.Scan scan) throws IOException
IOException
public int countRows(org.apache.hadoop.hbase.client.Table table, byte[]... families) throws IOException
IOException
public int countRows(org.apache.hadoop.hbase.TableName tableName) throws IOException
IOException
public int countRows(org.apache.hadoop.hbase.regionserver.Region region) throws IOException
IOException
public int countRows(org.apache.hadoop.hbase.regionserver.Region region, org.apache.hadoop.hbase.client.Scan scan) throws IOException
IOException
public int countRows(org.apache.hadoop.hbase.regionserver.InternalScanner scanner) throws IOException
IOException
public String checksumRows(org.apache.hadoop.hbase.client.Table table) throws Exception
Exception
@Deprecated public List<org.apache.hadoop.hbase.HRegionInfo> createMultiRegionsInMeta(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.HTableDescriptor htd, byte[][] startKeys) throws IOException
Deprecated. Use createMultiRegionsInMeta(Configuration, TableDescriptor, byte[][]) instead.
Throws:
IOException
public List<org.apache.hadoop.hbase.client.RegionInfo> createMultiRegionsInMeta(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.TableDescriptor htd, byte[][] startKeys) throws IOException
IOException
public static org.apache.hadoop.hbase.wal.WAL createWal(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.Path rootDir, org.apache.hadoop.hbase.client.RegionInfo hri) throws IOException
IOException
public static org.apache.hadoop.hbase.regionserver.HRegion createRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfo info, org.apache.hadoop.fs.Path rootDir, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.TableDescriptor htd) throws IOException
Be sure to call closeRegionAndWAL(HRegion) to clean up all resources.
Throws:
IOException
public static org.apache.hadoop.hbase.regionserver.HRegion createRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfo info, org.apache.hadoop.fs.Path rootDir, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.TableDescriptor htd, org.apache.hadoop.hbase.io.hfile.BlockCache blockCache) throws IOException
Be sure to call closeRegionAndWAL(HRegion) to clean up all resources.
Throws:
IOException
public static org.apache.hadoop.hbase.regionserver.HRegion createRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfo info, org.apache.hadoop.fs.Path rootDir, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.TableDescriptor htd, org.apache.hadoop.hbase.mob.MobFileCache mobFileCache) throws IOException
Be sure to call closeRegionAndWAL(HRegion) to clean up all resources.
Throws:
IOException
public static org.apache.hadoop.hbase.regionserver.HRegion createRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfo info, org.apache.hadoop.fs.Path rootDir, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.TableDescriptor htd, boolean initialize) throws IOException
Be sure to call closeRegionAndWAL(HRegion) to clean up all resources.
Throws:
IOException
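A sketch of the create/close pairing for a standalone region; the descriptor and RegionInfo builders are from the HBase client API, and the names are illustrative:

    TableName tn2 = TableName.valueOf("regionTest");
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(tn2)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
    RegionInfo hri = RegionInfoBuilder.newBuilder(tn2).build();
    HRegion region = HBaseTestingUtility.createRegionAndWAL(
        hri, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
    try {
      region.put(new Put(Bytes.toBytes("r")).addColumn(
          Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    } finally {
      HBaseTestingUtility.closeRegionAndWAL(region); // releases the region and its WAL
    }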
public List<byte[]> getMetaTableRows() throws IOException
Throws:
IOException - when reading the rows fails.
public List<byte[]> getMetaTableRows(org.apache.hadoop.hbase.TableName tableName) throws IOException
Throws:
IOException - when reading the rows fails.
private List<org.apache.hadoop.hbase.client.RegionInfo> getRegions(org.apache.hadoop.hbase.TableName tableName) throws IOException
Parameters:
tableName - the table name
Throws:
IOException - when getting the regions fails.
public org.apache.hadoop.hbase.regionserver.HRegionServer getOtherRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer rs)
public org.apache.hadoop.hbase.regionserver.HRegionServer getRSForFirstRegionInTable(org.apache.hadoop.hbase.TableName tableName) throws IOException, InterruptedException
Parameters:
tableName - user table to lookup in hbase:meta
Throws:
IOException
InterruptedException
public org.apache.hadoop.mapred.MiniMRCluster startMiniMapReduceCluster() throws IOException
Starts a MiniMRCluster with a default number of TaskTracker's.
Throws:
IOException - when starting the cluster fails.
private void forceChangeTaskLogDir()
private void startMiniMapReduceCluster(int servers) throws IOException
Starts a MiniMRCluster. Call setFileSystemURI(String) to use a different filesystem.
Parameters:
servers - the number of TaskTracker's to start.
Throws:
IOException - when starting the cluster fails.
public void shutdownMiniMapReduceCluster()
Stops the previously started MiniMRCluster.
public org.apache.hadoop.hbase.regionserver.RegionServerServices createMockRegionServerService() throws IOException
IOException
public org.apache.hadoop.hbase.regionserver.RegionServerServices createMockRegionServerService(org.apache.hadoop.hbase.ipc.RpcServerInterface rpc) throws IOException
IOException
public org.apache.hadoop.hbase.regionserver.RegionServerServices createMockRegionServerService(org.apache.hadoop.hbase.ServerName name) throws IOException
IOException
@Deprecated public void enableDebug(Class<?> clazz)
Parameters:
clazz - The class for which to switch to debug logging.
public void expireMasterSession() throws Exception
Exception
public void expireRegionServerSession(int index) throws Exception
Parameters:
index - which RS
Throws:
Exception
private void decrementMinRegionServerCount()
private void decrementMinRegionServerCount(org.apache.hadoop.conf.Configuration conf)
public void expireSession(org.apache.hadoop.hbase.zookeeper.ZKWatcher nodeZK) throws Exception
Exception
public void expireSession(org.apache.hadoop.hbase.zookeeper.ZKWatcher nodeZK, boolean checkStatus) throws Exception
Parameters:
nodeZK - the ZK watcher to expire
checkStatus - true to check if we can create a Table with the current configuration.
Throws:
Exception
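A sketch of exercising session-expiry recovery with these helpers (index 0 targets the first region server of the mini cluster; the final wait is one reasonable way to let recovery settle):

    TEST_UTIL.expireRegionServerSession(0); // expire the first RS's ZooKeeper session
    TEST_UTIL.expireMasterSession();        // likewise for the active master
    TEST_UTIL.waitUntilNoRegionsInTransition(60000); // let reassignment finish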
public MiniHBaseCluster getHBaseCluster()
See Also:
getHBaseClusterInterface()
public HBaseCluster getHBaseClusterInterface()
The returned object can be any of the subclasses of HBaseCluster, and tests referring to this should not assume that the cluster is a mini cluster or a distributed one. If a test only works on a mini cluster, the specific method getMiniHBaseCluster() can be used instead, without the need to type-cast.
public void invalidateConnection() throws IOException
IOException
public org.apache.hadoop.hbase.client.Connection getConnection() throws IOException
IOException
@Deprecated public org.apache.hadoop.hbase.client.HBaseAdmin getHBaseAdmin() throws IOException
Deprecated. Use getAdmin() instead; prefer the Admin interface. Functions in HBaseAdmin not provided by the Admin interface can be changed or deleted anytime.
Throws:
IOException
public void closeConnection() throws IOException
IOException
public org.apache.hadoop.hbase.client.Admin getAdmin() throws IOException
IOException
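A usage sketch for the shared connection and admin (both are owned by the utility in this sketch, so only the Table is closed; tn is the table name from the earlier sketches):

    try (Table t = TEST_UTIL.getConnection().getTable(tn)) {
      Result r = t.get(new Get(Bytes.toBytes("row1")));
      // ... assertions on r ...
    }
    Admin admin = TEST_UTIL.getAdmin();
    admin.disableTable(tn);
    admin.enableTable(tn);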
public org.apache.hadoop.hbase.client.Hbck getHbck() throws IOException
Returns an Hbck instance. Needs to be closed when done.
Throws:
IOException
public void unassignRegion(String regionName) throws IOException
Parameters:
regionName - The region to unassign.
Throws:
IOException
public void unassignRegion(byte[] regionName) throws IOException
Parameters:
regionName - The region to unassign.
Throws:
IOException
public void unassignRegionByRow(String row, org.apache.hadoop.hbase.client.RegionLocator table) throws IOException
Parameters:
row - The row to find the containing region.
table - The table to find the region.
Throws:
IOException
public void unassignRegionByRow(byte[] row, org.apache.hadoop.hbase.client.RegionLocator table) throws IOException
Parameters:
row - The row to find the containing region.
table - The table to find the region.
Throws:
IOException
public org.apache.hadoop.hbase.regionserver.HRegion getSplittableRegion(org.apache.hadoop.hbase.TableName tableName, int maxAttempts)
Parameters:
tableName - name of table
maxAttempts - maximum number of attempts, unlimited for value of -1
public org.apache.hadoop.hdfs.MiniDFSCluster getDFSCluster()
public void setDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster cluster) throws IllegalStateException, IOException
IllegalStateException
IOException
public void setDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster cluster, boolean requireDown) throws IllegalStateException, IOException
Parameters:
cluster - cluster to use
requireDown - require that the cluster not be "up" (MiniDFSCluster#isClusterUp) before it is set.
Throws:
IllegalStateException - if the passed cluster is up when it is required to be down
IOException - if the FileSystem could not be set from the passed dfs cluster
public org.apache.hadoop.fs.FileSystem getTestFileSystem() throws IOException
IOException
public void waitTableAvailable(org.apache.hadoop.hbase.TableName table) throws InterruptedException, IOException
Parameters:
table - Table to wait on.
Throws:
InterruptedException
IOException
public void waitTableAvailable(org.apache.hadoop.hbase.TableName table, long timeoutMillis) throws InterruptedException, IOException
InterruptedException
IOException
public void waitTableAvailable(byte[] table, long timeoutMillis) throws InterruptedException, IOException
Parameters:
table - Table to wait on.
timeoutMillis - Timeout.
Throws:
InterruptedException
IOException
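A sketch pairing table creation with explicit waits (the 30-second timeout is arbitrary; the descriptor builders are as in the earlier region sketch):

    TableName wt = TableName.valueOf("waitDemo");
    TEST_UTIL.getAdmin().createTable(TableDescriptorBuilder.newBuilder(wt)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build());
    TEST_UTIL.waitTableAvailable(wt, 30000);   // block until all regions are online
    TEST_UTIL.waitUntilAllRegionsAssigned(wt); // or wait on assignment explicitly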
public String explainTableAvailability(org.apache.hadoop.hbase.TableName tableName) throws IOException
IOException
public String explainTableState(org.apache.hadoop.hbase.TableName table, org.apache.hadoop.hbase.client.TableState.State state) throws IOException
IOException
@Nullable public org.apache.hadoop.hbase.client.TableState findLastTableState(org.apache.hadoop.hbase.TableName table) throws IOException
IOException
public void waitTableEnabled(org.apache.hadoop.hbase.TableName table) throws InterruptedException, IOException
Parameters:
table - the table to wait on.
Throws:
InterruptedException - if interrupted while waiting
IOException - if an IO problem is encountered
public void waitTableEnabled(byte[] table, long timeoutMillis) throws InterruptedException, IOException
Parameters:
table - Table to wait on.
timeoutMillis - Time to wait on it being marked enabled.
Throws:
InterruptedException
IOException
See Also:
waitTableEnabled(TableName, long)
public void waitTableEnabled(org.apache.hadoop.hbase.TableName table, long timeoutMillis) throws IOException
IOException
public void waitTableDisabled(byte[] table) throws InterruptedException, IOException
Parameters:
table - Table to wait on.
Throws:
InterruptedException
IOException
public void waitTableDisabled(org.apache.hadoop.hbase.TableName table, long millisTimeout) throws InterruptedException, IOException
InterruptedException
IOException
public void waitTableDisabled(byte[] table, long timeoutMillis) throws InterruptedException, IOException
Parameters:
table - Table to wait on.
timeoutMillis - Time to wait on it being marked disabled.
Throws:
InterruptedException
IOException
public boolean ensureSomeRegionServersAvailable(int num) throws IOException
Parameters:
num - minimum number of region servers that should be running
Throws:
IOException
public boolean ensureSomeNonStoppedRegionServersAvailable(int num) throws IOException
Parameters:
num - minimum number of region servers that should be running
Throws:
IOException
public static org.apache.hadoop.hbase.security.User getDifferentUser(org.apache.hadoop.conf.Configuration c, String differentiatingSuffix) throws IOException
This method clones the passed configuration c, setting a new user into the clone. Use it when getting new instances of FileSystem. Only works for DistributedFileSystem w/o Kerberos.
Parameters:
c - Initial configuration
differentiatingSuffix - Suffix to differentiate this user from others.
Throws:
IOException
public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster) throws IOException
IOException
public static void setMaxRecoveryErrorCount(OutputStream stream, int max)
2010-06-15 11:52:28,511 WARN [DataStreamer for file /hbase/.logs/wal.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed because recovery from primary datanode 127.0.0.1:53683 failed 4 times. Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
Parameters:
stream - A DFSClient.DFSOutputStream.
public boolean assignRegion(org.apache.hadoop.hbase.client.RegionInfo regionInfo) throws IOException, InterruptedException
IOException
InterruptedException
public void moveRegionAndWait(org.apache.hadoop.hbase.client.RegionInfo destRegion, org.apache.hadoop.hbase.ServerName destServer) throws InterruptedException, IOException
Parameters:
destRegion - region to move
destServer - destination server of the region
Throws:
InterruptedException
IOException
public void waitUntilAllRegionsAssigned(org.apache.hadoop.hbase.TableName tableName) throws IOException
Parameters:
tableName - the table name
Throws:
IOException
public void waitUntilAllSystemRegionsAssigned() throws IOException
IOException
public void waitUntilAllRegionsAssigned(org.apache.hadoop.hbase.TableName tableName, long timeout) throws IOException
Parameters:
tableName - the table name
timeout - timeout, in milliseconds
Throws:
IOException
public static List<org.apache.hadoop.hbase.Cell> getFromStoreFile(org.apache.hadoop.hbase.regionserver.HStore store, org.apache.hadoop.hbase.client.Get get) throws IOException
IOException
public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions)
public static List<org.apache.hadoop.hbase.Cell> getFromStoreFile(org.apache.hadoop.hbase.regionserver.HStore store, byte[] row, NavigableSet<byte[]> columns) throws IOException
IOException
public static void assertKVListsEqual(String additionalMsg, List<? extends org.apache.hadoop.hbase.Cell> expected, List<? extends org.apache.hadoop.hbase.Cell> actual)
public static <T> String safeGetAsStr(List<T> lst, int i)
public String getClusterKey()
public org.apache.hadoop.hbase.client.Table createRandomTable(org.apache.hadoop.hbase.TableName tableName, Collection<String> families, int maxVersions, int numColsPerRow, int numFlushes, int numRegions, int numRowsPerFlush) throws IOException, InterruptedException
IOException
InterruptedException
public static int randomFreePort()
public static String randomMultiCastAddress()
public static void waitForHostPort(String host, int port) throws IOException
IOException
public static int createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.TableName tableName, byte[] columnFamily, org.apache.hadoop.hbase.io.compress.Compression.Algorithm compression, org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dataBlockEncoding) throws IOException
IOException
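A sketch of this first overload, with compression and encoding disabled (Compression.Algorithm and DataBlockEncoding come from the packages named in the signature); the return value is the number of regions the table was pre-split into:

    int totalRegions = HBaseTestingUtility.createPreSplitLoadTestTable(
        TEST_UTIL.getConfiguration(),
        TableName.valueOf("loadtest"),
        Bytes.toBytes("cf"),
        Compression.Algorithm.NONE,
        DataBlockEncoding.NONE);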
public static int createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.TableName tableName, byte[] columnFamily, org.apache.hadoop.hbase.io.compress.Compression.Algorithm compression, org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication, org.apache.hadoop.hbase.client.Durability durability) throws IOException
IOException
public static int createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.TableName tableName, byte[][] columnFamilies, org.apache.hadoop.hbase.io.compress.Compression.Algorithm compression, org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication, org.apache.hadoop.hbase.client.Durability durability) throws IOException
IOException
public static int createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.TableDescriptor desc, org.apache.hadoop.hbase.client.ColumnFamilyDescriptor hcd) throws IOException
IOException
public static int createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.TableDescriptor desc, org.apache.hadoop.hbase.client.ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException
IOException
public static int createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.TableDescriptor desc, org.apache.hadoop.hbase.client.ColumnFamilyDescriptor[] hcds, int numRegionsPerServer) throws IOException
IOException
public static int createPreSplitLoadTestTable(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.TableDescriptor td, org.apache.hadoop.hbase.client.ColumnFamilyDescriptor[] cds, org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm splitter, int numRegionsPerServer) throws IOException
IOException
public static int getMetaRSPort(org.apache.hadoop.hbase.client.Connection connection) throws IOException
IOException
public void assertRegionOnServer(org.apache.hadoop.hbase.client.RegionInfo hri, org.apache.hadoop.hbase.ServerName server, long timeout) throws IOException, InterruptedException
IOException
InterruptedException
public void assertRegionOnlyOnServer(org.apache.hadoop.hbase.client.RegionInfo hri, org.apache.hadoop.hbase.ServerName server, long timeout) throws IOException, InterruptedException
IOException
InterruptedException
public org.apache.hadoop.hbase.regionserver.HRegion createTestRegion(String tableName, org.apache.hadoop.hbase.client.ColumnFamilyDescriptor cd) throws IOException
IOException
public org.apache.hadoop.hbase.regionserver.HRegion createTestRegion(String tableName, org.apache.hadoop.hbase.client.ColumnFamilyDescriptor cd, org.apache.hadoop.hbase.io.hfile.BlockCache blockCache) throws IOException
IOException
public void setFileSystemURI(String fsURI)
public Waiter.ExplainingPredicate<IOException> predicateNoRegionsInTransition()
Returns a Waiter.Predicate for checking that there are no regions in transition in master.
public Waiter.Predicate<IOException> predicateTableEnabled(org.apache.hadoop.hbase.TableName tableName)
Returns a Waiter.Predicate for checking that the table is enabled.
public Waiter.Predicate<IOException> predicateTableDisabled(org.apache.hadoop.hbase.TableName tableName)
Returns a Waiter.Predicate for checking that the table is disabled.
public Waiter.Predicate<IOException> predicateTableAvailable(org.apache.hadoop.hbase.TableName tableName)
Returns a Waiter.Predicate for checking that the table is available.
public void waitUntilNoRegionsInTransition(long timeout) throws IOException
Parameters:
timeout - How long to wait.
Throws:
IOException
public void waitUntilNoRegionsInTransition() throws IOException
IOException
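These predicates plug into the waitFor helpers inherited from the base testing utility; a sketch, assuming waitFor(long, Waiter.Predicate) is available as in HBaseCommonTestingUtility:

    TEST_UTIL.getAdmin().disableTable(tn);
    TEST_UTIL.waitFor(30000, TEST_UTIL.predicateTableDisabled(tn));
    TEST_UTIL.getAdmin().enableTable(tn);
    TEST_UTIL.waitFor(30000, TEST_UTIL.predicateTableEnabled(tn));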
public void waitLabelAvailable(long timeoutMillis, String... labels)
public static List<org.apache.hadoop.hbase.HColumnDescriptor> generateColumnDescriptors()
public static List<org.apache.hadoop.hbase.HColumnDescriptor> generateColumnDescriptors(String prefix)
Parameters:
prefix - family names prefix
public static org.apache.hadoop.hbase.io.compress.Compression.Algorithm[] getSupportedCompressionAlgorithms()
public org.apache.hadoop.hbase.client.Result getClosestRowBefore(org.apache.hadoop.hbase.regionserver.Region r, byte[] row, byte[] family) throws IOException
IOException
private boolean isTargetTable(byte[] inRow, org.apache.hadoop.hbase.Cell c)
public org.apache.hadoop.minikdc.MiniKdc setupMiniKdc(File keytabFile) throws Exception
Sets up a MiniKdc for testing security. Uses HBaseKerberosUtils to set the given keytab file as HBaseKerberosUtils.KRB_KEYTAB_FILE. FYI, there is also the easier-to-use kerby KDC server and utility for using it, SimpleKdcServerUtil. The kerby KDC server is preferred; less baggage. It came in with HBASE-5291.
Throws:
Exception
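A sketch of wiring the KDC into a security test; the principal names and the createPrincipal call follow the Hadoop MiniKdc API, and the details should be treated as illustrative:

    File keytab = new File(
        TEST_UTIL.getDataTestDir("keytab").toUri().getPath());
    MiniKdc kdc = TEST_UTIL.setupMiniKdc(keytab);
    try {
      kdc.createPrincipal(keytab, "hbase/localhost", "HTTP/localhost");
      // ... configure UserGroupInformation and run the secured test ...
    } finally {
      kdc.stop();
    }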
public int getNumHFiles(org.apache.hadoop.hbase.TableName tableName, byte[] family)
public int getNumHFilesForRS(org.apache.hadoop.hbase.regionserver.HRegionServer rs, org.apache.hadoop.hbase.TableName tableName, byte[] family)
public void verifyTableDescriptorIgnoreTableName(org.apache.hadoop.hbase.client.TableDescriptor ltd, org.apache.hadoop.hbase.client.TableDescriptor rtd)
public static void await(long sleepMillis, BooleanSupplier condition) throws InterruptedException
Awaits the successful return of condition, sleeping sleepMillis between invocations.
Throws:
InterruptedException
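A sketch of the polling helper (the flag and the producer thread are illustrative):

    AtomicBoolean done = new AtomicBoolean(false);
    // ... some other thread eventually flips the flag ...
    HBaseTestingUtility.await(100, done::get); // poll every 100 ms until true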
Copyright © 2007–2020 The Apache Software Foundation. All rights reserved.