public class TestLoadIncrementalHFilesSplitRecovery extends Object
| Modifier and Type | Field and Description |
|---|---|
static HBaseClassTestRule |
CLASS_RULE |
private static byte[][] |
families |
private static org.slf4j.Logger |
LOG |
org.junit.rules.TestName |
name |
(package private) static int |
NUM_CFS |
(package private) static byte[] |
QUAL |
(package private) static int |
ROWCOUNT |
(package private) static boolean |
useSecure |
(package private) static HBaseTestingUtility |
util |
| Constructor and Description |
|---|
TestLoadIncrementalHFilesSplitRecovery() |
| Modifier and Type | Method and Description |
|---|---|
(package private) void |
assertExpectedTable(org.apache.hadoop.hbase.client.Connection connection,
org.apache.hadoop.hbase.TableName table,
int count,
int value)
Checks that all columns have the expected value and that there is the expected number of rows.
|
(package private) void |
assertExpectedTable(org.apache.hadoop.hbase.TableName table,
int count,
int value)
Checks that all columns have the expected value and that there is the expected number of rows.
|
private org.apache.hadoop.fs.Path |
buildBulkFiles(org.apache.hadoop.hbase.TableName table,
int value) |
static void |
buildHFiles(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path dir,
int value) |
private org.apache.hadoop.hbase.client.TableDescriptor |
createTableDesc(org.apache.hadoop.hbase.TableName name,
int cfs) |
(package private) static String |
family(int i) |
private void |
forceSplit(org.apache.hadoop.hbase.TableName table)
Split the known table in half.
|
private org.apache.hadoop.hbase.client.ClusterConnection |
getMockedConnection(org.apache.hadoop.conf.Configuration conf) |
private void |
populateTable(org.apache.hadoop.hbase.client.Connection connection,
org.apache.hadoop.hbase.TableName table,
int value)
Populate table with known values.
|
(package private) static byte[] |
rowkey(int i) |
static void |
setupCluster() |
private void |
setupTable(org.apache.hadoop.hbase.client.Connection connection,
org.apache.hadoop.hbase.TableName table,
int cfs)
Creates a table with given table name and specified number of column families if the table does
not already exist.
|
private void |
setupTableWithSplitkeys(org.apache.hadoop.hbase.TableName table,
int cfs,
byte[][] SPLIT_KEYS)
Creates a table with given table name, specified number of column families
and split keys if the table does not already exist. |
static void |
teardownCluster() |
void |
testBulkLoadPhaseFailure()
Test that shows that an exception thrown from the RS side will result in an exception on the
LIHFile client.
|
void |
testGroupOrSplitFailure()
This simulates a remote exception which should cause LIHF to exit with an exception.
|
void |
testGroupOrSplitPresplit()
This test splits a table and attempts to bulk load.
|
void |
testGroupOrSplitWhenRegionHoleExistsInMeta() |
void |
testRetryOnIOException()
Test that shows that an exception thrown from the RS side will result in the expected number of
retries set by HConstants.HBASE_CLIENT_RETRIES_NUMBER when
LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION is set. |
void |
testSplitTmpFileCleanUp()
This test creates a table with many small regions.
|
void |
testSplitWhileBulkLoadPhase()
This test exercises the path where there is a split after initial validation but before the
atomic bulk load call.
|
(package private) static byte[] |
value(int i) |
public static final HBaseClassTestRule CLASS_RULE
private static final org.slf4j.Logger LOG
static HBaseTestingUtility util
static boolean useSecure
static final int NUM_CFS
static final byte[] QUAL
static final int ROWCOUNT
private static final byte[][] families
public org.junit.rules.TestName name
public TestLoadIncrementalHFilesSplitRecovery()
static byte[] rowkey(int i)
static byte[] value(int i)
public static void buildHFiles(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path dir, int value) throws IOException
Throws: IOException
private org.apache.hadoop.hbase.client.TableDescriptor createTableDesc(org.apache.hadoop.hbase.TableName name, int cfs)
private void setupTable(org.apache.hadoop.hbase.client.Connection connection, org.apache.hadoop.hbase.TableName table, int cfs) throws IOException
Throws: IOException
private void setupTableWithSplitkeys(org.apache.hadoop.hbase.TableName table, int cfs, byte[][] SPLIT_KEYS) throws IOException
Parameters: table - cfs - SPLIT_KEYS -
Throws: IOException
private org.apache.hadoop.fs.Path buildBulkFiles(org.apache.hadoop.hbase.TableName table, int value) throws Exception
Throws: Exception
private void populateTable(org.apache.hadoop.hbase.client.Connection connection, org.apache.hadoop.hbase.TableName table, int value) throws Exception
Throws: Exception
private void forceSplit(org.apache.hadoop.hbase.TableName table)
public static void setupCluster() throws Exception
Throws: Exception
public static void teardownCluster() throws Exception
Throws: Exception
void assertExpectedTable(org.apache.hadoop.hbase.TableName table, int count, int value) throws IOException
Throws: IOException
public void testBulkLoadPhaseFailure() throws Exception
Throws: Exception
public void testRetryOnIOException() throws Exception
Test that shows that an exception thrown from the RS side will result in the expected number of retries set by HConstants.HBASE_CLIENT_RETRIES_NUMBER when LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION is set.
Throws: Exception
private org.apache.hadoop.hbase.client.ClusterConnection getMockedConnection(org.apache.hadoop.conf.Configuration conf) throws IOException, org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
Throws: IOException, org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
public void testSplitWhileBulkLoadPhase() throws Exception
Throws: Exception
public void testGroupOrSplitPresplit() throws Exception
Throws: Exception
public void testSplitTmpFileCleanUp() throws Exception
Throws: Exception
public void testGroupOrSplitFailure() throws Exception
Throws: Exception
public void testGroupOrSplitWhenRegionHoleExistsInMeta() throws Exception
Throws: Exception
void assertExpectedTable(org.apache.hadoop.hbase.client.Connection connection, org.apache.hadoop.hbase.TableName table, int count, int value) throws IOException
Throws: IOException
Copyright © 2007–2020 The Apache Software Foundation. All rights reserved.