public class TestZooKeeperTableArchiveClient extends Object
Tests hfile archiving as controlled through ZKTableArchiveClient.
Modifier and Type | Field and Description
---|---
private static org.apache.hadoop.hbase.backup.example.ZKTableArchiveClient | archivingClient
static HBaseClassTestRule | CLASS_RULE
private static org.apache.hadoop.hbase.client.ClusterConnection | CONNECTION
private static org.slf4j.Logger | LOG
private static org.apache.hadoop.hbase.master.cleaner.DirScanPool | POOL
private static org.apache.hadoop.hbase.regionserver.RegionServerServices | rss
private static String | STRING_TABLE_NAME
private static byte[] | TABLE_NAME
private static byte[] | TEST_FAM
private List<org.apache.hadoop.fs.Path> | toCleanup
private static HBaseTestingUtility | UTIL
Constructor and Description
---
TestZooKeeperTableArchiveClient()
Modifier and Type | Method and Description
---|---
static void | cleanupTest()
private void | createArchiveDirectory()
private void | createHFileInRegion(org.apache.hadoop.hbase.regionserver.HRegion region, byte[] columnFamily). Create a new hfile in the passed region.
private List<org.apache.hadoop.fs.Path> | getAllFiles(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path dir). Get all the files (non-directory entries) in the file system under the passed directory.
private org.apache.hadoop.fs.Path | getArchiveDir()
private org.apache.hadoop.fs.Path | getTableDir(String tableName)
private void | loadFlushAndCompact(org.apache.hadoop.hbase.regionserver.HRegion region, byte[] family)
private void | runCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner cleaner, CountDownLatch finished, org.apache.hadoop.hbase.Stoppable stop)
private org.apache.hadoop.hbase.master.cleaner.HFileCleaner | setupAndCreateCleaner(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path archiveDir, org.apache.hadoop.hbase.Stoppable stop)
private CountDownLatch | setupCleanerWatching(org.apache.hadoop.hbase.backup.example.LongTermArchivingHFileCleaner cleaner, List<org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate> cleaners, int expected). Spy on the LongTermArchivingHFileCleaner to ensure we can catch when the cleaner has seen all the files.
static void | setupCluster(). Set up the config for the cluster.
private static void | setupConf(org.apache.hadoop.conf.Configuration conf)
void | tearDown()
void | testArchivingEnableDisable(). Test turning archiving on and off.
void | testArchivingOnSingleTable()
void | testMultipleTables(). Test archiving/cleaning across multiple tables, where some are retained and others are not.
private List<org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate> | turnOnArchiving(String tableName, org.apache.hadoop.hbase.master.cleaner.HFileCleaner cleaner). Start archiving the table for the given hfile cleaner.
public static final HBaseClassTestRule CLASS_RULE
private static final org.slf4j.Logger LOG
private static final HBaseTestingUtility UTIL
private static final String STRING_TABLE_NAME
private static final byte[] TEST_FAM
private static final byte[] TABLE_NAME
private static org.apache.hadoop.hbase.backup.example.ZKTableArchiveClient archivingClient
private static org.apache.hadoop.hbase.client.ClusterConnection CONNECTION
private static org.apache.hadoop.hbase.regionserver.RegionServerServices rss
private static org.apache.hadoop.hbase.master.cleaner.DirScanPool POOL
public TestZooKeeperTableArchiveClient()
public static void setupCluster() throws Exception
Throws:
Exception
private static void setupConf(org.apache.hadoop.conf.Configuration conf)
public static void cleanupTest() throws Exception
Throws:
Exception
public void testArchivingEnableDisable() throws Exception
Throws:
Exception
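
For orientation only, here is a minimal sketch of the enable/disable round trip this test exercises, assuming the ZKTableArchiveClient API from the hbase-examples module (enableHFileBackupAsync, getArchivingEnabled, disableHFileBackup). The helper name and flow are illustrative, not the test body.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.example.ZKTableArchiveClient;
import org.apache.hadoop.hbase.client.ClusterConnection;

public class ArchivingToggleSketch {
  // Hypothetical helper: turn archiving on for a table, check the flag, then turn it off again.
  static void toggleArchiving(Configuration conf, ClusterConnection conn, byte[] tableName)
      throws Exception {
    ZKTableArchiveClient client = new ZKTableArchiveClient(conf, conn);
    client.enableHFileBackupAsync(tableName);              // request long-term archiving via ZK
    boolean enabled = client.getArchivingEnabled(tableName); // should now report true
    client.disableHFileBackup(tableName);                   // stop archiving this table
    boolean disabled = !client.getArchivingEnabled(tableName); // should now report true
  }
}
```
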
public void testArchivingOnSingleTable() throws Exception
Throws:
Exception
public void testMultipleTables() throws Exception
Throws:
Exception - on failure

private void createArchiveDirectory() throws IOException
Throws:
IOException
private org.apache.hadoop.fs.Path getArchiveDir() throws IOException
Throws:
IOException
private org.apache.hadoop.fs.Path getTableDir(String tableName) throws IOException
Throws:
IOException
private org.apache.hadoop.hbase.master.cleaner.HFileCleaner setupAndCreateCleaner(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path archiveDir, org.apache.hadoop.hbase.Stoppable stop)
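
A rough sketch of the kind of configuration such a cleaner setup relies on: registering LongTermArchivingHFileCleaner under the hbase.master.hfilecleaner.plugins key so the hfile cleaner chain retains archived files. The helper is hypothetical; the exact settings applied by setupConf/setupAndCreateCleaner may differ.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.example.LongTermArchivingHFileCleaner;

public class CleanerConfSketch {
  // Hypothetical helper: point the master's hfile cleaner chain at the
  // archiving-aware delegate so archived files are kept long term.
  static void configureArchivingCleaner(Configuration conf) {
    conf.setStrings("hbase.master.hfilecleaner.plugins",
        LongTermArchivingHFileCleaner.class.getCanonicalName());
  }
}
```
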
private List<org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate> turnOnArchiving(String tableName, org.apache.hadoop.hbase.master.cleaner.HFileCleaner cleaner) throws IOException, org.apache.zookeeper.KeeperException
Parameters:
tableName - table to archive
cleaner - cleaner to check to make sure the change propagated
Returns:
LongTermArchivingHFileCleaner that is managing archiving
Throws:
IOException - on failure
org.apache.zookeeper.KeeperException - on failure
private CountDownLatch setupCleanerWatching(org.apache.hadoop.hbase.backup.example.LongTermArchivingHFileCleaner cleaner, List<org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate> cleaners, int expected)
Spy on the LongTermArchivingHFileCleaner to ensure we can catch when the cleaner has seen all the files
Returns:
CountDownLatch to wait on that releases when the cleaner has been called at least the expected number of times.
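
The watching described above can be pictured roughly as follows. This sketch assumes a Mockito spy around a BaseHFileCleanerDelegate and counts calls to the public getDeletableFiles method, which may differ from the method the test actually intercepts.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
import org.mockito.Mockito;

public class CleanerSpySketch {
  // Hypothetical helper: wrap a cleaner delegate in a Mockito spy so a latch
  // counts down every time the cleaner chore asks it for deletable files.
  static CountDownLatch watchDelegate(BaseHFileCleanerDelegate delegate, int expected) {
    CountDownLatch finished = new CountDownLatch(expected);
    BaseHFileCleanerDelegate spy = Mockito.spy(delegate);
    Mockito.doAnswer(invocation -> {
      Object result = invocation.callRealMethod(); // keep the real deletability decision
      finished.countDown();                        // record one observed cleaning call
      return result;
    }).when(spy).getDeletableFiles(Mockito.any());
    // The spy would then need to replace the original delegate in the cleaner's
    // delegate chain before a cleaning run is triggered.
    return finished;
  }
}
```
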
private List<org.apache.hadoop.fs.Path> getAllFiles(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path dir) throws IOException
Parameters:
dir - directory to investigate
Throws:
IOException
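
A plain recursive listing of the kind getAllFiles performs can be built on FileSystem#listStatus alone; the helper below is an illustrative sketch rather than the test's exact code.

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListFilesSketch {
  // Hypothetical helper: collect every non-directory entry under the given directory.
  static List<Path> listAllFiles(FileSystem fs, Path dir) throws IOException {
    List<Path> files = new ArrayList<>();
    for (FileStatus status : fs.listStatus(dir)) {
      if (status.isDirectory()) {
        files.addAll(listAllFiles(fs, status.getPath())); // recurse into subdirectory
      } else {
        files.add(status.getPath());                      // keep plain files only
      }
    }
    return files;
  }
}
```
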
private void loadFlushAndCompact(org.apache.hadoop.hbase.regionserver.HRegion region, byte[] family) throws IOException
Throws:
IOException
private void createHFileInRegion(org.apache.hadoop.hbase.regionserver.HRegion region, byte[] columnFamily) throws IOException
Parameters:
region - region to operate on
columnFamily - family for which to add data
Throws:
IOException - if doing the put or flush fails
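
Creating an hfile in a region amounts to a put followed by a flush. The sketch below assumes HRegion#put(Put) and HRegion#flush(true) and uses arbitrary row/qualifier values purely for illustration.

```java
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileCreationSketch {
  // Hypothetical helper: write one cell and force a flush so a new hfile appears on disk.
  static void writeAndFlush(HRegion region, byte[] family) throws Exception {
    Put put = new Put(Bytes.toBytes("row"));
    put.addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("value"));
    region.put(put);    // write to the memstore
    region.flush(true); // flush the memstore, producing a new hfile
  }
}
```
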
private void runCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner cleaner, CountDownLatch finished, org.apache.hadoop.hbase.Stoppable stop) throws InterruptedException
Parameters:
cleaner - the cleaner to use
Throws:
InterruptedException
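
One way to drive a cleaning pass is to schedule the chore on a ChoreService and wait on the latch produced by setupCleanerWatching. The sketch below is an assumption-laden illustration, not necessarily the exact body of runCleaner.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;

public class RunCleanerSketch {
  // Hypothetical helper: schedule the cleaner chore, wait until the spied
  // delegate has seen the expected number of calls, then stop everything.
  static void runUntilSeen(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop)
      throws InterruptedException {
    ChoreService choreService = new ChoreService("cleaner-sketch");
    choreService.scheduleChore(cleaner); // start periodic cleaning runs
    finished.await();                    // released once enough delegate calls were observed
    stop.stop("cleaning observed");      // signal the chore's stopper
    choreService.shutdown();
  }
}
```
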