@InterfaceAudience.Public public class ExportSnapshot extends AbstractHBaseTool implements org.apache.hadoop.util.Tool
Modifier and Type | Class and Description |
---|---|
static class |
ExportSnapshot.Counter |
private static class |
ExportSnapshot.ExportMapper |
private static class |
ExportSnapshot.ExportSnapshotInputFormat |
(package private) static class |
ExportSnapshot.Options |
(package private) static class |
ExportSnapshot.Testing |
Modifier and Type | Field and Description |
---|---|
private int |
bandwidthMB |
private static String |
CONF_BANDWIDTH_MB |
private static String |
CONF_BUFFER_SIZE |
private static String |
CONF_CHECKSUM_VERIFY |
static String |
CONF_DEST_PREFIX
Configuration prefix for overrides for the destination filesystem
|
private static String |
CONF_FILES_GROUP |
private static String |
CONF_FILES_MODE |
private static String |
CONF_FILES_USER |
private static String |
CONF_INPUT_ROOT |
private static String |
CONF_MAP_GROUP |
private static String |
CONF_MR_JOB_NAME |
private static String |
CONF_NUM_SPLITS |
private static String |
CONF_OUTPUT_ROOT |
protected static String |
CONF_SKIP_TMP |
private static String |
CONF_SNAPSHOT_DIR |
private static String |
CONF_SNAPSHOT_NAME |
static String |
CONF_SOURCE_PREFIX
Configuration prefix for overrides for the source filesystem
|
private String |
filesGroup |
private int |
filesMode |
private String |
filesUser |
private org.apache.hadoop.fs.Path |
inputRoot |
private static org.slf4j.Logger |
LOG |
private int |
mappers |
private static String |
MR_NUM_MAPS |
static String |
NAME |
private org.apache.hadoop.fs.Path |
outputRoot |
private boolean |
overwrite |
private String |
snapshotName |
private String |
targetName |
private boolean |
verifyChecksum |
private boolean |
verifyTarget |
cmdLineArgs, conf, EXIT_FAILURE, EXIT_SUCCESS, LONG_HELP_OPTION, options, SHORT_HELP_OPTION
Constructor and Description |
---|
ExportSnapshot() |
Modifier and Type | Method and Description |
---|---|
protected void |
addOptions()
Override this to add command-line options using
AbstractHBaseTool.addOptWithArg(java.lang.String, java.lang.String)
and similar methods. |
int |
doWork()
Execute the export snapshot by copying the snapshot metadata, hfiles and wals.
|
(package private) static List<List<Pair<org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo,Long>>> |
getBalancedSplits(List<Pair<org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo,Long>> files,
int ngroups)
Given a list of file paths and sizes, create around ngroups splits in as balanced a way as possible.
|
private static List<Pair<org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo,Long>> |
getSnapshotFiles(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path snapshotDir)
Extract the list of files (HFiles/WALs) to copy using Map-Reduce.
|
static void |
main(String[] args) |
protected void |
printUsage() |
protected void |
processOptions(org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine cmd)
This method is called to process the options after they have been parsed.
|
private void |
runCopyJob(org.apache.hadoop.fs.Path inputRoot,
org.apache.hadoop.fs.Path outputRoot,
String snapshotName,
org.apache.hadoop.fs.Path snapshotDir,
boolean verifyChecksum,
String filesUser,
String filesGroup,
int filesMode,
int mappers,
int bandwidthMB)
Run Map-Reduce Job to perform the files copy.
|
private void |
setOwner(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
String user,
String group,
boolean recursive)
Set path ownership.
|
private void |
setPermission(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
short filesMode,
boolean recursive)
Set path permission.
|
private void |
verifySnapshot(org.apache.hadoop.conf.Configuration baseConf,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootDir,
org.apache.hadoop.fs.Path snapshotDir) |
addOption, addOptNoArg, addOptNoArg, addOptWithArg, addOptWithArg, addRequiredOption, addRequiredOptWithArg, addRequiredOptWithArg, doStaticMain, getConf, getOptionAsDouble, getOptionAsInt, getOptionAsLong, newParser, parseArgs, parseInt, parseLong, printUsage, processOldArgs, run, setConf
public static final String NAME
public static final String CONF_SOURCE_PREFIX
public static final String CONF_DEST_PREFIX
private static final org.slf4j.Logger LOG
private static final String MR_NUM_MAPS
private static final String CONF_NUM_SPLITS
private static final String CONF_SNAPSHOT_NAME
private static final String CONF_SNAPSHOT_DIR
private static final String CONF_FILES_USER
private static final String CONF_FILES_GROUP
private static final String CONF_FILES_MODE
private static final String CONF_CHECKSUM_VERIFY
private static final String CONF_OUTPUT_ROOT
private static final String CONF_INPUT_ROOT
private static final String CONF_BUFFER_SIZE
private static final String CONF_MAP_GROUP
private static final String CONF_BANDWIDTH_MB
private static final String CONF_MR_JOB_NAME
protected static final String CONF_SKIP_TMP
private boolean verifyTarget
private boolean verifyChecksum
private String snapshotName
private String targetName
private boolean overwrite
private String filesGroup
private org.apache.hadoop.fs.Path outputRoot
private org.apache.hadoop.fs.Path inputRoot
private int bandwidthMB
private int filesMode
private int mappers
public ExportSnapshot()
private static List<Pair<org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo,Long>> getSnapshotFiles(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path snapshotDir) throws IOException
IOException
static List<List<Pair<org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo,Long>>> getBalancedSplits(List<Pair<org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo,Long>> files, int ngroups)
The algorithm used is pretty straightforward; the file list is sorted by size, and then each group fetches the biggest file available, iterating through the groups while alternating the direction.
private void runCopyJob(org.apache.hadoop.fs.Path inputRoot, org.apache.hadoop.fs.Path outputRoot, String snapshotName, org.apache.hadoop.fs.Path snapshotDir, boolean verifyChecksum, String filesUser, String filesGroup, int filesMode, int mappers, int bandwidthMB) throws IOException, InterruptedException, ClassNotFoundException
private void verifySnapshot(org.apache.hadoop.conf.Configuration baseConf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootDir, org.apache.hadoop.fs.Path snapshotDir) throws IOException
IOException
private void setOwner(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path, String user, String group, boolean recursive) throws IOException
IOException
private void setPermission(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path, short filesMode, boolean recursive) throws IOException
IOException
protected void processOptions(org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine cmd)
AbstractHBaseTool
processOptions
in class AbstractHBaseTool
public int doWork() throws IOException
doWork
in class AbstractHBaseTool
IOException
protected void printUsage()
printUsage
in class AbstractHBaseTool
protected void addOptions()
AbstractHBaseTool
AbstractHBaseTool.addOptWithArg(java.lang.String, java.lang.String)
and similar methods.
addOptions
in class AbstractHBaseTool
Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.