@InterfaceAudience.Private public class WALSplitter extends Object
Modifier and Type | Class and Description
---|---
(package private) static class | WALSplitter.CorruptedLogFileException
static class | WALSplitter.PipelineController: Contains some methods to control WAL-entries producer / consumer interactions.
static class | WALSplitter.RegionEntryBuffer: A buffer of some number of edits for a given region.
static class | WALSplitter.SinkWriter: Wraps the actual writer which writes data out, and tracks related statistics.
(package private) static class | WALSplitter.WriterAndPath: Private data structure that wraps a Writer and its Path, also collecting statistics about the data written to this output.
Modifier and Type | Field and Description
---|---
protected org.apache.hadoop.conf.Configuration | conf
private EntryBuffers | entryBuffers
private org.apache.hadoop.fs.FileStatus | fileBeingSplit
protected Map<String,Long> | lastFlushedSequenceIds
private static org.slf4j.Logger | LOG
(package private) OutputSink | outputSink
protected Map<String,Map<byte[],Long>> | regionMaxSeqIdInStores
protected LastSequenceId | sequenceIdChecker
static boolean | SPLIT_SKIP_ERRORS_DEFAULT: By default we retry errors in splitting, rather than skipping.
static String | SPLIT_WRITER_CREATION_BOUNDED
private SplitLogWorkerCoordination | splitLogWorkerCoordination
private boolean | splitWriterCreationBounded
private MonitoredTask | status
protected org.apache.hadoop.fs.Path | walDir
private WALFactory | walFactory
protected org.apache.hadoop.fs.FileSystem | walFS
Constructor and Description
---
WALSplitter(WALFactory factory, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.Path walDir, org.apache.hadoop.fs.FileSystem walFS, LastSequenceId idChecker, SplitLogWorkerCoordination splitLogWorkerCoordination)
Modifier and Type | Method and Description
---|---
protected WALProvider.Writer | createWriter(org.apache.hadoop.fs.Path logfile): Create a new WALProvider.Writer for writing log splits.
(package private) org.apache.hadoop.fs.FileStatus | getFileBeingSplit()
private static WAL.Entry | getNextLogLine(WAL.Reader in, org.apache.hadoop.fs.Path path, boolean skipErrors)
private int | getNumOpenWriters(): Get the number of currently open writers.
protected WAL.Reader | getReader(org.apache.hadoop.fs.FileStatus file, boolean skipErrors, CancelableProgressable reporter): Create a new WAL.Reader for reading logs to split.
protected WAL.Reader | getReader(org.apache.hadoop.fs.Path curLogFile, CancelableProgressable reporter): Create a new WAL.Reader for reading logs to split.
(package private) Map<String,Map<byte[],Long>> | getRegionMaxSeqIdInStores()
(package private) WALFactory | getWalFactory()
static List<org.apache.hadoop.fs.Path> | split(org.apache.hadoop.fs.Path rootDir, org.apache.hadoop.fs.Path logDir, org.apache.hadoop.fs.Path oldLogDir, org.apache.hadoop.fs.FileSystem walFS, org.apache.hadoop.conf.Configuration conf, WALFactory factory)
(package private) boolean | splitLogFile(org.apache.hadoop.fs.FileStatus logfile, CancelableProgressable reporter): Log splitting implementation; splits one log file.
static boolean | splitLogFile(org.apache.hadoop.fs.Path walDir, org.apache.hadoop.fs.FileStatus logfile, org.apache.hadoop.fs.FileSystem walFS, org.apache.hadoop.conf.Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker, SplitLogWorkerCoordination splitLogWorkerCoordination, WALFactory factory): Splits a WAL file into the region's recovered-edits directory.
private static final org.slf4j.Logger LOG
public static final boolean SPLIT_SKIP_ERRORS_DEFAULT
protected final org.apache.hadoop.fs.Path walDir
protected final org.apache.hadoop.fs.FileSystem walFS
protected final org.apache.hadoop.conf.Configuration conf
OutputSink outputSink
private EntryBuffers entryBuffers
private SplitLogWorkerCoordination splitLogWorkerCoordination
private final WALFactory walFactory
private MonitoredTask status
protected final LastSequenceId sequenceIdChecker
protected Map<String,Long> lastFlushedSequenceIds
protected Map<String,Map<byte[],Long>> regionMaxSeqIdInStores
private org.apache.hadoop.fs.FileStatus fileBeingSplit
private final boolean splitWriterCreationBounded
public static final String SPLIT_WRITER_CREATION_BOUNDED
WALSplitter(WALFactory factory, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.Path walDir, org.apache.hadoop.fs.FileSystem walFS, LastSequenceId idChecker, SplitLogWorkerCoordination splitLogWorkerCoordination)
WALFactory getWalFactory()
org.apache.hadoop.fs.FileStatus getFileBeingSplit()
Map<String,Map<byte[],Long>> getRegionMaxSeqIdInStores()
public static boolean splitLogFile(org.apache.hadoop.fs.Path walDir, org.apache.hadoop.fs.FileStatus logfile, org.apache.hadoop.fs.FileSystem walFS, org.apache.hadoop.conf.Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker, SplitLogWorkerCoordination splitLogWorkerCoordination, WALFactory factory) throws IOException
Splits a WAL file into the region's recovered-edits directory. If the log file has N regions then N recovered.edits files will be produced.
Throws: IOException
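WALSplitter is annotated @InterfaceAudience.Private, so this entry point is normally driven by HBase's own log-splitting machinery rather than by applications. Purely as an illustrative sketch, assuming the class lives in org.apache.hadoop.hbase.wal, that this HBase version offers a WALFactory(Configuration, String) constructor, and that passing null for the reporter, idChecker, and coordination arguments is acceptable (none of this is documented on this page), a single WAL file might be split like so:

```java
// Illustrative sketch only. Paths, the factory id, and the null arguments are
// assumptions made for brevity; in HBase these are supplied by the split worker.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;

public class SplitSingleWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem walFS = FileSystem.get(conf);

    // Hypothetical locations; logfile "should be an actual log file".
    Path walDir = new Path("/hbase/WALs");
    Path wal = new Path("/hbase/WALs/host1,16020,1588230740/host1%2C16020%2C1588230740.1588230740");
    FileStatus logfile = walFS.getFileStatus(wal);

    WALFactory factory = new WALFactory(conf, "wal-split-sketch");
    try {
      // Produces one recovered.edits file per region present in the WAL.
      boolean finished = WALSplitter.splitLogFile(
          walDir, logfile, walFS, conf,
          null /* reporter */, null /* idChecker */,
          null /* splitLogWorkerCoordination */, factory);
      System.out.println("splitLogFile returned " + finished);
    } finally {
      factory.close();
    }
  }
}
```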
public static List<org.apache.hadoop.fs.Path> split(org.apache.hadoop.fs.Path rootDir, org.apache.hadoop.fs.Path logDir, org.apache.hadoop.fs.Path oldLogDir, org.apache.hadoop.fs.FileSystem walFS, org.apache.hadoop.conf.Configuration conf, WALFactory factory) throws IOException
Throws: IOException
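Under the same assumptions about the package and WALFactory construction, here is a minimal sketch of the directory-level entry point, which splits the WALs under a server's log directory and returns the recovered-edits files it wrote; the directory layout shown is hypothetical:

```java
// Illustrative sketch only; the root, log, and archive paths are assumptions.
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;

public class SplitWalDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem walFS = FileSystem.get(conf);

    Path rootDir = new Path("/hbase");                             // hypothetical HBase root dir
    Path logDir = new Path("/hbase/WALs/host1,16020,1588230740");  // hypothetical server WAL dir
    Path oldLogDir = new Path("/hbase/oldWALs");                   // hypothetical archive dir

    WALFactory factory = new WALFactory(conf, "wal-split-dir-sketch");
    try {
      // Splits the WALs under logDir and returns the recovered-edits files written.
      List<Path> recoveredEdits =
          WALSplitter.split(rootDir, logDir, oldLogDir, walFS, conf, factory);
      System.out.println("Wrote " + recoveredEdits.size() + " files: " + recoveredEdits);
    } finally {
      factory.close();
    }
  }
}
```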
boolean splitLogFile(org.apache.hadoop.fs.FileStatus logfile, CancelableProgressable reporter) throws IOException
Log splitting implementation; splits one log file.
Parameters: logfile - should be an actual log file.
Throws: IOException
protected WAL.Reader getReader(org.apache.hadoop.fs.FileStatus file, boolean skipErrors, CancelableProgressable reporter) throws IOException, WALSplitter.CorruptedLogFileException
Create a new WAL.Reader for reading logs to split.
private static WAL.Entry getNextLogLine(WAL.Reader in, org.apache.hadoop.fs.Path path, boolean skipErrors) throws WALSplitter.CorruptedLogFileException, IOException
protected WALProvider.Writer createWriter(org.apache.hadoop.fs.Path logfile) throws IOException
Create a new WALProvider.Writer for writing log splits.
Throws: IOException
protected WAL.Reader getReader(org.apache.hadoop.fs.Path curLogFile, CancelableProgressable reporter) throws IOException
Create a new WAL.Reader for reading logs to split.
Throws: IOException
private int getNumOpenWriters()