@InterfaceAudience.Private public interface ReplicationSourceInterface
Modifier and Type | Method and Description |
---|---|
void | addHFileRefs(TableName tableName, byte[] family, List<Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path>> pairs): Add hfile names to the queue to be replicated. |
void | enqueueLog(org.apache.hadoop.fs.Path log): Add a log to the list of logs to replicate. |
org.apache.hadoop.fs.Path | getCurrentPath(): Get the current log being replicated. |
String | getPeerId(): Get the id of the peer cluster this source is replicating to. |
String | getQueueId(): Get the id of the replication queue this source is reading from. |
ReplicationEndpoint | getReplicationEndpoint() |
ServerName | getServerWALsBelongTo(): Get the region server that the WALs in this queue belong to; a queue of WALs belongs to only one region server. |
ReplicationSourceManager | getSourceManager() |
MetricsSource | getSourceMetrics() |
String | getStats(): Get a string representation of the current statistics for this source. |
WALFileLengthProvider | getWALFileLengthProvider() |
default Map<String,ReplicationStatus> | getWalGroupStatus(): Get the replication status for each WAL group. |
void | init(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, ReplicationSourceManager manager, ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, String queueId, UUID clusterId, WALFileLengthProvider walFileLengthProvider, MetricsSource metrics): Initializer for the source. |
boolean | isPeerEnabled() |
default boolean | isRecovered() |
boolean | isSourceActive() |
void | postShipEdits(List<WAL.Entry> entries, int batchSize): Call this after the shipper thread has shipped some entries to the peer cluster. |
void | startup(): Start the replication. |
void | terminate(String reason): End the replication. |
void | terminate(String reason, Exception cause): End the replication. |
void | terminate(String reason, Exception cause, boolean clearMetrics): End the replication. |
void | tryThrottle(int batchSize): Try to throttle if the peer is configured with a bandwidth limit. |
void init(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, ReplicationSourceManager manager, ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, String queueId, UUID clusterId, WALFileLengthProvider walFileLengthProvider, MetricsSource metrics) throws IOException

Initializer for the source.

Parameters:
conf - the configuration to use
fs - the file system to use
manager - the manager to use
server - the server for this region server

Throws:
IOException
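The summary table and the init contract above suggest a fixed call order: init, then startup, then WALs are handed to the source until it is terminated. The following is a minimal sketch of that order, not HBase source; the LifecycleSketch class, the source instance, the WAL path, and all collaborator arguments are assumed to be supplied by the hosting region server, and imports of the HBase replication classes are omitted.

```java
import java.io.IOException;
import java.util.UUID;

// Minimal sketch of the call order implied by this interface; not taken from HBase itself.
// Imports for ReplicationSourceInterface, ReplicationSourceManager, ReplicationQueueStorage,
// ReplicationPeer, Server, WALFileLengthProvider and MetricsSource are omitted.
public final class LifecycleSketch {
  static void drive(ReplicationSourceInterface source,
                    org.apache.hadoop.conf.Configuration conf,
                    org.apache.hadoop.fs.FileSystem fs,
                    ReplicationSourceManager manager,
                    ReplicationQueueStorage queueStorage,
                    ReplicationPeer replicationPeer,
                    Server server,
                    String queueId,
                    UUID clusterId,
                    WALFileLengthProvider walFileLengthProvider,
                    MetricsSource metrics) throws IOException {
    // Initializer for the source; must run before startup().
    source.init(conf, fs, manager, queueStorage, replicationPeer, server,
        queueId, clusterId, walFileLengthProvider, metrics);
    source.startup();                                              // start the replication
    // Hand a WAL to the source (hypothetical path).
    source.enqueueLog(new org.apache.hadoop.fs.Path("/hbase/WALs/rs1/rs1.1596021345678"));
    source.terminate("region server is shutting down");            // end the replication
  }
}
```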
void enqueueLog(org.apache.hadoop.fs.Path log)

Add a log to the list of logs to replicate.

Parameters:
log - path to the log to replicate

void addHFileRefs(TableName tableName, byte[] family, List<Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path>> pairs) throws ReplicationException

Add hfile names to the queue to be replicated.

Parameters:
tableName - name of the table these files belong to
family - name of the family these files belong to
pairs - list of pairs of { HFile location in staging dir, HFile path in region dir } which will be added to the queue for replication

Throws:
ReplicationException - if failed to add hfile references

void startup()

Start the replication.
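Bulk-loaded HFiles reach the source through addHFileRefs rather than enqueueLog, as a staging-path/region-path pair per file. A hedged sketch follows; the queueBulkLoadedFiles method, the source instance, and all paths and names are hypothetical, and imports of the HBase classes (TableName, Pair, Bytes, ReplicationException) are omitted.

```java
import java.util.Arrays;
import java.util.List;

// Sketch: queueing bulk-loaded HFiles for replication. Paths and names are hypothetical.
final class BulkLoadRefsSketch {
  static void queueBulkLoadedFiles(ReplicationSourceInterface source) throws ReplicationException {
    org.apache.hadoop.fs.Path staged =
        new org.apache.hadoop.fs.Path("/hbase/staging/bulkload/f1/hfile-0001");
    org.apache.hadoop.fs.Path inRegion =
        new org.apache.hadoop.fs.Path("/hbase/data/default/t1/region-abc/f1/hfile-0001");
    // One pair per HFile: { location in staging dir, path in region dir }.
    List<Pair<org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path>> pairs =
        Arrays.asList(new Pair<>(staged, inRegion));
    source.addHFileRefs(TableName.valueOf("t1"), Bytes.toBytes("f1"), pairs);
  }
}
```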
void terminate(String reason)

End the replication.

Parameters:
reason - why it's terminating

void terminate(String reason, Exception cause)

End the replication.

Parameters:
reason - why it's terminating
cause - the error that's causing it

void terminate(String reason, Exception cause, boolean clearMetrics)

End the replication.

Parameters:
reason - why it's terminating
cause - the error that's causing it
clearMetrics - removes all metrics about this source

org.apache.hadoop.fs.Path getCurrentPath()

Get the current log being replicated.
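The three terminate overloads differ only in whether a cause is recorded and whether metrics are cleared. A sketch of choosing between them, assuming a live source and a hypothetical shipLoop method that may throw:

```java
// Sketch: choosing a terminate overload. shipLoop() is hypothetical.
final class TerminateSketch {
  static void run(ReplicationSourceInterface source) {
    try {
      shipLoop(source);
      source.terminate("peer removed");                    // normal shutdown, keep metrics
    } catch (Exception e) {
      source.terminate("unexpected exception", e, true);   // record the cause, clear metrics
    }
  }

  static void shipLoop(ReplicationSourceInterface source) throws Exception {
    // hypothetical shipping work
  }
}
```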
String getQueueId()

Get the id of the replication queue this source is reading from.

String getStats()

Get a string representation of the current statistics for this source.

boolean isPeerEnabled()

boolean isSourceActive()

MetricsSource getSourceMetrics()

ReplicationEndpoint getReplicationEndpoint()

ReplicationSourceManager getSourceManager()

WALFileLengthProvider getWALFileLengthProvider()
void tryThrottle(int batchSize) throws InterruptedException

Try to throttle if the peer is configured with a bandwidth limit.

Parameters:
batchSize - the size of the entries that will be pushed

Throws:
InterruptedException

void postShipEdits(List<WAL.Entry> entries, int batchSize)

Call this after the shipper thread has shipped some entries to the peer cluster.

Parameters:
entries - the entries that were pushed
batchSize - the size of the entries that were pushed

ServerName getServerWALsBelongTo()

Get the region server that the WALs in this queue belong to; a queue of WALs belongs to only one region server.
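tryThrottle and postShipEdits bracket each shipment: throttle before pushing a batch, then report what was pushed. A sketch of that loop follows; readBatch, ship, and the Batch holder are hypothetical, and the WAL.Entry import is omitted.

```java
import java.util.List;

// Sketch of a shipper loop around tryThrottle()/postShipEdits(); helpers are hypothetical.
final class ShipperSketch {
  static final class Batch {
    List<WAL.Entry> entries;   // entries to ship
    int sizeInBytes;           // serialized size of the batch
  }

  static void shipLoop(ReplicationSourceInterface source) throws InterruptedException {
    while (source.isSourceActive() && source.isPeerEnabled()) {
      Batch batch = readBatch();                     // hypothetical: read from the current WAL
      source.tryThrottle(batch.sizeInBytes);         // block if the bandwidth quota is used up
      ship(batch);                                   // hypothetical: push to the peer cluster
      source.postShipEdits(batch.entries, batch.sizeInBytes);  // update metrics and accounting
    }
  }

  static Batch readBatch() { return new Batch(); }
  static void ship(Batch batch) { }
}
```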
default Map<String,ReplicationStatus> getWalGroupStatus()

Get the replication status for each WAL group.

default boolean isRecovered()
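A small sketch of inspecting a source's per-WAL-group status; the source instance is assumed to exist, only the map keys (WAL group ids) and the default string form of each status are used since ReplicationStatus accessors are not shown on this page, and the ReplicationStatus import is omitted.

```java
import java.util.Map;

// Sketch: reading per-WAL-group replication status from a source.
final class StatusSketch {
  static void report(ReplicationSourceInterface source) {
    if (source.isRecovered()) {
      System.out.println("source " + source.getQueueId() + " is a recovered queue");
    }
    Map<String, ReplicationStatus> statuses = source.getWalGroupStatus();
    for (String walGroup : statuses.keySet()) {
      System.out.println("wal group " + walGroup + ": " + statuses.get(walGroup));
    }
  }
}
```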