@InterfaceAudience.Private public class FanOutOneBlockAsyncDFSOutput extends Object implements AsyncFSOutput
Use the createOutput method in FanOutOneBlockAsyncDFSOutputHelper to create an instance. The main use of this class is implementing a WAL, so we only expose a small set of HDFS configurations in that method. We place it under the io package to keep it independent of the WAL implementation, which should make it easier to eventually move it into the HDFS project.
Note that although we support pipelined flush, i.e., writing new data and then flushing before the previous flush completes, the implementation is not thread-safe, so you should not call its methods concurrently.
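For orientation, here is a minimal usage sketch, not a definitive recipe: the createOutput parameter list shown (overwrite, createParent, replication, block size, a Netty event loop group, and a channel class) is an assumption that may differ between HBase releases, and the WAL path and record are hypothetical; check FanOutOneBlockAsyncDFSOutputHelper in your version.

```java
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput;
import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;

public class FanOutOutputSketch {
  // Writes a single length-prefixed record and flushes it to the datanode fan-out.
  static void writeRecord(DistributedFileSystem dfs, Path walPath, byte[] record) throws Exception {
    EventLoopGroup group = new NioEventLoopGroup();
    // Assumed helper signature; newer releases may take additional arguments.
    FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(
        dfs, walPath, true /* overwrite */, false /* createParent */, (short) 3,
        dfs.getDefaultBlockSize(), group, NioSocketChannel.class);
    try {
      out.writeInt(record.length);                       // length prefix, copied into the buffer only
      out.write(record);                                 // still buffered, no network I/O yet
      CompletableFuture<Long> acked = out.flush(false);  // hflush the buffer to all datanodes
      acked.get();                                       // wait until every datanode has acked
    } finally {
      out.close();                                       // end the block, complete the file at the namenode
      group.shutdownGracefully();
    }
  }
}
```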
Advantages compared to DFSOutputStream:
Modifier and Type | Class and Description |
---|---|
private class | FanOutOneBlockAsyncDFSOutput.AckHandler |
private static class | FanOutOneBlockAsyncDFSOutput.Callback |
private static class | FanOutOneBlockAsyncDFSOutput.State |
Modifier and Type | Field and Description |
---|---|
private long | ackedBlockLength |
private org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocator | alloc |
private org.apache.hadoop.hdfs.protocol.ExtendedBlock | block |
private org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf | buf |
private org.apache.hadoop.hdfs.DFSClient | client |
private String | clientName |
private org.apache.hadoop.conf.Configuration | conf |
private List<org.apache.hbase.thirdparty.io.netty.channel.Channel> | datanodeList |
private org.apache.hadoop.hdfs.DistributedFileSystem | dfs |
private org.apache.hadoop.crypto.Encryptor | encryptor |
private long | fileId |
private FSUtils | fsUtils |
private org.apache.hadoop.hdfs.protocol.DatanodeInfo[] | locations |
private static int | MAX_DATA_LEN |
private int | maxDataLen |
private org.apache.hadoop.hdfs.protocol.ClientProtocol | namenode |
private long | nextPacketOffsetInBlock |
private long | nextPacketSeqno |
private SendBufSizePredictor | sendBufSizePRedictor |
private String | src |
private FanOutOneBlockAsyncDFSOutput.State | state |
private org.apache.hadoop.util.DataChecksum | summer |
private int | trailingPartialChunkLength |
private ConcurrentLinkedDeque<FanOutOneBlockAsyncDFSOutput.Callback> | waitingAckQueue |
Constructor and Description |
---|
FanOutOneBlockAsyncDFSOutput(org.apache.hadoop.conf.Configuration conf, FSUtils fsUtils, org.apache.hadoop.hdfs.DistributedFileSystem dfs, org.apache.hadoop.hdfs.DFSClient client, org.apache.hadoop.hdfs.protocol.ClientProtocol namenode, String clientName, String src, long fileId, org.apache.hadoop.hdfs.protocol.LocatedBlock locatedBlock, org.apache.hadoop.crypto.Encryptor encryptor, List<org.apache.hbase.thirdparty.io.netty.channel.Channel> datanodeList, org.apache.hadoop.util.DataChecksum summer, org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocator alloc) |
Modifier and Type | Method and Description |
---|---|
int | buffered() Return the current size of buffered data. |
void | close() End the current block and complete file at namenode. |
private void | completed(org.apache.hbase.thirdparty.io.netty.channel.Channel channel) |
private void | endBlock() |
private void | failed(org.apache.hbase.thirdparty.io.netty.channel.Channel channel, Supplier<Throwable> errorSupplier) |
CompletableFuture<Long> | flush(boolean syncBlock) Flush the buffer out to datanodes. |
private void | flush0(CompletableFuture<Long> future, boolean syncBlock) |
private void | flushBuffer(CompletableFuture<Long> future, org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf dataBuf, long nextPacketOffsetInBlock, boolean syncBlock) |
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] | getPipeline() Return current pipeline. |
boolean | isBroken() Whether the stream is broken. |
void | recoverAndClose(CancelableProgressable reporter) The close method when error occurred. |
private void | setupReceiver(int timeoutMs) |
void | write(byte[] b) Just call write(b, 0, b.length). |
void | write(byte[] b, int off, int len) Copy the data into the buffer. |
void | write(ByteBuffer bb) Copy the data in the given bb into the buffer. |
void | writeInt(int i) Write an int to the buffer. |
private static final int MAX_DATA_LEN
private final org.apache.hadoop.conf.Configuration conf
private final org.apache.hadoop.hdfs.DistributedFileSystem dfs
private final org.apache.hadoop.hdfs.DFSClient client
private final org.apache.hadoop.hdfs.protocol.ClientProtocol namenode
private final String clientName
private final long fileId
private final org.apache.hadoop.hdfs.protocol.ExtendedBlock block
private final org.apache.hadoop.hdfs.protocol.DatanodeInfo[] locations
private final org.apache.hadoop.crypto.Encryptor encryptor
private final List<org.apache.hbase.thirdparty.io.netty.channel.Channel> datanodeList
private final org.apache.hadoop.util.DataChecksum summer
private final int maxDataLen
private final org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocator alloc
private final ConcurrentLinkedDeque<FanOutOneBlockAsyncDFSOutput.Callback> waitingAckQueue
private volatile long ackedBlockLength
private long nextPacketOffsetInBlock
private int trailingPartialChunkLength
private long nextPacketSeqno
private org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf buf
private final SendBufSizePredictor sendBufSizePRedictor
private volatile FanOutOneBlockAsyncDFSOutput.State state
FanOutOneBlockAsyncDFSOutput(org.apache.hadoop.conf.Configuration conf, FSUtils fsUtils, org.apache.hadoop.hdfs.DistributedFileSystem dfs, org.apache.hadoop.hdfs.DFSClient client, org.apache.hadoop.hdfs.protocol.ClientProtocol namenode, String clientName, String src, long fileId, org.apache.hadoop.hdfs.protocol.LocatedBlock locatedBlock, org.apache.hadoop.crypto.Encryptor encryptor, List<org.apache.hbase.thirdparty.io.netty.channel.Channel> datanodeList, org.apache.hadoop.util.DataChecksum summer, org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocator alloc)
private void completed(org.apache.hbase.thirdparty.io.netty.channel.Channel channel)
private void failed(org.apache.hbase.thirdparty.io.netty.channel.Channel channel, Supplier<Throwable> errorSupplier)
private void setupReceiver(int timeoutMs)
public void writeInt(int i)
Write an int to the buffer.
Specified by: writeInt in interface AsyncFSOutput
public void write(ByteBuffer bb)
Copy the data in the given bb into the buffer.
Specified by: write in interface AsyncFSOutput
public void write(byte[] b)
Just call write(b, 0, b.length).
Specified by: write in interface AsyncFSOutput
See Also: AsyncFSOutput.write(byte[], int, int)
public void write(byte[] b, int off, int len)
Copy the data into the buffer. Note that you need to call AsyncFSOutput.flush(boolean) to flush the buffer manually.
Specified by: write in interface AsyncFSOutput
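Since write(byte[], int, int) only fills the internal buffer, nothing reaches the datanodes until a flush. A small sketch of that pattern, assuming an already-created out and a caller-supplied record (imports as in the earlier sketch):

```java
// write() only copies into the internal ByteBuf; flush() performs the actual send.
static CompletableFuture<Long> append(FanOutOneBlockAsyncDFSOutput out, byte[] record) {
  out.write(record, 0, record.length);            // copied into the buffer, no I/O yet
  System.out.println("buffered bytes: " + out.buffered());
  return out.flush(false);                        // completes once the datanodes have acked
}
```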
public int buffered()
Return the current size of buffered data.
Specified by: buffered in interface AsyncFSOutput
public org.apache.hadoop.hdfs.protocol.DatanodeInfo[] getPipeline()
Return current pipeline.
Specified by: getPipeline in interface AsyncFSOutput
private void flushBuffer(CompletableFuture<Long> future, org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf dataBuf, long nextPacketOffsetInBlock, boolean syncBlock)
private void flush0(CompletableFuture<Long> future, boolean syncBlock)
public CompletableFuture<Long> flush(boolean syncBlock)
Flush the buffer out to datanodes.
Specified by: flush in interface AsyncFSOutput
Parameters: syncBlock - will call hsync if true, otherwise hflush.

private void endBlock() throws IOException
Throws: IOException
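Because flushes are pipelined (see the class description), a caller may issue another write and flush before an earlier flush completes, as long as everything happens on one thread. A sketch under those assumptions, with out, batch1, and batch2 supplied by the caller:

```java
// Pipelined flush sketch: both flushes are issued from the same thread, since the
// class is not thread-safe. syncBlock=true asks the datanodes to hsync to disk.
static void pipelinedFlush(FanOutOneBlockAsyncDFSOutput out, byte[] batch1, byte[] batch2)
    throws Exception {
  out.write(batch1);
  CompletableFuture<Long> f1 = out.flush(false);  // hflush: data pushed to the datanodes
  out.write(batch2);                              // keep writing before f1 completes
  CompletableFuture<Long> f2 = out.flush(true);   // hsync: also persisted on the datanodes
  f1.get();
  f2.get();                                       // both acked lengths are now available
}
```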
public void recoverAndClose(CancelableProgressable reporter) throws IOException
The close method when error occurred.
Specified by: recoverAndClose in interface AsyncFSOutput
Throws: IOException
public void close() throws IOException
End the current block and complete file at namenode. You should call recoverAndClose(CancelableProgressable) if this method throws an exception.
Specified by: close in interface Closeable
Specified by: close in interface AutoCloseable
Specified by: close in interface AsyncFSOutput
Throws: IOException
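Per the contract above, a caller should fall back to recoverAndClose(CancelableProgressable) when close() fails. A sketch of that pattern; passing a null reporter here is an assumption for brevity, a real caller would supply a CancelableProgressable:

```java
// If the normal close path fails, recover and close the file instead of leaving it open.
static void finish(FanOutOneBlockAsyncDFSOutput out) throws IOException {
  try {
    out.close();                // end the block and complete the file at the namenode
  } catch (IOException e) {
    out.recoverAndClose(null);  // null reporter used here only for brevity
    throw e;
  }
}
```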
public boolean isBroken()
Whether the stream is broken.
Specified by: isBroken in interface AsyncFSOutput