@InterfaceAudience.Private public final class FanOutOneBlockAsyncDFSOutputHelper extends Object
Helper class for implementing FanOutOneBlockAsyncDFSOutput.
| Modifier and Type | Class and Description |
|---|---|
| (package private) static class | FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose |
| private static interface | FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor |
| private static interface | FanOutOneBlockAsyncDFSOutputHelper.FileCreator |
| private static interface | FanOutOneBlockAsyncDFSOutputHelper.LeaseManager |
| static class | FanOutOneBlockAsyncDFSOutputHelper.NameNodeException: Exception other than RemoteException thrown when calling create on the namenode |
| Modifier and Type | Field and Description |
|---|---|
| private static org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocator | ALLOC |
| static String | ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES |
| static int | DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES |
| private static FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor | DFS_CLIENT_ADAPTOR |
| private static org.apache.hadoop.hdfs.protocol.DatanodeInfo[] | EMPTY_DN_ARRAY |
| private static FanOutOneBlockAsyncDFSOutputHelper.FileCreator | FILE_CREATOR |
| static long | HEART_BEAT_SEQNO |
| private static FanOutOneBlockAsyncDFSOutputHelper.LeaseManager | LEASE_MANAGER |
| private static org.slf4j.Logger | LOG |
| static int | READ_TIMEOUT |
| Modifier | Constructor and Description |
|---|---|
| private | FanOutOneBlockAsyncDFSOutputHelper() |
| Modifier and Type | Method and Description |
|---|---|
| (package private) static void | beginFileLease(org.apache.hadoop.hdfs.DFSClient client, long inodeId) |
| (package private) static void | completeFile(org.apache.hadoop.hdfs.DFSClient client, org.apache.hadoop.hdfs.protocol.ClientProtocol namenode, String src, String clientName, org.apache.hadoop.hdfs.protocol.ExtendedBlock block, long fileId) |
| private static List<org.apache.hbase.thirdparty.io.netty.util.concurrent.Future<org.apache.hbase.thirdparty.io.netty.channel.Channel>> | connectToDataNodes(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hdfs.DFSClient client, String clientName, org.apache.hadoop.hdfs.protocol.LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage stage, org.apache.hadoop.util.DataChecksum summer, org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup eventLoopGroup, Class<? extends org.apache.hbase.thirdparty.io.netty.channel.Channel> channelClass) |
| (package private) static org.apache.hadoop.util.DataChecksum | createChecksum(org.apache.hadoop.hdfs.DFSClient client) |
| private static FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor | createDFSClientAdaptor() |
| private static FanOutOneBlockAsyncDFSOutputHelper.FileCreator | createFileCreator() |
| private static FanOutOneBlockAsyncDFSOutputHelper.FileCreator | createFileCreator2() |
| private static FanOutOneBlockAsyncDFSOutputHelper.FileCreator | createFileCreator3() |
| private static FanOutOneBlockAsyncDFSOutputHelper.LeaseManager | createLeaseManager() |
| static FanOutOneBlockAsyncDFSOutput | createOutput(org.apache.hadoop.hdfs.DistributedFileSystem dfs, org.apache.hadoop.fs.Path f, boolean overwrite, boolean createParent, short replication, long blockSize, org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup eventLoopGroup, Class<? extends org.apache.hbase.thirdparty.io.netty.channel.Channel> channelClass): Create a FanOutOneBlockAsyncDFSOutput. |
| private static FanOutOneBlockAsyncDFSOutput | createOutput(org.apache.hadoop.hdfs.DistributedFileSystem dfs, String src, boolean overwrite, boolean createParent, short replication, long blockSize, org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup eventLoopGroup, Class<? extends org.apache.hbase.thirdparty.io.netty.channel.Channel> channelClass) |
| (package private) static void | endFileLease(org.apache.hadoop.hdfs.DFSClient client, long inodeId) |
| (package private) static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status | getStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto ack) |
| private static void | initialize(org.apache.hadoop.conf.Configuration conf, org.apache.hbase.thirdparty.io.netty.channel.Channel channel, org.apache.hadoop.hdfs.protocol.DatanodeInfo dnInfo, org.apache.hadoop.fs.StorageType storageType, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs, org.apache.hadoop.hdfs.DFSClient client, org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier> accessToken, org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise<org.apache.hbase.thirdparty.io.netty.channel.Channel> promise) |
| private static void | processWriteBlockResponse(org.apache.hbase.thirdparty.io.netty.channel.Channel channel, org.apache.hadoop.hdfs.protocol.DatanodeInfo dnInfo, org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise<org.apache.hbase.thirdparty.io.netty.channel.Channel> promise, int timeoutMs) |
| private static void | requestWriteBlock(org.apache.hbase.thirdparty.io.netty.channel.Channel channel, org.apache.hadoop.fs.StorageType storageType, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.Builder writeBlockProtoBuilder) |
| static boolean | shouldRetryCreate(org.apache.hadoop.ipc.RemoteException e) |
| (package private) static void | sleepIgnoreInterrupt(int retry) |
private static final org.slf4j.Logger LOG
public static final String ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES
public static final int DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES
private static final org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocator ALLOC
public static final long HEART_BEAT_SEQNO
public static final int READ_TIMEOUT
private static final org.apache.hadoop.hdfs.protocol.DatanodeInfo[] EMPTY_DN_ARRAY
private static final FanOutOneBlockAsyncDFSOutputHelper.LeaseManager LEASE_MANAGER
private static final FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor DFS_CLIENT_ADAPTOR
private static final FanOutOneBlockAsyncDFSOutputHelper.FileCreator FILE_CREATOR
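The public constants above are the tunable surface of this helper. A minimal sketch of adjusting the create-retry budget follows, assuming the key is read from the Configuration backing the target FileSystem and that the package is org.apache.hadoop.hbase.io.asyncfs as in the HBase source layout; the class is audience-private, so this is internal-style code, and the concrete key string is whatever ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES resolves to.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper;

final class CreateRetriesConfigSketch {
  // Build a Configuration with a larger create-retry budget than the shipped default.
  static Configuration withMoreCreateRetries() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt(FanOutOneBlockAsyncDFSOutputHelper.ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES,
        2 * FanOutOneBlockAsyncDFSOutputHelper.DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
    return conf;
  }
}
```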
private FanOutOneBlockAsyncDFSOutputHelper()
private static FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor createDFSClientAdaptor() throws NoSuchMethodException
Throws: NoSuchMethodException
private static FanOutOneBlockAsyncDFSOutputHelper.LeaseManager createLeaseManager() throws NoSuchMethodException
Throws: NoSuchMethodException
private static FanOutOneBlockAsyncDFSOutputHelper.FileCreator createFileCreator3() throws NoSuchMethodException
Throws: NoSuchMethodException
private static FanOutOneBlockAsyncDFSOutputHelper.FileCreator createFileCreator2() throws NoSuchMethodException
Throws: NoSuchMethodException
private static FanOutOneBlockAsyncDFSOutputHelper.FileCreator createFileCreator() throws NoSuchMethodException
Throws: NoSuchMethodException
static void beginFileLease(org.apache.hadoop.hdfs.DFSClient client, long inodeId)
static void endFileLease(org.apache.hadoop.hdfs.DFSClient client, long inodeId)
static org.apache.hadoop.util.DataChecksum createChecksum(org.apache.hadoop.hdfs.DFSClient client)
static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto ack)
private static void processWriteBlockResponse(org.apache.hbase.thirdparty.io.netty.channel.Channel channel, org.apache.hadoop.hdfs.protocol.DatanodeInfo dnInfo, org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise<org.apache.hbase.thirdparty.io.netty.channel.Channel> promise, int timeoutMs)
private static void requestWriteBlock(org.apache.hbase.thirdparty.io.netty.channel.Channel channel, org.apache.hadoop.fs.StorageType storageType, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException
Throws: IOException
private static void initialize(org.apache.hadoop.conf.Configuration conf, org.apache.hbase.thirdparty.io.netty.channel.Channel channel, org.apache.hadoop.hdfs.protocol.DatanodeInfo dnInfo, org.apache.hadoop.fs.StorageType storageType, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs, org.apache.hadoop.hdfs.DFSClient client, org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier> accessToken, org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise<org.apache.hbase.thirdparty.io.netty.channel.Channel> promise) throws IOException
Throws: IOException
private static List<org.apache.hbase.thirdparty.io.netty.util.concurrent.Future<org.apache.hbase.thirdparty.io.netty.channel.Channel>> connectToDataNodes(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hdfs.DFSClient client, String clientName, org.apache.hadoop.hdfs.protocol.LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage stage, org.apache.hadoop.util.DataChecksum summer, org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup eventLoopGroup, Class<? extends org.apache.hbase.thirdparty.io.netty.channel.Channel> channelClass)
private static FanOutOneBlockAsyncDFSOutput createOutput(org.apache.hadoop.hdfs.DistributedFileSystem dfs, String src, boolean overwrite, boolean createParent, short replication, long blockSize, org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup eventLoopGroup, Class<? extends org.apache.hbase.thirdparty.io.netty.channel.Channel> channelClass) throws IOException
Throws: IOException
public static FanOutOneBlockAsyncDFSOutput createOutput(org.apache.hadoop.hdfs.DistributedFileSystem dfs, org.apache.hadoop.fs.Path f, boolean overwrite, boolean createParent, short replication, long blockSize, org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup eventLoopGroup, Class<? extends org.apache.hbase.thirdparty.io.netty.channel.Channel> channelClass) throws IOException
Create a FanOutOneBlockAsyncDFSOutput. The method may block, so do not call it inside an EventLoop.
Throws: IOException
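A minimal sketch of calling the public createOutput overload, heeding the note above about not calling it from an EventLoop. The path, replication, block size, and the shaded Netty NIO transport classes (NioSocketChannel under org.apache.hbase.thirdparty) are illustrative assumptions, not values taken from this page.

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput;
import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;

final class CreateOutputSketch {
  // Open a fan-out output for a new file. Run this on an ordinary thread,
  // never inside an EventLoop, because createOutput() may block.
  static FanOutOneBlockAsyncDFSOutput open(DistributedFileSystem dfs, EventLoopGroup group)
      throws IOException {
    return FanOutOneBlockAsyncDFSOutputHelper.createOutput(
        dfs,
        new Path("/hbase/example/output.tmp"), // illustrative path
        true,                // overwrite
        false,               // createParent
        (short) 3,           // replication
        128L * 1024 * 1024,  // blockSize
        group,               // e.g. a shaded NioEventLoopGroup owned by the caller
        NioSocketChannel.class);
  }
}
```

The returned FanOutOneBlockAsyncDFSOutput carries the subsequent writes; closing or recovering it is handled through that object and is outside the scope of this sketch.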
public static boolean shouldRetryCreate(org.apache.hadoop.ipc.RemoteException e)
static void completeFile(org.apache.hadoop.hdfs.DFSClient client, org.apache.hadoop.hdfs.protocol.ClientProtocol namenode, String src, String clientName, org.apache.hadoop.hdfs.protocol.ExtendedBlock block, long fileId)
static void sleepIgnoreInterrupt(int retry)
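shouldRetryCreate(RemoteException) and sleepIgnoreInterrupt(int) pair naturally into a bounded retry loop around the namenode create call. A hedged sketch of that pattern follows; it assumes same-package placement (sleepIgnoreInterrupt is package private), and CreateCall is a hypothetical functional interface standing in for the actual RPC.

```java
package org.apache.hadoop.hbase.io.asyncfs; // assumed package, needed for package-private access

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

final class CreateRetrySketch {

  @FunctionalInterface
  interface CreateCall<T> {
    T run() throws IOException; // hypothetical wrapper around the namenode create RPC
  }

  // Retry the create call while the failure is classified as retriable,
  // backing off via sleepIgnoreInterrupt() between attempts.
  static <T> T callWithRetries(int maxRetries, CreateCall<T> call) throws IOException {
    for (int retry = 0;; retry++) {
      try {
        return call.run();
      } catch (RemoteException e) {
        if (retry >= maxRetries || !FanOutOneBlockAsyncDFSOutputHelper.shouldRetryCreate(e)) {
          throw e; // out of budget, or not a retriable create failure
        }
        FanOutOneBlockAsyncDFSOutputHelper.sleepIgnoreInterrupt(retry);
      }
    }
  }
}
```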