/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.asyncfs;

import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
import static org.apache.hadoop.hbase.util.LocatedBlockHelper.getLocatedBlockLocations;
import static org.apache.hadoop.hbase.util.NettyFutureUtils.addListener;
import static org.apache.hadoop.hbase.util.NettyFutureUtils.safeClose;
import static org.apache.hadoop.hbase.util.NettyFutureUtils.safeWriteAndFlush;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
import static org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
import static org.apache.hbase.thirdparty.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
import static org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;

import com.google.protobuf.CodedOutputStream;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.crypto.Encryptor;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemLinkResolver;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.io.asyncfs.monitor.ExcludeDatanodeManager;
import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.ECN;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap;
import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocator;
import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream;
import org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator;
import org.apache.hbase.thirdparty.io.netty.channel.Channel;
import org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture;
import org.apache.hbase.thirdparty.io.netty.channel.ChannelFutureListener;
import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandler;
import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
import org.apache.hbase.thirdparty.io.netty.channel.ChannelInitializer;
import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoop;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler;
import org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
import org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateEvent;
import org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler;
import org.apache.hbase.thirdparty.io.netty.util.concurrent.Future;
import org.apache.hbase.thirdparty.io.netty.util.concurrent.FutureListener;
import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;

/**
 * Helper class for implementing {@link FanOutOneBlockAsyncDFSOutput}.
 */
@InterfaceAudience.Private
public final class FanOutOneBlockAsyncDFSOutputHelper {
  private static final Logger LOG =
    LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputHelper.class);

  private FanOutOneBlockAsyncDFSOutputHelper() {
  }

  public static final String ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES = "hbase.fs.async.create.retries";

  public static final int DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES = 10;
  // use pooled allocator for performance.
  private static final ByteBufAllocator ALLOC = PooledByteBufAllocator.DEFAULT;

  // copied from DFSPacket since it is package private.
  public static final long HEART_BEAT_SEQNO = -1L;

  // Timeouts for communicating with DataNode for streaming writes/reads
  public static final int READ_TIMEOUT = 60 * 1000;

  private interface LeaseManager {

    void begin(DFSClient client, long inodeId);

    void end(DFSClient client, long inodeId);
  }

  private static final LeaseManager LEASE_MANAGER;

  // helper class for creating files.
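  // The signature of ClientProtocol.create differs across Hadoop 3 releases (3.3 added an extra
  // trailing String parameter), so the matching create method is resolved reflectively once, at
  // class load time; see createFileCreator() below.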
  private interface FileCreator {
    default HdfsFileStatus create(ClientProtocol instance, String src, FsPermission masked,
      String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent, short replication,
      long blockSize, CryptoProtocolVersion[] supportedVersions) throws Exception {
      try {
        return (HdfsFileStatus) createObject(instance, src, masked, clientName, flag, createParent,
          replication, blockSize, supportedVersions);
      } catch (InvocationTargetException e) {
        if (e.getCause() instanceof Exception) {
          throw (Exception) e.getCause();
        } else {
          throw new RuntimeException(e.getCause());
        }
      }
    }

    Object createObject(ClientProtocol instance, String src, FsPermission masked, String clientName,
      EnumSetWritable<CreateFlag> flag, boolean createParent, short replication, long blockSize,
      CryptoProtocolVersion[] supportedVersions) throws Exception;
  }

  private static final FileCreator FILE_CREATOR;

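  // DFSClient.beginFileLease and endFileLease are package private, so we can only call them
  // through reflection. beginFileLease also expects the DFSOutputStream being written; we pass
  // null there since the output is driven by FanOutOneBlockAsyncDFSOutput rather than by a
  // DFSOutputStream.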
  private static LeaseManager createLeaseManager() throws NoSuchMethodException {
    Method beginFileLeaseMethod =
      DFSClient.class.getDeclaredMethod("beginFileLease", long.class, DFSOutputStream.class);
    beginFileLeaseMethod.setAccessible(true);
    Method endFileLeaseMethod = DFSClient.class.getDeclaredMethod("endFileLease", long.class);
    endFileLeaseMethod.setAccessible(true);
    return new LeaseManager() {

      @Override
      public void begin(DFSClient client, long inodeId) {
        try {
          beginFileLeaseMethod.invoke(client, inodeId, null);
        } catch (IllegalAccessException | InvocationTargetException e) {
          throw new RuntimeException(e);
        }
      }

      @Override
      public void end(DFSClient client, long inodeId) {
        try {
          endFileLeaseMethod.invoke(client, inodeId);
        } catch (IllegalAccessException | InvocationTargetException e) {
          throw new RuntimeException(e);
        }
      }
    };
  }

  private static FileCreator createFileCreator3_3() throws NoSuchMethodException {
    Method createMethod = ClientProtocol.class.getMethod("create", String.class, FsPermission.class,
      String.class, EnumSetWritable.class, boolean.class, short.class, long.class,
      CryptoProtocolVersion[].class, String.class, String.class);

    return (instance, src, masked, clientName, flag, createParent, replication, blockSize,
      supportedVersions) -> {
      return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag,
        createParent, replication, blockSize, supportedVersions, null, null);
    };
  }

  private static FileCreator createFileCreator3() throws NoSuchMethodException {
    Method createMethod = ClientProtocol.class.getMethod("create", String.class, FsPermission.class,
      String.class, EnumSetWritable.class, boolean.class, short.class, long.class,
      CryptoProtocolVersion[].class, String.class);

    return (instance, src, masked, clientName, flag, createParent, replication, blockSize,
      supportedVersions) -> {
      return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag,
        createParent, replication, blockSize, supportedVersions, null);
    };
  }

  private static FileCreator createFileCreator() throws NoSuchMethodException {
    try {
      return createFileCreator3_3();
    } catch (NoSuchMethodException e) {
      LOG.debug("ClientProtocol::create wrong number of arguments, should be hadoop 3.2 or below");
    }

    return createFileCreator3();
  }

  // cancel the processing if DFSClient is already closed.
  static final class CancelOnClose implements CancelableProgressable {

    private final DFSClient client;

    public CancelOnClose(DFSClient client) {
      this.client = client;
    }

    @Override
    public boolean progress() {
      return client.isClientRunning();
    }
  }

  static {
    try {
      LEASE_MANAGER = createLeaseManager();
      FILE_CREATOR = createFileCreator();
    } catch (Exception e) {
      String msg = "Couldn't properly initialize access to HDFS internals. Please "
        + "update your WAL Provider to not make use of the 'asyncfs' provider. See "
        + "HBASE-16110 for more information.";
      LOG.error(msg, e);
      throw new Error(msg, e);
    }
  }

  static void beginFileLease(DFSClient client, long inodeId) {
    LEASE_MANAGER.begin(client, inodeId);
  }

  static void endFileLease(DFSClient client, long inodeId) {
    LEASE_MANAGER.end(client, inodeId);
  }

  static DataChecksum createChecksum(DFSClient client) {
    return client.getConf().createChecksum(null);
  }

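  // The flag list of a pipeline ack, when present, carries combined ECN/status headers. When it is
  // absent (datanodes without ECN support) we synthesize an equivalent header from the plain reply
  // with ECN disabled, then extract the status from it.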
  static Status getStatus(PipelineAckProto ack) {
    List<Integer> flagList = ack.getFlagList();
    Integer headerFlag;
    if (flagList.isEmpty()) {
      Status reply = ack.getReply(0);
      headerFlag = PipelineAck.combineHeader(ECN.DISABLED, reply);
    } else {
      headerFlag = flagList.get(0);
    }
    return PipelineAck.getStatusFromHeader(headerFlag);
  }

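  // Set up a temporary pipeline (read timeout, varint32 framing, protobuf decoding) to receive the
  // single BlockOpResponseProto the datanode sends back for our write block request. On success,
  // the handlers added here are removed again, so only the handlers in front of the
  // IdleStateHandler, e.g. SASL wrap/unwrap handlers, survive into the streaming phase.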
  private static void processWriteBlockResponse(Channel channel, DatanodeInfo dnInfo,
    Promise<Channel> promise, int timeoutMs) {
    channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS),
      new ProtobufVarint32FrameDecoder(),
      new ProtobufDecoder(BlockOpResponseProto.getDefaultInstance()),
      new SimpleChannelInboundHandler<BlockOpResponseProto>() {

        @Override
        protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp)
          throws Exception {
          Status pipelineStatus = resp.getStatus();
          if (PipelineAck.isRestartOOBStatus(pipelineStatus)) {
            throw new IOException("datanode " + dnInfo + " is restarting");
          }
          String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink();
          if (resp.getStatus() != Status.SUCCESS) {
            if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) {
              throw new InvalidBlockTokenException("Got access token error" + ", status message "
                + resp.getMessage() + ", " + logInfo);
            } else {
              throw new IOException("Got error" + ", status=" + resp.getStatus().name()
                + ", status message " + resp.getMessage() + ", " + logInfo);
            }
          }
          // success
          ChannelPipeline p = ctx.pipeline();
          for (ChannelHandler handler; (handler = p.removeLast()) != null;) {
            // do not remove all handlers because we may have wrap or unwrap handlers at the head
            // of the pipeline.
            if (handler instanceof IdleStateHandler) {
              break;
            }
          }
          // Disable auto read here. Enable it after we set up the streaming pipeline in
          // FanOutOneBlockAsyncDFSOutput.
          ctx.channel().config().setAutoRead(false);
          promise.trySuccess(ctx.channel());
        }

        @Override
        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
          promise.tryFailure(new IOException("connection to " + dnInfo + " is closed"));
        }

        @Override
        public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
          if (evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() == READER_IDLE) {
            promise
              .tryFailure(new IOException("Timeout(" + timeoutMs + "ms) waiting for response"));
          } else {
            super.userEventTriggered(ctx, evt);
          }
        }

        @Override
        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
          promise.tryFailure(cause);
        }
      });
  }

  private static void requestWriteBlock(Channel channel, StorageType storageType,
    OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException {
    OpWriteBlockProto proto =
      writeBlockProtoBuilder.setStorageType(PBHelperClient.convertStorageType(storageType)).build();
    int protoLen = proto.getSerializedSize();
    ByteBuf buffer =
      channel.alloc().buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen);
    buffer.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
    buffer.writeByte(Op.WRITE_BLOCK.code);
    proto.writeDelimitedTo(new ByteBufOutputStream(buffer));
    safeWriteAndFlush(channel, buffer);
  }

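  // SASL negotiation with the datanode has to finish before the write block request goes out, so
  // chain the two steps: once the SASL promise succeeds, install the response handler and then
  // send the OP_WRITE_BLOCK request.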
  private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo,
    StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs,
    DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Channel> promise)
    throws IOException {
    Promise<Void> saslPromise = channel.eventLoop().newPromise();
    trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
    addListener(saslPromise, new FutureListener<Void>() {

      @Override
      public void operationComplete(Future<Void> future) throws Exception {
        if (future.isSuccess()) {
          // setup response processing pipeline first, then send request.
          processWriteBlockResponse(channel, dnInfo, promise, timeoutMs);
          requestWriteBlock(channel, storageType, writeBlockProtoBuilder);
        } else {
          promise.tryFailure(future.cause());
        }
      }
    });
  }

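  /**
   * Connect to every datanode hosting the given block and send each of them the write block
   * request directly, with pipeline size 1, rather than asking the datanodes to chain the request
   * themselves. Each returned future completes with the connected channel once the corresponding
   * datanode has acknowledged the request.
   */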
  private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client,
    String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
    BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup,
    Class<? extends Channel> channelClass) {
    StorageType[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = getLocatedBlockLocations(locatedBlock);
    boolean connectToDnViaHostname =
      conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PBHelperClient.convert(blockCopy))
        .setToken(PBHelperClient.convert(locatedBlock.getBlockToken())))
      .setClientName(clientName).build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    OpWriteBlockProto.Builder writeBlockProtoBuilder =
      OpWriteBlockProto.newBuilder().setHeader(header)
        .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())).setPipelineSize(1)
        .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()).setMaxBytesRcvd(maxBytesRcvd)
        .setLatestGenerationStamp(latestGS).setRequestedChecksum(checksumProto)
        .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
    for (int i = 0; i < datanodeInfos.length; i++) {
      DatanodeInfo dnInfo = datanodeInfos[i];
      StorageType storageType = storageTypes[i];
      Promise<Channel> promise = eventLoopGroup.next().newPromise();
      futureList.add(promise);
      String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
      addListener(new Bootstrap().group(eventLoopGroup).channel(channelClass)
        .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {

          @Override
          protected void initChannel(Channel ch) throws Exception {
            // We need the remote address of the channel, so we can only move on after the channel
            // is connected. Leave an empty implementation here because netty does not allow a
            // null handler.
          }
        }).connect(NetUtils.createSocketAddr(dnAddr)), new ChannelFutureListener() {

          @Override
          public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
              initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder,
                timeoutMs, client, locatedBlock.getBlockToken(), promise);
            } else {
              promise.tryFailure(future.cause());
            }
          }
        });
    }
    return futureList;
  }

  /**
   * Exceptions other than RemoteException thrown when calling create on the namenode
   */
  public static class NameNodeException extends IOException {

    private static final long serialVersionUID = 3143237406477095390L;

    public NameNodeException(Throwable cause) {
      super(cause);
    }
  }

  private static EnumSetWritable<CreateFlag> getCreateFlags(boolean overwrite,
    boolean noLocalWrite) {
    List<CreateFlag> flags = new ArrayList<>();
    flags.add(CreateFlag.CREATE);
    if (overwrite) {
      flags.add(CreateFlag.OVERWRITE);
    }
    if (noLocalWrite) {
      flags.add(CreateFlag.NO_LOCAL_WRITE);
    }
    flags.add(CreateFlag.SHOULD_REPLICATE);
    return new EnumSetWritable<>(EnumSet.copyOf(flags));
  }

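  // Create the file on the namenode, take the file lease, allocate the first block and connect to
  // all of its datanodes, retrying with backoff up to hbase.fs.async.create.retries times.
  // Datanodes we fail to connect to are added to the exclude list before the next attempt.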
  private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
    boolean overwrite, boolean createParent, short replication, long blockSize,
    EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass, StreamSlowMonitor monitor,
    boolean noLocalWrite) throws IOException {
    Configuration conf = dfs.getConf();
    DFSClient client = dfs.getClient();
    String clientName = client.getClientName();
    ClientProtocol namenode = client.getNamenode();
    int createMaxRetries =
      conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
    ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager();
    Set<DatanodeInfo> toExcludeNodes =
      new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet());
    for (int retry = 0;; retry++) {
      LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src,
        toExcludeNodes, retry);
      HdfsFileStatus stat;
      try {
        stat = FILE_CREATOR.create(namenode, src,
          FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName,
          getCreateFlags(overwrite, noLocalWrite), createParent, replication, blockSize,
          CryptoProtocolVersion.supported());
      } catch (Exception e) {
        if (e instanceof RemoteException) {
          throw (RemoteException) e;
        } else {
          throw new NameNodeException(e);
        }
      }
      beginFileLease(client, stat.getFileId());
      boolean succ = false;
      LocatedBlock locatedBlock = null;
      List<Future<Channel>> futureList = null;
      try {
        DataChecksum summer = createChecksum(client);
        locatedBlock = namenode.addBlock(src, client.getClientName(), null,
          toExcludeNodes.toArray(new DatanodeInfo[0]), stat.getFileId(), null, null);
        Map<Channel, DatanodeInfo> datanodes = new IdentityHashMap<>();
        futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L,
          PIPELINE_SETUP_CREATE, summer, eventLoopGroup, channelClass);
        for (int i = 0, n = futureList.size(); i < n; i++) {
          DatanodeInfo datanodeInfo = getLocatedBlockLocations(locatedBlock)[i];
          try {
            datanodes.put(futureList.get(i).syncUninterruptibly().getNow(), datanodeInfo);
          } catch (Exception e) {
            // exclude the broken DN next time
            toExcludeNodes.add(datanodeInfo);
            excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "connect error");
            throw e;
          }
        }
        Encryptor encryptor = createEncryptor(conf, stat, client);
        FanOutOneBlockAsyncDFSOutput output =
          new FanOutOneBlockAsyncDFSOutput(conf, dfs, client, namenode, clientName, src,
            stat.getFileId(), locatedBlock, encryptor, datanodes, summer, ALLOC, monitor);
        succ = true;
        return output;
      } catch (RemoteException e) {
        LOG.warn("create fan-out dfs output {} failed, retry = {}", src, retry, e);
        if (shouldRetryCreate(e)) {
          if (retry >= createMaxRetries) {
            throw e.unwrapRemoteException();
          }
        } else {
          throw e.unwrapRemoteException();
        }
      } catch (IOException e) {
        LOG.warn("create fan-out dfs output {} failed, retry = {}", src, retry, e);
        if (retry >= createMaxRetries) {
          throw e;
        }
        // overwrite the old broken file.
        overwrite = true;
        try {
          Thread.sleep(ConnectionUtils.getPauseTime(100, retry));
        } catch (InterruptedException ie) {
          throw new InterruptedIOException();
        }
      } finally {
        if (!succ) {
          if (futureList != null) {
            for (Future<Channel> f : futureList) {
              addListener(f, new FutureListener<Channel>() {

                @Override
                public void operationComplete(Future<Channel> future) throws Exception {
                  if (future.isSuccess()) {
                    safeClose(future.getNow());
                  }
                }
              });
            }
          }
          endFileLease(client, stat.getFileId());
        }
      }
    }
  }

  /**
   * Create a {@link FanOutOneBlockAsyncDFSOutput}. This method may block, so do not call it inside
   * an {@link EventLoop}.
   */
  public static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, Path f,
    boolean overwrite, boolean createParent, short replication, long blockSize,
    EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass,
    final StreamSlowMonitor monitor, boolean noLocalWrite) throws IOException {
    return new FileSystemLinkResolver<FanOutOneBlockAsyncDFSOutput>() {

      @Override
      public FanOutOneBlockAsyncDFSOutput doCall(Path p)
        throws IOException, UnresolvedLinkException {
        return createOutput(dfs, p.toUri().getPath(), overwrite, createParent, replication,
          blockSize, eventLoopGroup, channelClass, monitor, noLocalWrite);
      }

      @Override
      public FanOutOneBlockAsyncDFSOutput next(FileSystem fs, Path p) throws IOException {
        throw new UnsupportedOperationException();
      }
    }.resolve(dfs, f);
  }

  public static boolean shouldRetryCreate(RemoteException e) {
    // RetryStartFileException was introduced in HDFS 2.6+, so here we can only use the class name.
    // For exceptions other than this, we just throw them out. This is the same as
    // DFSOutputStream.newStreamForCreate.
    return e.getClassName().endsWith("RetryStartFileException");
  }

  static void completeFile(DFSClient client, ClientProtocol namenode, String src, String clientName,
    ExtendedBlock block, long fileId) {
    for (int retry = 0;; retry++) {
      try {
        if (namenode.complete(src, clientName, block, fileId)) {
          endFileLease(client, fileId);
          return;
        } else {
          LOG.warn("complete file " + src + " not finished, retry = " + retry);
        }
      } catch (RemoteException e) {
        IOException ioe = e.unwrapRemoteException();
        if (ioe instanceof LeaseExpiredException) {
          LOG.warn("lease for file " + src + " is expired, give up", e);
          return;
        } else {
          LOG.warn("complete file " + src + " failed, retry = " + retry, e);
        }
      } catch (Exception e) {
        LOG.warn("complete file " + src + " failed, retry = " + retry, e);
      }
      sleepIgnoreInterrupt(retry);
    }
  }

  static void sleepIgnoreInterrupt(int retry) {
    try {
      Thread.sleep(ConnectionUtils.getPauseTime(100, retry));
    } catch (InterruptedException e) {
    }
  }
}