diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 3d6d3c53f9..3e368ae4dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -892,6 +892,8 @@ void waitForAckedSeqno(long seqno) throws IOException {
     try (TraceScope ignored = dfsClient.getTracer().
         newScope("waitForAckedSeqno")) {
       LOG.debug("{} waiting for ack for: {}", this, seqno);
+      int dnodes = nodes != null ? nodes.length : 3;
+      int writeTimeout = dfsClient.getDatanodeWriteTimeout(dnodes);
       long begin = Time.monotonicNow();
       try {
         synchronized (dataQueue) {
@@ -902,6 +904,16 @@ void waitForAckedSeqno(long seqno) throws IOException {
             }
             try {
               dataQueue.wait(1000); // when we receive an ack, we notify on
+              long duration = Time.monotonicNow() - begin;
+              if (duration > writeTimeout) {
+                LOG.error("No ack received, took {}ms (threshold={}ms). "
+                    + "File being written: {}, block: {}, "
+                    + "Write pipeline datanodes: {}.",
+                    duration, writeTimeout, src, block, nodes);
+                throw new InterruptedIOException("No ack received after "
+                    + duration / 1000 + "s and a timeout of "
+                    + writeTimeout / 1000 + "s");
+              }
               // dataQueue
             } catch (InterruptedException ie) {
               throw new InterruptedIOException(
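
Note: for readers outside the Hadoop tree, the sketch below isolates the bounded-wait pattern the patch adds to waitForAckedSeqno: wait on the dataQueue monitor in one-second slices and abort with an InterruptedIOException once the elapsed time exceeds the datanode write timeout. The class, the ack() helper, and the writeTimeoutMs parameter are illustrative stand-ins, not HDFS code; the real patch derives the timeout from dfsClient.getDatanodeWriteTimeout(dnodes) and checks lastAckedSeqno maintained by the response processor.

import java.io.InterruptedIOException;

// Minimal, self-contained sketch of the bounded wait-for-ack pattern.
public class BoundedAckWait {
  private final Object dataQueue = new Object();
  private long lastAckedSeqno = -1; // guarded by dataQueue

  void waitForAckedSeqno(long seqno, long writeTimeoutMs)
      throws InterruptedIOException {
    long begin = System.nanoTime() / 1_000_000L; // monotonic ms, like Time.monotonicNow()
    synchronized (dataQueue) {
      while (lastAckedSeqno < seqno) {
        try {
          dataQueue.wait(1000); // an ack handler notifies on dataQueue
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw new InterruptedIOException("Interrupted while waiting for ack");
        }
        long duration = System.nanoTime() / 1_000_000L - begin;
        if (duration > writeTimeoutMs) {
          throw new InterruptedIOException("No ack received after "
              + duration / 1000 + "s and a timeout of "
              + writeTimeoutMs / 1000 + "s");
        }
      }
    }
  }

  // Simulated ack receiver: records progress and wakes the waiter.
  void ack(long seqno) {
    synchronized (dataQueue) {
      lastAckedSeqno = Math.max(lastAckedSeqno, seqno);
      dataQueue.notifyAll();
    }
  }
}

Like the patch, the sketch re-checks the deadline only after each one-second wait slice, so the throw can lag the configured threshold by up to a second; the loop condition also guards against spurious wakeups of Object.wait.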