From 415e9bdfbdeebded520e0233bcb91a487411a94b Mon Sep 17 00:00:00 2001 From: hfutatzhanghb Date: Sun, 24 Dec 2023 16:57:04 +0800 Subject: [PATCH] HDFS-17273. Change the way of computing some local variables duration for better debugging (#6321). Contributed by farmmamba. Reviewed-by: Haiyang Hu Reviewed-by: Tao Li Signed-off-by: He Xiaoqiao --- .../org/apache/hadoop/hdfs/DataStreamer.java | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java index 04d6d9a565..b313a8737f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java @@ -787,7 +787,7 @@ public void run() { scope = null; dataQueue.removeFirst(); ackQueue.addLast(one); - packetSendTime.put(one.getSeqno(), Time.monotonicNow()); + packetSendTime.put(one.getSeqno(), Time.monotonicNowNanos()); dataQueue.notifyAll(); } } @@ -953,7 +953,7 @@ void waitForAckedSeqno(long seqno) throws IOException { dnodes = nodes != null ? nodes.length : 3; } int writeTimeout = dfsClient.getDatanodeWriteTimeout(dnodes); - long begin = Time.monotonicNow(); + long begin = Time.monotonicNowNanos(); try { synchronized (dataQueue) { while (!streamerClosed) { @@ -963,14 +963,14 @@ void waitForAckedSeqno(long seqno) throws IOException { } try { dataQueue.wait(1000); // when we receive an ack, we notify on - long duration = Time.monotonicNow() - begin; - if (duration > writeTimeout) { + long duration = Time.monotonicNowNanos() - begin; + if (TimeUnit.NANOSECONDS.toMillis(duration) > writeTimeout) { LOG.error("No ack received, took {}ms (threshold={}ms). " + "File being written: {}, block: {}, " + "Write pipeline datanodes: {}.", - duration, writeTimeout, src, block, nodes); + TimeUnit.NANOSECONDS.toMillis(duration), writeTimeout, src, block, nodes); throw new InterruptedIOException("No ack received after " + - duration / 1000 + "s and a timeout of " + + TimeUnit.NANOSECONDS.toSeconds(duration) + "s and a timeout of " + writeTimeout / 1000 + "s"); } // dataQueue @@ -984,11 +984,12 @@ void waitForAckedSeqno(long seqno) throws IOException { } catch (ClosedChannelException cce) { LOG.debug("Closed channel exception", cce); } - long duration = Time.monotonicNow() - begin; - if (duration > dfsclientSlowLogThresholdMs) { + long duration = Time.monotonicNowNanos() - begin; + if (TimeUnit.NANOSECONDS.toMillis(duration) > dfsclientSlowLogThresholdMs) { LOG.warn("Slow waitForAckedSeqno took {}ms (threshold={}ms). File being" + " written: {}, block: {}, Write pipeline datanodes: {}.", - duration, dfsclientSlowLogThresholdMs, src, block, nodes); + TimeUnit.NANOSECONDS.toMillis(duration), dfsclientSlowLogThresholdMs, + src, block, nodes); } } } @@ -1179,10 +1180,10 @@ public void run() { if (ack.getSeqno() != DFSPacket.HEART_BEAT_SEQNO) { Long begin = packetSendTime.get(ack.getSeqno()); if (begin != null) { - long duration = Time.monotonicNow() - begin; - if (duration > dfsclientSlowLogThresholdMs) { + long duration = Time.monotonicNowNanos() - begin; + if (TimeUnit.NANOSECONDS.toMillis(duration) > dfsclientSlowLogThresholdMs) { LOG.info("Slow ReadProcessor read fields for block " + block - + " took " + duration + "ms (threshold=" + + " took " + TimeUnit.NANOSECONDS.toMillis(duration) + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms); ack: " + ack + ", targets: " + Arrays.asList(targets)); }