HDFS-15172. Remove unnecessary deadNodeDetectInterval in DeadNodeDetector#checkDeadNodes(). Contributed by Lisheng Sun.

Author: Inigo Goiri
Date:   2020-02-21 16:36:30 -08:00
Parent: 6f84269bcd
Commit: ed70c115a8

DeadNodeDetector.java

@@ -24,7 +24,6 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -104,11 +103,6 @@ public class DeadNodeDetector implements Runnable {
   private Map<String, DatanodeInfo> probeInProg =
       new ConcurrentHashMap<String, DatanodeInfo>();
 
-  /**
-   * The last time when detect dead node.
-   */
-  private long lastDetectDeadTS = 0;
-
   /**
    * Interval time in milliseconds for probing dead node behavior.
    */
@@ -416,20 +410,15 @@ private void probeCallBack(Probe probe, boolean success) {
    * Check dead node periodically.
    */
   private void checkDeadNodes() {
-    long ts = Time.monotonicNow();
-    if (ts - lastDetectDeadTS > deadNodeDetectInterval) {
-      Set<DatanodeInfo> datanodeInfos = clearAndGetDetectedDeadNodes();
-      for (DatanodeInfo datanodeInfo : datanodeInfos) {
-        LOG.debug("Add dead node to check: {}.", datanodeInfo);
-        if (!deadNodesProbeQueue.offer(datanodeInfo)) {
-          LOG.debug("Skip to add dead node {} to check " +
-              "since the probe queue is full.", datanodeInfo);
-          break;
-        }
+    Set<DatanodeInfo> datanodeInfos = clearAndGetDetectedDeadNodes();
+    for (DatanodeInfo datanodeInfo : datanodeInfos) {
+      LOG.debug("Add dead node to check: {}.", datanodeInfo);
+      if (!deadNodesProbeQueue.offer(datanodeInfo)) {
+        LOG.debug("Skip to add dead node {} to check " +
+            "since the probe queue is full.", datanodeInfo);
+        break;
+      }
       }
-      lastDetectDeadTS = ts;
     }
     state = State.IDLE;
   }
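
Note: the timestamp gate removed above was redundant because the detector already paces itself through its state machine; the diff itself shows checkDeadNodes() ending with `state = State.IDLE;`, and the worker thread only returns to the check after the idle state has slept. The sketch below is a minimal, simplified reconstruction of that pacing pattern, not the Hadoop source: the State values, IDLE_SLEEP_MS, and the idle() helper are illustrative stand-ins chosen for this example.

```java
/**
 * Minimal sketch (assumed names, not the Hadoop source) of the pacing
 * pattern behind DeadNodeDetector: the worker alternates between one
 * CHECK_DEAD pass and an IDLE sleep, so no per-call timestamp check is
 * needed inside the check itself.
 */
public class DetectorLoopSketch implements Runnable {

  private enum State { CHECK_DEAD, IDLE }

  // Illustrative pacing interval; the real detector reads its own config.
  private static final long IDLE_SLEEP_MS = 1_000L;

  private volatile State state = State.CHECK_DEAD;
  private volatile boolean running = true;

  @Override
  public void run() {
    while (running) {
      switch (state) {
        case CHECK_DEAD:
          checkDeadNodes();
          break;
        case IDLE:
          idle();
          break;
      }
    }
  }

  /** One pass over the suspects; pacing is handled by the IDLE state. */
  private void checkDeadNodes() {
    // ... enqueue suspect nodes for probing here ...
    state = State.IDLE;   // hand control back to the pacing state
  }

  /** Sleep once, then allow another check; this replaces the removed gate. */
  private void idle() {
    try {
      Thread.sleep(IDLE_SLEEP_MS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      running = false;
      return;
    }
    state = State.CHECK_DEAD;
  }

  public static void main(String[] args) throws InterruptedException {
    Thread t = new Thread(new DetectorLoopSketch(), "detector-sketch");
    t.start();
    Thread.sleep(3_500L); // let a few check/idle cycles run
    t.interrupt();
    t.join();
  }
}
```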