diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4ebf437612..8c8afedf5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -980,6 +980,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8053. Move DFSIn/OutputStream and related classes to
     hadoop-hdfs-client. (Mingliang Liu via wheat9)
 
+    HDFS-9087. Add some jitter to DataNode.checkDiskErrorThread (Elliott Clark
+    via Colin P. McCabe)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 1bb4485d40..337706e040 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -88,6 +88,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -356,7 +357,7 @@ public class DataNode extends ReconfigurableBase
   SaslDataTransferServer saslServer;
   private ObjectName dataNodeInfoBeanName;
   private Thread checkDiskErrorThread = null;
-  protected final int checkDiskErrorInterval = 5*1000;
+  protected final int checkDiskErrorInterval;
   private boolean checkDiskErrorFlag = false;
   private Object checkDiskErrorMutex = new Object();
   private long lastDiskErrorCheck;
@@ -387,6 +388,8 @@ public class DataNode extends ReconfigurableBase
     this.connectToDnViaHostname = false;
     this.blockScanner = new BlockScanner(this, conf);
     this.pipelineSupportECN = false;
+    this.checkDiskErrorInterval =
+        ThreadLocalRandom.current().nextInt(5000, (int) (5000 * 1.25));
     initOOBTimeout();
   }
 
@@ -422,6 +425,9 @@ public class DataNode extends ReconfigurableBase
         ",hdfs-" + conf.get("hadoop.hdfs.configuration.version",
             "UNSPECIFIED");
 
+    this.checkDiskErrorInterval =
+        ThreadLocalRandom.current().nextInt(5000, (int) (5000 * 1.25));
+
     // Determine whether we should try to pass file descriptors to clients.
     if (conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,
           HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT)) {
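
For context (not part of the patch): the change replaces the fixed 5-second
disk-check interval with a value drawn once per DataNode from the half-open
range [5000, 6250) ms, so DataNodes started together do not all run their
disk checks in lockstep. Below is a minimal standalone sketch of the same
jitter idea; the class and placeholder method names are illustrative only
and do not appear in the patch.

    import java.util.concurrent.ThreadLocalRandom;

    // Illustrative sketch of per-process interval jitter (hypothetical class).
    public class JitteredIntervalDemo {
      private static final int BASE_INTERVAL_MS = 5000;

      public static void main(String[] args) throws InterruptedException {
        // Pick a per-process interval in [5000, 6250) ms, mirroring the patch.
        // ThreadLocalRandom.nextInt(origin, bound) is inclusive of origin and
        // exclusive of bound.
        int checkIntervalMs = ThreadLocalRandom.current()
            .nextInt(BASE_INTERVAL_MS, (int) (BASE_INTERVAL_MS * 1.25));
        System.out.println("Disk check interval: " + checkIntervalMs + " ms");

        // A periodic check loop would then sleep for the jittered interval
        // between iterations, e.g.:
        for (int i = 0; i < 3; i++) {
          // runDiskCheck();  // placeholder for the actual check
          Thread.sleep(checkIntervalMs);
        }
      }
    }

Because the interval is chosen once in the DataNode constructor, each node
keeps a fixed period, but different nodes (and restarts of the same node)
land on different offsets, which spreads the checks across the cluster.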