From fb1d7635ae02f0f537b56f994b07456263b83d59 Mon Sep 17 00:00:00 2001
From: Ayush Saxena
Date: Fri, 13 Mar 2020 21:10:03 +0530
Subject: [PATCH] HDFS-15155. writeIoRate of DataNodeVolumeMetrics is never
 used. Contributed by Haibin Huang.

---
 .../fsdataset/DataNodeVolumeMetrics.java      |  6 +-
 .../datanode/TestDataNodeVolumeMetrics.java   | 55 +++++++++++++++++++
 2 files changed, 58 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DataNodeVolumeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DataNodeVolumeMetrics.java
index e4d8707b86..87509e5b92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DataNodeVolumeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DataNodeVolumeMetrics.java
@@ -151,15 +151,15 @@ public double getReadIoStdDev() {
 
   // Based on writeIoRate
   public long getWriteIoSampleCount() {
-    return syncIoRate.lastStat().numSamples();
+    return writeIoRate.lastStat().numSamples();
   }
 
   public double getWriteIoMean() {
-    return syncIoRate.lastStat().mean();
+    return writeIoRate.lastStat().mean();
   }
 
   public double getWriteIoStdDev() {
-    return syncIoRate.lastStat().stddev();
+    return writeIoRate.lastStat().stddev();
   }
 
   public long getTotalFileIoErrors() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
index d2c9c62eb0..85054be630 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
@@ -19,6 +19,8 @@
 
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -177,4 +179,57 @@ private void verifyDataNodeVolumeMetrics(final FileSystem fs,
     LOG.info("fileIoErrorMean : " + metrics.getFileIoErrorMean());
     LOG.info("fileIoErrorStdDev : " + metrics.getFileIoErrorStdDev());
   }
+
+  @Test
+  public void testWriteIoVolumeMetrics() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(
+        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
+        100);
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf)
+            .numDataNodes(NUM_DATANODES)
+            .storageTypes(
+                new StorageType[]{StorageType.RAM_DISK, StorageType.DISK})
+            .storagesPerDatanode(2).build();
+
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      final Path fileName = new Path("/test.dat");
+      final long fileLen = Integer.MAX_VALUE + 1L;
+      long lastWriteIoSampleCount;
+
+      DFSTestUtil.createFile(fs, fileName, false, BLOCK_SIZE, fileLen,
+          fs.getDefaultBlockSize(fileName), REPL, 1L, true);
+
+      List<DataNode> datanodes = cluster.getDataNodes();
+      DataNode datanode = datanodes.get(0);
+
+      final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+      final FsVolumeSpi volume = datanode.getFSDataset().getVolume(block);
+      DataNodeVolumeMetrics metrics = volume.getMetrics();
+
+      assertEquals(0, metrics.getSyncIoSampleCount());
+      assertNotEquals(0, metrics.getWriteIoSampleCount());
+      assertTrue(metrics.getFlushIoSampleCount() > metrics
+          .getSyncIoSampleCount());
+      assertTrue(metrics.getWriteIoSampleCount() > metrics
+          .getFlushIoSampleCount());
+
+      lastWriteIoSampleCount = metrics.getWriteIoSampleCount();
+
+      try (FSDataOutputStream out = fs.append(fileName)) {
+        out.writeBytes("hello world");
+        out.hflush();
+      }
+
+      assertEquals(0, metrics.getSyncIoSampleCount());
+      assertTrue(metrics.getWriteIoSampleCount() > lastWriteIoSampleCount);
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
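
Note (trailing text below the patch; git am ignores it): the bug fixed
here is that all three getWriteIo* getters read from syncIoRate, so the
samples accumulated in writeIoRate were collected but never exposed.
Below is a minimal sketch of the metrics2 pattern the getters rely on,
assuming Hadoop's MutableRate / SampleStat API; the class and method
names are illustrative, not taken from the patch:

    import org.apache.hadoop.metrics2.lib.MetricsRegistry;
    import org.apache.hadoop.metrics2.lib.MutableRate;

    public class VolumeWriteIoSketch {
      private final MetricsRegistry registry =
          new MetricsRegistry("VolumeWriteIoSketch");
      // One sample is added per write I/O, mirroring how the DataNode
      // profiles file I/O when the sampling percentage is set to 100.
      private final MutableRate writeIoRate = registry.newRate("WriteIo");

      public void onWriteCompleted(long durationMs) {
        writeIoRate.add(durationMs); // records one sample
      }

      // After the fix, the getter reads writeIoRate (not syncIoRate),
      // so the count grows with every recorded write:
      public long getWriteIoSampleCount() {
        return writeIoRate.lastStat().numSamples();
      }
    }

This is also what the new test exercises: writing to the file drives
getWriteIoSampleCount() above zero and past its previous value, while
getSyncIoSampleCount() stays 0 because the append path calls hflush()
but never hsync().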