From 30cd265134ba60ddf9e105ddfd395830b770a692 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Wed, 3 May 2017 16:28:47 -0700
Subject: [PATCH] Revert "HDFS-11488. JN log segment syncing should support HA upgrade. Contributed by Hanisha Koneru."

This reverts commit 08fb82d6d1217695c1cbee14608b172fa76a5569.
---
 .../src/site/markdown/Metrics.md              |  4 ++--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java |  8 ++++----
 .../blockmanagement/DatanodeManager.java      |  7 +++----
 .../hadoop/hdfs/server/common/Util.java       | 12 ++++++------
 .../hadoop/hdfs/server/datanode/DNConf.java   |  7 +++----
 .../hdfs/server/datanode/FileIoProvider.java  |  4 ++--
 .../datanode/ProfilingFileIoEvents.java       | 19 +++++++++----------
 .../src/main/resources/hdfs-default.xml       | 11 -----------
 .../blockmanagement/TestSlowDiskTracker.java  |  4 ++--
 .../server/datanode/TestDataNodeMXBean.java   |  4 ++--
 .../datanode/TestDataNodeVolumeMetrics.java   |  4 ++--
 .../namenode/TestNameNodeStatusMXBean.java    |  5 ++---
 .../hadoop/tools/TestHdfsConfigFields.java    |  2 ++
 13 files changed, 39 insertions(+), 52 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 336ad85054..a8bdbebdc7 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -334,8 +334,8 @@ FsVolume
 
 Per-volume metrics contain Datanode Volume IO related statistics. Per-volume
 metrics are off by default. They can be enabled by setting `dfs.datanode
-.fileio.profiling.percentage.fraction` to an integer value between 1 and 100.
-Setting this value to 0 would mean profiling is not enabled. But enabling
+.fileio.profiling.sampling.fraction` to a fraction between 0.0 and 1.0.
+Setting this value to 0.0 would mean profiling is not enabled. But enabling
 per-volume metrics may have a performance impact. Each metrics record
 contains tags such as Hostname as additional information along with metrics.
 
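The Metrics.md hunk above documents `dfs.datanode.fileio.profiling.sampling.fraction` as a double between 0.0 and 1.0. A minimal sketch of enabling per-volume profiling programmatically, assuming the key restored by this revert and the standard `Configuration.setDouble` API; the helper class name is illustrative only and not part of the patch:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

// Hypothetical helper: builds a Configuration with per-volume file I/O
// profiling sampled at the given fraction (0.0 leaves profiling disabled).
public class VolumeProfilingConfig {
  public static Configuration withSamplingFraction(double fraction) {
    Configuration conf = new Configuration();
    conf.setDouble(
        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
        fraction);
    return conf;
  }
}
```

The tests touched later in this patch (for example TestDataNodeMXBean and TestDataNodeVolumeMetrics) set the same key to 1.0 so that file I/O events are profiled during the test run.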
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0ca344c8de..3fa383b9bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -731,10 +731,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean
       DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_DEFAULT = false;
   public static final String
-      DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY =
-      "dfs.datanode.fileio.profiling.sampling.percentage";
-  public static final int
-      DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT = 0;
+      DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY =
+      "dfs.datanode.fileio.profiling.sampling.fraction";
+  public static final double
+      DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT = 0.0;
 
   //Keys with no defaults
   public static final String DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index a61aa78c28..c7bdca9c15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -215,10 +215,9 @@ public class DatanodeManager {
     this.dataNodePeerStatsEnabled = conf.getBoolean(
         DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY,
         DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_DEFAULT);
-    this.dataNodeDiskStatsEnabled = Util.isDiskStatsEnabled(conf.getInt(
-        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
-        DFSConfigKeys.
-            DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT));
+    this.dataNodeDiskStatsEnabled = Util.isDiskStatsEnabled(conf.getDouble(
+        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
+        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT));
 
     final Timer timer = new Timer();
     this.slowPeerTracker = dataNodePeerStatsEnabled ?
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
index e9ceeb05be..fdb09df9da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
@@ -389,17 +389,17 @@ public static List getLoggerAddresses(URI uri,
     return addrsList;
   }
 
-  public static boolean isDiskStatsEnabled(int fileIOSamplingPercentage) {
+  public static boolean isDiskStatsEnabled(double fileIOSamplingFraction) {
     final boolean isEnabled;
-    if (fileIOSamplingPercentage <= 0) {
+    if (fileIOSamplingFraction < 0.000001) {
       LOG.info(DFSConfigKeys
-          .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY + " set to "
-          + fileIOSamplingPercentage + ". Disabling file IO profiling");
+          .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY + " set to "
+          + fileIOSamplingFraction + ". Disabling file IO profiling");
       isEnabled = false;
     } else {
       LOG.info(DFSConfigKeys
-          .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY + " set to "
-          + fileIOSamplingPercentage + ". Enabling file IO profiling");
+          .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY + " set to "
+          + fileIOSamplingFraction + ". Enabling file IO profiling");
       isEnabled = true;
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index 8e5b59743f..21ffccc213 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -175,10 +175,9 @@ public DNConf(final Configurable dn) {
     this.peerStatsEnabled = getConf().getBoolean(
         DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY,
         DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_DEFAULT);
-    this.diskStatsEnabled = Util.isDiskStatsEnabled(getConf().getInt(
-        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
-        DFSConfigKeys.
-            DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT));
+    this.diskStatsEnabled = Util.isDiskStatsEnabled(getConf().getDouble(
+        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
+        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT));
     this.outliersReportIntervalMs = getConf().getTimeDuration(
         DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
         DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
index 694eadd8c5..5508e0bdd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
@@ -62,8 +62,8 @@
  *
  * Behavior can be injected into these events by enabling the
  * profiling and/or fault injection event hooks through
- * {@link DFSConfigKeys#DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY}
- * and {@link DFSConfigKeys#DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY}.
+ * {@link DFSConfigKeys#DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY} and
+ * {@link DFSConfigKeys#DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY}.
  * These event hooks are disabled by default.
 *
 * Most functions accept an optional {@link FsVolumeSpi} parameter for
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
index 83ee5f6753..35118b2171 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
@@ -44,19 +44,18 @@ class ProfilingFileIoEvents {
 
   public ProfilingFileIoEvents(@Nullable Configuration conf) {
     if (conf != null) {
-      int fileIOSamplingPercentage = conf.getInt(
-          DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
+      double fileIOSamplingFraction = conf.getDouble(
+          DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
           DFSConfigKeys
-              .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT);
-      isEnabled = Util.isDiskStatsEnabled(fileIOSamplingPercentage);
-      if (fileIOSamplingPercentage > 100) {
+              .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT);
+      isEnabled = Util.isDiskStatsEnabled(fileIOSamplingFraction);
+      if (fileIOSamplingFraction > 1) {
         LOG.warn(DFSConfigKeys
-            .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY +
-            " value cannot be more than 100. Setting value to 100");
-        fileIOSamplingPercentage = 100;
+            .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY +
+            " value cannot be more than 1. Setting value to 1");
+        fileIOSamplingFraction = 1;
       }
-      sampleRangeMax = (int) ((double) fileIOSamplingPercentage / 100 *
-          Integer.MAX_VALUE);
+      sampleRangeMax = (int) (fileIOSamplingFraction * Integer.MAX_VALUE);
     } else {
       isEnabled = false;
       sampleRangeMax = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 0f33b709af..7fcea01165 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2021,17 +2021,6 @@
 </property>
 
-<property>
-  <name>dfs.datanode.fileio.profiling.sampling.percentage</name>
-  <value>0</value>
-  <description>
-    This setting controls the percentage of file I/O events which will be
-    profiled for DataNode disk statistics. The default value of 0 disables
-    disk statistics. Set to an integer value between 1 and 100 to enable disk
-    statistics.
-  </description>
-</property>
-
 <property>
   <name>hadoop.user.group.metrics.percentiles.intervals</name>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
index 172400d46d..16dfab208a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
@@ -28,7 +28,7 @@
 import com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.conf.Configuration;
 import static org.apache.hadoop.hdfs.DFSConfigKeys
-    .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY;
+    .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys
     .DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
@@ -80,7 +80,7 @@ public class TestSlowDiskTracker {
   static {
     conf = new HdfsConfiguration();
     conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1L);
-    conf.setInt(DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, 100);
+    conf.setDouble(DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
     conf.setTimeDuration(DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
         OUTLIERS_REPORT_INTERVAL, TimeUnit.MILLISECONDS);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index faead188f5..b80976a2c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -218,8 +218,8 @@ private int getTotalNumBlocks(MBeanServer mbs, ObjectName mxbeanName)
   @Test
   public void testDataNodeMXBeanSlowDisksEnabled() throws Exception {
     Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys
-        .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, 100);
+    conf.setDouble(DFSConfigKeys
+        .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
index 0f41d2394b..03e1deea96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
@@ -121,8 +121,8 @@ public void testVolumeMetricsWithVolumeDepartureArrival() throws Exception {
 
   private MiniDFSCluster setupClusterForVolumeMetrics() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    conf.setInt(DFSConfigKeys
-        .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, 100);
+    conf.setDouble(DFSConfigKeys
+        .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
     SimulatedFSDataset.setFactory(conf);
     return new MiniDFSCluster.Builder(conf)
         .numDataNodes(NUM_DATANODES)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
index f9bfc37374..8fe734e44b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
@@ -105,9 +105,8 @@ public void testNameNodeStatusMXBean() throws Exception {
   @Test
   public void testNameNodeMXBeanSlowDisksEnabled() throws Exception {
     Configuration conf = new Configuration();
-    conf.setInt(
-        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
-        100);
+    conf.setDouble(
+        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
     conf.setTimeDuration(
         DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
         1000, TimeUnit.MILLISECONDS);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index f23b26666c..1fdf713562 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -105,6 +105,8 @@ public void initializeMemberVariables() {
         .add(DFSConfigKeys.DFS_NAMENODE_STARTUP_KEY);
     configurationPropsToSkipCompare.add(DFSConfigKeys
        .DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY);
+    configurationPropsToSkipCompare.add(DFSConfigKeys
+        .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY);
 
     // Allocate
     xmlPropsToSkipCompare = new HashSet();
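The ProfilingFileIoEvents hunk maps the configured fraction onto an int threshold, `sampleRangeMax = (int) (fraction * Integer.MAX_VALUE)`, so each file I/O event can be sampled with a single cheap integer comparison. A minimal standalone sketch of that technique follows; the class and method names are illustrative, not the patched class itself, and the diff only clamps values above 1, so the additional floor at 0.0 here is an added safety assumption:

```java
import java.util.concurrent.ThreadLocalRandom;

// Hypothetical, self-contained illustration of fraction-based event sampling.
public class SamplingGate {
  private final int sampleRangeMax;

  public SamplingGate(double fraction) {
    // Clamp to [0.0, 1.0], then scale onto the non-negative int range so the
    // per-event decision reduces to one random draw and one comparison.
    double f = Math.max(0.0, Math.min(fraction, 1.0));
    this.sampleRangeMax = (int) (f * Integer.MAX_VALUE);
  }

  // True for roughly `fraction` of calls; always false when fraction is 0.0.
  public boolean shouldSample() {
    return ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE) < sampleRangeMax;
  }
}
```

With a fraction of 1.0, `sampleRangeMax` equals `Integer.MAX_VALUE` and every event passes the gate, which is why the updated tests set the key to 1.0 when they need profiling data for each operation.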