diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9371f56d60..6640ec67b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -451,11 +451,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.namenode.metrics.logger.period.seconds";
   public static final int DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT =
       600;
-  public static final String DFS_NAMENODE_METRICS_NODE_USAGE_PERCENTILE =
-      "dfs.namenode.metrics.node-usage.percentile";
-  public static final double DFS_NAMENODE_METRICS_NODE_USAGE_PERCENTILE_DEFAULT
-      = 0.95;
-
   public static final String DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY =
       "dfs.datanode.metrics.logger.period.seconds";
   public static final int DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 02aaeda1f9..915ae9733d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -66,8 +66,6 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_METRICS_NODE_USAGE_PERCENTILE;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_METRICS_NODE_USAGE_PERCENTILE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
@@ -530,8 +528,6 @@ private void logAuditEvent(boolean succeeded,
 
   private INodeAttributeProvider inodeAttributeProvider;
 
-  private final double percentileFactor;
-
   /**
    * If the NN is in safemode, and not due to manual / low resources, we
    * assume it must be because of startup. If the NN had low resources during
@@ -828,15 +824,7 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
       alwaysUseDelegationTokensForTests = conf.getBoolean(
           DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
           DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT);
-
-      this.percentileFactor = conf.getDouble(
-          DFS_NAMENODE_METRICS_NODE_USAGE_PERCENTILE,
-          DFS_NAMENODE_METRICS_NODE_USAGE_PERCENTILE_DEFAULT);
-
-      Preconditions.checkArgument(0.0 < this.percentileFactor
-          && this.percentileFactor <= 1.0, "Node usage percentile " +
-          "factor must be between 0 and 1.");
-
+
       this.dtSecretManager = createDelegationTokenSecretManager(conf);
       this.dir = new FSDirectory(this, conf);
       this.snapshotManager = new SnapshotManager(dir);
@@ -5626,7 +5614,6 @@ public String getNodeUsage() {
     float max = 0;
     float min = 0;
     float dev = 0;
-    float percentile = 0;
 
     final Map<String, Map<String, Object>> info =
         new HashMap<String, Map<String, Object>>();
@@ -5652,7 +5639,6 @@
       median = usages[usages.length / 2];
       max = usages[usages.length - 1];
       min = usages[0];
-      percentile = usages[(int)((usages.length - 1) * percentileFactor)];
 
       for (i = 0; i < usages.length; i++) {
         dev += (usages[i] - totalDfsUsed) * (usages[i] - totalDfsUsed);
@@ -5665,11 +5651,6 @@
     innerInfo.put("median", StringUtils.format("%.2f%%", median));
     innerInfo.put("max", StringUtils.format("%.2f%%", max));
     innerInfo.put("stdDev", StringUtils.format("%.2f%%", dev));
-    final Map<String, Object> percentileInfo = new HashMap<String, Object>();
-    percentileInfo.put("name", StringUtils.format("%dth percentile",
-        (int)(percentileFactor * 100)));
-    percentileInfo.put("value", StringUtils.format("%.2f%%", percentile));
-    innerInfo.put("percentile", percentileInfo);
     info.put("nodeUsage", innerInfo);
 
     return JSON.toString(info);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index bc56dc8032..856e6b4a45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1896,17 +1896,6 @@
   </description>
 </property>
 
-<property>
-  <name>dfs.namenode.metrics.node-usage.percentile</name>
-  <value>0.95</value>
-  <description>
-    This setting specifies the percentile level to report node usage metrics.
-    For example 0.95 means reporting the node usage for the 95th percentile
-    of all DataNodes. If this setting is at 0.95 and the reported node usage
-    is 70%, it means 95% of DataNodes have a usage below 70%.
-  </description>
-</property>
-
 <property>
   <name>dfs.datanode.metrics.logger.period.seconds</name>
   <value>600</value>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index d9a1f07456..4fa2e4c02d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -166,8 +166,8 @@
 <tr><th> Non DFS Used:</th><td>{NonDfsUsedSpace|fmt_bytes}</td></tr>
 <tr><th> DFS Remaining:</th><td>{Free|fmt_bytes} ({PercentRemaining|fmt_percentage})</td></tr>
 <tr><th> Block Pool Used:</th><td>{BlockPoolUsedSpace|fmt_bytes} ({PercentBlockPoolUsed|fmt_percentage})</td></tr>
-<tr><th> {#NodeUsage.nodeUsage}DataNodes usages% (Min/Median/Max/stdDev/{percentile.name}):{/NodeUsage.nodeUsage}</th>
-    <td>{#NodeUsage.nodeUsage}{min} / {median} / {max} / {stdDev} / {percentile.value}{/NodeUsage.nodeUsage}</td></tr>
+<tr><th> DataNodes usages% (Min/Median/Max/stdDev):</th>
+    <td>{#NodeUsage.nodeUsage}{min} / {median} / {max} / {stdDev}{/NodeUsage.nodeUsage}</td></tr>
 {/nn}
 
 {#fs}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 3807076385..dc8bea75db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -173,16 +173,6 @@ public void testNameNodeMXBeanInfo() throws Exception {
       String nodeUsage = (String) (mbs.getAttribute(mxbeanName,
           "NodeUsage"));
       assertEquals("Bad value for NodeUsage", fsn.getNodeUsage(), nodeUsage);
-      Map<String, Map<String, Object>> usage
-          = (Map<String, Map<String, Object>>) JSON.parse(nodeUsage);
-      assertTrue(usage.get("nodeUsage").containsKey("min"));
-      assertTrue(usage.get("nodeUsage").containsKey("median"));
-      assertTrue(usage.get("nodeUsage").containsKey("max"));
-      assertTrue(usage.get("nodeUsage").containsKey("percentile"));
-      Map<String, Object> percentileInfo
-          = (Map<String, Object>) usage.get("nodeUsage").get("percentile");
-      assertTrue(percentileInfo.containsKey("name"));
-      assertTrue(percentileInfo.containsKey("value"));
       // get attribute NameJournalStatus
       String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName,
           "NameJournalStatus"));