diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b867a70cf8..beea13b9f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -798,6 +798,9 @@ Release 2.7.0 - UNRELEASED
HDFS-49. MiniDFSCluster.stopDataNode will always shut down a node in
the cluster if a matching name is not found. (stevel)
+ HDFS-7566. Remove obsolete entries from hdfs-default.xml (Ray Chiang
+ via aw)
+
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index c24f7be2ef..966f5f0ff4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -30,16 +30,6 @@
version of this configuration file
-<property>
-  <name>dfs.namenode.logging.level</name>
-  <value>info</value>
-  <description>
-    The logging level for dfs namenode. Other values are "dir" (trace
-    namespace mutations), "block" (trace block under/over replications
-    and block creations/deletions), or "all".
-  </description>
-</property>
-
dfs.namenode.rpc-address
@@ -154,14 +144,6 @@
-<property>
-  <name>dfs.https.enable</name>
-  <value>false</value>
-  <description>
-    Deprecated. Use "dfs.http.policy" instead.
-  </description>
-</property>
-
dfs.http.policy
HTTP_ONLY
@@ -1244,14 +1226,6 @@
-<property>
-  <name>dfs.support.append</name>
-  <value>true</value>
-  <description>
-    Does HDFS allow appends to files?
-  </description>
-</property>
-
dfs.client.use.datanode.hostname
false
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
index d886e89eb5..608b8abf1a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
@@ -113,7 +113,6 @@
<property><name>hadoop.proxyuser.user.groups</name><value>users</value></property>
<property><name>dfs.namenode.name.dir.restore</name><value>false</value></property>
<property><name>io.seqfile.lazydecompress</name><value>true</value></property>
-<property><name>dfs.https.enable</name><value>false</value></property>
<property><name>mapreduce.reduce.merge.inmem.threshold</name><value>1000</value></property>
<property><name>mapreduce.input.fileinputformat.split.minsize</name><value>0</value></property>
<property><name>dfs.replication</name><value>3</value></property>
@@ -209,7 +208,6 @@
<property><name>mapreduce.job.dir</name><value>/tmp/hadoop-yarn/staging/user/.staging/job_1329348432655_0001</value></property>
<property><name>io.map.index.skip</name><value>0</value></property>
<property><name>net.topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.ScriptBasedMapping</value></property>
-<property><name>dfs.namenode.logging.level</name><value>info</value></property>
<property><name>fs.s3.maxRetries</name><value>4</value></property>
<property><name>s3native.client-write-packet-size</name><value>65536</value></property>
<property><name>yarn.resourcemanager.amliveliness-monitor.interval-ms</name><value>1000</value></property>
diff --git a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
index 59ae8d755a..70ff8af515 100644
--- a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
+++ b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
@@ -4657,7 +4657,6 @@
"mapreduce.tasktracker.taskcontroller" : "org.apache.hadoop.mapred.DefaultTaskController",
"yarn.scheduler.fair.preemption" : "true",
"mapreduce.reduce.shuffle.parallelcopies" : "5",
- "dfs.support.append" : "true",
"yarn.nodemanager.env-whitelist" : "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME",
"mapreduce.jobtracker.heartbeats.in.second" : "100",
"mapreduce.job.maxtaskfailures.per.tracker" : "3",
@@ -4674,7 +4673,6 @@
"dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
"ha.zookeeper.parent-znode" : "/hadoop-ha",
"io.seqfile.lazydecompress" : "true",
- "dfs.https.enable" : "false",
"mapreduce.reduce.merge.inmem.threshold" : "1000",
"mapreduce.input.fileinputformat.split.minsize" : "0",
"dfs.replication" : "3",
@@ -4783,7 +4781,6 @@
"io.map.index.skip" : "0",
"net.topology.node.switch.mapping.impl" : "org.apache.hadoop.net.ScriptBasedMapping",
"fs.s3.maxRetries" : "4",
- "dfs.namenode.logging.level" : "info",
"ha.failover-controller.new-active.rpc-timeout.ms" : "60000",
"s3native.client-write-packet-size" : "65536",
"yarn.resourcemanager.amliveliness-monitor.interval-ms" : "1000",
@@ -9770,7 +9767,6 @@
"mapreduce.tasktracker.taskcontroller" : "org.apache.hadoop.mapred.DefaultTaskController",
"yarn.scheduler.fair.preemption" : "true",
"mapreduce.reduce.shuffle.parallelcopies" : "5",
- "dfs.support.append" : "true",
"yarn.nodemanager.env-whitelist" : "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME",
"mapreduce.jobtracker.heartbeats.in.second" : "100",
"mapreduce.job.maxtaskfailures.per.tracker" : "3",
@@ -9787,7 +9783,6 @@
"dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
"ha.zookeeper.parent-znode" : "/hadoop-ha",
"io.seqfile.lazydecompress" : "true",
- "dfs.https.enable" : "false",
"mapreduce.reduce.merge.inmem.threshold" : "1000",
"mapreduce.input.fileinputformat.split.minsize" : "0",
"dfs.replication" : "3",
@@ -9896,7 +9891,6 @@
"io.map.index.skip" : "0",
"net.topology.node.switch.mapping.impl" : "org.apache.hadoop.net.ScriptBasedMapping",
"fs.s3.maxRetries" : "4",
- "dfs.namenode.logging.level" : "info",
"ha.failover-controller.new-active.rpc-timeout.ms" : "60000",
"s3native.client-write-packet-size" : "65536",
"yarn.resourcemanager.amliveliness-monitor.interval-ms" : "1000",