diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index a6e2452ca0..3ea42c38b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -97,9 +97,6 @@ public interface HdfsClientConfigKeys {
   int DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT = 3;
   String DFS_CLIENT_CONTEXT = "dfs.client.context";
   String DFS_CLIENT_CONTEXT_DEFAULT = "default";
-  String DFS_CLIENT_USE_LEGACY_BLOCKREADER =
-      "dfs.client.use.legacy.blockreader";
-  boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT = false;
   String DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL =
       "dfs.client.use.legacy.blockreader.local";
   boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 40711a7780..cfcfd55319 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1275,12 +1275,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
   @Deprecated
-  public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADER =
-      HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER;
-  @Deprecated
-  public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT =
-      HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT;
-  @Deprecated
   public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL =
       HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL;
   @Deprecated
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 698a0cc9c0..54592ab815 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -3376,15 +3376,6 @@
-<property>
-  <name>dfs.client.use.legacy.blockreader</name>
-  <value>false</value>
-  <description>
-    If true, use the RemoteBlockReader class for local read short circuit. If false, use
-    the newer RemoteBlockReader2 class.
-  </description>
-</property>
-
 <property>
   <name>dfs.client.write.byte-array-manager.count-limit</name>
   <value>2048</value>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
index 9d88384dfe..2012258be0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
@@ -73,7 +73,6 @@ private void testSkipInner(MiniDFSCluster cluster) throws IOException {
   @Test(timeout=60000)
   public void testSkipWithRemoteBlockReader() throws IOException {
     Configuration conf = new Configuration();
-    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     try {
       testSkipInner(cluster);