From 62d59e516ee7df1a1c026ac8e5552eb03442ec44 Mon Sep 17 00:00:00 2001
From: He Xiaoqiao
Date: Sun, 20 Mar 2022 14:01:45 +0800
Subject: [PATCH] HDFS-16504. Add parameter for NameNode to process getBlocks
 request. (#4068). Contributed by Max Xie.

Signed-off-by: He Xiaoqiao
---
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java    |  4 ++++
 .../hdfs/server/balancer/NameNodeConnector.java  | 12 ++++++------
 .../hdfs/server/namenode/FSNamesystem.java       | 17 +++++++++++++++--
 .../src/main/resources/hdfs-default.xml          |  8 ++++++++
 .../balancer/TestBalancerWithHANameNodes.java    | 15 +++++++--------
 5 files changed, 40 insertions(+), 16 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b14e92d42d..38949744b4 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -746,6 +746,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
    */
   public static final String  DFS_NAMENODE_GETBLOCKS_MAX_QPS_KEY = "dfs.namenode.get-blocks.max-qps";
   public static final int     DFS_NAMENODE_GETBLOCKS_MAX_QPS_DEFAULT = 20;
+  public static final String  DFS_NAMENODE_GETBLOCKS_CHECK_OPERATION_KEY
+      = "dfs.namenode.get-blocks.check.operation";
+  public static final boolean DFS_NAMENODE_GETBLOCKS_CHECK_OPERATION_DEFAULT
+      = true;
 
   public static final String  DFS_BALANCER_MOVEDWINWIDTH_KEY = "dfs.balancer.movedWinWidth";
   public static final long    DFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index ce0fb968bb..238457bcb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -147,11 +147,11 @@ public static void checkOtherInstanceRunning(boolean toCheck) {
   private final BalancerProtocols namenode;
 
   /**
-   * If set requestToStandby true, Balancer will getBlocks from
+   * If set getBlocksToStandby true, Balancer will getBlocks from
    * Standby NameNode only and it can reduce the performance impact of Active
    * NameNode, especially in a busy HA mode cluster.
    */
-  private boolean requestToStandby;
+  private boolean getBlocksToStandby;
   private String nsId;
   private Configuration config;
   private final KeyManager keyManager;
@@ -191,9 +191,9 @@ public NameNodeConnector(String name, URI nameNodeUri, Path idPath,
     this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri,
         BalancerProtocols.class, fallbackToSimpleAuth).getProxy();
 
-    this.requestToStandby = conf.getBoolean(
-        DFSConfigKeys.DFS_HA_ALLOW_STALE_READ_KEY,
-        DFSConfigKeys.DFS_HA_ALLOW_STALE_READ_DEFAULT);
+    this.getBlocksToStandby = !conf.getBoolean(
+        DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_CHECK_OPERATION_KEY,
+        DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_CHECK_OPERATION_DEFAULT);
     this.config = conf;
 
     this.fs = (DistributedFileSystem)FileSystem.get(nameNodeUri, conf);
@@ -318,7 +318,7 @@ public DatanodeStorageReport[] getLiveDatanodeStorageReport()
   private ProxyPair getProxy() throws IOException {
     boolean isRequestStandby = false;
     ClientProtocol clientProtocol = null;
-    if (requestToStandby && nsId != null
+    if (getBlocksToStandby && nsId != null
         && HAUtil.isHAEnabled(config, nsId)) {
       List<ClientProtocol> namenodes =
           HAUtil.getProxiesForAllNameNodesInNameservice(config, nsId);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a61028bde7..c03281d6a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -505,6 +505,12 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){
   private final int snapshotDiffReportLimit;
   private final int blockDeletionIncrement;
 
+  /**
+   * Whether to enable checkOperation when calling getBlocks.
+   * It is enabled by default.
+   */
+  private final boolean isGetBlocksCheckOperationEnabled;
+
   /** Interval between each check of lease to release. */
   private final long leaseRecheckIntervalMs;
   /** Maximum time the lock is hold to release lease. */
@@ -1066,6 +1072,10 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
       Preconditions.checkArgument(blockDeletionIncrement > 0,
           DFSConfigKeys.DFS_NAMENODE_BLOCK_DELETION_INCREMENT_KEY +
               " must be a positive integer.");
+      this.isGetBlocksCheckOperationEnabled = conf.getBoolean(
+          DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_CHECK_OPERATION_KEY,
+          DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_CHECK_OPERATION_DEFAULT);
+
     } catch(IOException e) {
       LOG.error(getClass().getSimpleName() + " initialization failed.", e);
       close();
@@ -1938,10 +1948,13 @@ public boolean isInStandbyState() {
    */
   public BlocksWithLocations getBlocks(DatanodeID datanode, long size, long
       minimumBlockSize, long timeInterval) throws IOException {
-    checkOperation(OperationCategory.READ);
+    OperationCategory checkOp =
+        isGetBlocksCheckOperationEnabled ?
+            OperationCategory.READ :
+            OperationCategory.UNCHECKED;
+    checkOperation(checkOp);
     readLock();
     try {
-      checkOperation(OperationCategory.READ);
+      checkOperation(checkOp);
       return getBlockManager().getBlocksWithLocations(datanode, size,
           minimumBlockSize, timeInterval);
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 584ae24ab1..6890b8a841 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4119,6 +4119,14 @@
     Mover, and StoragePolicySatisfier.
   </description>
 </property>
+<property>
+  <name>dfs.namenode.get-blocks.check.operation</name>
+  <value>true</value>
+  <description>
+    Set to false to disable checkOperation in getBlocks, so that getBlocks
+    requests from the Balancer are routed to the Standby NameNode in an HA setup.
+  </description>
+</property>
 <property>
   <name>dfs.balancer.dispatcherThreads</name>
   <value>200</value>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index bb9b1a1cde..9b4ba5ccb7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -18,9 +18,7 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_ALLOW_STALE_READ_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_ALLOW_STALE_READ_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
@@ -146,8 +144,9 @@ void doTest(Configuration conf, boolean withHA) throws Exception {
     TestBalancer.createFile(cluster, TestBalancer.filePath, totalUsedSpace
         / numOfDatanodes, (short) numOfDatanodes, 0);
 
-    boolean isRequestStandby = conf.getBoolean(
-        DFS_HA_ALLOW_STALE_READ_KEY, DFS_HA_ALLOW_STALE_READ_DEFAULT);
+    boolean isRequestStandby = !conf.getBoolean(
+        DFS_NAMENODE_GETBLOCKS_CHECK_OPERATION_KEY,
+        DFS_NAMENODE_GETBLOCKS_CHECK_OPERATION_DEFAULT);
     if (isRequestStandby) {
       HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
           cluster.getNameNode(1));
@@ -182,7 +181,7 @@ void doTest(Configuration conf, boolean withHA) throws Exception {
   @Test(timeout = 60000)
   public void testBalancerRequestSBNWithHA() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFS_HA_ALLOW_STALE_READ_KEY, true);
+    conf.setBoolean(DFS_NAMENODE_GETBLOCKS_CHECK_OPERATION_KEY, false);
     conf.setLong(DFS_HA_TAILEDITS_PERIOD_KEY, 1);
     //conf.setBoolean(DFS_HA_BALANCER_REQUEST_STANDBY_KEY, true);
     TestBalancer.initConf(conf);
@@ -329,8 +328,8 @@
         nsId, new Path("/test"),
     nncActive.close();
 
     // Request to standby namenode.
-    conf.setBoolean(DFSConfigKeys.DFS_HA_ALLOW_STALE_READ_KEY,
-        true);
+    conf.setBoolean(DFS_NAMENODE_GETBLOCKS_CHECK_OPERATION_KEY,
+        false);
     NameNodeConnector nncStandby = new NameNodeConnector(
         "nncStandby", namenode, nsId, new Path("/test"),
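
Usage sketch (illustration only, not part of the patch): the new dfs.namenode.get-blocks.check.operation key defaults to true, which keeps the old behavior where getBlocks requires OperationCategory.READ and is therefore served by the Active NameNode. Setting it to false flips NameNodeConnector#getBlocksToStandby to true, so the Balancer asks a Standby NameNode for block lists when HA is enabled for the nameservice. The snippet below mirrors the configuration used by testBalancerRequestSBNWithHA above; the helper class and method names are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

/** Hypothetical helper that builds a Balancer-side configuration. */
public class GetBlocksFromStandbyConf {
  public static Configuration create() {
    Configuration conf = new HdfsConfiguration();
    // New key from this patch (default true). Disabling the check makes
    // NameNodeConnector#getBlocksToStandby true, so getBlocks calls are
    // sent to a Standby NameNode instead of the Active one.
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_CHECK_OPERATION_KEY, false);
    // Tail edits frequently so the Standby's block map stays reasonably
    // fresh, as the HA test above does.
    conf.setLong(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    return conf;
  }
}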
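
Design note: the key is consumed in two places. FSNamesystem reads it at startup to choose the operation category for getBlocks (READ when the check is enabled, UNCHECKED when it is disabled), and NameNodeConnector stores the negated value as getBlocksToStandby to decide which NameNode to query. In a live HA cluster the value would presumably also have to be false in the NameNodes' own hdfs-site.xml, otherwise the Standby still performs checkOperation(READ) and rejects the call; the Balancer-side setting alone only changes which NameNode is asked. This replaces the previous use of DFS_HA_ALLOW_STALE_READ_KEY in NameNodeConnector and in the test.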