diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 1d0975111d..2c5779aeb1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -290,12 +290,19 @@ public class DatanodeManager { DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval + 10 * 1000 * heartbeatIntervalSeconds; - final int blockInvalidateLimit = Math.max(20*(int)(heartbeatIntervalSeconds), + + // Effective block invalidate limit is the bigger value between + // value configured in hdfs-site.xml, and 20 * HB interval. + final int configuredBlockInvalidateLimit = conf.getInt( + DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT); - this.blockInvalidateLimit = conf.getInt( - DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit); + final int countedBlockInvalidateLimit = 20*(int)(heartbeatIntervalSeconds); + this.blockInvalidateLimit = Math.max(countedBlockInvalidateLimit, + configuredBlockInvalidateLimit); LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY - + "=" + this.blockInvalidateLimit); + + ": configured=" + configuredBlockInvalidateLimit + + ", counted=" + countedBlockInvalidateLimit + + ", effected=" + blockInvalidateLimit); this.checkIpHostnameInRegistration = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY, @@ -403,7 +410,8 @@ public FSClusterStats getFSClusterStats() { return fsClusterStats; } - int getBlockInvalidateLimit() { + @VisibleForTesting + public int getBlockInvalidateLimit() { return blockInvalidateLimit; } @@ -1911,7 
+1919,7 @@ private void setHeartbeatInterval(long intervalSeconds, this.heartbeatExpireInterval = 2L * recheckInterval + 10 * 1000 * intervalSeconds; this.blockInvalidateLimit = Math.max(20 * (int) (intervalSeconds), - DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT); + blockInvalidateLimit); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java index 6b553df117..c0de63a8f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java @@ -40,6 +40,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT; public class TestNameNodeReconfigure { @@ -48,10 +49,13 @@ public class TestNameNodeReconfigure { .getLog(TestNameNodeReconfigure.class); private MiniDFSCluster cluster; + private final int customizedBlockInvalidateLimit = 500; @Before public void setUp() throws IOException { Configuration conf = new HdfsConfiguration(); + conf.setInt(DFS_BLOCK_INVALIDATE_LIMIT_KEY, + customizedBlockInvalidateLimit); cluster = new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); } @@ -212,6 +216,38 @@ public void testReconfigureHearbeatCheck() throws ReconfigurationException { datanodeManager.getHeartbeatRecheckInterval()); } + @Test + public void testBlockInvalidateLimitAfterReconfigured() + throws 
ReconfigurationException { + final NameNode nameNode = cluster.getNameNode(); + final DatanodeManager datanodeManager = nameNode.namesystem + .getBlockManager().getDatanodeManager(); + + assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set", + customizedBlockInvalidateLimit, + datanodeManager.getBlockInvalidateLimit()); + + nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, + Integer.toString(6)); + + // 20 * 6 = 120 < 500 + // The block invalidate limit should stay the same as before after reconfiguration. + assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + + " is not honored after reconfiguration", + customizedBlockInvalidateLimit, + datanodeManager.getBlockInvalidateLimit()); + + nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, + Integer.toString(50)); + + // 20 * 50 = 1000 > 500 + // The block invalidate limit should be reset to 1000 + assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + + " is not reconfigured correctly", + 1000, + datanodeManager.getBlockInvalidateLimit()); + } + @After public void shutDown() throws IOException { if (cluster != null) {