From fb996a32a98a25c0fe34a8ebb28563b53cd6e20e Mon Sep 17 00:00:00 2001
From: Virajith Jalaparti
Date: Tue, 5 Dec 2017 17:55:32 -0800
Subject: [PATCH] HDFS-12894. [READ] Skip setting block count of
 ProvidedDatanodeStorageInfo on DN registration update

---
 .../server/blockmanagement/BlockManager.java  |  5 +++++
 .../blockmanagement/DatanodeDescriptor.java   |  4 +++-
 .../TestNameNodeProvidedImplementation.java   | 20 ++++++++++++++++++-
 3 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f92c4e838a..916cbaae9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4943,4 +4943,9 @@ private static long getBlockRecoveryTimeout(long heartbeatIntervalSecs) {
   public void setBlockRecoveryTimeout(long blockRecoveryTimeout) {
     pendingRecoveryBlocks.setRecoveryTimeoutInterval(blockRecoveryTimeout);
   }
+
+  @VisibleForTesting
+  public ProvidedStorageMap getProvidedStorageMap() {
+    return providedStorageMap;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 83c608f7e3..fc587086ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -919,7 +919,9 @@ public void updateRegInfo(DatanodeID nodeReg) {
 
     // must re-process IBR after re-registration
     for(DatanodeStorageInfo storage : getStorageInfos()) {
-      storage.setBlockReportCount(0);
+      if (storage.getStorageType() != StorageType.PROVIDED) {
+        storage.setBlockReportCount(0);
+      }
     }
     heartbeatedSinceRegistration = false;
     forceRegistration = false;
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index deaf9d57af..d0572472b2 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -559,7 +559,9 @@ public void testProvidedDatanodeFailures() throws Exception {
     DataNode providedDatanode2 = cluster.getDataNodes().get(1);
 
     DFSClient client = new DFSClient(new InetSocketAddress("localhost",
-        cluster.getNameNodePort()), cluster.getConfiguration(0));
+        cluster.getNameNodePort()), cluster.getConfiguration(0));
+
+    DatanodeStorageInfo providedDNInfo = getProvidedDatanodeStorageInfo();
 
     if (numFiles >= 1) {
       String filename = "/" + filePrefix + (numFiles - 1) + fileSuffix;
@@ -596,10 +598,15 @@ public void testProvidedDatanodeFailures() throws Exception {
           providedDatanode2.getDatanodeId().getXferAddr());
       getAndCheckBlockLocations(client, filename, baseFileLen, 1, 0);
+      // BR count for the provided ProvidedDatanodeStorageInfo should reset to
+      // 0, when all DNs with PROVIDED storage fail.
+      assertEquals(0, providedDNInfo.getBlockReportCount());
 
       //restart the provided datanode
       cluster.restartDataNode(providedDNProperties1, true);
       cluster.waitActive();
 
+      assertEquals(1, providedDNInfo.getBlockReportCount());
+
       //should find the block on the 1st provided datanode now
       dnInfos = getAndCheckBlockLocations(client, filename, baseFileLen, 1, 1);
       //not comparing UUIDs as the datanode can now have a different one.
@@ -621,6 +628,8 @@ public void testTransientDeadDatanodes() throws Exception {
         false);
 
     DataNode providedDatanode = cluster.getDataNodes().get(0);
+    DatanodeStorageInfo providedDNInfo = getProvidedDatanodeStorageInfo();
+    int initialBRCount = providedDNInfo.getBlockReportCount();
     for (int i= 0; i < numFiles; i++) {
       // expect to have 2 locations as we have 2 provided Datanodes.
       verifyFileLocation(i, 2);
@@ -631,10 +640,19 @@ public void testTransientDeadDatanodes() throws Exception {
       cluster.waitActive();
       cluster.triggerHeartbeats();
       Thread.sleep(1000);
+      // the report count should just continue to increase.
+      assertEquals(initialBRCount + i + 1,
+          providedDNInfo.getBlockReportCount());
       verifyFileLocation(i, 2);
     }
   }
 
+  private DatanodeStorageInfo getProvidedDatanodeStorageInfo() {
+    ProvidedStorageMap providedStorageMap =
+        cluster.getNamesystem().getBlockManager().getProvidedStorageMap();
+    return providedStorageMap.getProvidedStorageInfo();
+  }
+
   @Test(timeout=30000)
   public void testNamenodeRestart() throws Exception {
     createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,