From 834372f4040f1e7a00720da5c40407f9b1423b6d Mon Sep 17 00:00:00 2001 From: Shanyu Zhao Date: Mon, 6 Jul 2020 08:43:34 -0700 Subject: [PATCH] HDFS-15451. Do not discard non-initial block report for provided storage. (#2119). Contributed by Shanyu Zhao. Signed-off-by: He Xiaoqiao --- .../server/blockmanagement/BlockManager.java | 1 + .../blockmanagement/TestBlockManager.java | 53 +++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 7f0f17e7b4..f2cd6b9819 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -2759,6 +2759,7 @@ public boolean processReport(final DatanodeID nodeID, storageInfo = node.updateStorage(storage); } if (namesystem.isInStartupSafeMode() + && !StorageType.PROVIDED.equals(storageInfo.getStorageType()) && storageInfo.getBlockReportCount() > 0) { blockLog.info("BLOCK* processReport 0x{}: " + "discarded non-initial block report from {}" diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 11ed5ba9a3..695377aa5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -49,9 +49,11 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; +import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestProvidedImpl; import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten; import org.apache.hadoop.hdfs.server.namenode.CacheManager; @@ -1051,6 +1053,57 @@ public void testSafeModeIBRBeforeFirstFullBR() throws Exception { (ds) >= 0); } + @Test + public void testSafeModeWithProvidedStorageBR() throws Exception { + DatanodeDescriptor node0 = spy(nodes.get(0)); + DatanodeStorageInfo ds0 = node0.getStorageInfos()[0]; + node0.setAlive(true); + DatanodeDescriptor node1 = spy(nodes.get(1)); + DatanodeStorageInfo ds1 = node1.getStorageInfos()[0]; + node1.setAlive(true); + + String providedStorageID = DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT; + DatanodeStorage providedStorage = new DatanodeStorage( + providedStorageID, DatanodeStorage.State.NORMAL, StorageType.PROVIDED); + + // create block manager with provided storage enabled + Configuration conf = new HdfsConfiguration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, true); + conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS, + TestProvidedImpl.TestFileRegionBlockAliasMap.class, + BlockAliasMap.class); + BlockManager bmPs = new BlockManager(fsn, false, conf); + bmPs.setBlockPoolId("BP-12344-10.1.1.2-12344"); + + // pretend to be in safemode + doReturn(true).when(fsn).isInStartupSafeMode(); + + // register new node + DatanodeRegistration nodeReg0 = + new DatanodeRegistration(node0, null, null, ""); + bmPs.getDatanodeManager().registerDatanode(nodeReg0); + bmPs.getDatanodeManager().addDatanode(node0); + 
DatanodeRegistration nodeReg1 = + new DatanodeRegistration(node1, null, null, ""); + bmPs.getDatanodeManager().registerDatanode(nodeReg1); + bmPs.getDatanodeManager().addDatanode(node1); + + // process reports of provided storage and disk storage + bmPs.processReport(node0, providedStorage, BlockListAsLongs.EMPTY, null); + bmPs.processReport(node0, new DatanodeStorage(ds0.getStorageID()), + BlockListAsLongs.EMPTY, null); + bmPs.processReport(node1, providedStorage, BlockListAsLongs.EMPTY, null); + bmPs.processReport(node1, new DatanodeStorage(ds1.getStorageID()), + BlockListAsLongs.EMPTY, null); + + // The provided storage report should not affect disk storage report + DatanodeStorageInfo dsPs = + bmPs.getProvidedStorageMap().getProvidedStorageInfo(); + assertEquals(2, dsPs.getBlockReportCount()); + assertEquals(1, ds0.getBlockReportCount()); + assertEquals(1, ds1.getBlockReportCount()); + } + @Test public void testFullBR() throws Exception { doReturn(true).when(fsn).isRunning();