diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
index 8bb94ac6f6..916aace153 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
@@ -12,3 +12,6 @@ IMPROVEMENTS:
 
     HDFS-4987. Namenode changes to track multiple storages per datanode.
     (szetszwo)
+
+    HDFS-5154. Fix TestBlockManager and TestDatanodeDescriptor after HDFS-4987.
+    (Junping Du via szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 0c3ff98fc4..bc1107beb7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -247,9 +247,12 @@ public Collection<DatanodeStorageInfo> getStorageInfos() {
    */
   boolean removeBlock(BlockInfo b) {
     int index = b.findStorageInfo(this);
-    DatanodeStorageInfo s = b.getStorageInfo(index);
-    if (s != null) {
-      return s.removeBlock(b);
+    // if block exists on this datanode
+    if (index >= 0) {
+      DatanodeStorageInfo s = b.getStorageInfo(index);
+      if (s != null) {
+        return s.removeBlock(b);
+      }
     }
     return false;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index 35639fd076..006ee42bb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -24,9 +24,11 @@
 import java.util.Set;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.Daemon;
 
 import org.junit.Assert;
@@ -215,4 +217,23 @@ public static void setWritingPrefersLocalNode(
   public static void checkHeartbeat(BlockManager bm) {
     bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
   }
+
+  public static DatanodeDescriptor getLocalDatanodeDescriptor(
+      boolean initializeStorage) {
+    DatanodeDescriptor dn = new DatanodeDescriptor(DFSTestUtil.getLocalDatanodeID());
+    if (initializeStorage) {
+      dn.updateStorage(new DatanodeStorage(DatanodeStorage.newStorageID()));
+    }
+    return dn;
+  }
+
+  public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
+      String rackLocation, boolean initializeStorage) {
+    DatanodeDescriptor dn = DFSTestUtil.getDatanodeDescriptor(ipAddr,
+        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, rackLocation);
+    if (initializeStorage) {
+      dn.updateStorage(new DatanodeStorage(DatanodeStorage.newStorageID()));
+    }
+    return dn;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 67306cf782..1ddb91cfe1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -25,6 +25,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map.Entry;
 
@@ -39,6 +40,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.net.NetworkTopology;
 import org.junit.Before;
 import org.junit.Test;
@@ -80,17 +82,17 @@ public void setupMockCluster() throws IOException {
     Mockito.doReturn(true).when(fsn).hasWriteLock();
     bm = new BlockManager(fsn, fsn, conf);
     nodes = ImmutableList.of(
-        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
-        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
-        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"),
-        DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackB"),
-        DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackB"),
-        DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackB")
-      );
+        BlockManagerTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA", true),
+        BlockManagerTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA", true),
+        BlockManagerTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA", true),
+        BlockManagerTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackB", true),
+        BlockManagerTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackB", true),
+        BlockManagerTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackB", true)
+      );
     rackA = nodes.subList(0, 3);
     rackB = nodes.subList(3, 6);
   }
-  
+
   private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
     NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
     // construct network topology
@@ -282,6 +284,7 @@ private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
     // the third off-rack replica.
     DatanodeDescriptor rackCNode =
       DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC");
+    rackCNode.updateStorage(new DatanodeStorage(DatanodeStorage.newStorageID()));
     addNodes(ImmutableList.of(rackCNode));
     try {
       DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
@@ -322,15 +325,15 @@ private void doTestSufficientlyReplBlocksUsesNewRack(int testIndex) {
   @Test
   public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception {
     List<DatanodeDescriptor> nodes = ImmutableList.of(
-        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
-        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
-        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"),
-        DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackA"),
-        DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackA"),
-        DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackA")
+        BlockManagerTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA", true),
+        BlockManagerTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA", true),
+        BlockManagerTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA", true),
+        BlockManagerTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackA", true),
+        BlockManagerTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackA", true),
+        BlockManagerTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackA", true)
       );
     addNodes(nodes);
-    List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);;
+    List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);
     for (int i = 0; i < NUM_TEST_ITERS; i++) {
       doTestSingleRackClusterIsSufficientlyReplicated(i, origNodes);
     }
@@ -353,7 +356,17 @@ private void doTestSingleRackClusterIsSufficientlyReplicated(int testIndex,
   private void fulfillPipeline(BlockInfo blockInfo,
       DatanodeDescriptor[] pipeline) throws IOException {
     for (int i = 1; i < pipeline.length; i++) {
-      bm.addBlock(pipeline[i], "STORAGE_ID", blockInfo, null);
+      DatanodeDescriptor dn = pipeline[i];
+
+      Iterator<DatanodeStorageInfo> iterator = dn.getStorageInfos().iterator();
+      if (iterator.hasNext()) {
+        DatanodeStorageInfo storage = iterator.next();
+        bm.addBlock(dn, storage.getStorageID(), blockInfo, null);
+        blockInfo.addStorage(storage);
+      } else {
+        throw new RuntimeException("Storage info on node: " + dn.getHostName() +
+            " is invalid.");
+      }
     }
   }
 
@@ -494,7 +507,10 @@ public void testHighestPriReplSrcChosenDespiteMaxReplLimit() throws Exception {
   @Test
   public void testSafeModeIBR() throws Exception {
     DatanodeDescriptor node = spy(nodes.get(0));
-    node.setStorageID("dummy-storage");
+    Iterator<DatanodeStorageInfo> i = node.getStorageInfos().iterator();
+    DatanodeStorageInfo ds = i.next();
+    node.setStorageID(ds.getStorageID());
+
     node.isAlive = true;
 
     DatanodeRegistration nodeReg =
@@ -510,12 +526,15 @@ public void testSafeModeIBR() throws Exception {
     assertTrue(node.isFirstBlockReport());
     // send block report, should be processed
     reset(node);
-    bm.processReport(node, null, "pool", new BlockListAsLongs(null, null));
+
+    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
+        new BlockListAsLongs(null, null));
     verify(node).receivedBlockReport();
     assertFalse(node.isFirstBlockReport());
     // send block report again, should NOT be processed
     reset(node);
-    bm.processReport(node, null, "pool", new BlockListAsLongs(null, null));
+    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
+        new BlockListAsLongs(null, null));
     verify(node, never()).receivedBlockReport();
     assertFalse(node.isFirstBlockReport());
 
@@ -527,7 +546,8 @@ public void testSafeModeIBR() throws Exception {
     assertTrue(node.isFirstBlockReport()); // ready for report again
     // send block report, should be processed after restart
     reset(node);
-    bm.processReport(node, null, "pool", new BlockListAsLongs(null, null));
+    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
+        new BlockListAsLongs(null, null));
     verify(node).receivedBlockReport();
     assertFalse(node.isFirstBlockReport());
   }
@@ -535,7 +555,9 @@ public void testSafeModeIBR() throws Exception {
   @Test
   public void testSafeModeIBRAfterIncremental() throws Exception {
     DatanodeDescriptor node = spy(nodes.get(0));
-    node.setStorageID("dummy-storage");
+    Iterator<DatanodeStorageInfo> i = node.getStorageInfos().iterator();
+    DatanodeStorageInfo ds = i.next();
+    node.setStorageID(ds.getStorageID());
     node.isAlive = true;
 
     DatanodeRegistration nodeReg =
@@ -552,7 +574,8 @@ public void testSafeModeIBRAfterIncremental() throws Exception {
     // send block report while pretending to already have blocks
     reset(node);
     doReturn(1).when(node).numBlocks();
-    bm.processReport(node, null, "pool", new BlockListAsLongs(null, null));
+    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
+        new BlockListAsLongs(null, null));
     verify(node).receivedBlockReport();
     assertFalse(node.isFirstBlockReport());
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
index 41afa739d6..bf240d7d46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
@@ -22,6 +22,7 @@
 import static org.junit.Assert.assertTrue;
 
 import java.util.ArrayList;
+import java.util.Iterator;
 
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -55,11 +56,13 @@ public void testGetInvalidateBlocks() throws Exception {
 
   @Test
   public void testBlocksCounter() throws Exception {
-    DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
+    DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
     assertEquals(0, dd.numBlocks());
     BlockInfo blk = new BlockInfo(new Block(1L), 1);
     BlockInfo blk1 = new BlockInfo(new Block(2L), 2);
-    final String storageID = "STORAGE_ID";
+    Iterator<DatanodeStorageInfo> iterator = dd.getStorageInfos().iterator();
+    assertTrue(iterator.hasNext());
+    final String storageID = iterator.next().getStorageID();
     // add first block
     assertTrue(dd.addBlock(storageID, blk));
     assertEquals(1, dd.numBlocks());