HDFS-5154. Fix TestBlockManager and TestDatanodeDescriptor after HDFS-4987. Contributed by Junping Du

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1519548 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2013-09-03 04:04:22 +00:00
parent b305f1ff08
commit f7e3bc553d
5 changed files with 80 additions and 27 deletions

View File

@ -12,3 +12,6 @@ IMPROVEMENTS:
HDFS-4987. Namenode changes to track multiple storages per datanode.
(szetszwo)
HDFS-5154. Fix TestBlockManager and TestDatanodeDescriptor after HDFS-4987.
(Junping Du via szetszwo)

View File

@ -247,10 +247,13 @@ public Collection<DatanodeStorageInfo> getStorageInfos() {
*/
/**
 * Remove the given block from whichever storage on this datanode
 * currently holds it, if any.
 *
 * @param b the block to remove
 * @return true if the block was found on one of this datanode's
 *         storages and removed from it; false otherwise
 */
boolean removeBlock(BlockInfo b) {
  final int idx = b.findStorageInfo(this);
  if (idx < 0) {
    // the block is not stored on this datanode
    return false;
  }
  final DatanodeStorageInfo storage = b.getStorageInfo(idx);
  return storage != null && storage.removeBlock(b);
}

View File

@ -24,9 +24,11 @@
import java.util.Set;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.util.Daemon;
import org.junit.Assert;
@ -215,4 +217,23 @@ public static void setWritingPrefersLocalNode(
/**
 * Trigger an immediate heartbeat check on the heartbeat manager owned by
 * the given block manager's datanode manager, so tests do not have to
 * wait for the periodic check interval.
 *
 * @param bm the block manager whose datanode heartbeats are checked
 */
public static void checkHeartbeat(BlockManager bm) {
bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
/**
 * Create a {@link DatanodeDescriptor} for the local datanode ID,
 * optionally registering a single storage with a freshly generated
 * storage ID on it.
 *
 * @param initializeStorage whether to register one storage on the
 *        new descriptor
 * @return the new datanode descriptor
 */
public static DatanodeDescriptor getLocalDatanodeDescriptor(
    boolean initializeStorage) {
  final DatanodeDescriptor descriptor =
      new DatanodeDescriptor(DFSTestUtil.getLocalDatanodeID());
  if (initializeStorage) {
    final String sid = DatanodeStorage.newStorageID();
    descriptor.updateStorage(new DatanodeStorage(sid));
  }
  return descriptor;
}
/**
 * Create a {@link DatanodeDescriptor} with the given IP address (on the
 * default datanode port) and rack location, optionally registering a
 * single storage with a freshly generated storage ID on it.
 *
 * @param ipAddr IP address of the datanode
 * @param rackLocation network location (rack) of the datanode
 * @param initializeStorage whether to register one storage on the
 *        new descriptor
 * @return the new datanode descriptor
 */
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
    String rackLocation, boolean initializeStorage) {
  final DatanodeDescriptor descriptor = DFSTestUtil.getDatanodeDescriptor(
      ipAddr, DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, rackLocation);
  if (initializeStorage) {
    descriptor.updateStorage(
        new DatanodeStorage(DatanodeStorage.newStorageID()));
  }
  return descriptor;
}
}

View File

@ -25,6 +25,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;
@ -39,6 +40,7 @@
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.net.NetworkTopology;
import org.junit.Before;
import org.junit.Test;
@ -80,12 +82,12 @@ public void setupMockCluster() throws IOException {
Mockito.doReturn(true).when(fsn).hasWriteLock();
bm = new BlockManager(fsn, fsn, conf);
nodes = ImmutableList.of(
DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"),
DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackB"),
DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackB"),
DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackB")
BlockManagerTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA", true),
BlockManagerTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA", true),
BlockManagerTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA", true),
BlockManagerTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackB", true),
BlockManagerTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackB", true),
BlockManagerTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackB", true)
);
rackA = nodes.subList(0, 3);
rackB = nodes.subList(3, 6);
@ -282,6 +284,7 @@ private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
// the third off-rack replica.
DatanodeDescriptor rackCNode =
DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC");
rackCNode.updateStorage(new DatanodeStorage(DatanodeStorage.newStorageID()));
addNodes(ImmutableList.of(rackCNode));
try {
DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
@ -322,15 +325,15 @@ private void doTestSufficientlyReplBlocksUsesNewRack(int testIndex) {
/**
 * Blocks whose replicas all live in a cluster spanning a single rack
 * should not be reported as under-replicated.
 */
// NOTE(review): the diff rendering fused the removed pre-patch lines
// (DFSTestUtil.getDatanodeDescriptor calls and a stray ";;") with their
// replacements, which does not compile; only the post-patch lines are
// kept here.
@Test
public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception {
  List<DatanodeDescriptor> nodes = ImmutableList.of(
      BlockManagerTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA", true),
      BlockManagerTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA", true),
      BlockManagerTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA", true),
      BlockManagerTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackA", true),
      BlockManagerTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackA", true),
      BlockManagerTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackA", true)
  );
  addNodes(nodes);
  // exercise replication checks against the first three nodes
  List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);
  for (int i = 0; i < NUM_TEST_ITERS; i++) {
    doTestSingleRackClusterIsSufficientlyReplicated(i, origNodes);
  }
@ -353,7 +356,17 @@ private void doTestSingleRackClusterIsSufficientlyReplicated(int testIndex,
/**
 * Report replication as completed for every non-source node in the
 * pipeline: the block is reported on each node's first registered
 * storage, and that storage is attached to the block's storage list.
 *
 * @param blockInfo the block being replicated
 * @param pipeline replication pipeline; pipeline[0] is the source node
 *        and is skipped
 * @throws IOException if the block manager fails to process the block
 * @throws RuntimeException if a pipeline node has no storage registered
 */
private void fulfillPipeline(BlockInfo blockInfo,
    DatanodeDescriptor[] pipeline) throws IOException {
  // Fix: dropped the stale pre-patch line fused in by the diff rendering
  // (bm.addBlock(pipeline[i], "STORAGE_ID", blockInfo, null);), which
  // duplicated the addBlock call below with a bogus hard-coded storage ID.
  for (int i = 1; i < pipeline.length; i++) {
    DatanodeDescriptor dn = pipeline[i];
    Iterator<DatanodeStorageInfo> iterator = dn.getStorageInfos().iterator();
    if (!iterator.hasNext()) {
      throw new RuntimeException("Storage info on node: " + dn.getHostName()
          + " is invalid.");
    }
    DatanodeStorageInfo storage = iterator.next();
    bm.addBlock(dn, storage.getStorageID(), blockInfo, null);
    blockInfo.addStorage(storage);
  }
}
@ -494,7 +507,10 @@ public void testHighestPriReplSrcChosenDespiteMaxReplLimit() throws Exception {
@Test
public void testSafeModeIBR() throws Exception {
DatanodeDescriptor node = spy(nodes.get(0));
node.setStorageID("dummy-storage");
Iterator<DatanodeStorageInfo> i = node.getStorageInfos().iterator();
DatanodeStorageInfo ds = i.next();
node.setStorageID(ds.getStorageID());
node.isAlive = true;
DatanodeRegistration nodeReg =
@ -510,12 +526,15 @@ public void testSafeModeIBR() throws Exception {
assertTrue(node.isFirstBlockReport());
// send block report, should be processed
reset(node);
bm.processReport(node, null, "pool", new BlockListAsLongs(null, null));
bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
new BlockListAsLongs(null, null));
verify(node).receivedBlockReport();
assertFalse(node.isFirstBlockReport());
// send block report again, should NOT be processed
reset(node);
bm.processReport(node, null, "pool", new BlockListAsLongs(null, null));
bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
new BlockListAsLongs(null, null));
verify(node, never()).receivedBlockReport();
assertFalse(node.isFirstBlockReport());
@ -527,7 +546,8 @@ public void testSafeModeIBR() throws Exception {
assertTrue(node.isFirstBlockReport()); // ready for report again
// send block report, should be processed after restart
reset(node);
bm.processReport(node, null, "pool", new BlockListAsLongs(null, null));
bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
new BlockListAsLongs(null, null));
verify(node).receivedBlockReport();
assertFalse(node.isFirstBlockReport());
}
@ -535,7 +555,9 @@ public void testSafeModeIBR() throws Exception {
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
DatanodeDescriptor node = spy(nodes.get(0));
node.setStorageID("dummy-storage");
Iterator<DatanodeStorageInfo> i = node.getStorageInfos().iterator();
DatanodeStorageInfo ds = i.next();
node.setStorageID(ds.getStorageID());
node.isAlive = true;
DatanodeRegistration nodeReg =
@ -552,7 +574,8 @@ public void testSafeModeIBRAfterIncremental() throws Exception {
// send block report while pretending to already have blocks
reset(node);
doReturn(1).when(node).numBlocks();
bm.processReport(node, null, "pool", new BlockListAsLongs(null, null));
bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
new BlockListAsLongs(null, null));
verify(node).receivedBlockReport();
assertFalse(node.isFirstBlockReport());
}

View File

@ -22,6 +22,7 @@
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.Iterator;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
@ -55,11 +56,13 @@ public void testGetInvalidateBlocks() throws Exception {
@Test
public void testBlocksCounter() throws Exception {
DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
assertEquals(0, dd.numBlocks());
BlockInfo blk = new BlockInfo(new Block(1L), 1);
BlockInfo blk1 = new BlockInfo(new Block(2L), 2);
final String storageID = "STORAGE_ID";
Iterator<DatanodeStorageInfo> iterator = dd.getStorageInfos().iterator();
assertTrue(iterator.hasNext());
final String storageID = iterator.next().getStorageID();
// add first block
assertTrue(dd.addBlock(storageID, blk));
assertEquals(1, dd.numBlocks());