HDFS-3401. Cleanup DatanodeDescriptor creation in the tests. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336972 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-05-11 00:03:37 +00:00
parent dfd733401d
commit fcb4b1995b
12 changed files with 99 additions and 64 deletions


@@ -447,6 +447,10 @@ Release 2.0.0 - UNRELEASED
HDFS-3134. harden edit log loader against malformed or malicious input.
(Colin Patrick McCabe via eli)
+HDFS-3230. Cleanup DatanodeID creation in the tests. (eli)
+HDFS-3401. Cleanup DatanodeDescriptor creation in the tests. (eli)
OPTIMIZATIONS
HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -640,8 +644,6 @@ Release 2.0.0 - UNRELEASED
HDFS-3395. NN doesn't start with HA+security enabled and HTTP address
set to 0.0.0.0. (atm)
-HDFS-3230. Cleanup DatanodeID creation in the tests. (eli)
BREAKDOWN OF HDFS-1623 SUBTASKS
HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)


@@ -59,6 +59,7 @@
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -706,12 +707,19 @@ public static void setFederatedConfiguration(MiniDFSCluster cluster,
.join(nameservices));
}
+private static DatanodeID getDatanodeID(String ipAddr) {
+return new DatanodeID(ipAddr, "localhost",
+DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+}
public static DatanodeID getLocalDatanodeID() {
-return new DatanodeID("127.0.0.1", "localhost", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+return new DatanodeID("127.0.0.1", "localhost",
+DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
}
public static DatanodeID getLocalDatanodeID(int port) {
-return new DatanodeID("127.0.0.1", "localhost", "", port, port, port);
+return new DatanodeID("127.0.0.1", "localhost", "",
+port, port, port);
}
public static DatanodeDescriptor getLocalDatanodeDescriptor() {
@@ -722,7 +730,36 @@ public static DatanodeInfo getLocalDatanodeInfo() {
return new DatanodeInfo(getLocalDatanodeID());
}
+public static DatanodeInfo getDatanodeInfo(String ipAddr) {
+return new DatanodeInfo(getDatanodeID(ipAddr));
+}
public static DatanodeInfo getLocalDatanodeInfo(int port) {
return new DatanodeInfo(getLocalDatanodeID(port));
}
+public static DatanodeInfo getDatanodeInfo(String ipAddr,
+String host, int port) {
+return new DatanodeInfo(new DatanodeID(ipAddr, host, port));
+}
+public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
+String hostname, AdminStates adminState) {
+return new DatanodeInfo(ipAddr, hostname, "storage",
+DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
+1, 2, 3, 4, 5, 6, "local", adminState);
+}
+public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
+String rackLocation) {
+return getDatanodeDescriptor(ipAddr, DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+rackLocation);
+}
+public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
+int port, String rackLocation) {
+return new DatanodeDescriptor(new DatanodeID(ipAddr, port), rackLocation);
+}
}
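
The helper methods added above are what the test hunks below switch over to. As a rough usage sketch (not part of this commit; the wrapper class and main() harness are invented for illustration, and only the DFSTestUtil calls come from the patch), a test can now build rack-aware DatanodeDescriptors without constructing DatanodeIDs by hand:

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.net.NetworkTopology;

public class DatanodeDescriptorExample {
  public static void main(String[] args) {
    NetworkTopology cluster = new NetworkTopology();
    DatanodeDescriptor[] nodes = new DatanodeDescriptor[] {
      // The two-argument helper fills in DFS_DATANODE_DEFAULT_PORT.
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r2"),
      // The port overload covers tests that need two datanodes on one IP.
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", 5021, "/d1/r2")
    };
    for (DatanodeDescriptor node : nodes) {
      cluster.add(node);
    }
    System.out.println("leaves in topology: " + cluster.getNumOfLeaves());
  }
}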


@@ -333,7 +333,7 @@ private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
LocatedBlock badLocatedBlock = new LocatedBlock(
goodLocatedBlock.getBlock(),
new DatanodeInfo[] {
-new DatanodeInfo(new DatanodeID("255.255.255.255", 234))
+DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
},
goodLocatedBlock.getStartOffset(),
false);


@@ -121,8 +121,7 @@ public void testGetBlocks() throws Exception {
getBlocksWithException(namenode, dataNodes[0], -1);
// get blocks of size BlockSize from a non-existent datanode
-DatanodeInfo info = DFSTestUtil.getLocalDatanodeInfo();
-info.setIpAddr("1.2.3.4");
+DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
getBlocksWithException(namenode, info, 2);
} finally {
cluster.shutdown();


@@ -400,16 +400,11 @@ private void compare(Token<BlockTokenIdentifier> expected,
@Test
public void testConvertLocatedBlock() {
-DatanodeInfo [] dnInfos = new DatanodeInfo[3];
-dnInfos[0] = new DatanodeInfo("127.0.0.1", "host1", "0",
-5000, 5001, 5002, 20000, 10001, 9999,
-59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS);
-dnInfos[1] = new DatanodeInfo("127.0.0.1", "host2", "1",
-5000, 5001, 5002, 20000, 10001, 9999,
-59, 69, 32, "local", AdminStates.DECOMMISSIONED);
-dnInfos[2] = new DatanodeInfo("127.0.0.1", "host3", "2",
-5000, 5001, 5002, 20000, 10001, 9999,
-59, 69, 32, "local", AdminStates.NORMAL);
+DatanodeInfo [] dnInfos = {
+DFSTestUtil.getLocalDatanodeInfo("1.1.1.1", "h1", AdminStates.DECOMMISSION_INPROGRESS),
+DFSTestUtil.getLocalDatanodeInfo("2.2.2.2", "h2", AdminStates.DECOMMISSIONED),
+DFSTestUtil.getLocalDatanodeInfo("3.3.3.3", "h3", AdminStates.NORMAL)
+};
LocatedBlock lb = new LocatedBlock(
new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
LocatedBlockProto lbProto = PBHelper.convert(lb);


@@ -26,6 +26,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -76,12 +77,12 @@ public void setupMockCluster() throws IOException {
Mockito.doReturn(true).when(fsn).hasWriteLock();
bm = new BlockManager(fsn, fsn, conf);
nodes = ImmutableList.of(
-new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/rackA"),
-new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/rackA"),
-new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/rackA"),
-new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/rackB"),
-new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/rackB"),
-new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/rackB")
+DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
+DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
+DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"),
+DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackB"),
+DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackB"),
+DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackB")
);
rackA = nodes.subList(0, 3);
rackB = nodes.subList(3, 6);
@@ -277,7 +278,7 @@ private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
// the block is still under-replicated. Add a new node. This should allow
// the third off-rack replica.
DatanodeDescriptor rackCNode =
-new DatanodeDescriptor(new DatanodeID("7.7.7.7", 100), "/rackC");
+DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC");
addNodes(ImmutableList.of(rackCNode));
try {
DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
@@ -317,13 +318,13 @@ private void doTestSufficientlyReplBlocksUsesNewRack(int testIndex) {
@Test
public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception {
-List<DatanodeDescriptor> nodes = ImmutableList.of(
-new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
-new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
-new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
-new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackA"),
-new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackA"),
-new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackA")
+List<DatanodeDescriptor> nodes = ImmutableList.of(
+DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
+DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
+DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"),
+DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackA"),
+DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackA"),
+DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackA")
);
addNodes(nodes);
List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);;


@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.junit.Before;
import org.junit.Test;
@@ -32,10 +32,10 @@ public class TestHost2NodesMap {
@Before
public void setup() {
dataNodes = new DatanodeDescriptor[] {
-new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
-new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
-new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"),
-new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5030), "/d1/r2"),
+DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
+DFSTestUtil.getDatanodeDescriptor("3.3.3.3", 5021, "/d1/r2"),
};
for (DatanodeDescriptor node : dataNodes) {
map.add(node);
@@ -46,7 +46,7 @@ public void setup() {
@Test
public void testContains() throws Exception {
DatanodeDescriptor nodeNotInMap =
-new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5040), "/d1/r4");
+DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
for (int i = 0; i < dataNodes.length; i++) {
assertTrue(map.contains(dataNodes[i]));
}
@@ -66,7 +66,7 @@ public void testGetDatanodeByHost() throws Exception {
@Test
public void testRemove() throws Exception {
DatanodeDescriptor nodeNotInMap =
-new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5040), "/d1/r4");
+DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
assertFalse(map.remove(nodeNotInMap));
assertTrue(map.remove(dataNodes[0]));


@@ -56,12 +56,12 @@ public class TestReplicationPolicy {
public static void setupCluster() throws Exception {
Configuration conf = new HdfsConfiguration();
dataNodes = new DatanodeDescriptor[] {
-new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
-new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
-new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"),
-new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/d1/r2"),
-new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/d2/r3"),
-new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/d2/r3")
+DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
+DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
+DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d2/r3"),
+DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3")
};
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
@@ -329,6 +329,7 @@ public void testChoooseTarget4() throws Exception {
HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
}
}
/**
* In this testcase, client is is a node outside of file system.
* So the 1st replica can be placed on any node.
@@ -338,8 +339,8 @@ public void testChoooseTarget4() throws Exception {
*/
@Test
public void testChooseTarget5() throws Exception {
-DatanodeDescriptor writerDesc =
-new DatanodeDescriptor(new DatanodeID("7.7.7.7", 5020), "/d2/r4");
+DatanodeDescriptor writerDesc =
+DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");
DatanodeDescriptor[] targets;
targets = replicator.chooseTarget(filename,


@@ -115,7 +115,7 @@ private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx)
0, HdfsConstants.LAYOUT_VERSION))
.when(mock).versionRequest();
-Mockito.doReturn(new DatanodeRegistration("fake-node", 100))
+Mockito.doReturn(new DatanodeRegistration("1.2.3.4", 100))
.when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
Mockito.doAnswer(new HeartbeatAnswer(nnIdx))


@@ -47,6 +47,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -197,9 +198,9 @@ private void testSyncReplicas(ReplicaRecoveryInfo replica1,
locs, RECOVERY_ID);
ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
BlockRecord record1 = new BlockRecord(
-new DatanodeID("xx", "yy", "zz", 1, 2, 3), dn1, replica1);
+DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
BlockRecord record2 = new BlockRecord(
-new DatanodeID("aa", "bb", "cc", 1, 2, 3), dn2, replica2);
+DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
syncList.add(record1);
syncList.add(record2);
@@ -401,8 +402,7 @@ public void testRWRReplicas() throws IOException {
private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
-DatanodeInfo mockOtherDN = new DatanodeInfo(
-new DatanodeID("127.0.0.1", "localhost", "storage-1234", 0, 0, 0));
+DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] locs = new DatanodeInfo[] {
new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
mockOtherDN };


@@ -356,8 +356,7 @@ public void testInterDNProtocolTimeout() throws Throwable {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-DatanodeID fakeDnId = new DatanodeID(
-"localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
+DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
InterDatanodeProtocol proxy = null;


@@ -21,6 +21,7 @@
import java.util.HashMap;
import java.util.Map;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -36,13 +37,13 @@ public class TestNetworkTopology {
@Before
public void setupDatanodes() {
dataNodes = new DatanodeDescriptor[] {
-new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
-new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
-new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"),
-new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/d1/r2"),
-new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/d1/r2"),
-new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/d2/r3"),
-new DatanodeDescriptor(new DatanodeID("7.7.7.7", 5020), "/d2/r3")
+DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
+DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
+DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"),
+DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"),
+DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3")
};
for (int i = 0; i < dataNodes.length; i++) {
cluster.add(dataNodes[i]);
@@ -52,7 +53,7 @@ public void setupDatanodes() {
@Test
public void testContains() throws Exception {
DatanodeDescriptor nodeNotInMap =
-new DatanodeDescriptor(new DatanodeID("8.8.8.8", 5020), "/d2/r4");
+DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r4");
for (int i=0; i < dataNodes.length; i++) {
assertTrue(cluster.contains(dataNodes[i]));
}
@@ -68,9 +69,9 @@ public void testNumOfChildren() throws Exception {
public void testCreateInvalidTopology() throws Exception {
NetworkTopology invalCluster = new NetworkTopology();
DatanodeDescriptor invalDataNodes[] = new DatanodeDescriptor[] {
-new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
-new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
-new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1")
+DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1")
};
invalCluster.add(invalDataNodes[0]);
invalCluster.add(invalDataNodes[1]);