From 2542d69d65544bab75052a0b9c97a720f3c80cd5 Mon Sep 17 00:00:00 2001
From: Devaraj Das
Date: Tue, 30 Apr 2013 21:46:02 +0000
Subject: [PATCH] HDFS-4778. Fixes some issues that the first patch on
 HDFS-2576 missed. Contributed by Devaraj Das.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1477849 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 ++
 .../BlockPlacementPolicyDefault.java         |  3 +-
 .../blockmanagement/DatanodeManager.java     |  1 -
 .../namenode/TestFavoredNodesEndToEnd.java   | 32 +++++++++----------
 4 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 00502f0684..77380f4db5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -271,6 +271,9 @@ Trunk (Unreleased)
     HDFS-4687. TestDelegationTokenForProxyUser#testWebHdfsDoAs is flaky with
     JDK7. (Andrew Wang via atm)
 
+    HDFS-4778. Fixes some issues that the first patch on HDFS-2576 missed.
+    (ddas)
+
     BREAKDOWN OF HADOOP-8562 and HDFS-3602 SUBTASKS AND RELATED JIRAS
 
     HDFS-4145. Merge hdfs cmd line scripts from branch-1-win. (David Lao,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 61a61a0b5d..af5aab420a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -170,7 +170,8 @@ DatanodeDescriptor[] chooseTarget(String src, int numOfReplicas,
           results.add(remainingTargets[i]);
         }
       }
-      return results.toArray(new DatanodeDescriptor[results.size()]);
+      return getPipeline(writer,
+          results.toArray(new DatanodeDescriptor[results.size()]));
     } catch (NotEnoughReplicasException nr) {
       // Fall back to regular block placement disregarding favored nodes hint
       return chooseTarget(src, numOfReplicas, writer,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 670bea82ec..098033603b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -339,7 +339,6 @@ public DatanodeDescriptor getDatanodeByXferAddr(String host, int xferPort) {
    *
    * @param address hostaddress:transfer address
    * @return the best match for the given datanode
-   * @throws IOException when no datanode is found for given address
    */
   DatanodeDescriptor getDatanodeDescriptor(String address) {
     DatanodeDescriptor node = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
index 015c021b06..ea5bb7a91e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
@@ -22,6 +22,7 @@
 import java.util.ArrayList;
 import java.util.Random;
 
+import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
@@ -32,6 +33,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -67,7 +69,7 @@ public static void tearDownAfterClass() throws Exception {
     }
   }
 
-  @Test
+  @Test(timeout=180000)
   public void testFavoredNodesEndToEnd() throws Exception {
     //create 10 files with random preferred nodes
     for (int i = 0; i < NUM_FILES; i++) {
@@ -80,11 +82,7 @@ public void testFavoredNodesEndToEnd() throws Exception {
           4096, (short)3, (long)4096, null, datanode);
       out.write(SOME_BYTES);
       out.close();
-      BlockLocation[] locations =
-        dfs.getClient().getBlockLocations(p.toUri().getPath(), 0,
-            Long.MAX_VALUE);
-      //make sure we have exactly one block location, and three hosts
-      assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
+      BlockLocation[] locations = getBlockLocations(p);
       //verify the files got created in the right nodes
       for (BlockLocation loc : locations) {
         String[] hosts = loc.getNames();
@@ -94,7 +92,7 @@ public void testFavoredNodesEndToEnd() throws Exception {
     }
   }
 
-  @Test
+  @Test(timeout=180000)
   public void testWhenFavoredNodesNotPresent() throws Exception {
     //when we ask for favored nodes but the nodes are not there, we should
     //get some other nodes. In other words, the write to hdfs should not fail
@@ -110,13 +108,10 @@ public void testWhenFavoredNodesNotPresent() throws Exception {
         4096, (short)3, (long)4096, null, arbitraryAddrs);
     out.write(SOME_BYTES);
     out.close();
-    BlockLocation[] locations =
-      dfs.getClient().getBlockLocations(p.toUri().getPath(), 0,
-          Long.MAX_VALUE);
-    assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
+    getBlockLocations(p);
   }
 
-  @Test
+  @Test(timeout=180000)
   public void testWhenSomeNodesAreNotGood() throws Exception {
     //make some datanode not "good" so that even if the client prefers it,
     //the namenode would not give it as a replica to write to
@@ -136,12 +131,9 @@
         4096, (short)3, (long)4096, null, addrs);
     out.write(SOME_BYTES);
     out.close();
-    BlockLocation[] locations =
-      dfs.getClient().getBlockLocations(p.toUri().getPath(), 0,
-          Long.MAX_VALUE);
     //reset the state
     d.stopDecommission();
-    assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
+    BlockLocation[] locations = getBlockLocations(p);
     //also make sure that the datanode[0] is not in the list of hosts
     String datanode0 =
       datanodes.get(0).getXferAddress().getAddress().getHostAddress()
@@ -153,6 +145,14 @@
     }
   }
 
+  private BlockLocation[] getBlockLocations(Path p) throws Exception {
+    DFSTestUtil.waitReplication(dfs, p, (short)3);
+    BlockLocation[] locations = dfs.getClient().getBlockLocations(
+        p.toUri().getPath(), 0, Long.MAX_VALUE);
+    assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
+    return locations;
+  }
+
   private String[] getStringForInetSocketAddrs(InetSocketAddress[] datanode) {
     String strs[] = new String[datanode.length];
    for (int i = 0; i < datanode.length; i++) {
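For reference, the code path this patch touches is driven by the favored-nodes
create() overload that the test calls on DistributedFileSystem. Below is a
minimal usage sketch, not part of the patch: it assumes an already-initialized
DistributedFileSystem (e.g. from a MiniDFSCluster as in the test), and the
class name, method name, and datanode hostnames/ports are hypothetical.

    import java.net.InetSocketAddress;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class FavoredNodesSketch {
      // Write a file, hinting the namenode to place replicas on the given
      // datanodes. The hint is best-effort: if the favored nodes cannot be
      // used, placement falls back to the default policy (see the
      // NotEnoughReplicasException catch block in BlockPlacementPolicyDefault).
      static void writeWithFavoredNodes(DistributedFileSystem dfs)
          throws Exception {
        InetSocketAddress[] favoredNodes = new InetSocketAddress[] {
            new InetSocketAddress("dn1.example.com", 50010),  // hypothetical
            new InetSocketAddress("dn2.example.com", 50010),
            new InetSocketAddress("dn3.example.com", 50010)
        };
        Path p = new Path("/favored-nodes-example");
        // Same create() overload the test uses: buffer size 4096,
        // replication 3, block size 4096, no progress callback, and the
        // favored-node addresses as the final argument.
        FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(),
            true, 4096, (short)3, (long)4096, null, favoredNodes);
        out.write(new byte[] {1, 2, 3});
        out.close();
      }
    }

With this patch applied, targets chosen for the favored-nodes hint are also
ordered into a write pipeline via getPipeline(), matching the behavior of the
regular placement path.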