HDFS-4778. Fixes some issues that the first patch on HDFS-2576 missed. Contributed by Devaraj Das.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1477849 13f79535-47bb-0310-9956-ffa450edef68
Devaraj Das 2013-04-30 21:46:02 +00:00
parent bfef63719d
commit 2542d69d65
4 changed files with 21 additions and 18 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -271,6 +271,9 @@ Trunk (Unreleased)
     HDFS-4687. TestDelegationTokenForProxyUser#testWebHdfsDoAs is flaky with
     JDK7. (Andrew Wang via atm)
 
+    HDFS-4778. Fixes some issues that the first patch on HDFS-2576 missed.
+    (ddas)
+
   BREAKDOWN OF HADOOP-8562 and HDFS-3602 SUBTASKS AND RELATED JIRAS
 
     HDFS-4145. Merge hdfs cmd line scripts from branch-1-win. (David Lao,

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -170,7 +170,8 @@ DatanodeDescriptor[] chooseTarget(String src, int numOfReplicas,
           results.add(remainingTargets[i]);
         }
       }
-      return results.toArray(new DatanodeDescriptor[results.size()]);
+      return getPipeline(writer,
+          results.toArray(new DatanodeDescriptor[results.size()]));
     } catch (NotEnoughReplicasException nr) {
       // Fall back to regular block placement disregarding favored nodes hint
       return chooseTarget(src, numOfReplicas, writer,

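Context for the fix above: the favored-nodes variant of chooseTarget() used to hand its targets back in whatever order it found them, whereas the regular placement path runs them through getPipeline() so the client writes to the nearest node first and each later hop is the nearest remaining node. A minimal sketch of that greedy ordering, not the actual BlockPlacementPolicyDefault code; the Node record and the rack-based distance() are illustrative stand-ins for DatanodeDescriptor and NetworkTopology:

import java.util.ArrayList;
import java.util.List;

// Sketch only: greedy pipeline ordering in the spirit of getPipeline().
public class PipelineSketch {
  record Node(String name, int rack) {}  // hypothetical datanode stand-in

  // Hypothetical distance: 0 = same node, 2 = same rack, 4 = off-rack.
  static int distance(Node a, Node b) {
    if (a.equals(b)) return 0;
    return a.rack() == b.rack() ? 2 : 4;
  }

  static List<Node> getPipeline(Node writer, List<Node> targets) {
    List<Node> remaining = new ArrayList<>(targets);
    List<Node> pipeline = new ArrayList<>();
    Node current = writer;
    while (!remaining.isEmpty()) {
      Node nearest = remaining.get(0);
      for (Node n : remaining) {
        if (distance(current, n) < distance(current, nearest)) {
          nearest = n;
        }
      }
      remaining.remove(nearest);
      pipeline.add(nearest);
      current = nearest;  // next hop is measured from the node just added
    }
    return pipeline;
  }

  public static void main(String[] args) {
    Node writer = new Node("client", 1);
    List<Node> targets = List.of(
        new Node("dnA", 3), new Node("dnB", 1), new Node("dnC", 2));
    System.out.println(getPipeline(writer, targets));  // dnB (writer's rack) first
  }
}

Without that ordering, the first favored node in the returned array could be several network hops from the writer even when a same-rack target was available; the one-line getPipeline() wrapper restores the behavior of the regular path.
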
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -339,7 +339,6 @@ public DatanodeDescriptor getDatanodeByXferAddr(String host, int xferPort) {
    *
    * @param address hostaddress:transfer address
    * @return the best match for the given datanode
-   * @throws IOException when no datanode is found for given address
    */
   DatanodeDescriptor getDatanodeDescriptor(String address) {
     DatanodeDescriptor node = null;

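The javadoc fix above removes a stale @throws line: the signature visible in the hunk declares no checked exception, so getDatanodeDescriptor() resolves a favored-node address and, presumably, returns null when nothing matches rather than throwing. A rough sketch of that null-on-miss contract, with a plain map standing in for DatanodeManager's real registration structures (the class and field names here are illustrative assumptions):

import java.util.HashMap;
import java.util.Map;

// Sketch only: the lookup contract implied by the javadoc change.
public class DatanodeLookupSketch {
  static class DatanodeDescriptor {
    final String host;
    final int xferPort;
    DatanodeDescriptor(String host, int xferPort) {
      this.host = host;
      this.xferPort = xferPort;
    }
    @Override public String toString() { return host + ":" + xferPort; }
  }

  private final Map<String, DatanodeDescriptor> byXferAddr = new HashMap<>();

  void register(DatanodeDescriptor d) {
    byXferAddr.put(d.host + ":" + d.xferPort, d);
  }

  // Returns the best match for "host:xferPort", or null when no datanode
  // is registered at that address -- no IOException, hence the removed @throws.
  DatanodeDescriptor getDatanodeDescriptor(String address) {
    return byXferAddr.get(address);
  }

  public static void main(String[] args) {
    DatanodeLookupSketch dm = new DatanodeLookupSketch();
    dm.register(new DatanodeDescriptor("127.0.0.1", 50010));
    System.out.println(dm.getDatanodeDescriptor("127.0.0.1:50010")); // match
    System.out.println(dm.getDatanodeDescriptor("127.0.0.1:50020")); // null
  }
}

Callers on the favored-nodes path therefore have to treat a null result as "no such datanode" and fall back accordingly.
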
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java

@@ -22,6 +22,7 @@
 import java.util.ArrayList;
 import java.util.Random;
 import java.io.IOException;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
@@ -32,6 +33,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -67,7 +69,7 @@ public static void tearDownAfterClass() throws Exception {
     }
   }
 
-  @Test
+  @Test(timeout=180000)
   public void testFavoredNodesEndToEnd() throws Exception {
     //create 10 files with random preferred nodes
     for (int i = 0; i < NUM_FILES; i++) {
@@ -80,11 +82,7 @@ public void testFavoredNodesEndToEnd() throws Exception {
           4096, (short)3, (long)4096, null, datanode);
       out.write(SOME_BYTES);
       out.close();
-      BlockLocation[] locations =
-          dfs.getClient().getBlockLocations(p.toUri().getPath(), 0,
-              Long.MAX_VALUE);
-      //make sure we have exactly one block location, and three hosts
-      assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
+      BlockLocation[] locations = getBlockLocations(p);
       //verify the files got created in the right nodes
       for (BlockLocation loc : locations) {
         String[] hosts = loc.getNames();
@@ -94,7 +92,7 @@ public void testFavoredNodesEndToEnd() throws Exception {
     }
   }
 
-  @Test
+  @Test(timeout=180000)
   public void testWhenFavoredNodesNotPresent() throws Exception {
     //when we ask for favored nodes but the nodes are not there, we should
     //get some other nodes. In other words, the write to hdfs should not fail
@@ -110,13 +108,10 @@ public void testWhenFavoredNodesNotPresent() throws Exception {
         4096, (short)3, (long)4096, null, arbitraryAddrs);
     out.write(SOME_BYTES);
     out.close();
-    BlockLocation[] locations =
-        dfs.getClient().getBlockLocations(p.toUri().getPath(), 0,
-            Long.MAX_VALUE);
-    assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
+    getBlockLocations(p);
   }
 
-  @Test
+  @Test(timeout=180000)
   public void testWhenSomeNodesAreNotGood() throws Exception {
     //make some datanode not "good" so that even if the client prefers it,
     //the namenode would not give it as a replica to write to
@@ -136,12 +131,9 @@ public void testWhenSomeNodesAreNotGood() throws Exception {
         4096, (short)3, (long)4096, null, addrs);
     out.write(SOME_BYTES);
     out.close();
-    BlockLocation[] locations =
-        dfs.getClient().getBlockLocations(p.toUri().getPath(), 0,
-            Long.MAX_VALUE);
     //reset the state
     d.stopDecommission();
-    assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
+    BlockLocation[] locations = getBlockLocations(p);
     //also make sure that the datanode[0] is not in the list of hosts
     String datanode0 =
         datanodes.get(0).getXferAddress().getAddress().getHostAddress()
@@ -153,6 +145,14 @@ public void testWhenSomeNodesAreNotGood() throws Exception {
     }
   }
 
+  private BlockLocation[] getBlockLocations(Path p) throws Exception {
+    DFSTestUtil.waitReplication(dfs, p, (short)3);
+    BlockLocation[] locations = dfs.getClient().getBlockLocations(
+        p.toUri().getPath(), 0, Long.MAX_VALUE);
+    assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
+    return locations;
+  }
+
   private String[] getStringForInetSocketAddrs(InetSocketAddress[] datanode) {
     String strs[] = new String[datanode.length];
     for (int i = 0; i < datanode.length; i++) {
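
For reference, the API these tests exercise: DistributedFileSystem.create() accepts the preferred datanodes as a trailing InetSocketAddress[] argument, and the namenode treats them as a hint, falling back to regular placement when they cannot all be honored (see the BlockPlacementPolicyDefault hunk above). A hedged usage sketch; the overload is inferred from the calls visible in the test, and the path, addresses, and payload are made up:

import java.net.InetSocketAddress;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

public class FavoredNodesUsage {
  // Sketch only: assumes `dfs` came from a running cluster, e.g. a
  // MiniDFSCluster as in the test above.
  static void writeWithFavoredNodes(DistributedFileSystem dfs) throws Exception {
    InetSocketAddress[] favored = {               // hypothetical xfer addresses
        new InetSocketAddress("10.0.0.1", 50010),
        new InetSocketAddress("10.0.0.2", 50010),
        new InetSocketAddress("10.0.0.3", 50010),
    };
    // Same shape as the test's calls: (path, perm, overwrite, bufferSize,
    // replication, blockSize, progress, favoredNodes).
    HdfsDataOutputStream out = dfs.create(new Path("/favored-file"),
        FsPermission.getDefault(), true, 4096, (short) 3, (long) 4096,
        null /* progress */, favored);
    out.write("some bytes".getBytes());
    out.close();
  }
}

The timeouts added to the @Test annotations and the waitReplication() call in the new getBlockLocations() helper appear to be the test-stability part of this fix: block locations are only asserted once replication has actually reached 3, instead of racing the write pipeline.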