HDFS-2245. Fix a NullPointerException in BlockManager.chooseTarget(..).
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1156490 13f79535-47bb-0310-9956-ffa450edef68
parent cde987996a
commit 6a7c0306bd
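The NPE comes from the error path in chooseTarget(..): when the chosen targets fall short of minReplication, the method builds an IOException message that called excludedNodes.size() unconditionally, but excludedNodes is null whenever the client excludes no nodes (addBlock(.., excludeNodes = null)). Below is a minimal standalone sketch of the bug and of the guard the patch applies; the class and the Map type here are illustrative only, not HDFS code:

import java.util.HashMap;
import java.util.Map;

// Standalone model of the HDFS-2245 bug: formatting a message with
// excludedNodes.size() throws NullPointerException when the map is null.
public class ExcludedNodesMessage {
  static String message(Map<String, String> excludedNodes) {
    // Before the fix (NPE for a null map):
    //   return excludedNodes.size() + " node(s) are excluded in this operation.";
    // After: dereference only behind a null check.
    return (excludedNodes == null ? "no" : excludedNodes.size())
        + " node(s) are excluded in this operation.";
  }

  public static void main(String[] args) {
    System.out.println(message(null));            // no node(s) are excluded ...
    System.out.println(message(new HashMap<>())); // 0 node(s) are excluded ...
  }
}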
CHANGES.txt
@@ -951,6 +951,9 @@ Trunk (unreleased changes)
     HDFS-2196. Make ant build system work with hadoop-common JAR generated
     by Maven. (Alejandro Abdelnur via tomwhite)
 
+    HDFS-2245. Fix a NullPointerException in BlockManager.chooseTarget(..).
+    (szetszwo)
+
   BREAKDOWN OF HDFS-1073 SUBTASKS
 
     HDFS-1521. Persist transaction ID on disk between NN restarts.
BlockManager.java
@@ -1221,12 +1221,13 @@ public DatanodeDescriptor[] chooseTarget(final String src,
     final DatanodeDescriptor targets[] = blockplacement.chooseTarget(
         src, numOfReplicas, client, excludedNodes, blocksize);
     if (targets.length < minReplication) {
-      throw new IOException("File " + src + " could only be replicated to " +
-                            targets.length + " nodes, instead of " +
-                            minReplication + ". There are "
-                            + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
-                            + " datanode(s) running but "+excludedNodes.size() +
-                            " node(s) are excluded in this operation.");
+      throw new IOException("File " + src + " could only be replicated to "
+          + targets.length + " nodes instead of minReplication (="
+          + minReplication + "). There are "
+          + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
+          + " datanode(s) running and "
+          + (excludedNodes == null? "no": excludedNodes.size())
+          + " node(s) are excluded in this operation.");
     }
     return targets;
   }
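Besides the null guard, the message itself is tightened: the bound is now named explicitly as minReplication (=N), and "running but ... excluded" becomes "running and ... excluded". With no datanodes up, a null exclude list, and the default dfs.namenode.replication.min of 1 (an assumption; the value is site-configurable), the exception raised by the test below would read roughly:

File /foo.txt could only be replicated to 0 nodes instead of minReplication (=1). There are 0 datanode(s) running and no node(s) are excluded in this operation.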
TestFileCreation.java
@@ -404,6 +404,36 @@ public void testFileCreationError2() throws IOException {
     }
   }
 
+  /** test addBlock(..) when replication<min and excludeNodes==null. */
+  public void testFileCreationError3() throws IOException {
+    System.out.println("testFileCreationError3 start");
+    Configuration conf = new HdfsConfiguration();
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    DistributedFileSystem dfs = null;
+    try {
+      cluster.waitActive();
+      dfs = (DistributedFileSystem)cluster.getFileSystem();
+      DFSClient client = dfs.dfs;
+
+      // create a new file.
+      final Path f = new Path("/foo.txt");
+      createFile(dfs, f, 3);
+      try {
+        cluster.getNameNode().addBlock(f.toString(),
+            client.clientName, null, null);
+        fail();
+      } catch(IOException ioe) {
+        FileSystem.LOG.info("GOOD!", ioe);
+      }
+
+      System.out.println("testFileCreationError3 successful");
+    } finally {
+      IOUtils.closeStream(dfs);
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Test that file leases are persisted across namenode restarts.
    * This test is currently not triggered because more HDFS work is
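The new test drives exactly that path: a MiniDFSCluster built with numDataNodes(0) guarantees targets.length == 0 < minReplication, and passing null for addBlock's excludeNodes argument reproduces the pre-fix NPE; with the fix applied, the call lands in the expected IOException catch instead. Assuming the stock ant build of this era (single-test filtering via the testcase property), a run would look something like:

ant test -Dtestcase=TestFileCreation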