MAPREDUCE-4681. Fix unit tests broken by HDFS-3910. Contributed by Arun C. Murthy.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1392075 13f79535-47bb-0310-9956-ffa450edef68
parent 1c29c78299
commit 57807d50bf
@@ -156,6 +156,8 @@ Release 2.0.3-alpha - Unreleased
     MAPREDUCE-4674. Hadoop examples secondarysort has a typo
     "secondarysrot" in the usage. (Robert Justice via eli)
 
+    MAPREDUCE-4681. Fix unit tests broken by HDFS-3910. (acmurthy)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES
@@ -98,7 +98,7 @@ protected void tearDown() throws Exception {
     dfsCluster.shutdown();
   }
 
-  public void testJobQueues() throws IOException {
+  public void testJobQueues() throws Exception {
     JobClient jc = new JobClient(mrCluster.createJobConf());
     String expectedQueueInfo = "Maximum Tasks Per Job :: 10";
     JobQueueInfo[] queueInfos = jc.getQueues();
@@ -149,7 +149,7 @@ private RunningJob launchJobWithWaitingSetupAndCleanup(MiniMRCluster mr)
   private void testSetupAndCleanupKill(MiniMRCluster mr,
                                        MiniDFSCluster dfs,
                                        boolean commandLineKill)
-  throws IOException {
+  throws Exception {
     // launch job with waiting setup/cleanup
     RunningJob job = launchJobWithWaitingSetupAndCleanup(mr);
 
@@ -223,7 +223,7 @@ private void killTaskWithLostTracker(MiniMRCluster mr,
   // Also Tests the command-line kill for setup/cleanup attempts.
   // tests the setup/cleanup attempts getting killed if
   // they were running on a lost tracker
-  public void testWithDFS() throws IOException {
+  public void testWithDFS() throws Exception {
     MiniDFSCluster dfs = null;
     MiniMRCluster mr = null;
     FileSystem fileSys = null;
@@ -449,7 +449,7 @@ static String getTaskSignalParameter(boolean isMap) {
   static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
                           String mapSignalFile,
                           String reduceSignalFile, int replication)
-      throws IOException {
+      throws Exception {
     writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile),
               (short)replication);
     writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile),
@@ -462,7 +462,7 @@ static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
   static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
                           boolean isMap, String mapSignalFile,
                           String reduceSignalFile)
-      throws IOException {
+      throws Exception {
     // signal the maps to complete
     writeFile(dfs.getNameNode(), fileSys.getConf(),
               isMap
@@ -483,7 +483,7 @@ static String getReduceSignalFile(Path dir) {
   }
 
   static void writeFile(NameNode namenode, Configuration conf, Path name,
-                        short replication) throws IOException {
+                        short replication) throws Exception {
     FileSystem fileSys = FileSystem.get(conf);
     SequenceFile.Writer writer =
       SequenceFile.createWriter(fileSys, conf, name,
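The pattern in every hunk above is the same: a test method or helper that declared "throws IOException" is broadened to "throws Exception" so it can propagate whatever new checked exceptions its DFS test utilities began declaring after HDFS-3910. The sketch below is illustrative only, not code from this patch: it assumes that after HDFS-3910 DFSTestUtil.waitReplication declares checked exceptions beyond IOException (e.g. InterruptedException and TimeoutException), and the class and method names are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;

class SignalFileSketch {
  // Hypothetical helper in the spirit of the tests' writeFile(): write a small
  // SequenceFile, set its replication, then wait for replication to settle.
  // With "throws IOException" this would no longer compile if waitReplication
  // (assumption about the post-HDFS-3910 signature) also declares
  // InterruptedException and TimeoutException; broadening the clause to
  // "throws Exception" is the minimal fix for test-only code, which is what
  // this patch does throughout.
  static void writeSignalFile(FileSystem fileSys, Configuration conf,
                              Path name, short replication) throws Exception {
    SequenceFile.Writer writer =
        SequenceFile.createWriter(fileSys, conf, name,
                                  BytesWritable.class, BytesWritable.class);
    writer.append(new BytesWritable(), new BytesWritable());
    writer.close();
    fileSys.setReplication(name, replication);
    // The additional checked exceptions surface here (assumption noted above).
    DFSTestUtil.waitReplication(fileSys, name, replication);
  }
}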