MAPREDUCE-3176. Fixed ant mapreduce tests that are timing out because of wrong framework name. Contributed by Hitesh Shah.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1186368 13f79535-47bb-0310-9956-ffa450edef68
commit 94e1703b72
parent e3bb120e9f
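Background for the diffs below: a job or test selects its execution framework through MRConfig.FRAMEWORK_NAME (the "mapreduce.framework.name" property); an ant test pointing at the wrong framework runs against the wrong runtime and times out. A minimal illustration of setting the property — a sketch only, assuming hadoop-common on the classpath; the value strings mirror the MRConfig constants:

    import org.apache.hadoop.conf.Configuration;

    public class FrameworkNameExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "local"   -> run tasks in-process (LocalJobRunner)
        // "classic" -> JobTracker/TaskTracker (MRv1)
        // "yarn"    -> YARN (MRv2)
        conf.set("mapreduce.framework.name", "classic");
        System.out.println(conf.get("mapreduce.framework.name"));
      }
    }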
@@ -1683,6 +1683,9 @@ Release 0.23.0 - Unreleased
     MAPREDUCE-3162. Separated application-init and container-init event types
     in NodeManager's Application state machine. (Todd Lipcon via vinodkv)
 
+    MAPREDUCE-3176. Fixed ant mapreduce tests that are timing out because
+    of wrong framework name. (Hitesh Shah via vinodkv)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES
@@ -342,9 +342,15 @@ public void run(JobConf job, final TaskUmbilicalProtocol umbilical)
     RawKeyValueIterator rIter = null;
 
     boolean isLocal = false;
-    // local iff framework == local
-    String framework = job.get(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
-    isLocal = framework.equals(MRConfig.LOCAL_FRAMEWORK_NAME);
+    // local if
+    // 1) framework == local or
+    // 2) framework == null and job tracker address == local
+    String framework = job.get(MRConfig.FRAMEWORK_NAME);
+    String masterAddr = job.get(MRConfig.MASTER_ADDRESS, "local");
+    if ((framework == null && masterAddr.equals("local"))
+        || (framework != null && framework.equals(MRConfig.LOCAL_FRAMEWORK_NAME))) {
+      isLocal = true;
+    }
 
     if (!isLocal) {
       Class combinerClass = conf.getCombinerClass();
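The reduce-side hunk above replaces a check that defaulted to YARN with one that treats a job as local if the framework is explicitly "local", or if no framework is set and the job tracker address is "local". As a standalone sketch of that predicate (the literal "local" stands in for MRConfig.LOCAL_FRAMEWORK_NAME and the default master address; this is not the production code):

    public class LocalModeSketch {
      static boolean isLocal(String framework, String masterAddr) {
        // local if 1) framework == local, or
        //          2) framework == null and job tracker address == local
        return (framework == null && "local".equals(masterAddr))
            || "local".equals(framework);
      }

      public static void main(String[] args) {
        System.out.println(isLocal(null, "local"));        // true: nothing configured
        System.out.println(isLocal("local", "host:9001")); // true: explicit local framework
        System.out.println(isLocal(null, "host:9001"));    // false: classic job tracker
        System.out.println(isLocal("yarn", "local"));      // false: explicit framework wins
      }
    }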
@@ -25,6 +25,8 @@
 import java.util.List;
 import java.util.ServiceLoader;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -62,6 +64,7 @@ public static enum JobTrackerStatus {INITIALIZING, RUNNING};
   private Path sysDir = null;
   private Path stagingAreaDir = null;
   private Path jobHistoryDir = null;
+  private static final Log LOG = LogFactory.getLog(Cluster.class);
 
   static {
     ConfigUtil.loadResources();
@@ -83,17 +86,31 @@ private void initialize(InetSocketAddress jobTrackAddr, Configuration conf)
 
     for (ClientProtocolProvider provider : ServiceLoader
         .load(ClientProtocolProvider.class)) {
-      ClientProtocol clientProtocol = null;
-      if (jobTrackAddr == null) {
-        clientProtocol = provider.create(conf);
-      } else {
-        clientProtocol = provider.create(jobTrackAddr, conf);
-      }
-
-      if (clientProtocol != null) {
-        clientProtocolProvider = provider;
-        client = clientProtocol;
-        break;
+      LOG.debug("Trying ClientProtocolProvider : "
+          + provider.getClass().getName());
+      ClientProtocol clientProtocol = null;
+      try {
+        if (jobTrackAddr == null) {
+          clientProtocol = provider.create(conf);
+        } else {
+          clientProtocol = provider.create(jobTrackAddr, conf);
+        }
+
+        if (clientProtocol != null) {
+          clientProtocolProvider = provider;
+          client = clientProtocol;
+          LOG.debug("Picked " + provider.getClass().getName()
+              + " as the ClientProtocolProvider");
+          break;
+        }
+        else {
+          LOG.info("Cannot pick " + provider.getClass().getName()
+              + " as the ClientProtocolProvider - returned null protocol");
+        }
+      }
+      catch (Exception e) {
+        LOG.info("Failed to use " + provider.getClass().getName()
+            + " due to error: " + e.getMessage());
       }
     }
 
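The Cluster change above is the standard java.util.ServiceLoader discovery loop, hardened so that a provider returning null or throwing is logged and skipped instead of aborting initialization. A self-contained sketch of the pattern — GreetingProvider is a made-up SPI for illustration, not Cluster's real ClientProtocolProvider:

    import java.util.ServiceLoader;

    interface GreetingProvider {
      String greet();
    }

    public class ProviderScanSketch {
      public static void main(String[] args) {
        String picked = null;
        // ServiceLoader instantiates implementations listed in
        // META-INF/services/GreetingProvider on the classpath.
        for (GreetingProvider p : ServiceLoader.load(GreetingProvider.class)) {
          try {
            String g = p.greet();
            if (g != null) {   // a provider declines by returning null
              picked = g;
              break;           // first provider that works wins
            }
          } catch (Exception e) {
            // A throwing provider is skipped, not fatal -- the same
            // hardening the hunk above adds around provider.create().
            System.err.println("Skipping " + p.getClass().getName()
                + ": " + e.getMessage());
          }
        }
        System.out.println(picked == null ? "no provider found" : picked);
      }
    }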
@@ -94,7 +94,13 @@ public static void testCommandFormat() throws Exception {
     CommandFormat cf;
     cf= new CommandFormat("copyToLocal", 2,2,"crc","ignoreCrc");
     assertEquals(cf.parse(new String[] {"-get","file", "-"}, 1).get(1), "-");
-    assertEquals(cf.parse(new String[] {"-get","file","-ignoreCrc","/foo"}, 1).get(1),"/foo");
+    try {
+      cf.parse(new String[] {"-get","file","-ignoreCrc","/foo"}, 1);
+      fail("Expected parsing to fail as it should stop at first non-option");
+    }
+    catch (Exception e) {
+      // Expected
+    }
     cf = new CommandFormat("tail", 1, 1, "f");
     assertEquals(cf.parse(new String[] {"-tail","fileName"}, 1).get(0),"fileName");
     assertEquals(cf.parse(new String[] {"-tail","-f","fileName"}, 1).get(0),"fileName");
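The test change above encodes CommandFormat's parsing rule: option scanning stops at the first non-option token, so "-ignoreCrc" appearing after "file" counts as a positional argument and overflows copyToLocal's two-argument maximum. An illustrative reimplementation of that rule (not CommandFormat's actual code):

    import java.util.ArrayList;
    import java.util.List;

    public class StopAtFirstNonOption {
      static List<String> parse(String[] args, int start, int maxArgs) {
        int i = start;
        // consume leading options; a bare "-" is data (stdin/stdout), not an option
        while (i < args.length && args[i].startsWith("-") && args[i].length() > 1) {
          i++;
        }
        // everything after the first non-option is positional
        List<String> positional = new ArrayList<String>();
        for (; i < args.length; i++) {
          positional.add(args[i]);
        }
        if (positional.size() > maxArgs) {
          throw new IllegalArgumentException("Too many arguments: " + positional);
        }
        return positional;
      }

      public static void main(String[] args) {
        // Mirrors the updated test: "-ignoreCrc" after "file" is positional,
        // giving 3 positionals against a maximum of 2 -- this throws.
        parse(new String[] {"-get", "file", "-ignoreCrc", "/foo"}, 1, 2);
      }
    }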
@@ -33,6 +33,7 @@
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.net.StandardSocketFactory;
 
 /**
@@ -92,6 +93,7 @@ public void testSocketFactory() throws IOException {
     JobConf jconf = new JobConf(cconf);
     jconf.set("mapred.job.tracker", String.format("localhost:%d",
         jobTrackerPort + 10));
+    jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
     client = new JobClient(jconf);
 
     JobStatus[] jobs = client.jobsToComplete();
@@ -96,4 +96,25 @@ public void testClusterWithJTClientProvider() throws Exception {
     }
   }
 
+  @Test
+  public void testClusterException() {
+
+    Configuration conf = new Configuration();
+    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
+    conf.set(JTConfig.JT_IPC_ADDRESS, "local");
+
+    // initializing a cluster with this conf should throw an error.
+    // However the exception thrown should not be specific to either
+    // the job tracker client provider or the local provider
+    boolean errorThrown = false;
+    try {
+      Cluster cluster = new Cluster(conf);
+      cluster.close();
+      fail("Not expected - cluster init should have failed");
+    } catch (IOException e) {
+      errorThrown = true;
+      assert(e.getMessage().contains("Cannot initialize Cluster. Please check"));
+    }
+    assert(errorThrown);
+  }
 }
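testClusterException above uses the try/fail/catch idiom rather than JUnit 4's @Test(expected = IOException.class), because it also asserts on the exception message (and thereby that the failure is provider-agnostic). A self-contained sketch of the idiom, with hypothetical class and method names:

    import static org.junit.Assert.assertTrue;
    import static org.junit.Assert.fail;

    import java.io.IOException;

    import org.junit.Test;

    public class ExpectedFailureIdiomSketch {

      // Hypothetical operation that must reject a conflicting configuration.
      private void initialize(String framework, String address) throws IOException {
        if ("classic".equals(framework) && "local".equals(address)) {
          throw new IOException("Cannot initialize Cluster. Please check your configuration.");
        }
      }

      @Test
      public void testInitializeRejectsConflict() {
        try {
          initialize("classic", "local");
          fail("initialization should have failed");
        } catch (IOException e) {
          // Catching the exception lets the test verify the message,
          // which @Test(expected = ...) cannot do.
          assertTrue(e.getMessage().contains("Cannot initialize Cluster"));
        }
      }
    }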