diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7a898741fa..21eda1b32f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -145,6 +145,8 @@ Trunk (unreleased changes)
HADOOP-7761. Improve the performance of raw comparisons. (todd)
+ HADOOP-7917. compilation of protobuf files fails in windows/cygwin. (tucu)
+
Release 0.23.1 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index f477d385a5..aa3548e666 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -296,17 +296,29 @@
-
- PROTO_DIR=${basedir}/src/main/proto
- ls $PROTO_DIR &> /dev/null
- if [ $? = 0 ]; then
- JAVA_DIR=${project.build.directory}/generated-sources/java
- mkdir -p $JAVA_DIR
- ls $PROTO_DIR/*.proto | xargs -n 1 protoc -I$PROTO_DIR --java_out=$JAVA_DIR
- fi
+
+ PROTO_DIR=src/main/proto
+ JAVA_DIR=target/generated-sources/java
+ which cygpath 2> /dev/null
+ if [ $? = 1 ]; then
+ IS_WIN=false
+ else
+ IS_WIN=true
+ WIN_PROTO_DIR=`cygpath --windows $PROTO_DIR`
+ WIN_JAVA_DIR=`cygpath --windows $JAVA_DIR`
+ fi
+ mkdir -p $JAVA_DIR 2> /dev/null
+ for PROTO_FILE in `ls $PROTO_DIR/*.proto 2> /dev/null`
+ do
+ if [ "$IS_WIN" = "true" ]; then
+ protoc -I$WIN_PROTO_DIR --java_out=$WIN_JAVA_DIR $PROTO_FILE
+ else
+ protoc -I$PROTO_DIR --java_out=$JAVA_DIR $PROTO_FILE
+ fi
+ done
-
-
+
+
@@ -319,17 +331,29 @@
-
- PROTO_DIR=${basedir}/src/test/proto
- ls $PROTO_DIR &> /dev/null
- if [ $? = 0 ]; then
- JAVA_DIR=${project.build.directory}/generated-test-sources/java
- mkdir -p $JAVA_DIR
- ls $PROTO_DIR/*.proto | xargs -n 1 protoc -I$PROTO_DIR --java_out=$JAVA_DIR
- fi
+
+ PROTO_DIR=src/test/proto
+ JAVA_DIR=target/generated-test-sources/java
+ which cygpath 2> /dev/null
+ if [ $? = 1 ]; then
+ IS_WIN=false
+ else
+ IS_WIN=true
+ WIN_PROTO_DIR=`cygpath --windows $PROTO_DIR`
+ WIN_JAVA_DIR=`cygpath --windows $JAVA_DIR`
+ fi
+ mkdir -p $JAVA_DIR 2> /dev/null
+ for PROTO_FILE in `ls $PROTO_DIR/*.proto 2> /dev/null`
+ do
+ if [ "$IS_WIN" = "true" ]; then
+ protoc -I$WIN_PROTO_DIR --java_out=$WIN_JAVA_DIR $PROTO_FILE
+ else
+ protoc -I$PROTO_DIR --java_out=$JAVA_DIR $PROTO_FILE
+ fi
+ done
-
-
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 1df04254d9..510f9d0140 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -228,17 +228,29 @@
-
- PROTO_DIR=${basedir}/src/main/proto
- ls $PROTO_DIR &> /dev/null
- if [ $? = 0 ]; then
- JAVA_DIR=${project.build.directory}/generated-sources/java
- mkdir -p $JAVA_DIR
- ls $PROTO_DIR/*.proto | xargs -n 1 protoc -I$PROTO_DIR --java_out=$JAVA_DIR
- fi
+
+ PROTO_DIR=src/main/proto
+ JAVA_DIR=target/generated-sources/java
+ which cygpath 2> /dev/null
+ if [ $? = 1 ]; then
+ IS_WIN=false
+ else
+ IS_WIN=true
+ WIN_PROTO_DIR=`cygpath --windows $PROTO_DIR`
+ WIN_JAVA_DIR=`cygpath --windows $JAVA_DIR`
+ fi
+ mkdir -p $JAVA_DIR 2> /dev/null
+ for PROTO_FILE in `ls $PROTO_DIR/*.proto 2> /dev/null`
+ do
+ if [ "$IS_WIN" = "true" ]; then
+ protoc -I$WIN_PROTO_DIR --java_out=$WIN_JAVA_DIR $PROTO_FILE
+ else
+ protoc -I$PROTO_DIR --java_out=$JAVA_DIR $PROTO_FILE
+ fi
+ done
-
-
+
+
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 6748d60a7f..041e83ff66 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -95,6 +95,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-2863. Support web services for YARN and MR components. (Thomas
Graves via vinodkv)
+ MAPREDUCE-3251. Network ACLs can prevent some clients from talking to MR ApplicationMaster
+ (Anupam Seth via mahadev)
+
IMPROVEMENTS
MAPREDUCE-3297. Moved log related components into yarn-common so that
@@ -302,6 +305,15 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3530. Fixed an NPE occuring during scheduling in the
ResourceManager. (Arun C Murthy via vinodkv)
+ MAPREDUCE-3484. Fixed JobEndNotifier to not get interrupted before completing
+ all its retries. (Ravi Prakash via vinodkv)
+
+ MAPREDUCE-3531. Fixed a race in ContainerTokenSecretManager. (Robert Joseph
+ Evans via sseth)
+
+ MAPREDUCE-3560. TestRMNodeTransitions is failing on trunk.
+ (Siddharth Seth via mahadev)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index f9de4bc503..33c1fd3cc0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -375,6 +375,16 @@ public void handle(JobFinishEvent event) {
// this is the only job, so shut down the Appmaster
// note in a workflow scenario, this may lead to creation of a new
// job (FIXME?)
+ try {
+ LOG.info("Job end notification started for jobID : "
+ + job.getReport().getJobId());
+ JobEndNotifier notifier = new JobEndNotifier();
+ notifier.setConf(getConfig());
+ notifier.notify(job.getReport());
+ } catch (InterruptedException ie) {
+ LOG.warn("Job end notification interrupted for jobID : "
+ + job.getReport().getJobId(), ie );
+ }
// TODO:currently just wait for some time so clients can know the
// final states. Will be removed once RM come on.
@@ -390,16 +400,6 @@ public void handle(JobFinishEvent event) {
stop();
// Send job-end notification
- try {
- LOG.info("Job end notification started for jobID : "
- + job.getReport().getJobId());
- JobEndNotifier notifier = new JobEndNotifier();
- notifier.setConf(getConfig());
- notifier.notify(job.getReport());
- } catch (InterruptedException ie) {
- LOG.warn("Job end notification interrupted for jobID : "
- + job.getReport().getJobId(), ie );
- }
} catch (Throwable t) {
LOG.warn("Graceful stop failed ", t);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
index 46cb11e924..3cf6ea9c70 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
@@ -96,13 +96,20 @@ public void testNotifyRetries() throws InterruptedException {
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL, "http://nonexistent");
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "3");
conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, "3");
+ conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "3");
+ conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, "3");
JobReport jobReport = Mockito.mock(JobReport.class);
+ long startTime = System.currentTimeMillis();
this.notificationCount = 0;
this.setConf(conf);
this.notify(jobReport);
+ long endTime = System.currentTimeMillis();
Assert.assertEquals("Only 3 retries were expected but was : "
+ this.notificationCount, this.notificationCount, 3);
+ Assert.assertTrue("Should have taken more than 9 seconds it took "
+ + (endTime - startTime), endTime - startTime > 9000);
+
}
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 71ea84bb8c..3b6fc9f618 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
@@ -68,6 +68,7 @@
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -156,30 +157,37 @@ private MRClientProtocol getProxy() throws YarnRemoteException {
application = rm.getApplicationReport(appId);
continue;
}
- UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(
- UserGroupInformation.getCurrentUser().getUserName());
- serviceAddr = application.getHost() + ":" + application.getRpcPort();
- if (UserGroupInformation.isSecurityEnabled()) {
- String clientTokenEncoded = application.getClientToken();
- Token clientToken =
- new Token();
- clientToken.decodeFromUrlString(clientTokenEncoded);
- // RPC layer client expects ip:port as service for tokens
- InetSocketAddress addr = NetUtils.createSocketAddr(application
- .getHost(), application.getRpcPort());
- clientToken.setService(new Text(addr.getAddress().getHostAddress()
- + ":" + addr.getPort()));
- newUgi.addToken(clientToken);
- }
- LOG.info("The url to track the job: " + application.getTrackingUrl());
- LOG.debug("Connecting to " + serviceAddr);
- final String tempStr = serviceAddr;
- realProxy = newUgi.doAs(new PrivilegedExceptionAction() {
- @Override
- public MRClientProtocol run() throws IOException {
- return instantiateAMProxy(tempStr);
+ if(!conf.getBoolean(YarnConfiguration.RM_AM_NETWORK_ACL_CLOSED, false)) {
+ UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(
+ UserGroupInformation.getCurrentUser().getUserName());
+ serviceAddr = application.getHost() + ":" + application.getRpcPort();
+ if (UserGroupInformation.isSecurityEnabled()) {
+ String clientTokenEncoded = application.getClientToken();
+ Token clientToken =
+ new Token();
+ clientToken.decodeFromUrlString(clientTokenEncoded);
+ // RPC layer client expects ip:port as service for tokens
+ InetSocketAddress addr = NetUtils.createSocketAddr(application
+ .getHost(), application.getRpcPort());
+ clientToken.setService(new Text(addr.getAddress().getHostAddress()
+ + ":" + addr.getPort()));
+ newUgi.addToken(clientToken);
}
- });
+ LOG.info("The url to track the job: " + application.getTrackingUrl());
+ LOG.debug("Connecting to " + serviceAddr);
+ final String tempStr = serviceAddr;
+ realProxy = newUgi.doAs(new PrivilegedExceptionAction() {
+ @Override
+ public MRClientProtocol run() throws IOException {
+ return instantiateAMProxy(tempStr);
+ }
+ });
+ } else {
+ logApplicationReportInfo(application);
+ LOG.info("Network ACL closed to AM for job " + jobId
+ + ". Redirecting to job history server.");
+ return checkAndGetHSProxy(null, JobState.RUNNING);
+ }
return realProxy;
} catch (IOException e) {
//possibly the AM has crashed
@@ -240,10 +248,55 @@ public MRClientProtocol run() throws IOException {
return realProxy;
}
+ private void logApplicationReportInfo(ApplicationReport application) {
+ if(application == null) {
+ return;
+ }
+ LOG.info("AppId: " + application.getApplicationId()
+ + " # reserved containers: "
+ + application.getApplicationResourceUsageReport().getNumReservedContainers()
+ + " # used containers: "
+ + application.getApplicationResourceUsageReport().getNumUsedContainers()
+ + " Needed resources (memory): "
+ + application.getApplicationResourceUsageReport().getNeededResources().getMemory()
+ + " Reserved resources (memory): "
+ + application.getApplicationResourceUsageReport().getReservedResources().getMemory()
+ + " Used resources (memory): "
+ + application.getApplicationResourceUsageReport().getUsedResources().getMemory()
+ + " Diagnostics: "
+ + application.getDiagnostics()
+ + " Start time: "
+ + application.getStartTime()
+ + " Finish time: "
+ + application.getFinishTime()
+ + " Host: "
+ + application.getHost()
+ + " Name: "
+ + application.getName()
+ + " Orig. tracking url: "
+ + application.getOriginalTrackingUrl()
+ + " Queue: "
+ + application.getQueue()
+ + " RPC port: "
+ + application.getRpcPort()
+ + " Tracking url: "
+ + application.getTrackingUrl()
+ + " User: "
+ + application.getUser()
+ + " Client token: "
+ + application.getClientToken()
+ + " Final appl. status: "
+ + application.getFinalApplicationStatus()
+ + " Yarn appl. state: "
+ + application.getYarnApplicationState()
+ );
+ }
+
private MRClientProtocol checkAndGetHSProxy(
ApplicationReport applicationReport, JobState state) {
if (null == historyServerProxy) {
- LOG.warn("Job History Server is not configured.");
+ LOG.warn("Job History Server is not configured or " +
+ "job information not yet available on History Server.");
return getNotRunningJob(applicationReport, state);
}
return historyServerProxy;
@@ -452,4 +505,4 @@ public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
throw new IOException("Cannot get log path for a in-progress job");
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
index b17cb427d3..7bd94a7268 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
@@ -22,6 +22,8 @@
import static org.mockito.Mockito.*;
import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
import junit.framework.Assert;
@@ -31,8 +33,13 @@
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.records.Counter;
+import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
+import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
@@ -45,15 +52,30 @@
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
/**
* Tests for ClientServiceDelegate.java
*/
+@RunWith(value = Parameterized.class)
public class TestClientServiceDelegate {
private JobID oldJobId = JobID.forName("job_1315895242400_2");
private org.apache.hadoop.mapreduce.v2.api.records.JobId jobId = TypeConverter
.toYarn(oldJobId);
+ private boolean isAMReachableFromClient;
+
+ public TestClientServiceDelegate(boolean isAMReachableFromClient) {
+ this.isAMReachableFromClient = isAMReachableFromClient;
+ }
+
+ @Parameters
+ public static Collection