From e49ef2e6c5e2dba5677ff742a32344b5fc82435a Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Tue, 20 Sep 2011 13:11:12 +0000
Subject: [PATCH 01/68] HADOOP-7639. Enhance HttpServer to allow passing
path-specs for filtering, so that servers like Yarn WebApp can get filtered
the paths served by their own injected servlets. Contributed by Thomas
Graves.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173130 13f79535-47bb-0310-9956-ffa450edef68
---
.../hadoop-common/CHANGES.txt | 4 +
.../org/apache/hadoop/http/HttpServer.java | 23 +++
.../hadoop/http/HttpServerFunctionalTest.java | 27 ++++
.../apache/hadoop/http/TestPathFilter.java | 145 ++++++++++++++++++
4 files changed, 199 insertions(+)
create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a253040539..50311ecc39 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -388,6 +388,10 @@ Release 0.23.0 - Unreleased
HADOOP-7599. Script improvements to setup a secure Hadoop cluster
(Eric Yang via ddas)
+ HADOOP-7639. Enhance HttpServer to allow passing path-specs for filtering,
+ so that servers like Yarn WebApp can get filtered the paths served by
+ their own injected servlets. (Thomas Graves via vinodkv)
+
OPTIMIZATIONS
HADOOP-7333. Performance improvement in PureJavaCrc32. (Eric Caspole
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
index 00cdf32746..de506c91b2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
@@ -124,6 +124,29 @@ public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, Connector connector) throws IOException {
this(name, bindAddress, port, findPort, conf, null, connector);
}
+
+ /**
+ * Create a status server on the given port. Allows you to specify the
+ * path specifications that this server will be serving so that they will be
+ * added to the filters properly.
+ *
+ * @param name The name of the server
+ * @param bindAddress The address for this server
+ * @param port The port to use on the server
+ * @param findPort whether the server should start at the given port and
+ * increment by 1 until it finds a free port.
+ * @param conf Configuration
+ * @param pathSpecs Path specifications that this httpserver will be serving.
+ * These will be added to any filters.
+ */
+ public HttpServer(String name, String bindAddress, int port,
+ boolean findPort, Configuration conf, String[] pathSpecs) throws IOException {
+ this(name, bindAddress, port, findPort, conf, null, null);
+ for (String path : pathSpecs) {
+ LOG.info("adding path spec: " + path);
+ addFilterPathMapping(path, webAppContext);
+ }
+ }
/**
* Create a status server on the given port.
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java
index 07688137d5..aff74b573b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java
@@ -70,6 +70,21 @@ public static HttpServer createTestServer(Configuration conf)
return createServer(TEST, conf);
}
+ /**
+ * Create but do not start the test webapp server. The test webapp dir is
+ * prepared/checked in advance.
+ * @param conf the server configuration to use
+ * @return the server instance
+ *
+ * @throws IOException if a problem occurs
+ * @throws AssertionError if a condition was not met
+ */
+ public static HttpServer createTestServer(Configuration conf,
+ String[] pathSpecs) throws IOException {
+ prepareTestWebapp();
+ return createServer(TEST, conf, pathSpecs);
+ }
+
/**
* Prepare the test webapp by creating the directory from the test properties
* fail if the directory cannot be created.
@@ -104,6 +119,18 @@ public static HttpServer createServer(String webapp, Configuration conf)
throws IOException {
return new HttpServer(webapp, "0.0.0.0", 0, true, conf);
}
+ /**
+ * Create an HttpServer instance for the given webapp
+ * @param webapp the webapp to work with
+ * @param conf the configuration to use for the server
+ * @param pathSpecs the paths specifications the server will service
+ * @return the server
+ * @throws IOException if it could not be created
+ */
+ public static HttpServer createServer(String webapp, Configuration conf,
+ String[] pathSpecs) throws IOException {
+ return new HttpServer(webapp, "0.0.0.0", 0, true, conf, pathSpecs);
+ }
/**
* Create and start a server with the test webapp
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java
new file mode 100644
index 0000000000..73aebea486
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.http;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.Set;
+import java.util.TreeSet;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+public class TestPathFilter extends HttpServerFunctionalTest {
+ static final Log LOG = LogFactory.getLog(HttpServer.class);
+ static final Set<String> RECORDS = new TreeSet<String>();
+
+ /** A very simple filter that records accessed uri's */
+ static public class RecordingFilter implements Filter {
+ private FilterConfig filterConfig = null;
+
+ public void init(FilterConfig filterConfig) {
+ this.filterConfig = filterConfig;
+ }
+
+ public void destroy() {
+ this.filterConfig = null;
+ }
+
+ public void doFilter(ServletRequest request, ServletResponse response,
+ FilterChain chain) throws IOException, ServletException {
+ if (filterConfig == null)
+ return;
+
+ String uri = ((HttpServletRequest)request).getRequestURI();
+ LOG.info("filtering " + uri);
+ RECORDS.add(uri);
+ chain.doFilter(request, response);
+ }
+
+ /** Configuration for RecordingFilter */
+ static public class Initializer extends FilterInitializer {
+ public Initializer() {}
+
+ public void initFilter(FilterContainer container, Configuration conf) {
+ container.addFilter("recording", RecordingFilter.class.getName(), null);
+ }
+ }
+ }
+
+
+ /** access a url, ignoring some IOExceptions such as when the page does not exist */
+ static void access(String urlstring) throws IOException {
+ LOG.warn("access " + urlstring);
+ URL url = new URL(urlstring);
+
+ URLConnection connection = url.openConnection();
+ connection.connect();
+
+ try {
+ BufferedReader in = new BufferedReader(new InputStreamReader(
+ connection.getInputStream()));
+ try {
+ for(; in.readLine() != null; );
+ } finally {
+ in.close();
+ }
+ } catch(IOException ioe) {
+ LOG.warn("urlstring=" + urlstring, ioe);
+ }
+ }
+
+ @Test
+ public void testPathSpecFilters() throws Exception {
+ Configuration conf = new Configuration();
+
+ //start a http server with RecordingFilter
+ conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+ RecordingFilter.Initializer.class.getName());
+ String[] pathSpecs = { "/path", "/path/*" };
+ HttpServer http = createTestServer(conf, pathSpecs);
+ http.start();
+
+ final String baseURL = "/path";
+ final String baseSlashURL = "/path/";
+ final String addedURL = "/path/nodes";
+ final String addedSlashURL = "/path/nodes/";
+ final String longURL = "/path/nodes/foo/job";
+ final String rootURL = "/";
+ final String allURL = "/*";
+
+ final String[] filteredUrls = {baseURL, baseSlashURL, addedURL,
+ addedSlashURL, longURL};
+ final String[] notFilteredUrls = {rootURL, allURL};
+
+ // access the urls and verify our paths specs got added to the
+ // filters
+ final String prefix = "http://localhost:" + http.getPort();
+ try {
+ for(int i = 0; i < filteredUrls.length; i++) {
+ access(prefix + filteredUrls[i]);
+ }
+ for(int i = 0; i < notFilteredUrls.length; i++) {
+ access(prefix + notFilteredUrls[i]);
+ }
+ } finally {
+ http.stop();
+ }
+
+ LOG.info("RECORDS = " + RECORDS);
+
+ //verify records
+ for(int i = 0; i < filteredUrls.length; i++) {
+ assertTrue(RECORDS.remove(filteredUrls[i]));
+ }
+ assertTrue(RECORDS.isEmpty());
+ }
+}
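For context (not part of the patch): a minimal usage sketch of the new pathSpecs constructor, assuming a caller that wants its own servlet paths covered by the configured filters. The server name, bind address, and path specs below are made-up examples.

    // Hedged sketch: build an HttpServer whose injected servlet paths are
    // included in the filter mappings via the new pathSpecs argument.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer;

    public class PathSpecServerExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] pathSpecs = { "/ws", "/ws/*" };          // paths our own servlets will serve
        // port 0 plus findPort=true lets the server pick any free port
        HttpServer server = new HttpServer("example", "0.0.0.0", 0, true, conf, pathSpecs);
        server.start();
        System.out.println("listening on port " + server.getPort());
        server.stop();
      }
    }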
From adcebcbc03d43b9ae8e2a5960d0f13c8f0d233a2 Mon Sep 17 00:00:00 2001
From: Matthew Foley
Date: Tue, 20 Sep 2011 22:09:06 +0000
Subject: [PATCH 02/68] HADOOP-7630. hadoop-metrics2.properties should have a
property *.period set to a default value for metrics. Contributed by Eric
Yang.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173402 13f79535-47bb-0310-9956-ffa450edef68
---
.../hadoop-common/CHANGES.txt | 3 +++
.../src/main/packages/hadoop-setup-conf.sh | 5 ++++-
.../templates/conf/hadoop-metrics2.properties | 20 +++++++++++++++++++
3 files changed, 27 insertions(+), 1 deletion(-)
create mode 100644 hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-metrics2.properties
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 50311ecc39..a20b02ed23 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -402,6 +402,9 @@ Release 0.23.0 - Unreleased
BUG FIXES
+ HADOOP-7630. hadoop-metrics2.properties should have a property *.period
+ set to a default value for metrics. (Eric Yang via mattf)
+
HADOOP-7327. FileSystem.listStatus() throws NullPointerException instead of
IOException upon access permission failure. (mattf)
diff --git a/hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh b/hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh
index 8e903cf308..96a989fc39 100644
--- a/hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh
+++ b/hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh
@@ -475,7 +475,10 @@ else
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/taskcontroller.cfg ${HADOOP_CONF_DIR}/taskcontroller.cfg
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-metrics2.properties ${HADOOP_CONF_DIR}/hadoop-metrics2.properties
if [ ! -e ${HADOOP_CONF_DIR}/capacity-scheduler.xml ]; then
- template_generator ${HADOOP_PREFIX}/share/hadoop/templates/conf/capacity-scheduler.xml ${HADOOP_CONF_DIR}/capacity-scheduler.xml
+ template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/capacity-scheduler.xml ${HADOOP_CONF_DIR}/capacity-scheduler.xml
+ fi
+ if [ ! -e ${HADOOP_CONF_DIR}/hadoop-metrics2.properties ]; then
+ cp ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-metrics2.properties ${HADOOP_CONF_DIR}/hadoop-metrics2.properties
fi
if [ ! -e ${HADOOP_CONF_DIR}/log4j.properties ]; then
cp ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/log4j.properties ${HADOOP_CONF_DIR}/log4j.properties
diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-metrics2.properties b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-metrics2.properties
new file mode 100644
index 0000000000..4a1019385c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-metrics2.properties
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+*.period=60
+
From 8cd6eb67f329ea22f0d99c4a9518df1fea815dbe Mon Sep 17 00:00:00 2001
From: Matthew Foley
Date: Tue, 20 Sep 2011 22:40:14 +0000
Subject: [PATCH 03/68] HADOOP-7633. Missed this file in previous commit.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173411 13f79535-47bb-0310-9956-ffa450edef68
---
.../packages/templates/conf/log4j.properties | 195 ++++++++++++++++++
1 file changed, 195 insertions(+)
create mode 100644 hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
new file mode 100644
index 0000000000..d765e96c4f
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
@@ -0,0 +1,195 @@
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security appender
+#
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+#new logger
+# Define some default values that can be overridden by system properties
+hadoop.security.logger=INFO,console
+log4j.category.SecurityLogger=${hadoop.security.logger}
+
+# hdfs audit logging
+
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+# mapred audit logging
+
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+#
+# FSNamesystem Audit logging
+# All audit events are logged at INFO level
+#
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+#
+# Job Summary Appender
+#
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
+
+#
+# MapReduce Audit Log Appender
+#
+
+# Set the MapReduce audit log filename
+#hadoop.mapreduce.audit.log.file=hadoop-mapreduce.audit.log
+
+# Appender for AuditLogger.
+# Requires the following system properties to be set
+# - hadoop.log.dir (Hadoop Log directory)
+# - hadoop.mapreduce.audit.log.file (MapReduce audit log filename)
+
+#log4j.logger.org.apache.hadoop.mapred.AuditLogger=INFO,MRAUDIT
+#log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+#log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.MRAUDIT.File=${hadoop.log.dir}/${hadoop.mapreduce.audit.log.file}
+#log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+#log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+#log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Yarn ResourceManager Application Summary Log
+#
+# Set the ResourceManager summary log filename
+#yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
+# Set the ResourceManager summary log level and appender
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# Appender for ResourceManager Application Summary Log - rolled daily
+# Requires the following properties to be set
+# - hadoop.log.dir (Hadoop Log directory)
+# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+
+#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd
From b8e8b8da75baf62ac7465e64acf17f280475bb20 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Wed, 21 Sep 2011 01:10:49 +0000
Subject: [PATCH 04/68] MAPREDUCE-3018. Fixed -file option for streaming.
Contributed by Mahadev Konar.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173451 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 2 ++
.../apache/hadoop/streaming/StreamJob.java | 25 +++++++++++++------
2 files changed, 19 insertions(+), 8 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 1d637419eb..0fc0b4eb1d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1370,6 +1370,8 @@ Release 0.23.0 - Unreleased
YarnClientProtocolProvider and ensured MiniMRYarnCluster sets JobHistory
configuration for tests. (acmurthy)
+ MAPREDUCE-3018. Fixed -file option for streaming. (mahadev via acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java b/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
index 3212a1fcdf..27629476d9 100644
--- a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
+++ b/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
@@ -22,8 +22,10 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
+import java.net.URISyntaxException;
import java.net.URLEncoder;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
@@ -43,6 +45,7 @@
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.mapred.FileInputFormat;
@@ -277,19 +280,25 @@ void parseArgv() {
if (values != null && values.length > 0) {
LOG.warn("-file option is deprecated, please use generic option" +
" -files instead.");
- StringBuilder unpackRegex = new StringBuilder(
- config_.getPattern(MRJobConfig.JAR_UNPACK_PATTERN,
- JobConf.UNPACK_JAR_PATTERN_DEFAULT).pattern());
+
+ String fileList = null;
for (String file : values) {
packageFiles_.add(file);
- String fname = new File(file).getName();
- unpackRegex.append("|(?:").append(Pattern.quote(fname)).append(")");
+ try {
+ URI pathURI = new URI(file);
+ Path path = new Path(pathURI);
+ FileSystem localFs = FileSystem.getLocal(config_);
+ String finalPath = path.makeQualified(localFs).toString();
+ fileList = fileList == null ? finalPath : fileList + "," + finalPath;
+ } catch (Exception e) {
+ throw new IllegalArgumentException(e);
+ }
}
- config_.setPattern(MRJobConfig.JAR_UNPACK_PATTERN,
- Pattern.compile(unpackRegex.toString()));
+ config_.set("tmpfiles", config_.get("tmpfiles", "") +
+ (fileList == null ? "" : fileList));
validate(packageFiles_);
}
-
+
String fsName = cmdLine.getOptionValue("dfs");
if (null != fsName){
LOG.warn("-dfs option is deprecated, please use -fs instead.");
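For context (not part of the patch): the key step of the new -file handling is qualifying each local file against the local FileSystem before appending it to the "tmpfiles" job resource list. A minimal sketch of that qualification, using an example path:

    // Hedged sketch: resolve a -file argument to a fully qualified local URI,
    // the same way StreamJob now builds the comma-separated "tmpfiles" value.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class QualifyLocalFileExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem localFs = FileSystem.getLocal(conf);
        Path path = new Path(new URI("/tmp/mapper.py"));   // example -file argument
        String qualified = path.makeQualified(localFs).toString();
        System.out.println(qualified);                     // e.g. file:/tmp/mapper.py
      }
    }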
From 339b85b88ead760c6d4dc0f63a72780d6d5df8c2 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Wed, 21 Sep 2011 01:14:20 +0000
Subject: [PATCH 05/68] MAPREDUCE-3036. Fixed metrics for reserved resources in
CS. Contributed by Robert Evans.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173453 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 ++
.../scheduler/QueueMetrics.java | 54 ++++++++++++++++++-
.../scheduler/capacity/LeafQueue.java | 9 ++--
.../scheduler/capacity/TestLeafQueue.java | 26 +++++++++
4 files changed, 87 insertions(+), 5 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 0fc0b4eb1d..6930fcb342 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1372,6 +1372,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3018. Fixed -file option for streaming. (mahadev via acmurthy)
+ MAPREDUCE-3036. Fixed metrics for reserved resources in CS. (Robert Evans
+ via acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 61c829507e..6928cdb19d 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -32,10 +32,8 @@
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterInt;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
-import org.apache.hadoop.yarn.api.records.ApplicationState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
-import org.apache.hadoop.yarn.util.Self;
import static org.apache.hadoop.yarn.server.resourcemanager.resource.Resources.*;
import org.slf4j.LoggerFactory;
@@ -282,4 +280,56 @@ public void unreserveResource(String user, Resource res) {
parent.unreserveResource(user, res);
}
}
+
+ public int getAppsSubmitted() {
+ return appsSubmitted.value();
+ }
+
+ public int getAppsRunning() {
+ return appsRunning.value();
+ }
+
+ public int getAppsPending() {
+ return appsPending.value();
+ }
+
+ public int getAppsCompleted() {
+ return appsCompleted.value();
+ }
+
+ public int getAppsKilled() {
+ return appsKilled.value();
+ }
+
+ public int getAppsFailed() {
+ return appsFailed.value();
+ }
+
+ public int getAllocatedGB() {
+ return allocatedGB.value();
+ }
+
+ public int getAllocatedContainers() {
+ return allocatedContainers.value();
+ }
+
+ public int getAvailableGB() {
+ return availableGB.value();
+ }
+
+ public int getPendingGB() {
+ return pendingGB.value();
+ }
+
+ public int getPendingContainers() {
+ return pendingContainers.value();
+ }
+
+ public int getReservedGB() {
+ return reservedGB.value();
+ }
+
+ public int getReservedContainers() {
+ return reservedContainers.value();
+ }
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 9a3b1c4da3..0753e3795c 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -1152,14 +1152,17 @@ private Resource assignContainer(Resource clusterResource, SchedulerNode node,
private void reserve(SchedulerApp application, Priority priority,
SchedulerNode node, RMContainer rmContainer, Container container) {
- rmContainer = application.reserve(node, priority, rmContainer, container);
- node.reserveResource(application, priority, rmContainer);
-
// Update reserved metrics if this is the first reservation
if (rmContainer == null) {
getMetrics().reserveResource(
application.getUser(), container.getResource());
}
+
+ // Inform the application
+ rmContainer = application.reserve(node, priority, rmContainer, container);
+
+ // Update the node
+ node.reserveResource(application, priority, rmContainer);
}
private void unreserve(SchedulerApp application, Priority priority,
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 3ea0100332..70c4d1a1f4 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -202,6 +202,8 @@ public void testSingleQueueWithOneUser() throws Exception {
assertEquals(1*GB, a.getUsedResources().getMemory());
assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0, a.getMetrics().getReservedGB());
+ assertEquals(1, a.getMetrics().getAllocatedGB());
// Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
// you can get one container more than user-limit
@@ -209,12 +211,16 @@ public void testSingleQueueWithOneUser() throws Exception {
assertEquals(2*GB, a.getUsedResources().getMemory());
assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0, a.getMetrics().getReservedGB());
+ assertEquals(2, a.getMetrics().getAllocatedGB());
// Can't allocate 3rd due to user-limit
a.assignContainers(clusterResource, node_0);
assertEquals(2*GB, a.getUsedResources().getMemory());
assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0, a.getMetrics().getReservedGB());
+ assertEquals(2, a.getMetrics().getAllocatedGB());
// Bump up user-limit-factor, now allocate should work
a.setUserLimitFactor(10);
@@ -222,12 +228,16 @@ public void testSingleQueueWithOneUser() throws Exception {
assertEquals(3*GB, a.getUsedResources().getMemory());
assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0, a.getMetrics().getReservedGB());
+ assertEquals(3, a.getMetrics().getAllocatedGB());
// One more should work, for app_1, due to user-limit-factor
a.assignContainers(clusterResource, node_0);
assertEquals(4*GB, a.getUsedResources().getMemory());
assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0, a.getMetrics().getReservedGB());
+ assertEquals(4, a.getMetrics().getAllocatedGB());
// Test max-capacity
// Now - no more allocs since we are at max-cap
@@ -236,6 +246,8 @@ public void testSingleQueueWithOneUser() throws Exception {
assertEquals(4*GB, a.getUsedResources().getMemory());
assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0, a.getMetrics().getReservedGB());
+ assertEquals(4, a.getMetrics().getAllocatedGB());
// Release each container from app_0
for (RMContainer rmContainer : app_0.getLiveContainers()) {
@@ -245,6 +257,8 @@ public void testSingleQueueWithOneUser() throws Exception {
assertEquals(1*GB, a.getUsedResources().getMemory());
assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0, a.getMetrics().getReservedGB());
+ assertEquals(1, a.getMetrics().getAllocatedGB());
// Release each container from app_1
for (RMContainer rmContainer : app_1.getLiveContainers()) {
@@ -254,6 +268,8 @@ public void testSingleQueueWithOneUser() throws Exception {
assertEquals(0*GB, a.getUsedResources().getMemory());
assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0, a.getMetrics().getReservedGB());
+ assertEquals(0, a.getMetrics().getAllocatedGB());
}
@Test
@@ -473,6 +489,8 @@ public void testReservation() throws Exception {
assertEquals(1*GB, a.getUsedResources().getMemory());
assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0, a.getMetrics().getReservedGB());
+ assertEquals(1, a.getMetrics().getAllocatedGB());
// Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
// you can get one container more than user-limit
@@ -480,6 +498,8 @@ public void testReservation() throws Exception {
assertEquals(2*GB, a.getUsedResources().getMemory());
assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0, a.getMetrics().getReservedGB());
+ assertEquals(2, a.getMetrics().getAllocatedGB());
// Now, reservation should kick in for app_1
a.assignContainers(clusterResource, node_0);
@@ -488,6 +508,8 @@ public void testReservation() throws Exception {
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
assertEquals(2*GB, node_0.getUsedResource().getMemory());
+ assertEquals(4, a.getMetrics().getReservedGB());
+ assertEquals(2, a.getMetrics().getAllocatedGB());
// Now free 1 container from app_0 i.e. 1G
a.completedContainer(clusterResource, app_0, node_0,
@@ -498,6 +520,8 @@ public void testReservation() throws Exception {
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
assertEquals(1*GB, node_0.getUsedResource().getMemory());
+ assertEquals(4, a.getMetrics().getReservedGB());
+ assertEquals(1, a.getMetrics().getAllocatedGB());
// Now finish another container from app_0 and fulfill the reservation
a.completedContainer(clusterResource, app_0, node_0,
@@ -508,6 +532,8 @@ public void testReservation() throws Exception {
assertEquals(4*GB, app_1.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentReservation().getMemory());
assertEquals(4*GB, node_0.getUsedResource().getMemory());
+ assertEquals(0, a.getMetrics().getReservedGB());
+ assertEquals(4, a.getMetrics().getAllocatedGB());
}
@Test
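For context (not part of the patch): a small helper showing how the new QueueMetrics getters can be read; the metrics instance would come from a queue, e.g. queue.getMetrics() as in the test above.

    // Hedged sketch: log the reservation/allocation metrics that the fix keeps accurate.
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;

    public class ReservationMetricsLogger {
      public static void log(QueueMetrics metrics) {
        int reservedGB = metrics.getReservedGB();          // memory currently reserved, in GB
        int reservedContainers = metrics.getReservedContainers();
        int allocatedGB = metrics.getAllocatedGB();        // memory actually allocated, in GB
        System.out.println("reserved=" + reservedGB + "GB in " + reservedContainers
            + " containers, allocated=" + allocatedGB + "GB");
      }
    }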
From 1d067c6e2b14e08943a46129f4ed521890d3ca22 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Wed, 21 Sep 2011 01:26:48 +0000
Subject: [PATCH 06/68] MAPREDUCE-2998. Fixed a bug in TaskAttemptImpl which
caused it to fork bin/mapred too many times. Contributed by Vinod K V.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173456 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 6930fcb342..62436d3a4a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1375,6 +1375,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3036. Fixed metrics for reserved resources in CS. (Robert Evans
via acmurthy)
+ MAPREDUCE-2998. Fixed a bug in TaskAttemptImpl which caused it to fork
+ bin/mapred too many times. (vinodkv via acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index cc9f6bddf0..95c344bfbe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -153,7 +153,7 @@ public abstract class TaskAttemptImpl implements
private Token<JobTokenIdentifier> jobToken;
private static AtomicBoolean initialClasspathFlag = new AtomicBoolean();
private static String initialClasspath = null;
- private final Object classpathLock = new Object();
+ private static final Object classpathLock = new Object();
private long launchTime;
private long finishTime;
private WrappedProgressSplitsBlock progressSplitBlock;
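For context (not part of the patch): the one-line change above works because initialClasspath and initialClasspathFlag are static, so an instance-level lock let each TaskAttemptImpl compute the classpath (forking bin/mapred) independently. A generic sketch of the pattern the fix restores, with made-up names:

    // Hedged sketch: static state must be guarded by a static lock so that
    // the expensive initialization happens once per JVM, not once per instance.
    public class LazyStaticInit {
      private static String cached;                        // shared across all instances
      private static final Object LOCK = new Object();     // static, like the state it guards

      String get() {
        synchronized (LOCK) {
          if (cached == null) {
            cached = expensiveComputation();               // analogous to forking bin/mapred
          }
          return cached;
        }
      }

      private static String expensiveComputation() {
        return "computed-once";
      }
    }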
From 0e870d7d18900058bfff720ff3b9e3a4a6078e9c Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Wed, 21 Sep 2011 01:32:14 +0000
Subject: [PATCH 07/68] MAPREDUCE-3023. Fixed clients to display queue state
correctly. Contributed by Ravi Prakash.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173458 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../apache/hadoop/mapreduce/TypeConverter.java | 16 +++++++++++++---
.../hadoop/mapreduce/TestTypeConverter.java | 13 +++++++++++++
3 files changed, 29 insertions(+), 3 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 62436d3a4a..308014eb07 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1378,6 +1378,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2998. Fixed a bug in TaskAttemptImpl which caused it to fork
bin/mapred too many times. (vinodkv via acmurthy)
+ MAPREDUCE-3023. Fixed clients to display queue state correctly. (Ravi
+ Prakash via acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
index a678e4660e..9cbc9ad6d4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
@@ -47,6 +47,7 @@
import org.apache.hadoop.yarn.api.records.ApplicationState;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -290,6 +291,15 @@ public static org.apache.hadoop.mapred.JobStatus fromYarn(
jobFile, trackingUrl);
}
+ public static org.apache.hadoop.mapreduce.QueueState fromYarn(
+ QueueState state) {
+ org.apache.hadoop.mapreduce.QueueState qState =
+ org.apache.hadoop.mapreduce.QueueState.getState(
+ state.toString().toLowerCase());
+ return qState;
+ }
+
+
public static int fromYarn(JobState state) {
switch (state) {
case NEW:
@@ -431,9 +441,9 @@ public static JobStatus[] fromYarnApps(List applications,
public static QueueInfo fromYarn(org.apache.hadoop.yarn.api.records.QueueInfo
queueInfo, Configuration conf) {
- return new QueueInfo(queueInfo.getQueueName(),
- queueInfo.toString(), QueueState.RUNNING,
- TypeConverter.fromYarnApps(queueInfo.getApplications(), conf));
+ return new QueueInfo(queueInfo.getQueueName(),queueInfo.toString(),
+ fromYarn(queueInfo.getQueueState()), TypeConverter.fromYarnApps(
+ queueInfo.getApplications(), conf));
}
public static QueueInfo[] fromYarnQueueInfo(
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
index bda7fb9d65..1aeae987c8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
@@ -19,11 +19,14 @@
import junit.framework.Assert;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationState;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.QueueInfoPBImpl;
+
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.junit.Test;
@@ -67,4 +70,14 @@ public void testFromYarnApplicationReport() {
Assert.assertEquals("jobId set incorrectly", 6789, status.getJobID().getId());
Assert.assertEquals("state set incorrectly", JobStatus.State.KILLED, status.getState());
}
+
+ @Test
+ public void testFromYarnQueueInfo() {
+ org.apache.hadoop.yarn.api.records.QueueInfo queueInfo = new QueueInfoPBImpl();
+ queueInfo.setQueueState(org.apache.hadoop.yarn.api.records.QueueState.STOPPED);
+ org.apache.hadoop.mapreduce.QueueInfo returned =
+ TypeConverter.fromYarn(queueInfo, new Configuration());
+ Assert.assertEquals("queueInfo translation didn't work.",
+ returned.getState().toString(), queueInfo.getQueueState().toString().toLowerCase());
+ }
}
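For context (not part of the patch): the queue-state translation added to TypeConverter maps the upper-case YARN enum onto the lower-case mapreduce enum, as the test verifies. A standalone sketch:

    // Hedged sketch: convert a YARN QueueState to the mapreduce QueueState by name.
    import org.apache.hadoop.mapreduce.QueueState;

    public class QueueStateConversionExample {
      public static void main(String[] args) {
        org.apache.hadoop.yarn.api.records.QueueState yarnState =
            org.apache.hadoop.yarn.api.records.QueueState.STOPPED;
        // YARN states are upper case; the mapreduce enum is looked up by lower-case name
        QueueState mrState = QueueState.getState(yarnState.toString().toLowerCase());
        System.out.println(mrState);                       // prints "stopped"
      }
    }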
From 4dc4e9e63f7385ddd1d64ae1345e0d32a4acb9de Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Wed, 21 Sep 2011 02:56:08 +0000
Subject: [PATCH 08/68] HDFS-2340. Support getFileBlockLocations and
getDelegationToken in webhdfs.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173468 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hadoop/hdfs/protocol/DatanodeInfo.java | 20 ++
.../server/namenode/NameNodeRpcServer.java | 8 +-
.../web/resources/NamenodeWebHdfsMethods.java | 59 +++-
.../org/apache/hadoop/hdfs/web/JsonUtil.java | 286 +++++++++++++++++-
.../hadoop/hdfs/web/WebHdfsFileSystem.java | 48 ++-
.../hadoop/hdfs/web/resources/GetOpParam.java | 3 +
.../hdfs/web/resources/OverwriteParam.java | 2 +-
.../hdfs/web/resources/RenewerParam.java | 41 +++
.../hdfs/security/TestDelegationToken.java | 36 ++-
.../web/TestWebHdfsFileSystemContract.java | 13 +
11 files changed, 500 insertions(+), 19 deletions(-)
create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 459d2325d2..a44d09ec32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -16,6 +16,9 @@ Trunk (unreleased changes)
HDFS-2318. Provide authentication to webhdfs using SPNEGO and delegation
tokens. (szetszwo)
+ HDFS-2340. Support getFileBlockLocations and getDelegationToken in webhdfs.
+ (szetszwo)
+
IMPROVEMENTS
HADOOP-7524 Change RPC to allow multiple protocols including multiple versions of the same protocol (sanjay Radia)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 17a09f695e..af3283ee71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -115,6 +115,26 @@ protected DatanodeInfo(DatanodeID nodeID, String location, String hostName) {
this.location = location;
this.hostName = hostName;
}
+
+ /** Constructor */
+ public DatanodeInfo(final String name, final String storageID,
+ final int infoPort, final int ipcPort,
+ final long capacity, final long dfsUsed, final long remaining,
+ final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
+ final String networkLocation, final String hostName,
+ final AdminStates adminState) {
+ super(name, storageID, infoPort, ipcPort);
+
+ this.capacity = capacity;
+ this.dfsUsed = dfsUsed;
+ this.remaining = remaining;
+ this.blockPoolUsed = blockPoolUsed;
+ this.lastUpdate = lastUpdate;
+ this.xceiverCount = xceiverCount;
+ this.location = networkLocation;
+ this.hostName = hostName;
+ this.adminState = adminState;
+ }
/** The raw capacity. */
public long getCapacity() { return capacity; }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index b0411bdd84..a461c5bd7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -62,6 +62,7 @@
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -909,8 +910,11 @@ void verifyVersion(int version) throws IOException {
}
private static String getClientMachine() {
- String clientMachine = Server.getRemoteAddress();
- if (clientMachine == null) {
+ String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
+ if (clientMachine == null) { //not a web client
+ clientMachine = Server.getRemoteAddress();
+ }
+ if (clientMachine == null) { //not a RPC client
clientMachine = "";
}
return clientMachine;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 948466f638..c72437faf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -78,6 +78,7 @@
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
+import org.apache.hadoop.hdfs.web.resources.RenewerParam;
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
@@ -92,7 +93,14 @@
/** Web-hdfs NameNode implementation. */
@Path("")
public class NamenodeWebHdfsMethods {
- private static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
+ public static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
+
+ private static final ThreadLocal<String> REMOTE_ADDRESS = new ThreadLocal<String>();
+
+ /** @return the remote client address. */
+ public static String getRemoteAddress() {
+ return REMOTE_ADDRESS.get();
+ }
private @Context ServletContext context;
private @Context HttpServletRequest request;
@@ -215,6 +223,8 @@ public Response put(
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
+ REMOTE_ADDRESS.set(request.getRemoteAddr());
+ try {
final String fullpath = path.getAbsolutePath();
final NameNode namenode = (NameNode)context.getAttribute("name.node");
@@ -272,6 +282,10 @@ public Response run() throws IOException, URISyntaxException {
default:
throw new UnsupportedOperationException(op + " is not supported");
}
+
+ } finally {
+ REMOTE_ADDRESS.set(null);
+ }
}
});
}
@@ -301,6 +315,8 @@ public Response post(
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
+ REMOTE_ADDRESS.set(request.getRemoteAddr());
+ try {
final String fullpath = path.getAbsolutePath();
final NameNode namenode = (NameNode)context.getAttribute("name.node");
@@ -315,6 +331,10 @@ public Response run() throws IOException, URISyntaxException {
default:
throw new UnsupportedOperationException(op + " is not supported");
}
+
+ } finally {
+ REMOTE_ADDRESS.set(null);
+ }
}
});
}
@@ -335,10 +355,12 @@ public Response root(
final OffsetParam offset,
@QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
final LengthParam length,
+ @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
+ final RenewerParam renewer,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize
) throws IOException, URISyntaxException, InterruptedException {
- return get(ugi, delegation, ROOT, op, offset, length, bufferSize);
+ return get(ugi, delegation, ROOT, op, offset, length, renewer, bufferSize);
}
/** Handle HTTP GET request. */
@@ -356,19 +378,23 @@ public Response get(
final OffsetParam offset,
@QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
final LengthParam length,
+ @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
+ final RenewerParam renewer,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize
) throws IOException, URISyntaxException, InterruptedException {
if (LOG.isTraceEnabled()) {
LOG.trace(op + ": " + path + ", ugi=" + ugi
- + Param.toSortedString(", ", offset, length, bufferSize));
+ + Param.toSortedString(", ", offset, length, renewer, bufferSize));
}
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
+ REMOTE_ADDRESS.set(request.getRemoteAddr());
+ try {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final String fullpath = path.getAbsolutePath();
@@ -381,6 +407,15 @@ public Response run() throws IOException, URISyntaxException {
op.getValue(), offset.getValue(), offset, length, bufferSize);
return Response.temporaryRedirect(uri).build();
}
+ case GETFILEBLOCKLOCATIONS:
+ {
+ final long offsetValue = offset.getValue();
+ final Long lengthValue = length.getValue();
+ final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath,
+ offsetValue, lengthValue != null? lengthValue: offsetValue + 1);
+ final String js = JsonUtil.toJsonString(locatedblocks);
+ return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+ }
case GETFILESTATUS:
{
final HdfsFileStatus status = np.getFileInfo(fullpath);
@@ -392,9 +427,20 @@ public Response run() throws IOException, URISyntaxException {
final StreamingOutput streaming = getListingStream(np, fullpath);
return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
}
+ case GETDELEGATIONTOKEN:
+ {
+ final Token<? extends TokenIdentifier> token = generateDelegationToken(
+ namenode, ugi, renewer.getValue());
+ final String js = JsonUtil.toJsonString(token);
+ return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+ }
default:
throw new UnsupportedOperationException(op + " is not supported");
}
+
+ } finally {
+ REMOTE_ADDRESS.set(null);
+ }
}
});
}
@@ -462,6 +508,9 @@ public Response delete(
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException {
+ REMOTE_ADDRESS.set(request.getRemoteAddr());
+ try {
+
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final String fullpath = path.getAbsolutePath();
@@ -475,6 +524,10 @@ public Response run() throws IOException {
default:
throw new UnsupportedOperationException(op + " is not supported");
}
+
+ } finally {
+ REMOTE_ADDRESS.set(null);
+ }
}
});
}
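For context (not part of the patch): the two new GET operations surface through the ordinary FileSystem client API. A hedged sketch of how a client might exercise them over webhdfs; the namenode address, port, path, and renewer below are assumptions, not taken from the patch:

    // Hedged sketch: read block locations and fetch a delegation token via webhdfs.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.token.Token;

    public class WebHdfsGetOpsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("webhdfs://namenode:50070/"), conf);

        // Served by GETFILEBLOCKLOCATIONS on the NameNode
        FileStatus status = fs.getFileStatus(new Path("/user/example/data.txt"));
        BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
        System.out.println("blocks: " + locations.length);

        // Served by GETDELEGATIONTOKEN, with the renewer passed as the new RenewerParam
        Token<?> token = fs.getDelegationToken("renewer-user");
        System.out.println("token kind: " + token.getKind());
      }
    }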
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 1c18dc334e..314d53b38f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -17,19 +17,31 @@
*/
package org.apache.hadoop.hdfs.web;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
import org.mortbay.util.ajax.JSON;
/** JSON Utilities */
public class JsonUtil {
- private static final ThreadLocal
- hadoop.http.authentication.signature.secret: The signature secret for
- signing the authentication tokens. If not set a random secret is generated at
+ hadoop.http.authentication.signature.secret.file: The signature secret
+ file for signing the authentication tokens. If not set a random secret is generated at
startup time. The same secret should be used for all nodes in the cluster, JobTracker,
- NameNode, DataNode and TastTracker. The default value is a hadoop value.
+ NameNode, DataNode and TastTracker. The default value is
+ ${user.home}/hadoop-http-auth-signature-secret.
+ IMPORTANT: This file should be readable only by the Unix user running the daemons.
hadoop.http.authentication.cookie.domain: The domain to use for the HTTP
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
index cd6ab7b326..666632d5bf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
@@ -22,6 +22,9 @@
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.Reader;
import java.util.HashMap;
import java.util.Map;
@@ -40,8 +43,10 @@
*/
public class AuthenticationFilterInitializer extends FilterInitializer {
- private static final String PREFIX = "hadoop.http.authentication.";
+ static final String PREFIX = "hadoop.http.authentication.";
+ static final String SIGNATURE_SECRET_FILE = AuthenticationFilter.SIGNATURE_SECRET + ".file";
+
/**
* Initializes Alfredo AuthenticationFilter.
*
@@ -67,6 +72,25 @@ public void initFilter(FilterContainer container, Configuration conf) {
}
}
+ String signatureSecretFile = filterConfig.get(SIGNATURE_SECRET_FILE);
+ if (signatureSecretFile == null) {
+ throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);
+ }
+
+ try {
+ StringBuilder secret = new StringBuilder();
+ Reader reader = new FileReader(signatureSecretFile);
+ int c = reader.read();
+ while (c > -1) {
+ secret.append((char)c);
+ c = reader.read();
+ }
+ reader.close();
+ filterConfig.put(AuthenticationFilter.SIGNATURE_SECRET, secret.toString());
+ } catch (IOException ex) {
+ throw new RuntimeException("Could not read HTTP signature secret file: " + signatureSecretFile);
+ }
+
container.addFilter("authentication",
AuthenticationFilter.class.getName(),
filterConfig);
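With this change the filter no longer takes an inline secret: hadoop.http.authentication.signature.secret.file must name a file whose entire contents become the signing secret, identical on every node and readable only by the daemon user. A hedged provisioning sketch follows; the path and the way the secret string is generated are examples, not project defaults.

    // Illustrative provisioning sketch, not part of the patch: write a random
    // secret to a file and point the HTTP-auth configuration at it.
    import java.io.File;
    import java.io.FileWriter;
    import java.io.Writer;
    import java.math.BigInteger;
    import java.security.SecureRandom;

    import org.apache.hadoop.conf.Configuration;

    public class HttpSecretSetup {
      public static void main(String[] args) throws Exception {
        File secretFile = new File("/tmp/hadoop-http-auth-signature-secret"); // example path
        Writer w = new FileWriter(secretFile);
        w.write(new BigInteger(130, new SecureRandom()).toString(32));        // random secret string
        w.close();

        Configuration conf = new Configuration();
        conf.set("hadoop.http.authentication.signature.secret.file",
            secretFile.getAbsolutePath());
        // The filter initializer above reads this file at startup and hands its
        // contents to the AuthenticationFilter as the signing secret.
      }
    }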
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index d4b4030559..e34c202373 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -808,8 +808,8 @@
-  <name>hadoop.http.authentication.signature.secret</name>
-  <value>hadoop</value>
+  <name>hadoop.http.authentication.signature.secret.file</name>
+  <value>${user.home}/hadoop-http-auth-signature-secret</value>
The signature secret for signing the authentication tokens.
If not set a random secret is generated at startup time.
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
index 7a21e4c6b8..2d699ddcf1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
@@ -25,14 +25,28 @@
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.Writer;
import java.util.Map;
public class TestAuthenticationFilter extends TestCase {
@SuppressWarnings("unchecked")
- public void testConfiguration() {
+ public void testConfiguration() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.http.authentication.foo", "bar");
+
+ File testDir = new File(System.getProperty("test.build.data",
+ "target/test-dir"));
+ testDir.mkdirs();
+ File secretFile = new File(testDir, "http-secret.txt");
+ Writer writer = new FileWriter(new File(testDir, "http-secret.txt"));
+ writer.write("hadoop");
+ writer.close();
+ conf.set(AuthenticationFilterInitializer.PREFIX +
+ AuthenticationFilterInitializer.SIGNATURE_SECRET_FILE,
+ secretFile.getAbsolutePath());
FilterContainer container = Mockito.mock(FilterContainer.class);
Mockito.doAnswer(
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 290d33d02f..aa9b43f7fd 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -76,6 +76,9 @@
https://repository.apache.org/content/repositories/snapshots1.0.3
+
+    <test.build.dir>${project.build.directory}/test-dir</test.build.dir>
+    <test.build.data>${test.build.dir}</test.build.data>
@@ -554,6 +557,25 @@
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>create-testdirs</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <mkdir dir="${test.build.dir}"/>
+                <mkdir dir="${test.build.data}"/>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-compiler-plugin</artifactId>
From d00b3c49f6fb3f6a617add6203c6b55f6c345940 Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Wed, 21 Sep 2011 18:28:23 +0000
Subject: [PATCH 14/68] MAPREDUCE-2880. Improved classpath-construction for
mapreduce AM and containers. Contributed by Arun C Murthy.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173783 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../hadoop/mapred/MapReduceChildJVM.java | 216 +++++++++---------
.../org/apache/hadoop/mapred/YarnChild.java | 7 +-
.../hadoop/mapreduce/v2/app/MRAppMaster.java | 7 +-
.../mapreduce/v2/app/job/impl/JobImpl.java | 6 +-
.../v2/app/job/impl/TaskAttemptImpl.java | 83 +++----
.../v2/app/speculate/DefaultSpeculator.java | 3 +-
.../hadoop/mapreduce/v2/MRConstants.java | 50 ----
.../hadoop/mapreduce/v2/util/MRApps.java | 115 ++++++----
.../hadoop/mapreduce/v2/util/TestMRApps.java | 4 +-
.../org/apache/hadoop/mapred/BackupStore.java | 3 +-
.../org/apache/hadoop/mapred/Constants.java | 27 ---
.../org/apache/hadoop/mapred/JobConf.java | 1 +
.../org/apache/hadoop/mapred/MRConstants.java | 9 +-
.../apache/hadoop/mapred/MROutputFiles.java | 23 +-
.../apache/hadoop/mapreduce/MRJobConfig.java | 62 +++++
.../hadoop/mapred/ResourceMgrDelegate.java | 4 +-
.../org/apache/hadoop/mapred/YARNRunner.java | 48 ++--
.../mapreduce/v2/MiniMRYarnCluster.java | 8 +-
.../hadoop/mapreduce/v2/TestMRJobs.java | 2 +-
.../hadoop/yarn/api/ApplicationConstants.java | 113 +++++++++
.../hadoop/yarn/conf/YarnConfiguration.java | 6 +
.../nodemanager/DefaultContainerExecutor.java | 6 +-
.../nodemanager/LinuxContainerExecutor.java | 6 +-
.../launcher/ContainerLaunch.java | 103 +++++++--
25 files changed, 557 insertions(+), 358 deletions(-)
delete mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/MRConstants.java
delete mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Constants.java
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 36a5f24c03..6977854d41 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -321,6 +321,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-901. Efficient framework counters. (llu via acmurthy)
+ MAPREDUCE-2880. Improve classpath-construction for mapreduce AM and
+ containers. (Arun C Murthy via vinodkv)
+
BUG FIXES
MAPREDUCE-2603. Disable High-Ram emulation in system tests.
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
index d9d5b1f307..fc25d06da2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
@@ -18,27 +18,25 @@
package org.apache.hadoop.mapred;
-import java.io.File;
import java.net.InetSocketAddress;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Vector;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.TaskLog.LogName;
import org.apache.hadoop.mapreduce.ID;
-import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
public class MapReduceChildJVM {
- private static final String SYSTEM_PATH_SEPARATOR =
- System.getProperty("path.separator");
- private static final Log LOG = LogFactory.getLog(MapReduceChildJVM.class);
-
- private static File getTaskLogFile(String logDir, LogName filter) {
- return new File(logDir, filter.toString());
+ private static String getTaskLogFile(LogName filter) {
+ return ApplicationConstants.LOG_DIR_EXPANSION_VAR + Path.SEPARATOR +
+ filter.toString();
}
private static String getChildEnv(JobConf jobConf, boolean isMap) {
@@ -50,32 +48,53 @@ private static String getChildEnv(JobConf jobConf, boolean isMap) {
jobConf.get(jobConf.MAPRED_TASK_ENV));
}
-  public static void setVMEnv(Map<String, String> env,
-      List<String> classPaths, String pwd, String containerLogDir,
-      String nmLdLibraryPath, Task task, CharSequence applicationTokensFile) {
+ private static String getChildLogLevel(JobConf conf, boolean isMap) {
+ if (isMap) {
+ return conf.get(
+ MRJobConfig.MAP_LOG_LEVEL,
+ JobConf.DEFAULT_LOG_LEVEL.toString()
+ );
+ } else {
+ return conf.get(
+ MRJobConfig.REDUCE_LOG_LEVEL,
+ JobConf.DEFAULT_LOG_LEVEL.toString()
+ );
+ }
+ }
+
+  public static void setVMEnv(Map<String, String> environment,
+      Task task) {
JobConf conf = task.conf;
- // Add classpath.
- CharSequence cp = env.get("CLASSPATH");
- String classpath = StringUtils.join(SYSTEM_PATH_SEPARATOR, classPaths);
- if (null == cp) {
- env.put("CLASSPATH", classpath);
- } else {
- env.put("CLASSPATH", classpath + SYSTEM_PATH_SEPARATOR + cp);
- }
+ // Shell
+ environment.put(
+ Environment.SHELL.name(),
+ conf.get(
+ MRJobConfig.MAPRED_ADMIN_USER_SHELL,
+ MRJobConfig.DEFAULT_SHELL)
+ );
+
+ // Add pwd to LD_LIBRARY_PATH, add this before adding anything else
+ MRApps.addToEnvironment(
+ environment,
+ Environment.LD_LIBRARY_PATH.name(),
+ Environment.PWD.$());
- /////// Environmental variable LD_LIBRARY_PATH
- StringBuilder ldLibraryPath = new StringBuilder();
+ // Add the env variables passed by the user & admin
+ String mapredChildEnv = getChildEnv(conf, task.isMapTask());
+ MRApps.setEnvFromInputString(environment, mapredChildEnv);
+ MRApps.setEnvFromInputString(
+ environment,
+ conf.get(
+ MRJobConfig.MAPRED_ADMIN_USER_ENV,
+ MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV)
+ );
- ldLibraryPath.append(nmLdLibraryPath);
- ldLibraryPath.append(SYSTEM_PATH_SEPARATOR);
- ldLibraryPath.append(pwd);
- env.put("LD_LIBRARY_PATH", ldLibraryPath.toString());
- /////// Environmental variable LD_LIBRARY_PATH
-
- // for the child of task jvm, set hadoop.root.logger
- env.put("HADOOP_ROOT_LOGGER", "DEBUG,CLA"); // TODO: Debug
+ // Set logging level
+ environment.put(
+ "HADOOP_ROOT_LOGGER",
+ getChildLogLevel(conf, task.isMapTask()) + ",CLA");
// TODO: The following is useful for instance in streaming tasks. Should be
// set in ApplicationMaster's env by the RM.
@@ -89,76 +108,69 @@ public static void setVMEnv(Map env,
// properties.
long logSize = TaskLog.getTaskLogLength(conf);
     Vector<String> logProps = new Vector<String>(4);
-    setupLog4jProperties(logProps, logSize, containerLogDir);
+    setupLog4jProperties(logProps, logSize);
     Iterator<String> it = logProps.iterator();
StringBuffer buffer = new StringBuffer();
while (it.hasNext()) {
buffer.append(" " + it.next());
}
hadoopClientOpts = hadoopClientOpts + buffer.toString();
-
- env.put("HADOOP_CLIENT_OPTS", hadoopClientOpts);
+ environment.put("HADOOP_CLIENT_OPTS", hadoopClientOpts);
- // add the env variables passed by the user
- String mapredChildEnv = getChildEnv(conf, task.isMapTask());
- if (mapredChildEnv != null && mapredChildEnv.length() > 0) {
- String childEnvs[] = mapredChildEnv.split(",");
- for (String cEnv : childEnvs) {
- String[] parts = cEnv.split("="); // split on '='
- String value = (String) env.get(parts[0]);
- if (value != null) {
- // replace $env with the child's env constructed by tt's
- // example LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
- value = parts[1].replace("$" + parts[0], value);
- } else {
- // this key is not configured by the tt for the child .. get it
- // from the tt's env
- // example PATH=$PATH:/tmp
- value = System.getenv(parts[0]); // Get from NM?
- if (value != null) {
- // the env key is present in the tt's env
- value = parts[1].replace("$" + parts[0], value);
- } else {
- // the env key is note present anywhere .. simply set it
- // example X=$X:/tmp or X=/tmp
- value = parts[1].replace("$" + parts[0], "");
- }
- }
- env.put(parts[0], value);
- }
- }
-
- //This should not be set here (If an OS check is requied. moved to ContainerLuanch)
- // env.put("JVM_PID", "`echo $$`");
-
- env.put(Constants.STDOUT_LOGFILE_ENV,
- getTaskLogFile(containerLogDir, TaskLog.LogName.STDOUT).toString());
- env.put(Constants.STDERR_LOGFILE_ENV,
- getTaskLogFile(containerLogDir, TaskLog.LogName.STDERR).toString());
+ // Add stdout/stderr env
+ environment.put(
+ MRJobConfig.STDOUT_LOGFILE_ENV,
+ getTaskLogFile(TaskLog.LogName.STDOUT)
+ );
+ environment.put(
+ MRJobConfig.STDERR_LOGFILE_ENV,
+ getTaskLogFile(TaskLog.LogName.STDERR)
+ );
}
private static String getChildJavaOpts(JobConf jobConf, boolean isMapTask) {
+ String userClasspath = "";
+ String adminClasspath = "";
if (isMapTask) {
- return jobConf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, jobConf.get(
- JobConf.MAPRED_TASK_JAVA_OPTS,
- JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
+ userClasspath =
+ jobConf.get(
+ JobConf.MAPRED_MAP_TASK_JAVA_OPTS,
+ jobConf.get(
+ JobConf.MAPRED_TASK_JAVA_OPTS,
+ JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)
+ );
+ adminClasspath =
+ jobConf.get(
+ MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,
+ MRJobConfig.DEFAULT_MAPRED_ADMIN_JAVA_OPTS);
+ } else {
+ userClasspath =
+ jobConf.get(
+ JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,
+ jobConf.get(
+ JobConf.MAPRED_TASK_JAVA_OPTS,
+ JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)
+ );
+ adminClasspath =
+ jobConf.get(
+ MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS,
+ MRJobConfig.DEFAULT_MAPRED_ADMIN_JAVA_OPTS);
}
- return jobConf
- .get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, jobConf.get(
- JobConf.MAPRED_TASK_JAVA_OPTS,
- JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
+
+ // Add admin classpath first so it can be overridden by user.
+ return adminClasspath + " " + userClasspath;
}
private static void setupLog4jProperties(Vector vargs,
- long logSize, String containerLogDir) {
+ long logSize) {
vargs.add("-Dlog4j.configuration=container-log4j.properties");
- vargs.add("-Dhadoop.yarn.mr.containerLogDir=" + containerLogDir);
+ vargs.add("-Dhadoop.yarn.mr.containerLogDir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
vargs.add("-Dhadoop.yarn.mr.totalLogFileSize=" + logSize);
}
   public static List<String> getVMCommand(
- InetSocketAddress taskAttemptListenerAddr, Task task, String javaHome,
- String workDir, String logDir, String childTmpDir, ID jvmID) {
+ InetSocketAddress taskAttemptListenerAddr, Task task,
+ ID jvmID) {
TaskAttemptID attemptID = task.getTaskID();
JobConf conf = task.conf;
@@ -166,7 +178,7 @@ public static List getVMCommand(
     Vector<String> vargs = new Vector<String>(8);
vargs.add("exec");
- vargs.add(javaHome + "/bin/java");
+ vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
// Add child (task) java-vm options.
//
@@ -199,44 +211,26 @@ public static List getVMCommand(
String javaOpts = getChildJavaOpts(conf, task.isMapTask());
javaOpts = javaOpts.replace("@taskid@", attemptID.toString());
String [] javaOptsSplit = javaOpts.split(" ");
-
- // Add java.library.path; necessary for loading native libraries.
- //
- // 1. We add the 'cwd' of the task to it's java.library.path to help
- // users distribute native libraries via the DistributedCache.
- // 2. The user can also specify extra paths to be added to the
- // java.library.path via mapred.{map|reduce}.child.java.opts.
- //
- String libraryPath = workDir;
- boolean hasUserLDPath = false;
- for(int i=0; i getVMCommand(
// Finally add the jvmID
vargs.add(String.valueOf(jvmID.getId()));
- vargs.add("1>" + getTaskLogFile(logDir, TaskLog.LogName.STDERR));
- vargs.add("2>" + getTaskLogFile(logDir, TaskLog.LogName.STDOUT));
+ vargs.add("1>" + getTaskLogFile(TaskLog.LogName.STDERR));
+ vargs.add("2>" + getTaskLogFile(TaskLog.LogName.STDOUT));
// Final commmand
StringBuilder mergedCommand = new StringBuilder();
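The rewritten getChildJavaOpts() above concatenates the admin opts ahead of the user opts; because the JVM honours the later occurrence of a duplicated flag, per-job settings still win. A minimal, JDK-only illustration follows; the flag values are examples and not defaults from the patch.

    // Illustration of the admin-before-user ordering used by getChildJavaOpts():
    // when the same flag (e.g. -Xmx) appears twice, HotSpot takes the later one,
    // so the user's per-job value overrides the cluster-wide admin value.
    public class JavaOptsOrdering {
      public static void main(String[] args) {
        String adminOpts = "-Djava.net.preferIPv4Stack=true -Xmx200m"; // cluster default (example)
        String userOpts  = "-Xmx512m";                                 // per-job setting (example)
        String vmOpts = adminOpts + " " + userOpts;                    // admin first, user last
        System.out.println("exec java " + vmOpts + " ...");            // -Xmx512m wins
      }
    }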
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
index 3021004f9d..0ab220bf38 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
@@ -47,7 +47,6 @@
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.security.Credentials;
@@ -71,7 +70,7 @@ public static void main(String[] args) throws Throwable {
LOG.debug("Child starting");
final JobConf defaultConf = new JobConf();
- defaultConf.addResource(MRConstants.JOB_CONF_FILE);
+ defaultConf.addResource(MRJobConfig.JOB_CONF_FILE);
UserGroupInformation.setConfiguration(defaultConf);
String host = args[0];
@@ -238,7 +237,7 @@ private static void configureLocalDirs(Task task, JobConf job) {
private static JobConf configureTask(Task task, Credentials credentials,
       Token<JobTokenIdentifier> jt) throws IOException {
- final JobConf job = new JobConf(MRConstants.JOB_CONF_FILE);
+ final JobConf job = new JobConf(MRJobConfig.JOB_CONF_FILE);
job.setCredentials(credentials);
// set tcp nodelay
job.setBoolean("ipc.client.tcpnodelay", true);
@@ -260,7 +259,7 @@ private static JobConf configureTask(Task task, Credentials credentials,
// Overwrite the localized task jobconf which is linked to in the current
// work-dir.
- Path localTaskFile = new Path(Constants.JOBFILE);
+ Path localTaskFile = new Path(MRJobConfig.JOB_CONF_FILE);
writeLocalJobFile(localTaskFile, job);
task.setJobFile(localTaskFile.toString());
task.setConf(job);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index 20c7e9779e..41a86f1271 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -39,7 +39,6 @@
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
@@ -243,10 +242,10 @@ protected Job createJob(Configuration conf) {
// Read the file-system tokens from the localized tokens-file.
Path jobSubmitDir =
FileContext.getLocalFSFileContext().makeQualified(
- new Path(new File(MRConstants.JOB_SUBMIT_DIR)
+ new Path(new File(MRJobConfig.JOB_SUBMIT_DIR)
.getAbsolutePath()));
Path jobTokenFile =
- new Path(jobSubmitDir, MRConstants.APPLICATION_TOKENS_FILE);
+ new Path(jobSubmitDir, MRJobConfig.APPLICATION_TOKENS_FILE);
fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile="
+ jobTokenFile);
@@ -658,7 +657,7 @@ public static void main(String[] args) {
Runtime.getRuntime().addShutdownHook(
new CompositeServiceShutdownHook(appMaster));
YarnConfiguration conf = new YarnConfiguration(new JobConf());
- conf.addResource(new Path(MRConstants.JOB_CONF_FILE));
+ conf.addResource(new Path(MRJobConfig.JOB_CONF_FILE));
conf.set(MRJobConfig.USER_NAME,
System.getProperty("user.name"));
UserGroupInformation.setConfiguration(conf);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index 69de493b16..e822cab80d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -42,6 +42,7 @@
import org.apache.hadoop.mapred.FileOutputCommitter;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MapReduceChildJVM;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -64,7 +65,6 @@
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
@@ -1007,7 +1007,7 @@ protected void setup(JobImpl job) throws IOException {
FileSystem.get(job.conf).makeQualified(
new Path(path, oldJobIDString));
job.remoteJobConfFile =
- new Path(job.remoteJobSubmitDir, MRConstants.JOB_CONF_FILE);
+ new Path(job.remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
// Prepare the TaskAttemptListener server for authentication of Containers
// TaskAttemptListener gets the information via jobTokenSecretManager.
@@ -1033,7 +1033,7 @@ protected void setup(JobImpl job) throws IOException {
Path remoteJobTokenFile =
new Path(job.remoteJobSubmitDir,
- MRConstants.APPLICATION_TOKENS_FILE);
+ MRJobConfig.APPLICATION_TOKENS_FILE);
tokenStorage.writeTokenStorageFile(remoteJobTokenFile, job.conf);
LOG.info("Writing back the job-token file on the remote file system:"
+ remoteJobTokenFile.toString());
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 95c344bfbe..495d00e22c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -21,7 +21,6 @@
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
-import java.net.URI;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
@@ -62,7 +61,6 @@
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
@@ -103,6 +101,7 @@
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerToken;
@@ -117,7 +116,6 @@
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
-import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.RackResolver;
@@ -518,8 +516,8 @@ private String getInitialClasspath() throws IOException {
return initialClasspath;
}
     Map<String, String> env = new HashMap<String, String>();
- MRApps.setInitialClasspath(env);
- initialClasspath = env.get(MRApps.CLASSPATH);
+ MRApps.setClasspath(env);
+ initialClasspath = env.get(Environment.CLASSPATH.name());
initialClasspathFlag.set(true);
return initialClasspath;
}
@@ -531,16 +529,18 @@ private String getInitialClasspath() throws IOException {
*/
private ContainerLaunchContext createContainerLaunchContext() {
- ContainerLaunchContext container =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
// Application resources
     Map<String, LocalResource> localResources =
         new HashMap<String, LocalResource>();
     // Application environment
     Map<String, String> environment = new HashMap<String, String>();
-
+
+ // Service data
+    Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
+
+ // Tokens
+ ByteBuffer tokens = ByteBuffer.wrap(new byte[]{});
try {
FileSystem remoteFS = FileSystem.get(conf);
@@ -550,7 +550,7 @@ private ContainerLaunchContext createContainerLaunchContext() {
MRJobConfig.JAR))).makeQualified(remoteFS.getUri(),
remoteFS.getWorkingDirectory());
localResources.put(
- MRConstants.JOB_JAR,
+ MRJobConfig.JOB_JAR,
createLocalResource(remoteFS, recordFactory, remoteJobJar,
LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
LOG.info("The job-jar file on the remote FS is "
@@ -570,9 +570,9 @@ private ContainerLaunchContext createContainerLaunchContext() {
Path remoteJobSubmitDir =
new Path(path, oldJobId.toString());
Path remoteJobConfPath =
- new Path(remoteJobSubmitDir, MRConstants.JOB_CONF_FILE);
+ new Path(remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
localResources.put(
- MRConstants.JOB_CONF_FILE,
+ MRJobConfig.JOB_CONF_FILE,
createLocalResource(remoteFS, recordFactory, remoteJobConfPath,
LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
LOG.info("The job-conf file on the remote FS is "
@@ -580,12 +580,8 @@ private ContainerLaunchContext createContainerLaunchContext() {
// //////////// End of JobConf setup
// Setup DistributedCache
- MRApps.setupDistributedCache(conf, localResources, environment);
+ MRApps.setupDistributedCache(conf, localResources);
- // Set local-resources and environment
- container.setLocalResources(localResources);
- container.setEnvironment(environment);
-
// Setup up tokens
Credentials taskCredentials = new Credentials();
@@ -606,52 +602,43 @@ private ContainerLaunchContext createContainerLaunchContext() {
LOG.info("Size of containertokens_dob is "
+ taskCredentials.numberOfTokens());
taskCredentials.writeTokenStorageToStream(containerTokens_dob);
- container.setContainerTokens(
+ tokens =
ByteBuffer.wrap(containerTokens_dob.getData(), 0,
- containerTokens_dob.getLength()));
+ containerTokens_dob.getLength());
// Add shuffle token
LOG.info("Putting shuffle token in serviceData");
-      Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
serviceData.put(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID,
ShuffleHandler.serializeServiceData(jobToken));
- container.setServiceData(serviceData);
- MRApps.addToClassPath(container.getEnvironment(), getInitialClasspath());
+ MRApps.addToEnvironment(
+ environment,
+ Environment.CLASSPATH.name(),
+ getInitialClasspath());
} catch (IOException e) {
throw new YarnException(e);
}
+
+ // Setup environment
+ MapReduceChildJVM.setVMEnv(environment, remoteTask);
+
+ // Set up the launch command
+    List<String> commands = MapReduceChildJVM.getVMCommand(
+ taskAttemptListener.getAddress(), remoteTask,
+ jvmID);
- container.setContainerId(containerID);
- container.setUser(conf.get(MRJobConfig.USER_NAME)); // TODO: Fix
-
- File workDir = new File("$PWD"); // Will be expanded by the shell.
- String containerLogDir =
- new File(ApplicationConstants.LOG_DIR_EXPANSION_VAR).toString();
- String childTmpDir = new File(workDir, "tmp").toString();
- String javaHome = "${JAVA_HOME}"; // Will be expanded by the shell.
- String nmLdLibraryPath = "{LD_LIBRARY_PATH}"; // Expanded by the shell?
-    List<String> classPaths = new ArrayList<String>();
-
- String localizedApplicationTokensFile =
- new File(workDir, MRConstants.APPLICATION_TOKENS_FILE).toString();
- classPaths.add(MRConstants.JOB_JAR);
- classPaths.add(MRConstants.YARN_MAPREDUCE_APP_JAR_PATH);
- classPaths.add(workDir.toString()); // TODO
-
- // Construct the actual Container
- container.setCommands(MapReduceChildJVM.getVMCommand(
- taskAttemptListener.getAddress(), remoteTask, javaHome,
- workDir.toString(), containerLogDir, childTmpDir, jvmID));
-
- MapReduceChildJVM.setVMEnv(container.getEnvironment(), classPaths,
- workDir.toString(), containerLogDir, nmLdLibraryPath, remoteTask,
- localizedApplicationTokensFile);
-
// Construct the actual Container
+ ContainerLaunchContext container =
+ recordFactory.newRecordInstance(ContainerLaunchContext.class);
container.setContainerId(containerID);
container.setUser(conf.get(MRJobConfig.USER_NAME));
container.setResource(assignedCapability);
+ container.setLocalResources(localResources);
+ container.setEnvironment(environment);
+ container.setCommands(commands);
+ container.setServiceData(serviceData);
+ container.setContainerTokens(tokens);
+
return container;
}
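The rewritten createContainerLaunchContext() now collects local resources, environment, launch command, service data and tokens first, and only then instantiates and populates a single ContainerLaunchContext record. A condensed, JDK-only sketch of that assemble-then-populate order follows; the map value types and example values are simplified stand-ins, not the YARN records used above.

    // JDK-only sketch of the assembly order in createContainerLaunchContext().
    import java.nio.ByteBuffer;
    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class LaunchContextAssembly {
      public static void main(String[] args) {
        Map<String, String> localResources = new HashMap<String, String>();      // job.jar, job.xml, dist-cache
        Map<String, String> environment = new HashMap<String, String>();          // CLASSPATH, SHELL, log level
        Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();  // shuffle token
        ByteBuffer tokens = ByteBuffer.wrap(new byte[]{});                        // serialized credentials
        List<String> commands = new ArrayList<String>();                          // the java launch command

        localResources.put("job.jar", "hdfs:///user/alice/.staging/job_x/job.jar"); // example value
        environment.put("CLASSPATH", "job.jar:$PWD/*");
        commands.add("exec $JAVA_HOME/bin/java ... org.apache.hadoop.mapred.YarnChild ...");

        // Only after everything is gathered is the single record populated,
        // mirroring the setLocalResources/setEnvironment/setCommands/
        // setServiceData/setContainerTokens calls at the end of the method above.
        System.out.println(localResources.size() + " resources, " + environment.size()
            + " env entries, " + commands.size() + " command(s), " + serviceData.size()
            + " service(s), " + tokens.remaining() + " token bytes");
      }
    }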
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
index feb019fe16..ab7d23ef9d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
@@ -35,7 +35,6 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
@@ -87,7 +86,7 @@ public class DefaultSpeculator extends AbstractService implements
private final ConcurrentMap reduceContainerNeeds
= new ConcurrentHashMap();
- private final Set mayHaveSpeculated = new HashSet();
+ private final Set mayHaveSpeculated = new HashSet();
private final Configuration conf;
private AppContext context;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/MRConstants.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/MRConstants.java
deleted file mode 100644
index 6ac05361dc..0000000000
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/MRConstants.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.mapreduce.v2;
-
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface MRConstants {
- // This should be the directory where splits file gets localized on the node
- // running ApplicationMaster.
- public static final String JOB_SUBMIT_DIR = "jobSubmitDir";
-
- // This should be the name of the localized job-configuration file on the node
- // running ApplicationMaster and Task
- public static final String JOB_CONF_FILE = "job.xml";
- // This should be the name of the localized job-jar file on the node running
- // individual containers/tasks.
- public static final String JOB_JAR = "job.jar";
-
- public static final String HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME =
- "hadoop-mapreduce-client-app-0.24.0-SNAPSHOT.jar";
-
- public static final String YARN_MAPREDUCE_APP_JAR_PATH =
- "$YARN_HOME/modules/" + HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME;
-
- // The token file for the application. Should contain tokens for access to
- // remote file system and may optionally contain application specific tokens.
- // For now, generated by the AppManagers and used by NodeManagers and the
- // Containers.
- public static final String APPLICATION_TOKENS_FILE = "appTokens";
-}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index 68499497ac..9094da39ba 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -39,14 +39,14 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
@@ -167,7 +167,7 @@ public static TaskAttemptStateUI taskAttemptState(String attemptStateStr) {
return TaskAttemptStateUI.valueOf(attemptStateStr);
}
- public static void setInitialClasspath(
+ private static void setMRFrameworkClasspath(
       Map<String, String> environment) throws IOException {
InputStream classpathFileStream = null;
BufferedReader reader = null;
@@ -182,30 +182,17 @@ public static void setInitialClasspath(
reader = new BufferedReader(new InputStreamReader(classpathFileStream));
String cp = reader.readLine();
if (cp != null) {
- addToClassPath(environment, cp.trim());
+ addToEnvironment(environment, Environment.CLASSPATH.name(), cp.trim());
}
// Put the file itself on classpath for tasks.
- addToClassPath(environment,
+ addToEnvironment(
+ environment,
+ Environment.CLASSPATH.name(),
thisClassLoader.getResource(mrAppGeneratedClasspathFile).getFile());
- // If runtime env is different.
- if (System.getenv().get("YARN_HOME") != null) {
- ShellCommandExecutor exec =
- new ShellCommandExecutor(new String[] {
- System.getenv().get("YARN_HOME") + "/bin/yarn",
- "classpath" });
- exec.execute();
- addToClassPath(environment, exec.getOutput().trim());
- }
-
- // Get yarn mapreduce-app classpath
- if (System.getenv().get("HADOOP_MAPRED_HOME")!= null) {
- ShellCommandExecutor exec =
- new ShellCommandExecutor(new String[] {
- System.getenv().get("HADOOP_MAPRED_HOME") + "/bin/mapred",
- "classpath" });
- exec.execute();
- addToClassPath(environment, exec.getOutput().trim());
+ // Add standard Hadoop classes
+ for (String c : ApplicationConstants.APPLICATION_CLASSPATH) {
+ addToEnvironment(environment, Environment.CLASSPATH.name(), c);
}
} finally {
if (classpathFileStream != null) {
@@ -217,20 +204,35 @@ public static void setInitialClasspath(
}
// TODO: Remove duplicates.
}
+
+ private static final String SYSTEM_PATH_SEPARATOR =
+ System.getProperty("path.separator");
- public static void addToClassPath(
-      Map<String, String> environment, String fileName) {
- String classpath = environment.get(CLASSPATH);
- if (classpath == null) {
- classpath = fileName;
+ public static void addToEnvironment(
+ Map environment,
+ String variable, String value) {
+ String val = environment.get(variable);
+ if (val == null) {
+ val = value;
} else {
- classpath = classpath + ":" + fileName;
+ val = val + SYSTEM_PATH_SEPARATOR + value;
}
- environment.put(CLASSPATH, classpath);
+ environment.put(variable, val);
}
- public static final String CLASSPATH = "CLASSPATH";
-
+  public static void setClasspath(Map<String, String> environment)
+ throws IOException {
+ MRApps.addToEnvironment(
+ environment,
+ Environment.CLASSPATH.name(),
+ MRJobConfig.JOB_JAR);
+ MRApps.addToEnvironment(
+ environment,
+ Environment.CLASSPATH.name(),
+ Environment.PWD.$() + Path.SEPARATOR + "*");
+ MRApps.setMRFrameworkClasspath(environment);
+ }
+
private static final String STAGING_CONSTANT = ".staging";
public static Path getStagingAreaDir(Configuration conf, String user) {
return new Path(
@@ -241,7 +243,7 @@ public static Path getStagingAreaDir(Configuration conf, String user) {
public static String getJobFile(Configuration conf, String user,
org.apache.hadoop.mapreduce.JobID jobId) {
Path jobFile = new Path(MRApps.getStagingAreaDir(conf, user),
- jobId.toString() + Path.SEPARATOR + MRConstants.JOB_CONF_FILE);
+ jobId.toString() + Path.SEPARATOR + MRJobConfig.JOB_CONF_FILE);
return jobFile.toString();
}
@@ -260,12 +262,11 @@ private static long[] parseTimeStamps(String[] strs) {
public static void setupDistributedCache(
Configuration conf,
-      Map<String, LocalResource> localResources,
-      Map<String, String> env)
+      Map<String, LocalResource> localResources)
throws IOException {
// Cache archives
- parseDistributedCacheArtifacts(conf, localResources, env,
+ parseDistributedCacheArtifacts(conf, localResources,
LocalResourceType.ARCHIVE,
DistributedCache.getCacheArchives(conf),
parseTimeStamps(DistributedCache.getArchiveTimestamps(conf)),
@@ -275,7 +276,7 @@ public static void setupDistributedCache(
// Cache files
parseDistributedCacheArtifacts(conf,
- localResources, env,
+ localResources,
LocalResourceType.FILE,
DistributedCache.getCacheFiles(conf),
parseTimeStamps(DistributedCache.getFileTimestamps(conf)),
@@ -290,7 +291,6 @@ public static void setupDistributedCache(
private static void parseDistributedCacheArtifacts(
Configuration conf,
       Map<String, LocalResource> localResources,
-      Map<String, String> env,
LocalResourceType type,
URI[] uris, long[] timestamps, long[] sizes, boolean visibilities[],
Path[] pathsToPutOnClasspath) throws IOException {
@@ -339,9 +339,6 @@ private static void parseDistributedCacheArtifacts(
: LocalResourceVisibility.PRIVATE,
sizes[i], timestamps[i])
);
- if (classPaths.containsKey(u.getPath())) {
- MRApps.addToClassPath(env, linkName);
- }
}
}
}
@@ -358,6 +355,42 @@ private static long[] getFileSizes(Configuration conf, String key) {
}
return result;
}
+
+  public static void setEnvFromInputString(Map<String, String> env,
+ String envString) {
+ if (envString != null && envString.length() > 0) {
+ String childEnvs[] = envString.split(",");
+ for (String cEnv : childEnvs) {
+ String[] parts = cEnv.split("="); // split on '='
+ String value = env.get(parts[0]);
+
+ if (value != null) {
+ // Replace $env with the child's env constructed by NM's
+ // For example: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
+ value = parts[1].replace("$" + parts[0], value);
+ } else {
+ // example PATH=$PATH:/tmp
+ value = System.getenv(parts[0]);
+ if (value != null) {
+ // the env key is present in the tt's env
+ value = parts[1].replace("$" + parts[0], value);
+ } else {
+ // check for simple variable substitution
+ // for e.g. ROOT=$HOME
+ String envValue = System.getenv(parts[1].substring(1));
+ if (envValue != null) {
+ value = envValue;
+ } else {
+ // the env key is note present anywhere .. simply set it
+ // example X=$X:/tmp or X=/tmp
+ value = parts[1].replace("$" + parts[0], "");
+ }
+ }
+ }
+ addToEnvironment(env, parts[0], value);
+ }
+ }
+ }
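A minimal usage sketch for the setEnvFromInputString() helper added above: the input is a comma-separated list of NAME=VALUE pairs, and $NAME references are expanded from the map being built or, failing that, from the parent process environment. The stand-in method below is a simplified local copy so the sketch runs without Hadoop on the classpath; it covers only the straightforward expansion path, not every branch of the real helper.

    import java.util.HashMap;
    import java.util.Map;

    public class ChildEnvExample {
      public static void main(String[] args) {
        Map<String, String> env = new HashMap<String, String>();
        // Same string format as the mapred.child.env-style settings.
        setEnvFromInputString(env, "PATH=$PATH:/opt/myjob/bin,MY_FLAG=1");
        System.out.println(env); // PATH = <inherited PATH>:/opt/myjob/bin, MY_FLAG = 1
      }

      // Simplified stand-in with the same contract as the patched MRApps helper.
      static void setEnvFromInputString(Map<String, String> env, String envString) {
        for (String pair : envString.split(",")) {
          String[] parts = pair.split("=");                      // NAME=VALUE
          String existing = env.containsKey(parts[0])
              ? env.get(parts[0]) : System.getenv(parts[0]);     // source for $NAME
          env.put(parts[0], parts[1].replace("$" + parts[0],
              existing == null ? "" : existing));
        }
      }
    }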
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
index 7a2ee00a92..1158998062 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
@@ -25,7 +25,6 @@
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -115,7 +114,8 @@ public class TestMRApps {
@Test public void testGetJobFileWithUser() {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/my/path/to/staging");
- String jobFile = MRApps.getJobFile(conf, "dummy-user", new JobID("dummy-job", 12345));
+ String jobFile = MRApps.getJobFile(conf, "dummy-user",
+ new JobID("dummy-job", 12345));
assertNotNull("getJobFile results in null.", jobFile);
assertEquals("jobFile with specified user is not as expected.",
"/my/path/to/staging/dummy-user/.staging/job_dummy-job_12345/job.xml", jobFile);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
index 026793c537..f409d2298e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.mapred.IFile.Writer;
import org.apache.hadoop.mapred.Merger.Segment;
import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
/**
@@ -560,7 +561,7 @@ void createInDiskSegment() throws IOException {
private Writer createSpillFile() throws IOException {
Path tmp =
- new Path(Constants.OUTPUT + "/backup_" + tid.getId() + "_"
+ new Path(MRJobConfig.OUTPUT + "/backup_" + tid.getId() + "_"
+ (spillNumber++) + ".out");
LOG.info("Created file: " + tmp);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Constants.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Constants.java
deleted file mode 100644
index e8a202ed44..0000000000
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Constants.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-public class Constants {
- static final String OUTPUT = "output";
- public static final String HADOOP_WORK_DIR = "HADOOP_WORK_DIR";
- public static final String JOBFILE = "job.xml";
- public static final String STDOUT_LOGFILE_ENV = "STDOUT_LOGFILE_ENV";
- public static final String STDERR_LOGFILE_ENV = "STDERR_LOGFILE_ENV";
-}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index 49d12d764d..b489d41b17 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -348,6 +348,7 @@ public class JobConf extends Configuration {
*/
public static final Level DEFAULT_LOG_LEVEL = Level.INFO;
+
/**
* Construct a map/reduce job configuration.
*/
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java
index e2c16fbfac..806b856652 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java
@@ -17,11 +17,16 @@
*/
package org.apache.hadoop.mapred;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
/*******************************
* Some handy constants
*
*******************************/
-interface MRConstants {
+@Private
+@Unstable
+public class MRConstants {
//
// Timeouts, constants
//
@@ -52,6 +57,4 @@ interface MRConstants {
* The reduce task number for which this map output is being transferred
*/
public static final String FOR_REDUCE_TASK = "for-reduce-task";
-
- public static final String WORKDIR = "work";
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MROutputFiles.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MROutputFiles.java
index e81e11d3fb..a9e25f287d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MROutputFiles.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MROutputFiles.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* Manipulate the working area for the transient store for maps and reduces.
@@ -54,7 +55,7 @@ public MROutputFiles() {
@Override
public Path getOutputFile()
throws IOException {
- return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + Path.SEPARATOR
+ return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + Path.SEPARATOR
+ MAP_OUTPUT_FILENAME_STRING, getConf());
}
@@ -68,7 +69,7 @@ public Path getOutputFile()
@Override
public Path getOutputFileForWrite(long size)
throws IOException {
- return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + Path.SEPARATOR
+ return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + Path.SEPARATOR
+ MAP_OUTPUT_FILENAME_STRING, size, getConf());
}
@@ -89,7 +90,7 @@ public Path getOutputFileForWriteInVolume(Path existing) {
@Override
public Path getOutputIndexFile()
throws IOException {
- return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + Path.SEPARATOR
+ return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + Path.SEPARATOR
+ MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING,
getConf());
}
@@ -104,7 +105,7 @@ public Path getOutputIndexFile()
@Override
public Path getOutputIndexFileForWrite(long size)
throws IOException {
- return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + Path.SEPARATOR
+ return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + Path.SEPARATOR
+ MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING,
size, getConf());
}
@@ -128,7 +129,7 @@ public Path getOutputIndexFileForWriteInVolume(Path existing) {
@Override
public Path getSpillFile(int spillNumber)
throws IOException {
- return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + "/spill"
+ return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + "/spill"
+ spillNumber + ".out", getConf());
}
@@ -143,7 +144,7 @@ public Path getSpillFile(int spillNumber)
@Override
public Path getSpillFileForWrite(int spillNumber, long size)
throws IOException {
- return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + "/spill"
+ return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + "/spill"
+ spillNumber + ".out", size, getConf());
}
@@ -157,7 +158,7 @@ public Path getSpillFileForWrite(int spillNumber, long size)
@Override
public Path getSpillIndexFile(int spillNumber)
throws IOException {
- return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + "/spill"
+ return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + "/spill"
+ spillNumber + ".out.index", getConf());
}
@@ -172,7 +173,7 @@ public Path getSpillIndexFile(int spillNumber)
@Override
public Path getSpillIndexFileForWrite(int spillNumber, long size)
throws IOException {
- return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + "/spill"
+ return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + "/spill"
+ spillNumber + ".out.index", size, getConf());
}
@@ -187,7 +188,7 @@ public Path getSpillIndexFileForWrite(int spillNumber, long size)
public Path getInputFile(int mapId)
throws IOException {
return lDirAlloc.getLocalPathToRead(String.format(
- REDUCE_INPUT_FILE_FORMAT_STRING, Constants.OUTPUT, Integer
+ REDUCE_INPUT_FILE_FORMAT_STRING, MRJobConfig.OUTPUT, Integer
.valueOf(mapId)), getConf());
}
@@ -204,7 +205,7 @@ public Path getInputFileForWrite(org.apache.hadoop.mapreduce.TaskID mapId,
long size)
throws IOException {
return lDirAlloc.getLocalPathForWrite(String.format(
- REDUCE_INPUT_FILE_FORMAT_STRING, Constants.OUTPUT, mapId.getId()),
+ REDUCE_INPUT_FILE_FORMAT_STRING, MRJobConfig.OUTPUT, mapId.getId()),
size, getConf());
}
@@ -212,7 +213,7 @@ public Path getInputFileForWrite(org.apache.hadoop.mapreduce.TaskID mapId,
@Override
public void removeAll()
throws IOException {
- ((JobConf)getConf()).deleteLocalFiles(Constants.OUTPUT);
+ ((JobConf)getConf()).deleteLocalFiles(MRJobConfig.OUTPUT);
}
@Override
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 33884bb82e..a493ed1cf7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -210,6 +210,8 @@ public interface MRJobConfig {
public static final String REDUCE_LOG_LEVEL = "mapreduce.reduce.log.level";
+ public static final String DEFAULT_LOG_LEVEL = "INFO";
+
public static final String REDUCE_MERGE_INMEM_THRESHOLD = "mapreduce.reduce.merge.inmem.threshold";
public static final String REDUCE_INPUT_BUFFER_PERCENT = "mapreduce.reduce.input.buffer.percent";
@@ -400,4 +402,64 @@ public interface MRJobConfig {
*/
public static final String MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR =
MR_AM_PREFIX + "create-intermediate-jh-base-dir";
+
+ public static final String MAPRED_MAP_ADMIN_JAVA_OPTS =
+ "mapreduce.admin.map.child.java.opts";
+
+ public static final String MAPRED_REDUCE_ADMIN_JAVA_OPTS =
+ "mapreduce.admin.reduce.child.java.opts";
+
+ public static final String DEFAULT_MAPRED_ADMIN_JAVA_OPTS =
+ "-Djava.net.preferIPv4Stack=true " +
+ "-Dhadoop.metrics.log.level=WARN ";
+
+ public static final String MAPRED_ADMIN_USER_SHELL =
+ "mapreduce.admin.user.shell";
+
+ public static final String DEFAULT_SHELL = "/bin/bash";
+
+ public static final String MAPRED_ADMIN_USER_ENV =
+ "mapreduce.admin.user.env";
+
+ public static final String DEFAULT_MAPRED_ADMIN_USER_ENV =
+ "LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib";
+
+ public static final String WORKDIR = "work";
+
+ public static final String OUTPUT = "output";
+
+ public static final String HADOOP_WORK_DIR = "HADOOP_WORK_DIR";
+
+ public static final String STDOUT_LOGFILE_ENV = "STDOUT_LOGFILE_ENV";
+
+ public static final String STDERR_LOGFILE_ENV = "STDERR_LOGFILE_ENV";
+
+ // This should be the directory where splits file gets localized on the node
+ // running ApplicationMaster.
+ public static final String JOB_SUBMIT_DIR = "jobSubmitDir";
+
+ // This should be the name of the localized job-configuration file on the node
+ // running ApplicationMaster and Task
+ public static final String JOB_CONF_FILE = "job.xml";
+
+ // This should be the name of the localized job-jar file on the node running
+ // individual containers/tasks.
+ public static final String JOB_JAR = "job.jar";
+
+ public static final String JOB_SPLIT = "job.split";
+
+ public static final String JOB_SPLIT_METAINFO = "job.splitmetainfo";
+
+ public static final String APPLICATION_MASTER_CLASS =
+ "org.apache.hadoop.mapreduce.v2.app.MRAppMaster";
+
+ // The token file for the application. Should contain tokens for access to
+ // remote file system and may optionally contain application specific tokens.
+ // For now, generated by the AppManagers and used by NodeManagers and the
+ // Containers.
+ public static final String APPLICATION_TOKENS_FILE = "appTokens";
+
+ public static final String MAPREDUCE_V2_CHILD_CLASS =
+ "org.apache.hadoop.mapred.YarnChild";
+
}
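An illustrative use of the new admin-side MRJobConfig knobs added above; the values set here are examples only, not defaults beyond those quoted in the constants themselves.

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class AdminOptsExample {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        // Cluster-wide JVM flags for map tasks; user opts are appended after
        // these by MapReduceChildJVM.getChildJavaOpts() and therefore win.
        conf.set(MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,
            "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN");
        // Extra environment entries applied to every task container.
        conf.set(MRJobConfig.MAPRED_ADMIN_USER_ENV,
            "LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib");
        // Shell used to launch the child JVM.
        conf.set(MRJobConfig.MAPRED_ADMIN_USER_SHELL, "/bin/bash");
      }
    }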
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 8e8081abe4..be5b862100 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -32,12 +32,12 @@
import org.apache.hadoop.mapreduce.ClusterMetrics;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.QueueAclsInfo;
import org.apache.hadoop.mapreduce.QueueInfo;
import org.apache.hadoop.mapreduce.TaskTrackerInfo;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityInfo;
@@ -254,7 +254,7 @@ public String getStagingAreaDir() throws IOException, InterruptedException {
public String getSystemDir() throws IOException, InterruptedException {
- Path sysDir = new Path(MRConstants.JOB_SUBMIT_DIR);
+ Path sysDir = new Path(MRJobConfig.JOB_SUBMIT_DIR);
//FileContext.getFileContext(conf).delete(sysDir, true);
return sysDir.toString();
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
index 82134c7520..a8853b928e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
@@ -51,7 +51,6 @@
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.Credentials;
@@ -60,6 +59,7 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationState;
@@ -210,7 +210,7 @@ public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
// Upload only in security mode: TODO
Path applicationTokensFile =
- new Path(jobSubmitDir, MRConstants.APPLICATION_TOKENS_FILE);
+ new Path(jobSubmitDir, MRJobConfig.APPLICATION_TOKENS_FILE);
try {
ts.writeTokenStorageFile(applicationTokensFile, conf);
} catch (IOException e) {
@@ -226,7 +226,9 @@ public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
ApplicationReport appMaster = resMgrDelegate
.getApplicationReport(applicationId);
- String diagnostics = (appMaster == null ? "application report is null" : appMaster.getDiagnostics());
+ String diagnostics =
+ (appMaster == null ?
+ "application report is null" : appMaster.getDiagnostics());
if (appMaster == null || appMaster.getState() == ApplicationState.FAILED
|| appMaster.getState() == ApplicationState.KILLED) {
throw new IOException("Failed to run job : " +
@@ -263,7 +265,7 @@ public ApplicationSubmissionContext createApplicationSubmissionContext(
    Map<String, LocalResource> localResources =
        new HashMap<String, LocalResource>();
- Path jobConfPath = new Path(jobSubmitDir, MRConstants.JOB_CONF_FILE);
+ Path jobConfPath = new Path(jobSubmitDir, MRJobConfig.JOB_CONF_FILE);
URL yarnUrlForJobSubmitDir = ConverterUtils
.getYarnUrlFromPath(defaultFileContext.getDefaultFileSystem()
@@ -272,13 +274,13 @@ public ApplicationSubmissionContext createApplicationSubmissionContext(
LOG.debug("Creating setup context, jobSubmitDir url is "
+ yarnUrlForJobSubmitDir);
- localResources.put(MRConstants.JOB_CONF_FILE,
+ localResources.put(MRJobConfig.JOB_CONF_FILE,
createApplicationResource(defaultFileContext,
jobConfPath));
if (jobConf.get(MRJobConfig.JAR) != null) {
- localResources.put(MRConstants.JOB_JAR,
+ localResources.put(MRJobConfig.JOB_JAR,
createApplicationResource(defaultFileContext,
- new Path(jobSubmitDir, MRConstants.JOB_JAR)));
+ new Path(jobSubmitDir, MRJobConfig.JOB_JAR)));
} else {
// Job jar may be null. For e.g, for pipes, the job jar is the hadoop
// mapreduce jar itself which is already on the classpath.
@@ -287,10 +289,12 @@ public ApplicationSubmissionContext createApplicationSubmissionContext(
}
// TODO gross hack
- for (String s : new String[] { "job.split", "job.splitmetainfo",
- MRConstants.APPLICATION_TOKENS_FILE }) {
+ for (String s : new String[] {
+ MRJobConfig.JOB_SPLIT,
+ MRJobConfig.JOB_SPLIT_METAINFO,
+ MRJobConfig.APPLICATION_TOKENS_FILE }) {
localResources.put(
- MRConstants.JOB_SUBMIT_DIR + "/" + s,
+ MRJobConfig.JOB_SUBMIT_DIR + "/" + s,
createApplicationResource(defaultFileContext,
new Path(jobSubmitDir, s)));
}
@@ -304,9 +308,8 @@ public ApplicationSubmissionContext createApplicationSubmissionContext(
}
// Setup the command to run the AM
- String javaHome = "$JAVA_HOME";
    Vector<CharSequence> vargs = new Vector<CharSequence>(8);
- vargs.add(javaHome + "/bin/java");
+ vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
vargs.add("-Dhadoop.root.logger="
+ conf.get(MRJobConfig.MR_AM_LOG_OPTS,
MRJobConfig.DEFAULT_MR_AM_LOG_OPTS) + ",console");
@@ -314,12 +317,15 @@ public ApplicationSubmissionContext createApplicationSubmissionContext(
vargs.add(conf.get(MRJobConfig.MR_AM_COMMAND_OPTS,
MRJobConfig.DEFAULT_MR_AM_COMMAND_OPTS));
- vargs.add("org.apache.hadoop.mapreduce.v2.app.MRAppMaster");
+ vargs.add(MRJobConfig.APPLICATION_MASTER_CLASS);
vargs.add(String.valueOf(applicationId.getClusterTimestamp()));
vargs.add(String.valueOf(applicationId.getId()));
vargs.add(ApplicationConstants.AM_FAIL_COUNT_STRING);
- vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout");
- vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");
+ vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
+ Path.SEPARATOR + ApplicationConstants.STDOUT);
+ vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
+ Path.SEPARATOR + ApplicationConstants.STDERR);
+
    Vector<String> vargsFinal = new Vector<String>(8);
// Final commmand
@@ -332,15 +338,13 @@ public ApplicationSubmissionContext createApplicationSubmissionContext(
LOG.info("Command to launch container for ApplicationMaster is : "
+ mergedCommand);
- // Setup the environment - Add { job jar, MR app jar } to classpath.
+ // Setup the CLASSPATH in environment
+ // i.e. add { job jar, CWD, Hadoop jars} to classpath.
    Map<String, String> environment = new HashMap<String, String>();
- MRApps.setInitialClasspath(environment);
- MRApps.addToClassPath(environment, MRConstants.JOB_JAR);
- MRApps.addToClassPath(environment,
- MRConstants.YARN_MAPREDUCE_APP_JAR_PATH);
-
+ MRApps.setClasspath(environment);
+
// Parse distributed cache
- MRApps.setupDistributedCache(jobConf, localResources, environment);
+ MRApps.setupDistributedCache(jobConf, localResources);
// Setup ContainerLaunchContext for AM container
ContainerLaunchContext amContainer =
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
index fcb2a79faf..49a63db44b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
@@ -43,9 +43,15 @@
*/
public class MiniMRYarnCluster extends MiniYARNCluster {
+ public static final String HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME =
+ "hadoop-mapreduce-client-app-0.24.0-SNAPSHOT.jar";
+
+ public static final String YARN_MAPREDUCE_APP_JAR_PATH =
+ "$YARN_HOME/modules/" + HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME;
+
public static final String APPJAR =
"../hadoop-mapreduce-client-app/target/"
- + MRConstants.HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME;
+ + HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME;
private static final Log LOG = LogFactory.getLog(MiniMRYarnCluster.class);
private JobHistoryServer historyServer;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
index 0a1943c013..aa832aa1cc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
@@ -402,7 +402,7 @@ public void setup(Context context) throws IOException {
// both should be reachable via the class loader.
Assert.assertNotNull(cl.getResource("distributed.jar.inside2"));
Assert.assertNotNull(cl.getResource("distributed.jar.inside3"));
- Assert.assertNull(cl.getResource("distributed.jar.inside4"));
+ Assert.assertNotNull(cl.getResource("distributed.jar.inside4"));
// Check that the symlink for the renaming was created in the cwd;
File symlinkFile = new File("distributed.first.symlink");
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index 212ca671c8..591035b046 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -46,4 +46,117 @@ public interface ApplicationConstants {
public static final String LOCAL_DIR_ENV = "YARN_LOCAL_DIRS";
  public static final String LOG_DIR_EXPANSION_VAR = "<LOG_DIR>";
+
+ public static final String STDERR = "stderr";
+
+ public static final String STDOUT = "stdout";
+
+ /**
+ * Classpath for typical applications.
+ */
+ public static final String[] APPLICATION_CLASSPATH =
+ new String[] {
+ "$HADOOP_CONF_DIR",
+ "$HADOOP_COMMON_HOME/share/hadoop/common/*",
+ "$HADOOP_COMMON_HOME/share/hadoop/common/lib/*",
+ "$HADOOP_HDFS_HOME/share/hadoop/hdfs/*",
+ "$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*",
+ "$YARN_HOME/modules/*",
+ "$YARN_HOME/lib/*"
+ };
+
+ /**
+ * Environment for Applications.
+ *
+ * Some of the environment variables for applications are final
+ * i.e. they cannot be modified by the applications.
+ */
+ public enum Environment {
+ /**
+ * $USER
+ * Final, non-modifiable.
+ */
+ USER("USER"),
+
+ /**
+ * $LOGNAME
+ * Final, non-modifiable.
+ */
+ LOGNAME("LOGNAME"),
+
+ /**
+ * $HOME
+ * Final, non-modifiable.
+ */
+ HOME("HOME"),
+
+ /**
+ * $PWD
+ * Final, non-modifiable.
+ */
+ PWD("PWD"),
+
+ /**
+ * $PATH
+ */
+ PATH("PATH"),
+
+ /**
+ * $SHELL
+ */
+ SHELL("SHELL"),
+
+ /**
+ * $JAVA_HOME
+ */
+ JAVA_HOME("JAVA_HOME"),
+
+ /**
+ * $CLASSPATH
+ */
+ CLASSPATH("CLASSPATH"),
+
+ /**
+ * $LD_LIBRARY_PATH
+ */
+ LD_LIBRARY_PATH("LD_LIBRARY_PATH"),
+
+ /**
+ * $HADOOP_CONF_DIR
+ * Final, non-modifiable.
+ */
+ HADOOP_CONF_DIR("HADOOP_CONF_DIR"),
+
+ /**
+ * $HADOOP_COMMON_HOME
+ */
+ HADOOP_COMMON_HOME("HADOOP_COMMON_HOME"),
+
+ /**
+ * $HADOOP_HDFS_HOME
+ */
+ HADOOP_HDFS_HOME("HADOOP_HDFS_HOME"),
+
+ /**
+ * $YARN_HOME
+ */
+ YARN_HOME("YARN_HOME");
+
+ private final String variable;
+ private Environment(String variable) {
+ this.variable = variable;
+ }
+
+ public String key() {
+ return variable;
+ }
+
+ public String toString() {
+ return variable;
+ }
+
+ public String $() {
+ return "$" + variable;
+ }
+ }
}
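
A short, self-contained sketch of how the new Environment enum's key() and $() accessors are intended to be used when assembling a container command and its environment; the class name here is illustrative only:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;

    class EnvironmentUsageSketch {
      public static void main(String[] args) {
        // $() produces a shell-style reference such as "$JAVA_HOME"; it is
        // expanded on the node that launches the container, not on the client.
        String javaCommand = Environment.JAVA_HOME.$() + "/bin/java";

        // key() produces the bare variable name, e.g. "CLASSPATH".
        Map<String, String> env = new HashMap<String, String>();
        env.put(Environment.CLASSPATH.key(), "job.jar:.");

        System.out.println(javaCommand + " with environment " + env);
      }
    }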
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ba23134170..f34830c605 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -357,6 +357,12 @@ public class YarnConfiguration extends Configuration {
public static final String NM_AUX_SERVICE_FMT =
NM_PREFIX + "aux-services.%s.class";
+ public static final String NM_USER_HOME_DIR =
+ NM_PREFIX + "user-home-dir";
+
+ public static final String DEFAULT_NM_USER_HOME_DIR= "/home/";
+
+
public static final int INVALID_CONTAINER_EXIT_STATUS = -1000;
public static final int ABORTED_CONTAINER_EXIT_STATUS = -100;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index a7e82a2d41..8387287679 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -133,8 +133,10 @@ public int launchContainer(Container container,
String[] command =
new String[] { "bash", "-c", launchDst.toUri().getPath().toString() };
LOG.info("launchContainer: " + Arrays.toString(command));
- shExec = new ShellCommandExecutor(command,
- new File(containerWorkDir.toUri().getPath()));
+ shExec = new ShellCommandExecutor(
+ command,
+ new File(containerWorkDir.toUri().getPath()),
+ container.getLaunchContext().getEnvironment()); // sanitized env
launchCommandObjs.put(containerId, shExec);
shExec.execute();
} catch (IOException e) {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 97721f72a3..0779d3b158 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -161,7 +161,11 @@ public int launchContainer(Container container,
nmPrivateCotainerScriptPath.toUri().getPath().toString(),
nmPrivateTokensPath.toUri().getPath().toString()));
String[] commandArray = command.toArray(new String[command.size()]);
- ShellCommandExecutor shExec = new ShellCommandExecutor(commandArray);
+ ShellCommandExecutor shExec =
+ new ShellCommandExecutor(
+ commandArray,
+ null, // NM's cwd
+ container.getLaunchContext().getEnvironment()); // sanitized env
launchCommandObjs.put(containerId, shExec);
// DEBUG
LOG.info("launchContainer: " + Arrays.toString(commandArray));
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 497460d3e7..43afa4cb85 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -44,6 +44,7 @@
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
@@ -89,7 +90,6 @@ public Integer call() {
    final Map<Path, String> localResources = container.getLocalizedResources();
String containerIdStr = ConverterUtils.toString(container.getContainerID());
final String user = launchContext.getUser();
-    final Map<String, String> env = launchContext.getEnvironment();
    final List<String> command = launchContext.getCommands();
int ret = -1;
@@ -109,16 +109,16 @@ public Integer call() {
}
launchContext.setCommands(newCmds);
-      Map<String, String> envs = launchContext.getEnvironment();
-      Map<String, String> newEnvs = new HashMap<String, String>(envs.size());
-      for (Entry<String, String> entry : envs.entrySet()) {
-        newEnvs.put(
-            entry.getKey(),
-            entry.getValue().replace(
+      Map<String, String> environment = launchContext.getEnvironment();
+      // Make a copy of env to iterate & do variable expansion
+      for (Entry<String, String> entry : environment.entrySet()) {
+ String value = entry.getValue();
+ entry.setValue(
+ value.replace(
ApplicationConstants.LOG_DIR_EXPANSION_VAR,
- containerLogDir.toUri().getPath()));
+ containerLogDir.toUri().getPath())
+ );
}
- launchContext.setEnvironment(newEnvs);
// /////////////////////////// End of variable expansion
FileContext lfs = FileContext.getLocalFSFileContext();
@@ -164,11 +164,18 @@ public Integer call() {
EnumSet.of(CREATE, OVERWRITE));
// Set the token location too.
- env.put(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME, new Path(
- containerWorkDir, FINAL_CONTAINER_TOKENS_FILE).toUri().getPath());
+ environment.put(
+ ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME,
+ new Path(containerWorkDir,
+ FINAL_CONTAINER_TOKENS_FILE).toUri().getPath());
- writeLaunchEnv(containerScriptOutStream, env, localResources,
- launchContext.getCommands(), appDirs);
+ // Sanitize the container's environment
+ sanitizeEnv(environment, containerWorkDir, appDirs);
+
+ // Write out the environment
+ writeLaunchEnv(containerScriptOutStream, environment, localResources,
+ launchContext.getCommands());
+
// /////////// End of writing out container-script
// /////////// Write out the container-tokens in the nmPrivate space.
@@ -275,19 +282,71 @@ public String toString() {
}
+ private static void putEnvIfNotNull(
+      Map<String, String> environment, String variable, String value) {
+ if (value != null) {
+ environment.put(variable, value);
+ }
+ }
+
+ private static void putEnvIfAbsent(
+      Map<String, String> environment, String variable) {
+ if (environment.get(variable) == null) {
+ putEnvIfNotNull(environment, variable, System.getenv(variable));
+ }
+ }
+
+  public void sanitizeEnv(Map<String, String> environment,
+      Path pwd, List<Path> appDirs) {
+ /**
+ * Non-modifiable environment variables
+ */
+
+ putEnvIfNotNull(environment, Environment.USER.name(), container.getUser());
+
+ putEnvIfNotNull(environment,
+ Environment.LOGNAME.name(),container.getUser());
+
+ putEnvIfNotNull(environment,
+ Environment.HOME.name(),
+ conf.get(
+ YarnConfiguration.NM_USER_HOME_DIR,
+ YarnConfiguration.DEFAULT_NM_USER_HOME_DIR
+ )
+ );
+
+ putEnvIfNotNull(environment, Environment.PWD.name(), pwd.toString());
+
+ putEnvIfNotNull(environment,
+ Environment.HADOOP_CONF_DIR.name(),
+ System.getenv(Environment.HADOOP_CONF_DIR.name())
+ );
+
+ putEnvIfNotNull(environment,
+ ApplicationConstants.LOCAL_DIR_ENV,
+ StringUtils.join(",", appDirs)
+ );
+
+ if (!Shell.WINDOWS) {
+ environment.put("JVM_PID", "$$");
+ }
+
+ /**
+ * Modifiable environment variables
+ */
+
+ putEnvIfAbsent(environment, Environment.JAVA_HOME.name());
+ putEnvIfAbsent(environment, Environment.HADOOP_COMMON_HOME.name());
+ putEnvIfAbsent(environment, Environment.HADOOP_HDFS_HOME.name());
+ putEnvIfAbsent(environment, Environment.YARN_HOME.name());
+
+ }
+
  private static void writeLaunchEnv(OutputStream out,
      Map<String, String> environment, Map<Path, String> resources,
-      List<String> command, List<Path> appDirs)
+      List<String> command)
throws IOException {
ShellScriptBuilder sb = new ShellScriptBuilder();
- if (System.getenv("YARN_HOME") != null) {
- // TODO: Get from whitelist.
- sb.env("YARN_HOME", System.getenv("YARN_HOME"));
- }
- sb.env(ApplicationConstants.LOCAL_DIR_ENV, StringUtils.join(",", appDirs));
- if (!Shell.WINDOWS) {
- sb.env("JVM_PID", "$$");
- }
if (environment != null) {
      for (Map.Entry<String, String> env : environment.entrySet()) {
sb.env(env.getKey().toString(), env.getValue().toString());
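
The putEnvIfNotNull/putEnvIfAbsent pair above implements an "application value wins, otherwise inherit from the NodeManager" policy; a minimal standalone sketch of that pattern (illustrative class and method names, not part of the patch) is:

    import java.util.HashMap;
    import java.util.Map;

    class EnvWhitelistSketch {
      static void putIfNotNull(Map<String, String> env, String var, String value) {
        if (value != null) {
          env.put(var, value);        // only set when a real value exists
        }
      }

      static void putIfAbsent(Map<String, String> env, String var) {
        if (env.get(var) == null) {   // application value wins over NM value
          putIfNotNull(env, var, System.getenv(var));
        }
      }

      public static void main(String[] args) {
        Map<String, String> env = new HashMap<String, String>();
        env.put("JAVA_HOME", "/opt/app-specific-jdk");
        putIfAbsent(env, "JAVA_HOME");   // kept: the application already set it
        putIfAbsent(env, "YARN_HOME");   // filled from the NodeManager's own env
        System.out.println(env);
      }
    }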
From 32f28e6994dc62b3165e501fd7b528b1fa9036d5 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Wed, 21 Sep 2011 20:37:08 +0000
Subject: [PATCH 15/68] MAPREDUCE-3062. Fixed default RMAdmin address.
Contributed by Chris Riccomini.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173838 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../hadoop/yarn/server/resourcemanager/AdminService.java | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 6977854d41..f9b12d8113 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1390,6 +1390,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2970. Fixed NPEs in corner cases with different configurations
for mapreduce.framework.name. (Venu Gopala Rao via vinodkv)
+ MAPREDUCE-3062. Fixed default RMAdmin address. (Chris Riccomini
+ via acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 1fc34f0dfd..6237f8961f 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -84,7 +84,7 @@ public void init(Configuration conf) {
super.init(conf);
String bindAddress =
conf.get(YarnConfiguration.RM_ADMIN_ADDRESS,
- YarnConfiguration.RM_ADMIN_ADDRESS);
+ YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS);
masterServiceAddress = NetUtils.createSocketAddr(bindAddress);
adminAcl =
new AccessControlList(
From ed7eda00cb1f509ebe7f967cb65bdb1ece06334c Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Wed, 21 Sep 2011 23:32:31 +0000
Subject: [PATCH 16/68] MAPREDUCE-3066. Fixed default ResourceTracker address
for the NodeManager. Contributed by Chris Riccomini.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173904 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index f9b12d8113..51a4c189c3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1393,6 +1393,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3062. Fixed default RMAdmin address. (Chris Riccomini
via acmurthy)
+ MAPREDUCE-3066. Fixed default ResourceTracker address for the NodeManager.
+ (Chris Riccomini via acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 641e74b801..1b1fd46b9e 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -101,7 +101,7 @@ public NodeStatusUpdaterImpl(Context context, Dispatcher dispatcher,
public synchronized void init(Configuration conf) {
this.rmAddress =
conf.get(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
- YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS);
+ YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS);
this.heartBeatInterval =
conf.getLong(YarnConfiguration.NM_TO_RM_HEARTBEAT_INTERVAL_MS,
YarnConfiguration.DEFAULT_NM_TO_RM_HEARTBEAT_INTERVAL_MS);
From 3557202a92ff9efcd34dbe60ed31d75a4a33326e Mon Sep 17 00:00:00 2001
From: Mahadev Konar
Date: Wed, 21 Sep 2011 23:52:50 +0000
Subject: [PATCH 17/68] MAPREDUCE-3044. Pipes jobs stuck without making
progress. (mahadev)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173909 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 2 ++
.../org/apache/hadoop/mapred/MapReduceChildJVM.java | 6 ++++--
.../main/java/org/apache/hadoop/mapred/TaskLog.java | 13 +++++++++++--
.../org/apache/hadoop/mapred/pipes/Application.java | 5 +++--
.../org/apache/hadoop/mapreduce/MRJobConfig.java | 7 ++++++-
.../src/main/resources/container-log4j.properties | 8 ++++----
6 files changed, 30 insertions(+), 11 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 51a4c189c3..d722bdeb27 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1396,6 +1396,8 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3066. Fixed default ResourceTracker address for the NodeManager.
(Chris Riccomini via acmurthy)
+ MAPREDUCE-3044. Pipes jobs stuck without making progress. (mahadev)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
index fc25d06da2..4f2941a78c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
@@ -31,6 +31,8 @@
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
public class MapReduceChildJVM {
@@ -164,8 +166,8 @@ private static String getChildJavaOpts(JobConf jobConf, boolean isMapTask) {
  private static void setupLog4jProperties(Vector<String> vargs,
long logSize) {
vargs.add("-Dlog4j.configuration=container-log4j.properties");
- vargs.add("-Dhadoop.yarn.mr.containerLogDir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
- vargs.add("-Dhadoop.yarn.mr.totalLogFileSize=" + logSize);
+ vargs.add("-D" + MRJobConfig.TASK_LOG_DIR + "=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
+ vargs.add("-D" + MRJobConfig.TASK_LOG_SIZE + "=" + logSize);
}
  public static List<String> getVMCommand(
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
index 7e978e9cf9..597b2edaa3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
@@ -44,6 +44,7 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils;
import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.util.ProcessTree;
import org.apache.hadoop.util.Shell;
import org.apache.log4j.Appender;
@@ -75,10 +76,18 @@ public class TaskLog {
}
}
}
-
+
+ public static String getMRv2LogDir() {
+ return System.getProperty(MRJobConfig.TASK_LOG_DIR);
+ }
+
public static File getTaskLogFile(TaskAttemptID taskid, boolean isCleanup,
LogName filter) {
- return new File(getAttemptDir(taskid, isCleanup), filter.toString());
+ if (getMRv2LogDir() != null) {
+ return new File(getMRv2LogDir(), filter.toString());
+ } else {
+ return new File(getAttemptDir(taskid, isCleanup), filter.toString());
+ }
}
static File getRealTaskLogFileLocation(TaskAttemptID taskid,
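
Because the container log directory now reaches the child JVM as a -D system property keyed by MRJobConfig.TASK_LOG_DIR (see MapReduceChildJVM above), a child process can locate its log files roughly as follows; this is a sketch with an illustrative class name, not the patched TaskLog code:

    import java.io.File;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    class TaskLogDirSketch {
      static File resolveStdout(File mrV1AttemptDir) {
        String mrV2LogDir = System.getProperty(MRJobConfig.TASK_LOG_DIR);
        // MRv2: logs live under the container log dir; MRv1: keep the old layout.
        return mrV2LogDir != null
            ? new File(mrV2LogDir, "stdout")
            : new File(mrV1AttemptDir, "stdout");
      }
    }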
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
index cb8b476ac7..0a108d73b6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.mapred.pipes;
+import java.io.BufferedInputStream;
import java.io.File;
import java.io.IOException;
import java.net.ServerSocket;
@@ -26,6 +27,7 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Map.Entry;
import java.util.Random;
import javax.crypto.SecretKey;
@@ -111,7 +113,6 @@ class Application
Date: Thu, 22 Sep 2011 06:32:35 +0000
Subject: [PATCH 18/68] MAPREDUCE-3048. Fixed test-patch to run tests via "mvn
clean install test" (vinodkv)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173962 13f79535-47bb-0310-9956-ffa450edef68
---
dev-support/test-patch.sh | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 76b020a01e..6325e6a193 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -598,8 +598,8 @@ runTests () {
echo ""
echo ""
- echo "$MVN clean test -Pnative -D${PROJECT_NAME}PatchProcess"
- $MVN clean test -Pnative -D${PROJECT_NAME}PatchProcess
+ echo "$MVN clean install test -Pnative -D${PROJECT_NAME}PatchProcess"
+ $MVN clean install test -Pnative -D${PROJECT_NAME}PatchProcess
if [[ $? != 0 ]] ; then
### Find and format names of failed tests
failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E "
Date: Thu, 22 Sep 2011 08:25:20 +0000
Subject: [PATCH 19/68] HDFS-46. Change default namespace quota of root
directory from Integer.MAX_VALUE to Long.MAX_VALUE. Contributed by Uma
Maheswara Rao G
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173990 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../hdfs/server/namenode/FSDirectory.java | 2 +-
.../org/apache/hadoop/hdfs/TestQuota.java | 20 ++++++++++++++-----
.../hdfs/server/namenode/FSImageTestUtil.java | 7 +++++++
4 files changed, 26 insertions(+), 6 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f56807302c..9ced9e42bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -62,6 +62,9 @@ Trunk (unreleased changes)
IOExceptions of stream closures can mask root exceptions. (Uma Maheswara
Rao G via szetszwo)
+ HDFS-46. Change default namespace quota of root directory from
+ Integer.MAX_VALUE to Long.MAX_VALUE. (Uma Maheswara Rao G via szetszwo)
+
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 4d7f2b9ca6..654c3a231d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -120,7 +120,7 @@ boolean hasReadLock() {
this.cond = dirLock.writeLock().newCondition();
rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
ns.createFsOwnerPermissions(new FsPermission((short)0755)),
- Integer.MAX_VALUE, UNKNOWN_DISK_SPACE);
+ Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
this.fsImage = fsImage;
int configuredLimit = conf.getInt(
DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index a0727a6c90..d7ee516b0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -17,6 +17,10 @@
*/
package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
import java.io.OutputStream;
import java.security.PrivilegedExceptionAction;
@@ -24,17 +28,15 @@
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
-
import org.junit.Test;
-import static org.junit.Assert.*;
/** A class for testing quota-related commands */
public class TestQuota {
@@ -841,6 +843,14 @@ public void testMultipleFilesSmallerThanOneBlock() throws Exception {
DFSAdmin admin = new DFSAdmin(conf);
try {
+
+      //Test for default namespace quota
+ long nsQuota = FSImageTestUtil.getNSQuota(cluster.getNameNode()
+ .getNamesystem());
+ assertTrue(
+ "Default namespace quota expected as long max. But the value is :"
+ + nsQuota, nsQuota == Long.MAX_VALUE);
+
Path dir = new Path("/test");
boolean exceededQuota = false;
ContentSummary c;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
index c90b2900db..39e7db17dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
@@ -412,4 +412,11 @@ public static void logStorageContents(Log LOG, NNStorage storage) {
public static FSImage getFSImage(NameNode node) {
return node.getFSImage();
}
+
+ /**
+   * Get the namespace quota.
+ */
+ public static long getNSQuota(FSNamesystem ns) {
+ return ns.dir.rootDir.getNsQuota();
+ }
}
From 4806d7ba74c668817ea6f35421c559eaf57a997e Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Thu, 22 Sep 2011 15:14:31 +0000
Subject: [PATCH 20/68] MAPREDUCE-2754. Fixed MR AM stdout, stderr and syslog
to redirect to correct log-files. Contributed by Ravi Teja Ch N V.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1174194 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../java/org/apache/hadoop/mapred/MapReduceChildJVM.java | 4 ++--
.../java/org/apache/hadoop/mapreduce/MRJobConfig.java | 2 +-
.../main/java/org/apache/hadoop/mapred/YARNRunner.java | 9 ++++++---
4 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index d722bdeb27..653325dbbf 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1398,6 +1398,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3044. Pipes jobs stuck without making progress. (mahadev)
+ MAPREDUCE-2754. Fixed MR AM stdout, stderr and syslog to redirect to
+ correct log-files. (Ravi Teja Ch N V via vinodkv)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
index 4f2941a78c..ce6557abd0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
@@ -245,8 +245,8 @@ public static List getVMCommand(
// Finally add the jvmID
vargs.add(String.valueOf(jvmID.getId()));
- vargs.add("1>" + getTaskLogFile(TaskLog.LogName.STDERR));
- vargs.add("2>" + getTaskLogFile(TaskLog.LogName.STDOUT));
+ vargs.add("1>" + getTaskLogFile(TaskLog.LogName.STDOUT));
+ vargs.add("2>" + getTaskLogFile(TaskLog.LogName.STDERR));
// Final commmand
StringBuilder mergedCommand = new StringBuilder();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index a022229fcd..c456d52deb 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -462,7 +462,7 @@ public interface MRJobConfig {
/** The log directory for the containers */
public static final String TASK_LOG_DIR = MR_PREFIX + "container.log.dir";
- public static final String TASK_LOG_SIZE = MR_PREFIX + "log.filesize";
+ public static final String TASK_LOG_SIZE = MR_PREFIX + "container.log.filesize";
public static final String MAPREDUCE_V2_CHILD_CLASS =
"org.apache.hadoop.mapred.YarnChild";
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
index a8853b928e..7a6443b6c9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
@@ -310,9 +310,12 @@ public ApplicationSubmissionContext createApplicationSubmissionContext(
// Setup the command to run the AM
    Vector<CharSequence> vargs = new Vector<CharSequence>(8);
vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
- vargs.add("-Dhadoop.root.logger="
- + conf.get(MRJobConfig.MR_AM_LOG_OPTS,
- MRJobConfig.DEFAULT_MR_AM_LOG_OPTS) + ",console");
+
+ long logSize = TaskLog.getTaskLogLength(new JobConf(conf));
+ vargs.add("-Dlog4j.configuration=container-log4j.properties");
+ vargs.add("-D" + MRJobConfig.TASK_LOG_DIR + "="
+ + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
+ vargs.add("-D" + MRJobConfig.TASK_LOG_SIZE + "=" + logSize);
vargs.add(conf.get(MRJobConfig.MR_AM_COMMAND_OPTS,
MRJobConfig.DEFAULT_MR_AM_COMMAND_OPTS));
From 1eb7008753926f48f0b0442ef9a92f4075bad96d Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Thu, 22 Sep 2011 18:05:30 +0000
Subject: [PATCH 21/68] MAPREDUCE-3073. Fixed build issues in MR1. Contributed
by Mahadev Konar.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1174294 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 2 ++
.../src/main/java/org/apache/hadoop/mapred/MRConstants.java | 5 ++++-
2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 653325dbbf..8852a3bedb 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1401,6 +1401,8 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2754. Fixed MR AM stdout, stderr and syslog to redirect to
correct log-files. (Ravi Teja Ch N V via vinodkv)
+ MAPREDUCE-3073. Fixed build issues in MR1. (mahadev via acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java
index 806b856652..3d7363e5fa 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java
@@ -26,7 +26,7 @@
*******************************/
@Private
@Unstable
-public class MRConstants {
+public interface MRConstants {
//
// Timeouts, constants
//
@@ -57,4 +57,7 @@ public class MRConstants {
* The reduce task number for which this map output is being transferred
*/
public static final String FOR_REDUCE_TASK = "for-reduce-task";
+
+ /** Used in MRv1, mostly in TaskTracker code **/
+ public static final String WORKDIR = "work";
}
From 36d7ab3489dc2c90b8ebc87c38b8524062f2e94f Mon Sep 17 00:00:00 2001
From: Harsh J
Date: Fri, 23 Sep 2011 07:26:00 +0000
Subject: [PATCH 22/68] HADOOP-7542. Change Configuration XML format to 1.1 to
 support serializing additional characters. Contributed by Christopher Egner.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1174562 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
.../org/apache/hadoop/conf/Configuration.java | 4 ++++
.../apache/hadoop/conf/TestConfiguration.java | 16 ++++++++++++++--
3 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6245bcf8f9..c69ec9d3ee 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -26,6 +26,8 @@ Trunk (unreleased changes)
HADOOP-7621. alfredo config should be in a file not readable by users
(Alejandro Abdelnur via atm)
+ HADOOP-7542. Change Configuration XML format to 1.1 to support for serializing additional characters (Christopher Egner via harsh)
+
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 4fb1d19066..c310aa65e6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -1632,6 +1632,10 @@ private synchronized Document asXmlDocument() throws IOException {
try {
doc =
DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();
+
+ // Allow a broader set of control characters to appear in job confs.
+ // cf https://issues.apache.org/jira/browse/MAPREDUCE-109
+ doc.setXmlVersion( "1.1" );
} catch (ParserConfigurationException pe) {
throw new IOException(pe);
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index f9f14fb848..5842db199d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -58,7 +58,7 @@ protected void tearDown() throws Exception {
}
private void startConfig() throws IOException{
-    out.write("<?xml version=\"1.0\"?>\n");
+    out.write("<?xml version=\"1.1\"?>\n");
     out.write("<configuration>\n");
}
@@ -221,6 +221,18 @@ public void testCommentsInValue() throws IOException {
assertEquals("this contains a comment", conf.get("my.comment"));
}
+ public void testControlAInValue() throws IOException {
+ out = new BufferedWriter(new FileWriter(CONFIG));
+ startConfig();
+ appendProperty("my.char", "");
+ appendProperty("my.string", "somestring");
+ endConfig();
+ Path fileResource = new Path(CONFIG);
+ conf.addResource(fileResource);
+ assertEquals("\u0001", conf.get("my.char"));
+ assertEquals("some\u0001string", conf.get("my.string"));
+ }
+
public void testTrim() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
@@ -298,7 +310,7 @@ public void testWriteXml() throws IOException {
conf.writeXml(baos);
String result = baos.toString();
assertTrue("Result has proper header", result.startsWith(
-      "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>"));
+      "<?xml version=\"1.1\" encoding=\"UTF-8\" standalone=\"no\"?>"));
assertTrue("Result has proper footer", result.endsWith(""));
}
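
A hedged illustration of what the move to XML 1.1 buys: a Configuration value containing a control character such as \u0001 can now be written out and read back, which XML 1.0 parsers reject. The class name below is illustrative only:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import org.apache.hadoop.conf.Configuration;

    class Xml11RoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(false);
        conf.set("my.char", "some\u0001string");

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        conf.writeXml(out);   // header now declares XML version 1.1

        Configuration reread = new Configuration(false);
        reread.addResource(new ByteArrayInputStream(out.toByteArray()));
        System.out.println("control char preserved: "
            + "some\u0001string".equals(reread.get("my.char")));
      }
    }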
From 516fc91187cabe99aa192abaafdd19f0fb788768 Mon Sep 17 00:00:00 2001
From: Steve Loughran
Date: Fri, 23 Sep 2011 10:44:32 +0000
Subject: [PATCH 23/68] marking HADOOP-7542 as potentially incompatible
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1174644 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-common-project/hadoop-common/CHANGES.txt | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index c69ec9d3ee..d0d7ab1b02 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -26,8 +26,6 @@ Trunk (unreleased changes)
HADOOP-7621. alfredo config should be in a file not readable by users
(Alejandro Abdelnur via atm)
- HADOOP-7542. Change Configuration XML format to 1.1 to support for serializing additional characters (Christopher Egner via harsh)
-
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
@@ -37,6 +35,9 @@ Release 0.23.0 - Unreleased
HADOOP-6432. Add Statistics support in FileContext. (jitendra)
HADOOP-7136. Remove failmon contrib component. (nigel)
+
+ HADOOP-7542. Change Configuration XML format to 1.1 to support for serializing additional
+ characters. This requires XML1.1 support in the XML parser (Christopher Egner via harsh)
NEW FEATURES
From 864630b92f8cce780f70b526c796ecfe0bb58734 Mon Sep 17 00:00:00 2001
From: Steve Loughran
Date: Fri, 23 Sep 2011 11:04:28 +0000
Subject: [PATCH 24/68] HADOOP-7669 Fix newly introduced release audit
warning.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1174653 13f79535-47bb-0310-9956-ffa450edef68
---
.../hadoop-common/CHANGES.txt | 3 +++
.../packages/templates/conf/log4j.properties | 18 ++++++++++++++++++
2 files changed, 21 insertions(+)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index d0d7ab1b02..03f9559140 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -25,6 +25,9 @@ Trunk (unreleased changes)
HADOOP-7621. alfredo config should be in a file not readable by users
(Alejandro Abdelnur via atm)
+
+ HADOOP-7669 Fix newly introduced release audit warning.
+ (Uma Maheswara Rao G via stevel)
Release 0.23.0 - Unreleased
diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
index d765e96c4f..16c6aa6890 100644
--- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
@@ -1,3 +1,21 @@
+# Copyright 2011 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Define some default values that can be overridden by system properties
hadoop.root.logger=INFO,console
hadoop.log.dir=.
From 272639877c1c884493a2f10f3a1bc9fc63cbcb78 Mon Sep 17 00:00:00 2001
From: Harsh J
Date: Fri, 23 Sep 2011 12:06:58 +0000
Subject: [PATCH 25/68] Move the CHANGES.txt note for HADOOP-7542 to the right
release section.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1174684 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-common-project/hadoop-common/CHANGES.txt | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 03f9559140..26be711214 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -2,6 +2,12 @@ Hadoop Change Log
Trunk (unreleased changes)
+ INCOMPATIBLE CHANGES
+
+ HADOOP-7542. Change Configuration XML format to 1.1 to add support for
+ serializing additional characters. This requires XML1.1
+ support in the XML parser (Christopher Egner via harsh)
+
IMPROVEMENTS
HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
@@ -38,9 +44,6 @@ Release 0.23.0 - Unreleased
HADOOP-6432. Add Statistics support in FileContext. (jitendra)
HADOOP-7136. Remove failmon contrib component. (nigel)
-
- HADOOP-7542. Change Configuration XML format to 1.1 to support for serializing additional
- characters. This requires XML1.1 support in the XML parser (Christopher Egner via harsh)
NEW FEATURES
From b549c107825581b15fd14494099a943ff3213c6f Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Fri, 23 Sep 2011 14:07:42 +0000
Subject: [PATCH 26/68] MAPREDUCE-3055. Simplified ApplicationAttemptId passing
to ApplicationMaster via environment variable. (vinodkv)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1174785 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../hadoop/mapreduce/v2/app/MRAppMaster.java | 21 +-
.../org/apache/hadoop/mapred/YARNRunner.java | 3 -
.../hadoop/yarn/api/ApplicationConstants.java | 7 +-
.../hadoop/yarn/util/ConverterUtils.java | 52 +++--
.../nodemanager/webapp/ContainerLogsPage.java | 14 +-
.../nodemanager/webapp/ContainerPage.java | 23 ++-
.../amlauncher/AMLauncher.java | 29 +--
.../amlauncher/ApplicationMasterLauncher.java | 4 +-
.../yarn/server/resourcemanager/MockRM.java | 1 +
.../TestApplicationMasterLauncher.java | 159 +++++++++++++++
.../TestApplicationMasterLauncher.java | 193 ------------------
12 files changed, 251 insertions(+), 258 deletions(-)
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
delete mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestApplicationMasterLauncher.java
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 8852a3bedb..1ec852aef3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -310,6 +310,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2726. Added job-file to the AM and JobHistoryServer web
interfaces. (Jeffrey Naisbitt via vinodkv)
+ MAPREDUCE-3055. Simplified ApplicationAttemptId passing to
+ ApplicationMaster via environment variable. (vinodkv)
+
OPTIMIZATIONS
MAPREDUCE-2026. Make JobTracker.getJobCounters() and
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index 41a86f1271..ab8dec169c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -77,6 +77,7 @@
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.SystemClock;
import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -87,6 +88,7 @@
import org.apache.hadoop.yarn.service.AbstractService;
import org.apache.hadoop.yarn.service.CompositeService;
import org.apache.hadoop.yarn.service.Service;
+import org.apache.hadoop.yarn.util.ConverterUtils;
/**
* The Map-Reduce Application Master.
@@ -647,13 +649,18 @@ public void handle(SpeculatorEvent event) {
public static void main(String[] args) {
try {
- //Configuration.addDefaultResource("job.xml");
- ApplicationId applicationId = RecordFactoryProvider
- .getRecordFactory(null).newRecordInstance(ApplicationId.class);
- applicationId.setClusterTimestamp(Long.valueOf(args[0]));
- applicationId.setId(Integer.valueOf(args[1]));
- int failCount = Integer.valueOf(args[2]);
- MRAppMaster appMaster = new MRAppMaster(applicationId, failCount);
+ String applicationAttemptIdStr = System
+ .getenv(ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV);
+ if (applicationAttemptIdStr == null) {
+ String msg = ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV
+ + " is null";
+ LOG.error(msg);
+ throw new IOException(msg);
+ }
+ ApplicationAttemptId applicationAttemptId = ConverterUtils
+ .toApplicationAttemptId(applicationAttemptIdStr);
+ MRAppMaster appMaster = new MRAppMaster(applicationAttemptId
+ .getApplicationId(), applicationAttemptId.getAttemptId());
Runtime.getRuntime().addShutdownHook(
new CompositeServiceShutdownHook(appMaster));
YarnConfiguration conf = new YarnConfiguration(new JobConf());
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
index 7a6443b6c9..20bd976b8d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
@@ -321,9 +321,6 @@ public ApplicationSubmissionContext createApplicationSubmissionContext(
MRJobConfig.DEFAULT_MR_AM_COMMAND_OPTS));
vargs.add(MRJobConfig.APPLICATION_MASTER_CLASS);
- vargs.add(String.valueOf(applicationId.getClusterTimestamp()));
- vargs.add(String.valueOf(applicationId.getId()));
- vargs.add(ApplicationConstants.AM_FAIL_COUNT_STRING);
vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
Path.SEPARATOR + ApplicationConstants.STDOUT);
vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index 591035b046..99f145fbdc 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -37,8 +37,11 @@ public interface ApplicationConstants {
public static final String APPLICATION_CLIENT_SECRET_ENV_NAME =
"AppClientTokenEnv";
- // TODO: Weird. This is part of AM command line. Instead it should be a env.
- public static final String AM_FAIL_COUNT_STRING = "";
+ /**
+ * The environment variable for APPLICATION_ATTEMPT_ID. Set only in the
+ * ApplicationMaster's environment.
+ */
+ public static final String APPLICATION_ATTEMPT_ID_ENV = "APPLICATION_ATTEMPT_ID";
public static final String CONTAINER_TOKEN_FILE_ENV_NAME =
UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION;
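
For AM writers, this constant replaces the old fail-count command-line argument: the RM exports the attempt id into the container environment and the AM reads it back at startup. A minimal sketch of that consumption path, mirroring the MRAppMaster change above (the class name MyAppMaster and the printed message are illustrative, not part of this patch):

    import java.io.IOException;

    import org.apache.hadoop.yarn.api.ApplicationConstants;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.util.ConverterUtils;

    public class MyAppMaster {
      public static void main(String[] args) throws IOException {
        // Injected by the RM's AMLauncher (see the AMLauncher hunk later in this patch).
        String attemptIdStr =
            System.getenv(ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV);
        if (attemptIdStr == null) {
          throw new IOException(ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV
              + " is not set in the environment");
        }
        ApplicationAttemptId attemptId =
            ConverterUtils.toApplicationAttemptId(attemptIdStr);
        System.out.println("Starting attempt " + attemptId.getAttemptId()
            + " of " + attemptId.getApplicationId());
      }
    }
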
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
index ab6bd7395d..6f5e904319 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
@@ -20,6 +20,7 @@
import static org.apache.hadoop.yarn.util.StringHelper._split;
+import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.text.NumberFormat;
@@ -45,6 +46,8 @@
public class ConverterUtils {
public static final String APPLICATION_PREFIX = "application";
+ public static final String CONTAINER_PREFIX = "container";
+ public static final String APPLICATION_ATTEMPT_PREFIX = "appattempt";
/**
* return a hadoop path from a given url
@@ -132,14 +135,12 @@ private static ApplicationId toApplicationId(RecordFactory recordFactory,
}
private static ApplicationAttemptId toApplicationAttemptId(
- RecordFactory recordFactory,
- Iterator<String> it) {
- ApplicationId appId =
- recordFactory.newRecordInstance(ApplicationId.class);
+ Iterator<String> it) throws NumberFormatException {
+ ApplicationId appId = Records.newRecord(ApplicationId.class);
appId.setClusterTimestamp(Long.parseLong(it.next()));
appId.setId(Integer.parseInt(it.next()));
- ApplicationAttemptId appAttemptId =
- recordFactory.newRecordInstance(ApplicationAttemptId.class);
+ ApplicationAttemptId appAttemptId = Records
+ .newRecord(ApplicationAttemptId.class);
appAttemptId.setApplicationId(appId);
appAttemptId.setAttemptId(Integer.parseInt(it.next()));
return appAttemptId;
@@ -149,16 +150,35 @@ public static String toString(ContainerId cId) {
return cId.toString();
}
- public static ContainerId toContainerId(RecordFactory recordFactory,
- String containerIdStr) {
+ public static ContainerId toContainerId(String containerIdStr)
+ throws IOException {
Iterator<String> it = _split(containerIdStr).iterator();
- it.next(); // prefix. TODO: Validate container prefix
- ApplicationAttemptId appAttemptID =
- toApplicationAttemptId(recordFactory, it);
- ContainerId containerId =
- recordFactory.newRecordInstance(ContainerId.class);
- containerId.setApplicationAttemptId(appAttemptID);
- containerId.setId(Integer.parseInt(it.next()));
- return containerId;
+ if (!it.next().equals(CONTAINER_PREFIX)) {
+ throw new IOException("Invalid ContainerId prefix: " + containerIdStr);
+ }
+ try {
+ ApplicationAttemptId appAttemptID = toApplicationAttemptId(it);
+ ContainerId containerId = Records.newRecord(ContainerId.class);
+ containerId.setApplicationAttemptId(appAttemptID);
+ containerId.setId(Integer.parseInt(it.next()));
+ return containerId;
+ } catch (NumberFormatException n) {
+ throw new IOException("Invalid ContainerId: " + containerIdStr, n);
+ }
+ }
+
+ public static ApplicationAttemptId toApplicationAttemptId(
+ String applicationAttemptIdStr) throws IOException {
+ Iterator<String> it = _split(applicationAttemptIdStr).iterator();
+ if (!it.next().equals(APPLICATION_ATTEMPT_PREFIX)) {
+ throw new IOException("Invalid AppAttemptId prefix: "
+ + applicationAttemptIdStr);
+ }
+ try {
+ return toApplicationAttemptId(it);
+ } catch (NumberFormatException n) {
+ throw new IOException("Invalid AppAttemptId: "
+ + applicationAttemptIdStr, n);
+ }
}
}
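
The strings these helpers accept are the underscore-separated toString() forms of the corresponding records. A small usage sketch (the literal id values below are made up for illustration; only the prefix and the token order matter to the parser, not the zero-padding the records' own toString() produces):

    import java.io.IOException;

    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.util.ConverterUtils;

    public class IdParsingExample {
      public static void main(String[] args) throws IOException {
        // appattempt_<clusterTimestamp>_<appId>_<attemptId>
        ApplicationAttemptId attemptId =
            ConverterUtils.toApplicationAttemptId("appattempt_1317000000000_0001_1");

        // container_<clusterTimestamp>_<appId>_<attemptId>_<containerId>
        ContainerId containerId =
            ConverterUtils.toContainerId("container_1317000000000_0001_1_1");

        System.out.println(attemptId + " / " + containerId);

        // A wrong prefix or a non-numeric token is now reported as an IOException.
        ConverterUtils.toContainerId("application_1317000000000_0001"); // throws
      }
    }
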
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
index 68b0686a25..e0795613b6 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
@@ -31,8 +31,6 @@
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.factories.RecordFactory;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
@@ -56,22 +54,26 @@ public static class ContainersLogsBlock extends HtmlBlock implements
private final Configuration conf;
private final LocalDirAllocator logsSelector;
private final Context nmContext;
- private final RecordFactory recordFactory;
@Inject
public ContainersLogsBlock(Configuration conf, Context context) {
this.conf = conf;
this.logsSelector = new LocalDirAllocator(YarnConfiguration.NM_LOG_DIRS);
this.nmContext = context;
- this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
}
@Override
protected void render(Block html) {
DIV<Hamlet> div = html.div("#content");
- ContainerId containerId =
- ConverterUtils.toContainerId(this.recordFactory, $(CONTAINER_ID));
+ ContainerId containerId;
+ try {
+ containerId = ConverterUtils.toContainerId($(CONTAINER_ID));
+ } catch (IOException e) {
+ div.h1("Invalid containerId " + $(CONTAINER_ID))._();
+ return;
+ }
+
Container container = this.nmContext.getContainers().get(containerId);
if (container == null) {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
index 27be38a029..5425032eec 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
@@ -18,16 +18,15 @@
package org.apache.hadoop.yarn.server.nodemanager.webapp;
+import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
-import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.factories.RecordFactory;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.util.ConverterUtils;
@@ -53,21 +52,23 @@ protected Class<? extends SubView> content() {
public static class ContainerBlock extends HtmlBlock implements NMWebParams {
- private final Configuration conf;
private final Context nmContext;
- private final RecordFactory recordFactory;
@Inject
- public ContainerBlock(Configuration conf, Context nmContext) {
- this.conf = conf;
+ public ContainerBlock(Context nmContext) {
this.nmContext = nmContext;
- this.recordFactory = RecordFactoryProvider.getRecordFactory(this.conf);
}
@Override
protected void render(Block html) {
- ContainerId containerID =
- ConverterUtils.toContainerId(this.recordFactory, $(CONTAINER_ID));
+ ContainerId containerID;
+ try {
+ containerID = ConverterUtils.toContainerId($(CONTAINER_ID));
+ } catch (IOException e) {
+ html.p()._("Invalid containerId " + $(CONTAINER_ID))._();
+ return;
+ }
+
Container container = this.nmContext.getContainers().get(containerID);
ContainerStatus containerData = container.cloneAndGetContainerStatus();
int exitCode = containerData.getExitStatus();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
index b394faa85d..337f481689 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
@@ -136,7 +136,7 @@ private void cleanup() throws IOException {
containerMgrProxy.stopContainer(stopRequest);
}
- private ContainerManager getContainerMgrProxy(
+ protected ContainerManager getContainerMgrProxy(
final ApplicationId applicationID) throws IOException {
Container container = application.getMasterContainer();
@@ -173,23 +173,11 @@ private ContainerLaunchContext createAMContainerLaunchContext(
// Construct the actual Container
ContainerLaunchContext container =
applicationMasterContext.getAMContainerSpec();
- StringBuilder mergedCommand = new StringBuilder();
- String failCount = Integer.toString(application.getAppAttemptId()
- .getAttemptId());
- List<String> commandList = new ArrayList<String>();
- for (String str : container.getCommands()) {
- // This is out-right wrong. AM FAIL count should be passed via env.
- String result =
- str.replaceFirst(ApplicationConstants.AM_FAIL_COUNT_STRING,
- failCount);
- mergedCommand.append(result).append(" ");
- commandList.add(result);
- }
- container.setCommands(commandList);
- /** add the failed count to the app master command line */
-
- LOG.info("Command to launch container " +
- containerID + " : " + mergedCommand);
+ LOG.info("Command to launch container "
+ + containerID
+ + " : "
+ + StringUtils.arrayToString(container.getCommands().toArray(
+ new String[0])));
// Finalize the container
container.setContainerId(containerID);
@@ -203,6 +191,11 @@ private void setupTokensAndEnv(
ContainerLaunchContext container)
throws IOException {
Map<String, String> environment = container.getEnvironment();
+
+ // Set the AppAttemptId to be consumable by the AM.
+ environment.put(ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV,
+ application.getAppAttemptId().toString());
+
if (UserGroupInformation.isSecurityEnabled()) {
// TODO: Security enabled/disabled info should come from RM.
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
index d1ef1d1400..67f0c8a016 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
@@ -42,9 +42,9 @@ public class ApplicationMasterLauncher extends AbstractService implements
private final BlockingQueue<Runnable> masterEvents
= new LinkedBlockingQueue<Runnable>();
- private ApplicationTokenSecretManager applicationTokenSecretManager;
+ protected ApplicationTokenSecretManager applicationTokenSecretManager;
private ClientToAMSecretManager clientToAMSecretManager;
- private final RMContext context;
+ protected final RMContext context;
public ApplicationMasterLauncher(ApplicationTokenSecretManager
applicationTokenSecretManager, ClientToAMSecretManager clientToAMSecretManager,
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 4be2739967..2123ee806c 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -195,6 +195,7 @@ public void stop() {
};
}
+ @Override
protected AdminService createAdminService() {
return new AdminService(getConfig(), scheduler, getRMContext(),
this.nodesListManager){
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
new file mode 100644
index 0000000000..a12049f9e8
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
@@ -0,0 +1,159 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestApplicationMasterLauncher {
+
+ private static final Log LOG = LogFactory
+ .getLog(TestApplicationMasterLauncher.class);
+
+ private static final class MyContainerManagerImpl implements
+ ContainerManager {
+
+ boolean launched = false;
+ boolean cleanedup = false;
+ String attemptIdAtContainerManager = null;
+
+ @Override
+ public StartContainerResponse
+ startContainer(StartContainerRequest request)
+ throws YarnRemoteException {
+ LOG.info("Container started by MyContainerManager: " + request);
+ launched = true;
+ attemptIdAtContainerManager = request.getContainerLaunchContext()
+ .getEnvironment().get(
+ ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV);
+ return null;
+ }
+
+ @Override
+ public StopContainerResponse stopContainer(StopContainerRequest request)
+ throws YarnRemoteException {
+ LOG.info("Container cleaned up by MyContainerManager");
+ cleanedup = true;
+ return null;
+ }
+
+ @Override
+ public GetContainerStatusResponse getContainerStatus(
+ GetContainerStatusRequest request) throws YarnRemoteException {
+ return null;
+ }
+
+ }
+
+ private static final class MockRMWithCustomAMLauncher extends MockRM {
+
+ private final ContainerManager containerManager;
+
+ public MockRMWithCustomAMLauncher(ContainerManager containerManager) {
+ this.containerManager = containerManager;
+ }
+
+ @Override
+ protected ApplicationMasterLauncher createAMLauncher() {
+ return new ApplicationMasterLauncher(super.appTokenSecretManager,
+ super.clientToAMSecretManager, getRMContext()) {
+ @Override
+ protected Runnable createRunnableLauncher(RMAppAttempt application,
+ AMLauncherEventType event) {
+ return new AMLauncher(context, application, event,
+ applicationTokenSecretManager, clientToAMSecretManager,
+ getConfig()) {
+ @Override
+ protected ContainerManager getContainerMgrProxy(
+ ApplicationId applicationID) throws IOException {
+ return containerManager;
+ }
+ };
+ }
+ };
+ }
+ }
+
+ @Test
+ public void testAMLaunchAndCleanup() throws Exception {
+ Logger rootLogger = LogManager.getRootLogger();
+ rootLogger.setLevel(Level.DEBUG);
+ MyContainerManagerImpl containerManager = new MyContainerManagerImpl();
+ MockRMWithCustomAMLauncher rm = new MockRMWithCustomAMLauncher(
+ containerManager);
+ rm.start();
+ MockNM nm1 = rm.registerNode("h1:1234", 5120);
+
+ RMApp app = rm.submitApp(2000);
+
+ // kick the scheduling
+ nm1.nodeHeartbeat(true);
+
+ int waitCount = 0;
+ while (containerManager.launched == false && waitCount++ < 20) {
+ LOG.info("Waiting for AM Launch to happen..");
+ Thread.sleep(1000);
+ }
+ Assert.assertTrue(containerManager.launched);
+
+ RMAppAttempt attempt = app.getCurrentAppAttempt();
+ ApplicationAttemptId appAttemptId = attempt.getAppAttemptId();
+ Assert.assertEquals(appAttemptId.toString(),
+ containerManager.attemptIdAtContainerManager);
+
+ MockAM am = new MockAM(rm.getRMContext(), rm
+ .getApplicationMasterService(), appAttemptId);
+ am.registerAppAttempt();
+ am.unregisterAppAttempt();
+
+ waitCount = 0;
+ while (containerManager.cleanedup == false && waitCount++ < 20) {
+ LOG.info("Waiting for AM Cleanup to happen..");
+ Thread.sleep(1000);
+ }
+ Assert.assertTrue(containerManager.cleanedup);
+
+ am.waitForState(RMAppAttemptState.FINISHED);
+ rm.stop();
+ }
+}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestApplicationMasterLauncher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestApplicationMasterLauncher.java
deleted file mode 100644
index 8cc948400e..0000000000
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestApplicationMasterLauncher.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
-
-import java.util.concurrent.atomic.AtomicInteger;
-
-import junit.framework.Assert;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationState;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.factories.RecordFactory;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
-import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
-import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
-import org.apache.hadoop.yarn.util.Records;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Testing the applications manager launcher.
- *
- */
-public class TestApplicationMasterLauncher {
-// private static final Log LOG = LogFactory.getLog(TestApplicationMasterLauncher.class);
-// private static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
-// private ApplicationMasterLauncher amLauncher;
-// private DummyEventHandler asmHandle;
-// private final ApplicationTokenSecretManager applicationTokenSecretManager =
-// new ApplicationTokenSecretManager();
-// private final ClientToAMSecretManager clientToAMSecretManager =
-// new ClientToAMSecretManager();
-//
-// Object doneLaunching = new Object();
-// AtomicInteger launched = new AtomicInteger();
-// AtomicInteger cleanedUp = new AtomicInteger();
-// private RMContext context = new RMContextImpl(new MemStore(), null, null,
-// null);
-//
-// private Configuration conf = new Configuration();
-//
-// private class DummyEventHandler implements EventHandler {
-// @Override
-// public void handle(ApplicationEvent appEvent) {
-// ApplicationEventType event = appEvent.getType();
-// switch (event) {
-// case FINISH:
-// synchronized(doneLaunching) {
-// doneLaunching.notify();
-// }
-// break;
-//
-// default:
-// break;
-// }
-// }
-// }
-//
-// private class DummyLaunch implements Runnable {
-// public void run() {
-// launched.incrementAndGet();
-// }
-// }
-//
-// private class DummyCleanUp implements Runnable {
-// private EventHandler eventHandler;
-//
-// public DummyCleanUp(EventHandler eventHandler) {
-// this.eventHandler = eventHandler;
-// }
-// public void run() {
-// cleanedUp.incrementAndGet();
-// eventHandler.handle(new AMFinishEvent(null,
-// ApplicationState.COMPLETED, "", ""));
-// }
-// }
-//
-// private class DummyApplicationMasterLauncher extends
-// ApplicationMasterLauncher {
-// private EventHandler eventHandler;
-//
-// public DummyApplicationMasterLauncher(
-// ApplicationTokenSecretManager applicationTokenSecretManager,
-// ClientToAMSecretManager clientToAMSecretManager,
-// EventHandler eventHandler) {
-// super(applicationTokenSecretManager, clientToAMSecretManager, context);
-// this.eventHandler = eventHandler;
-// }
-//
-// @Override
-// protected Runnable createRunnableLauncher(RMAppAttempt application,
-// AMLauncherEventType event) {
-// Runnable r = null;
-// switch (event) {
-// case LAUNCH:
-// r = new DummyLaunch();
-// break;
-// case CLEANUP:
-// r = new DummyCleanUp(eventHandler);
-// default:
-// break;
-// }
-// return r;
-// }
-// }
-//
-// @Before
-// public void setUp() {
-// asmHandle = new DummyEventHandler();
-// amLauncher = new DummyApplicationMasterLauncher(applicationTokenSecretManager,
-// clientToAMSecretManager, asmHandle);
-// context.getDispatcher().init(conf);
-// amLauncher.init(conf);
-// context.getDispatcher().start();
-// amLauncher.start();
-//
-// }
-//
-// @After
-// public void tearDown() {
-// amLauncher.stop();
-// }
-//
-// @Test
-// public void testAMLauncher() throws Exception {
-//
-// // Creat AppId
-// ApplicationId appId = recordFactory
-// .newRecordInstance(ApplicationId.class);
-// appId.setClusterTimestamp(System.currentTimeMillis());
-// appId.setId(1);
-//
-// ApplicationAttemptId appAttemptId = Records
-// .newRecord(ApplicationAttemptId.class);
-// appAttemptId.setApplicationId(appId);
-// appAttemptId.setAttemptId(1);
-//
-// // Create submissionContext
-// ApplicationSubmissionContext submissionContext = recordFactory
-// .newRecordInstance(ApplicationSubmissionContext.class);
-// submissionContext.setApplicationId(appId);
-// submissionContext.setUser("dummyuser");
-//
-// RMAppAttempt appAttempt = new RMAppAttemptImpl(appAttemptId,
-// "dummyclienttoken", context, null, submissionContext);
-//
-// // Tell AMLauncher to launch the appAttempt
-// amLauncher.handle(new AMLauncherEvent(AMLauncherEventType.LAUNCH,
-// appAttempt));
-//
-// // Tell AMLauncher to cleanup the appAttempt
-// amLauncher.handle(new AMLauncherEvent(AMLauncherEventType.CLEANUP,
-// appAttempt));
-//
-// synchronized (doneLaunching) {
-// doneLaunching.wait(10000);
-// }
-// Assert.assertEquals(1, launched.get());
-// Assert.assertEquals(1, cleanedUp.get());
-// }
-}
\ No newline at end of file
From 7e0a224a36195558d6ec5085980be12a5d87bc3b Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Fri, 23 Sep 2011 16:18:19 +0000
Subject: [PATCH 27/68] Add "target" to svn:ignore for hadoop-common-project
and hadoop-hdfs-project.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1174862 13f79535-47bb-0310-9956-ffa450edef68
From 83a83d3b733fe18541428aaae2c2923318626e49 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Sat, 24 Sep 2011 06:15:50 +0000
Subject: [PATCH 28/68] HDFS-2356. Support case insensitive query parameter
names in webhdfs.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1175113 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +-
.../hadoop/hdfs/server/common/JspHelper.java | 3 +-
.../web/resources/DatanodeWebHdfsMethods.java | 4 +
.../web/resources/NamenodeWebHdfsMethods.java | 4 +
.../apache/hadoop/hdfs/web/ParamFilter.java | 85 +++++++++++++++++++
.../hadoop/hdfs/web/WebHdfsFileSystem.java | 2 +-
.../hdfs/web/resources/AccessTimeParam.java | 2 +-
.../hdfs/web/resources/BlockSizeParam.java | 2 +-
.../hdfs/web/resources/BufferSizeParam.java | 2 +-
.../hdfs/web/resources/DelegationParam.java | 3 +-
.../hdfs/web/resources/DeleteOpParam.java | 3 -
.../hdfs/web/resources/DstPathParam.java | 2 +-
.../hadoop/hdfs/web/resources/GetOpParam.java | 3 -
.../hdfs/web/resources/HttpOpParam.java | 3 +
.../web/resources/ModificationTimeParam.java | 2 +-
.../hdfs/web/resources/PostOpParam.java | 3 -
.../hadoop/hdfs/web/resources/PutOpParam.java | 3 -
.../web/resources/RenameOptionSetParam.java | 2 +-
.../web/TestWebHdfsFileSystemContract.java | 31 +++++++
19 files changed, 142 insertions(+), 23 deletions(-)
create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ParamFilter.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9ced9e42bb..3dccb99b66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -38,7 +38,11 @@ Trunk (unreleased changes)
not use ArrayWritable for writing non-array items. (Uma Maheswara Rao G
via szetszwo)
- HDFS-2351 Change Namenode and Datanode to register each of their protocols seperately (Sanjay Radia)
+ HDFS-2351 Change Namenode and Datanode to register each of their protocols
+ separately (Sanjay Radia)
+
+ HDFS-2356. Support case insensitive query parameter names in webhdfs.
+ (szetszwo)
BUG FIXES
HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index e2ce26df6b..82ec3bd771 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -54,6 +54,7 @@
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
+import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.http.HtmlQuoting;
import org.apache.hadoop.io.Text;
@@ -68,7 +69,7 @@
public class JspHelper {
public static final String CURRENT_CONF = "current.conf";
final static public String WEB_UGI_PROPERTY_NAME = DFSConfigKeys.DFS_WEB_UGI_KEY;
- public static final String DELEGATION_PARAMETER_NAME = "delegation";
+ public static final String DELEGATION_PARAMETER_NAME = DelegationParam.NAME;
public static final String NAMENODE_ADDRESS = "nnaddr";
static final String SET_DELEGATION = "&" + DELEGATION_PARAMETER_NAME +
"=";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
index 4c5c61aac7..0305024e4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
@@ -50,6 +50,7 @@
import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.web.ParamFilter;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
@@ -66,8 +67,11 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
+import com.sun.jersey.spi.container.ResourceFilters;
+
/** Web-hdfs DataNode implementation. */
@Path("")
+@ResourceFilters(ParamFilter.class)
public class DatanodeWebHdfsMethods {
public static final Log LOG = LogFactory.getLog(DatanodeWebHdfsMethods.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index c72437faf1..2dd1db3341 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -57,6 +57,7 @@
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.web.JsonUtil;
+import org.apache.hadoop.hdfs.web.ParamFilter;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@@ -90,8 +91,11 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
+import com.sun.jersey.spi.container.ResourceFilters;
+
/** Web-hdfs NameNode implementation. */
@Path("")
+@ResourceFilters(ParamFilter.class)
public class NamenodeWebHdfsMethods {
public static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ParamFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ParamFilter.java
new file mode 100644
index 0000000000..687b874767
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ParamFilter.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import java.net.URI;
+import java.util.List;
+import java.util.Map;
+
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.core.UriBuilder;
+
+import com.sun.jersey.spi.container.ContainerRequest;
+import com.sun.jersey.spi.container.ContainerRequestFilter;
+import com.sun.jersey.spi.container.ContainerResponseFilter;
+import com.sun.jersey.spi.container.ResourceFilter;
+
+/**
+ * A filter to convert parameter names to lower case
+ * so that they are treated as case insensitive.
+ */
+public class ParamFilter implements ResourceFilter {
+ private static final ContainerRequestFilter LOWER_CASE
+ = new ContainerRequestFilter() {
+ @Override
+ public ContainerRequest filter(final ContainerRequest request) {
+ final MultivaluedMap<String, String> parameters = request.getQueryParameters();
+ if (containsUpperCase(parameters.keySet())) {
+ //rebuild URI
+ final URI lower = rebuildQuery(request.getRequestUri(), parameters);
+ request.setUris(request.getBaseUri(), lower);
+ }
+ return request;
+ }
+ };
+
+ @Override
+ public ContainerRequestFilter getRequestFilter() {
+ return LOWER_CASE;
+ }
+
+ @Override
+ public ContainerResponseFilter getResponseFilter() {
+ return null;
+ }
+
+ /** Do the strings contain upper case letters? */
+ private static boolean containsUpperCase(final Iterable<String> strings) {
+ for(String s : strings) {
+ for(int i = 0; i < s.length(); i++) {
+ if (Character.isUpperCase(s.charAt(i))) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ /** Rebuild the URI query with lower case parameter names. */
+ private static URI rebuildQuery(final URI uri,
+ final MultivaluedMap<String, String> parameters) {
+ UriBuilder b = UriBuilder.fromUri(uri).replaceQuery("");
+ for(Map.Entry<String, List<String>> e : parameters.entrySet()) {
+ final String key = e.getKey().toLowerCase();
+ for(String v : e.getValue()) {
+ b = b.queryParam(key, v);
+ }
+ }
+ return b.build();
+ }
+}
\ No newline at end of file
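
Note that only the parameter names are folded to lower case; values such as the operation name keep their spelling. The effect on a raw query string can be sketched with plain JDK code (illustrative only, independent of the Jersey types used by the filter):

    public class LowerCaseQueryNames {              // illustrative only, not part of the patch
      /** Lower-case every parameter name in a raw query string, leaving values untouched. */
      static String lowerCaseNames(String query) {
        StringBuilder b = new StringBuilder();
        for (String pair : query.split("&")) {
          if (b.length() > 0) {
            b.append('&');
          }
          int eq = pair.indexOf('=');
          if (eq < 0) {
            b.append(pair.toLowerCase());
          } else {
            b.append(pair.substring(0, eq).toLowerCase()).append(pair.substring(eq));
          }
        }
        return b.toString();
      }

      public static void main(String[] args) {
        // Prints "op=MKDIRS&blocksize=134217728": names are folded, values preserved.
        System.out.println(lowerCaseNames("Op=MKDIRS&blockSize=134217728"));
      }
    }
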
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 061d44bbe4..b2b1fac75e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -172,7 +172,7 @@ private static void validateResponse(final HttpOpParam.Op op,
}
}
- private URL toUrl(final HttpOpParam.Op op, final Path fspath,
+ URL toUrl(final HttpOpParam.Op op, final Path fspath,
final Param<?,?>... parameters) throws IOException {
//initialize URI path and query
final String path = "/" + PATH_PREFIX
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
index 830e5cd32d..8d82131c70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
@@ -20,7 +20,7 @@
/** Access time parameter. */
public class AccessTimeParam extends LongParam {
/** Parameter name. */
- public static final String NAME = "accessTime";
+ public static final String NAME = "accesstime";
/** Default parameter value. */
public static final String DEFAULT = "-1";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
index e50b282f33..9611496807 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
@@ -25,7 +25,7 @@
/** Block size parameter. */
public class BlockSizeParam extends LongParam {
/** Parameter name. */
- public static final String NAME = "blockSize";
+ public static final String NAME = "blocksize";
/** Default parameter value. */
public static final String DEFAULT = NULL;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
index 424e5ba253..148834b102 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
@@ -23,7 +23,7 @@
/** Buffer size parameter. */
public class BufferSizeParam extends IntegerParam {
/** Parameter name. */
- public static final String NAME = "bufferSize";
+ public static final String NAME = "buffersize";
/** Default parameter value. */
public static final String DEFAULT = NULL;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
index 80f0c4b0b3..ad08773ea2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
@@ -17,13 +17,12 @@
*/
package org.apache.hadoop.hdfs.web.resources;
-import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.security.UserGroupInformation;
/** Delegation token parameter. */
public class DelegationParam extends StringParam {
/** Parameter name. */
- public static final String NAME = JspHelper.DELEGATION_PARAMETER_NAME;
+ public static final String NAME = "delegation";
/** Default parameter value. */
public static final String DEFAULT = "";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
index e61e858ee4..12962b4a4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
@@ -21,9 +21,6 @@
/** Http DELETE operation parameter. */
public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
- /** Parameter name. */
- public static final String NAME = "deleteOp";
-
/** Delete operations. */
public static enum Op implements HttpOpParam.Op {
DELETE(HttpURLConnection.HTTP_OK),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java
index 7d522a3877..5fa52456f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java
@@ -22,7 +22,7 @@
/** Destination path parameter. */
public class DstPathParam extends StringParam {
/** Parameter name. */
- public static final String NAME = "dstPath";
+ public static final String NAME = "dstpath";
/** Default parameter value. */
public static final String DEFAULT = "";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index aeb3135404..d547f1b1b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -21,9 +21,6 @@
/** Http GET operation parameter. */
public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
- /** Parameter name. */
- public static final String NAME = "getOp";
-
/** Get operations. */
public static enum Op implements HttpOpParam.Op {
OPEN(HttpURLConnection.HTTP_OK),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
index 644c4032db..422ec0f2f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
@@ -20,6 +20,9 @@
/** Http operation parameter. */
public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
extends EnumParam<E> {
+ /** Parameter name. */
+ public static final String NAME = "op";
+
/** Default parameter value. */
public static final String DEFAULT = NULL;
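
With the per-verb names (getOp, putOp, deleteOp, postOp) being removed throughout this patch, every webhdfs request now carries its operation in the single lower-case op parameter. For example (host, port and the /webhdfs/v1 prefix are typical-deployment assumptions, not defined by this hunk):

    GET http://<namenode>:50070/webhdfs/v1/user/alice/data.txt?op=OPEN
    PUT http://<namenode>:50070/webhdfs/v1/tmp/newdir?op=MKDIRS
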
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
index d43da07328..a0e38a97e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
@@ -20,7 +20,7 @@
/** Modification time parameter. */
public class ModificationTimeParam extends LongParam {
/** Parameter name. */
- public static final String NAME = "modificationTime";
+ public static final String NAME = "modificationtime";
/** Default parameter value. */
public static final String DEFAULT = "-1";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
index 116d6af8b3..b553ecc670 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
@@ -21,9 +21,6 @@
/** Http POST operation parameter. */
public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
- /** Parameter name. */
- public static final String NAME = "postOp";
-
/** Post operations. */
public static enum Op implements HttpOpParam.Op {
APPEND(HttpURLConnection.HTTP_OK),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
index 00703fefbc..dcfaa6f06c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
@@ -21,9 +21,6 @@
/** Http PUT operation parameter. */
public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
- /** Parameter name. */
- public static final String NAME = "putOp";
-
/** Put operations. */
public static enum Op implements HttpOpParam.Op {
CREATE(true, HttpURLConnection.HTTP_CREATED),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
index ec66a51c78..d7c157d508 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
@@ -22,7 +22,7 @@
/** Rename option set parameter. */
public class RenameOptionSetParam extends EnumSetParam<Options.Rename> {
/** Parameter name. */
- public static final String NAME = "renameOptions";
+ public static final String NAME = "renameoptions";
/** Default parameter value. */
public static final String DEFAULT = "";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
index 47ae417430..abe07fc51f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
@@ -18,8 +18,12 @@
package org.apache.hadoop.hdfs.web;
+import java.io.BufferedReader;
import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.HttpURLConnection;
import java.net.URI;
+import java.net.URL;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
@@ -30,6 +34,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
@@ -127,4 +132,30 @@ public void testGetFileBlockLocations() throws IOException {
assertEquals(expected[i].toString(), computed[i].toString());
}
}
+
+ public void testCaseInsensitive() throws IOException {
+ final Path p = new Path("/test/testCaseInsensitive");
+ final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
+ final PutOpParam.Op op = PutOpParam.Op.MKDIRS;
+
+ //replace query with mix case letters
+ final URL url = webhdfs.toUrl(op, p);
+ WebHdfsFileSystem.LOG.info("url = " + url);
+ final URL replaced = new URL(url.toString().replace(op.toQueryString(),
+ "Op=mkDIrs"));
+ WebHdfsFileSystem.LOG.info("replaced = " + replaced);
+
+ //connect with the replaced URL.
+ final HttpURLConnection conn = (HttpURLConnection)replaced.openConnection();
+ conn.setRequestMethod(op.getType().toString());
+ conn.connect();
+ final BufferedReader in = new BufferedReader(new InputStreamReader(
+ conn.getInputStream()));
+ for(String line; (line = in.readLine()) != null; ) {
+ WebHdfsFileSystem.LOG.info("> " + line);
+ }
+
+ //check if the command succeeds.
+ assertTrue(fs.getFileStatus(p).isDirectory());
+ }
}
From 5a6f8e38c044c376ac11b5e4e97a7b06f78d4c80 Mon Sep 17 00:00:00 2001
From: Konstantin Shvachko
Date: Sat, 24 Sep 2011 15:04:34 +0000
Subject: [PATCH 29/68] HDFS-2290. Block with corrupt replica is not getting
replicated. Contributed by Benoy Antony.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1175175 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 +
.../server/blockmanagement/BlockManager.java | 4 +-
.../namenode/TestProcessCorruptBlocks.java | 290 ++++++++++++++++++
3 files changed, 296 insertions(+), 2 deletions(-)
create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3dccb99b66..f39834f0e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1619,7 +1619,11 @@ Release 0.22.0 - Unreleased
HDFS-2232. Generalize regular expressions in TestHDFSCLI.
(Plamen Jeliazkov via shv)
+ HDFS-2290. Block with corrupt replica is not getting replicated.
+ (Benoy Antony via shv)
+
Release 0.21.1 - Unreleased
+
HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
HDFS-874. TestHDFSFileContextMainOperations fails on weirdly
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 682d272922..402e95c3cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -842,7 +842,7 @@ private void markBlockAsCorrupt(BlockInfo storedBlock,
// Add this replica to corruptReplicas Map
corruptReplicas.addToCorruptReplicasMap(storedBlock, node);
- if (countNodes(storedBlock).liveReplicas() > inode.getReplication()) {
+ if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) {
// the block is over-replicated so invalidate the replicas immediately
invalidateBlock(storedBlock, node);
} else if (namesystem.isPopulatingReplQueues()) {
@@ -867,7 +867,7 @@ private void invalidateBlock(Block blk, DatanodeInfo dn)
// Check how many copies we have of the block. If we have at least one
// copy on a live node, then we can delete it.
int count = countNodes(blk).liveReplicas();
- if (count > 1) {
+ if (count >= 1) {
addToInvalidates(blk, dn);
removeStoredBlock(blk, node);
if(NameNode.stateChangeLog.isDebugEnabled()) {
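
The two relaxed comparisons are the whole fix: a corrupt replica may now be invalidated as soon as the live copies alone satisfy the target, and a copy may be dropped whenever at least one live replica survives elsewhere. A small decision sketch with concrete numbers (the helper names are illustrative, not the BlockManager API):

    public class CorruptReplicaPolicy {
      /** Invalidate the corrupt copy right away once live copies meet the target. */
      static boolean invalidateImmediately(int liveReplicas, int replicationFactor) {
        return liveReplicas >= replicationFactor;  // was '>' before HDFS-2290
      }

      /** A copy may be deleted as long as one live replica survives elsewhere. */
      static boolean safeToDelete(int liveReplicas) {
        return liveReplicas >= 1;                  // was '> 1' before HDFS-2290
      }

      public static void main(String[] args) {
        // 2 live + 1 corrupt replica, replication factor 2:
        System.out.println(invalidateImmediately(2, 2)); // true (false before the fix)
        System.out.println(safeToDelete(1));             // true (false before the fix)
      }
    }
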
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
new file mode 100644
index 0000000000..a843962e08
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
+import org.junit.Test;
+
+public class TestProcessCorruptBlocks {
+ /**
+ * The corrupt block has to be removed when the number of valid replicas
+ * matches the replication factor for the file. In this test, the above
+ * condition is exercised by reducing the replication factor.
+ * The test strategy :
+ * Bring up Cluster with 3 DataNodes
+ * Create a file of replication factor 3
+ * Corrupt one replica of a block of the file
+ * Verify that there are still 2 good replicas and 1 corrupt replica
+ * (corrupt replica should not be removed since number of good
+ * replicas (2) is less than replication factor (3))
+ * Set the replication factor to 2
+ * Verify that the corrupt replica is removed.
+ * (corrupt replica should be removed since the number of good
+ * replicas (2) is now equal to the replication factor (2))
+ */
+ @Test
+ public void testWhenDecreasingReplication() throws IOException {
+ Configuration conf = new HdfsConfiguration();
+ conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
+ conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+ FileSystem fs = cluster.getFileSystem();
+ final FSNamesystem namesystem = cluster.getNamesystem();
+
+ try {
+ final Path fileName = new Path("/foo1");
+ DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
+ DFSTestUtil.waitReplication(fs, fileName, (short) 3);
+
+ ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+ corruptBlock(cluster, fs, fileName, 0, block);
+
+ DFSTestUtil.waitReplication(fs, fileName, (short) 2);
+
+ assertEquals(2, countReplicas(namesystem, block).liveReplicas());
+ assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
+
+ namesystem.setReplication(fileName.toString(), (short) 2);
+
+ // wait for 3 seconds so that all block reports are processed.
+ try {
+ Thread.sleep(3000);
+ } catch (InterruptedException ignored) {
+ }
+
+ assertEquals(2, countReplicas(namesystem, block).liveReplicas());
+ assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
+
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * The corrupt block has to be removed when the number of valid replicas
+ * matches the replication factor for the file. In this test, the above
+ * condition is achieved by increasing the number of good replicas by
+ * replicating on a new Datanode.
+ * The test strategy :
+ * Bring up Cluster with 3 DataNodes
+ * Create a file of replication factor 3
+ * Corrupt one replica of a block of the file
+ * Verify that there are still 2 good replicas and 1 corrupt replica
+ * (corrupt replica should not be removed since number of good replicas
+ * (2) is less than replication factor (3))
+ * Start a new data node
+ *      Verify that a new replica is created and the corrupt replica is
+ * removed.
+ *
+ */
+ @Test
+ public void testByAddingAnExtraDataNode() throws IOException {
+ Configuration conf = new HdfsConfiguration();
+ conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
+ conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+ FileSystem fs = cluster.getFileSystem();
+ final FSNamesystem namesystem = cluster.getNamesystem();
+ DataNodeProperties dnPropsFourth = cluster.stopDataNode(3);
+
+ try {
+ final Path fileName = new Path("/foo1");
+ DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
+ DFSTestUtil.waitReplication(fs, fileName, (short) 3);
+
+ ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+ corruptBlock(cluster, fs, fileName, 0, block);
+
+ DFSTestUtil.waitReplication(fs, fileName, (short) 2);
+
+ assertEquals(2, countReplicas(namesystem, block).liveReplicas());
+ assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
+
+ cluster.restartDataNode(dnPropsFourth);
+
+ DFSTestUtil.waitReplication(fs, fileName, (short) 3);
+
+ assertEquals(3, countReplicas(namesystem, block).liveReplicas());
+ assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * The corrupt block has to be removed when the number of valid replicas
+ * matches the replication factor for the file. The above condition should hold
+ * true as long as there is one good replica. This test verifies that.
+ *
+ * The test strategy :
+ * Bring up Cluster with 2 DataNodes
+ * Create a file of replication factor 2
+ * Corrupt one replica of a block of the file
+ *      Verify that there is 1 good replica and 1 corrupt replica
+ * (corrupt replica should not be removed since number of good
+ * replicas (1) is less than replication factor (2)).
+ * Set the replication factor to 1
+ * Verify that the corrupt replica is removed.
+ * (corrupt replica should be removed since number of good
+ * replicas (1) is equal to replication factor (1))
+ */
+ @Test
+ public void testWithReplicationFactorAsOne() throws IOException {
+ Configuration conf = new HdfsConfiguration();
+ conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
+ conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+ FileSystem fs = cluster.getFileSystem();
+ final FSNamesystem namesystem = cluster.getNamesystem();
+
+ try {
+ final Path fileName = new Path("/foo1");
+ DFSTestUtil.createFile(fs, fileName, 2, (short) 2, 0L);
+ DFSTestUtil.waitReplication(fs, fileName, (short) 2);
+
+ ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+ corruptBlock(cluster, fs, fileName, 0, block);
+
+ DFSTestUtil.waitReplication(fs, fileName, (short) 1);
+
+ assertEquals(1, countReplicas(namesystem, block).liveReplicas());
+ assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
+
+ namesystem.setReplication(fileName.toString(), (short) 1);
+
+ // wait for 3 seconds so that all block reports are processed.
+ try {
+ Thread.sleep(3000);
+ } catch (InterruptedException ignored) {
+ }
+
+ assertEquals(1, countReplicas(namesystem, block).liveReplicas());
+ assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
+
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * None of the blocks can be removed if all blocks are corrupt.
+ *
+ * The test strategy :
+ * Bring up Cluster with 3 DataNodes
+ * Create a file of replication factor 3
+ * Corrupt all three replicas
+ * Verify that all replicas are corrupt and 3 replicas are present.
+ * Set the replication factor to 1
+ * Verify that all replicas are corrupt and 3 replicas are present.
+ */
+ @Test
+ public void testWithAllCorruptReplicas() throws IOException {
+ Configuration conf = new HdfsConfiguration();
+ conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
+ conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+ FileSystem fs = cluster.getFileSystem();
+ final FSNamesystem namesystem = cluster.getNamesystem();
+
+ try {
+ final Path fileName = new Path("/foo1");
+ DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
+ DFSTestUtil.waitReplication(fs, fileName, (short) 3);
+
+ ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+ corruptBlock(cluster, fs, fileName, 0, block);
+
+ corruptBlock(cluster, fs, fileName, 1, block);
+
+ corruptBlock(cluster, fs, fileName, 2, block);
+
+ // wait for 3 seconds so that all block reports are processed.
+ try {
+ Thread.sleep(3000);
+ } catch (InterruptedException ignored) {
+ }
+
+ assertEquals(0, countReplicas(namesystem, block).liveReplicas());
+ assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
+
+ namesystem.setReplication(fileName.toString(), (short) 1);
+
+ // wait for 3 seconds so that all block reports are processed.
+ try {
+ Thread.sleep(3000);
+ } catch (InterruptedException ignored) {
+ }
+
+ assertEquals(0, countReplicas(namesystem, block).liveReplicas());
+ assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
+
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ private static NumberReplicas countReplicas(final FSNamesystem namesystem, ExtendedBlock block) {
+ return namesystem.getBlockManager().countNodes(block.getLocalBlock());
+ }
+
+ private void corruptBlock(MiniDFSCluster cluster, FileSystem fs, final Path fileName,
+ int dnIndex, ExtendedBlock block) throws IOException {
+ // Corrupt the block on datanode dnIndex. The indexes change once the
+ // nodes are restarted, but the data directory will not change.
+ assertTrue(MiniDFSCluster.corruptReplica(dnIndex, block));
+
+ DataNodeProperties dnProps = cluster.stopDataNode(0);
+
+ // Each datanode has multiple data dirs, check each
+ for (int dirIndex = 0; dirIndex < 2; dirIndex++) {
+ final String bpid = cluster.getNamesystem().getBlockPoolId();
+ File storageDir = MiniDFSCluster.getStorageDir(dnIndex, dirIndex);
+ File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+ File scanLogFile = new File(dataDir, "dncp_block_verification.log.curr");
+ if (scanLogFile.exists()) {
+ // wait for one minute for deletion to succeed;
+ for (int i = 0; !scanLogFile.delete(); i++) {
+ assertTrue("Could not delete log file in one minute", i < 60);
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException ignored) {
+ }
+ }
+ }
+ }
+
+ // restart the datanode so the corrupt replica will be detected
+ cluster.restartDataNode(dnProps);
+ }
+}
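All four tests share one assertion shape: compare NumberReplicas.liveReplicas() and corruptReplicas() before and after the replication factor or cluster membership changes. A compact helper along those lines (hypothetical, not part of the patch) could read:

    // Hypothetical assertion helper mirroring the checks repeated in the tests above.
    private static void assertReplicaCounts(FSNamesystem ns, ExtendedBlock blk,
                                            int expectedLive, int expectedCorrupt) {
      NumberReplicas counts = ns.getBlockManager().countNodes(blk.getLocalBlock());
      assertEquals(expectedLive, counts.liveReplicas());
      assertEquals(expectedCorrupt, counts.corruptReplicas());
    }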
From 029914af8470ff0e1905273f884cdd69159d532c Mon Sep 17 00:00:00 2001
From: Konstantin Shvachko
Date: Sat, 24 Sep 2011 17:57:08 +0000
Subject: [PATCH 30/68] HADOOP-7457. Move to 0.22 branch section in CHANGES.txt
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1175206 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 26be711214..a52d593ff3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -299,9 +299,6 @@ Release 0.23.0 - Unreleased
HADOOP-7430. Improve error message when moving to trash fails due to
quota issue. (Ravi Prakash via mattf)
- HADOOP-7457. Remove out-of-date Chinese language documentation.
- (Jakob Homan via eli)
-
HADOOP-7444. Add Checksum API to verify and calculate checksums "in bulk"
(todd)
@@ -1140,6 +1137,9 @@ Release 0.22.0 - Unreleased
HADOOP-7568. SequenceFile should not print into stdout.
(Plamen Jeliazkov via shv)
+ HADOOP-7457. Remove out-of-date Chinese language documentation.
+ (Jakob Homan via eli)
+
Release 0.21.1 - Unreleased
IMPROVEMENTS
From e7b63aebcb99f9015a0d9e86a4c5c41995c1a4ec Mon Sep 17 00:00:00 2001
From: Konstantin Shvachko
Date: Sat, 24 Sep 2011 18:00:48 +0000
Subject: [PATCH 31/68] HADOOP-7663. Fix TestHDFSTrash failure. Contributed by
Mayank Bansal.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1175207 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
.../src/test/java/org/apache/hadoop/fs/TestTrash.java | 5 +++--
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a52d593ff3..182344d8d0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1137,6 +1137,8 @@ Release 0.22.0 - Unreleased
HADOOP-7568. SequenceFile should not print into stdout.
(Plamen Jeliazkov via shv)
+ HADOOP-7663. Fix TestHDFSTrash failure. (Mayank Bansal via shv)
+
HADOOP-7457. Remove out-of-date Chinese language documentation.
(Jakob Homan via eli)
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 3d739a07d8..782e4e4167 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -486,6 +486,9 @@ public void testTrashEmptier() throws Exception {
conf.set(FS_TRASH_INTERVAL_KEY, "0.2"); // 12 seconds
conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
conf.set(FS_TRASH_CHECKPOINT_INTERVAL_KEY, "0.1"); // 6 seconds
+ FileSystem fs = FileSystem.getLocal(conf);
+ conf.set("fs.default.name", fs.getUri().toString());
+
Trash trash = new Trash(conf);
// Start Emptier in background
@@ -493,8 +496,6 @@ public void testTrashEmptier() throws Exception {
Thread emptierThread = new Thread(emptier);
emptierThread.start();
- FileSystem fs = FileSystem.getLocal(conf);
- conf.set("fs.defaultFS", fs.getUri().toString());
FsShell shell = new FsShell();
shell.setConf(conf);
shell.init();
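The fix above is an ordering constraint: Trash resolves its FileSystem from the configuration at construction time, so the local file-system URI has to be on the conf before the Trash (and hence the Emptier) is created. A hedged sketch of the order the patch establishes, with the property name taken from the diff:

    // Sketch of the corrected ordering in testTrashEmptier(); not the full test body.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    conf.set("fs.default.name", fs.getUri().toString()); // must precede Trash creation
    Trash trash = new Trash(conf);                        // now bound to the local FS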
From d09ceac1f7a85fce688b20528a1b095a8042bebd Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Sun, 25 Sep 2011 04:43:51 +0000
Subject: [PATCH 32/68] MAPREDUCE-2691. Increase threadpool size for launching
containers in MapReduce ApplicationMaster. Contributed by Vinod K V.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1175294 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 ++
.../app/launcher/ContainerLauncherImpl.java | 35 +++++++++++++++----
.../apache/hadoop/mapreduce/MRJobConfig.java | 12 +++++--
3 files changed, 41 insertions(+), 9 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 1ec852aef3..3060dfdad7 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1406,6 +1406,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3073. Fixed build issues in MR1. (mahadev via acmurthy)
+ MAPREDUCE-2691. Increase threadpool size for launching containers in
+ MapReduce ApplicationMaster. (vinodkv via acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
index 982f7d334a..95e17d8f4f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
@@ -73,6 +73,8 @@ public class ContainerLauncherImpl extends AbstractService implements
private AppContext context;
private ThreadPoolExecutor launcherPool;
+ private static final int INITIAL_POOL_SIZE = 10;
+ private int limitOnPoolSize;
private Thread eventHandlingThread;
private BlockingQueue eventQueue =
new LinkedBlockingQueue();
@@ -96,16 +98,17 @@ public synchronized void init(Configuration conf) {
YarnConfiguration.YARN_SECURITY_INFO,
ContainerManagerSecurityInfo.class, SecurityInfo.class);
this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
+ this.limitOnPoolSize = conf.getInt(
+ MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,
+ MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
super.init(myLocalConfig);
}
public void start() {
- launcherPool =
- new ThreadPoolExecutor(getConfig().getInt(
- MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT, 10),
- Integer.MAX_VALUE, 1, TimeUnit.HOURS,
- new LinkedBlockingQueue());
- launcherPool.prestartAllCoreThreads(); // Wait for work.
+ // Start with a default core-pool size of 10 and change it dynamically.
+ launcherPool = new ThreadPoolExecutor(INITIAL_POOL_SIZE,
+ Integer.MAX_VALUE, 1, TimeUnit.HOURS,
+ new LinkedBlockingQueue());
eventHandlingThread = new Thread(new Runnable() {
@Override
public void run() {
@@ -117,6 +120,26 @@ public void run() {
LOG.error("Returning, interrupted : " + e);
return;
}
+
+ int poolSize = launcherPool.getCorePoolSize();
+
+ // See if we need to bump up the pool size, but only if we haven't
+ // reached the maximum limit yet.
+ if (poolSize != limitOnPoolSize) {
+
+ // nodes where containers will run at *this* point of time. This is
+ // *not* the cluster size and doesn't need to be.
+ int numNodes = ugiMap.size();
+ int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
+
+ if (poolSize <= idealPoolSize) {
+ // Bump up the pool size to idealPoolSize+INITIAL_POOL_SIZE; the
+ // latter is just a buffer so we are not always increasing the
+ // pool size.
+ launcherPool.setCorePoolSize(idealPoolSize + INITIAL_POOL_SIZE);
+ }
+ }
+
// the events from the queue are handled in parallel
// using a thread pool
launcherPool.execute(new EventProcessor(event));
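Taken on its own, the resizing policy above grows the executor's core pool toward min(limitOnPoolSize, nodes seen so far) plus a fixed buffer of INITIAL_POOL_SIZE, while Integer.MAX_VALUE remains the hard maximum. A self-contained sketch of the same rule; growPool() is an illustrative wrapper, not a method added by the patch:

    // Illustrative wrapper around the resizing rule; the buffer of 10 mirrors INITIAL_POOL_SIZE.
    static void growPool(java.util.concurrent.ThreadPoolExecutor launcherPool,
                         int limitOnPoolSize, int numNodes) {
      int poolSize = launcherPool.getCorePoolSize();
      if (poolSize != limitOnPoolSize) {
        int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
        if (poolSize <= idealPoolSize) {
          // Head-room so the core size is not bumped on every single event.
          launcherPool.setCorePoolSize(idealPoolSize + 10);
        }
      }
    }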
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index c456d52deb..accfdddc3d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -332,9 +332,15 @@ public interface MRJobConfig {
MR_AM_PREFIX+"num-progress-splits";
public static final int DEFAULT_MR_AM_NUM_PROGRESS_SPLITS = 12;
- /** Number of threads user to launch containers in the app master.*/
- public static final String MR_AM_CONTAINERLAUNCHER_THREAD_COUNT =
- MR_AM_PREFIX+"containerlauncher.thread-count";
+ /**
+ * Upper limit on the number of threads used to launch containers in the app
+ * master. Expert-level config; you shouldn't need it in most cases.
+ */
+ public static final String MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT =
+ MR_AM_PREFIX+"containerlauncher.thread-count-limit";
+
+ public static final int DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT =
+ 500;
/** Number of threads to handle job client RPC requests.*/
public static final String MR_AM_JOB_CLIENT_THREAD_COUNT =
From 5ace0cabe5b88bc4f9d807e01181647d0a28db92 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Sun, 25 Sep 2011 09:36:12 +0000
Subject: [PATCH 33/68] MAPREDUCE-2990. Fixed display of NodeHealthStatus.
Contributed by Subroto Sanyal.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1175351 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../resourcemanager/rmnode/RMNodeImpl.java | 23 ++++++++++++++++---
.../resourcemanager/TestResourceManager.java | 21 +++++++++++++++++
3 files changed, 44 insertions(+), 3 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 3060dfdad7..eee8aa68aa 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1409,6 +1409,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2691. Increase threadpool size for launching containers in
MapReduce ApplicationMaster. (vinodkv via acmurthy)
+ MAPREDUCE-2990. Fixed display of NodeHealthStatus. (Subroto Sanyal via
+ acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index dd8d7f840f..81de047bc0 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -147,6 +147,7 @@ public RMNodeImpl(NodeId nodeId, RMContext context, String hostName,
this.httpAddress = hostName + ":" + httpPort;;
this.node = node;
this.nodeHealthStatus.setIsNodeHealthy(true);
+ this.nodeHealthStatus.setHealthReport("Healthy");
this.nodeHealthStatus.setLastHealthReportTime(System.currentTimeMillis());
this.latestHeartBeatResponse.setResponseId(0);
@@ -222,6 +223,18 @@ public NodeHealthStatus getNodeHealthStatus() {
}
}
+ private void setNodeHealthStatus(NodeHealthStatus status)
+ {
+ this.writeLock.lock();
+ try {
+ this.nodeHealthStatus.setHealthReport(status.getHealthReport());
+ this.nodeHealthStatus.setIsNodeHealthy(status.getIsNodeHealthy());
+ this.nodeHealthStatus.setLastHealthReportTime(status.getLastHealthReportTime());
+ } finally {
+ this.writeLock.unlock();
+ }
+ }
+
@Override
public RMNodeState getState() {
this.readLock.lock();
@@ -345,7 +358,10 @@ public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
// Switch the last heartbeatresponse.
rmNode.latestHeartBeatResponse = statusEvent.getLatestResponse();
- if (!statusEvent.getNodeHealthStatus().getIsNodeHealthy()) {
+ NodeHealthStatus remoteNodeHealthStatus =
+ statusEvent.getNodeHealthStatus();
+ rmNode.setNodeHealthStatus(remoteNodeHealthStatus);
+ if (!remoteNodeHealthStatus.getIsNodeHealthy()) {
// Inform the scheduler
rmNode.context.getDispatcher().getEventHandler().handle(
new NodeRemovedSchedulerEvent(rmNode));
@@ -392,8 +408,9 @@ public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
// Switch the last heartbeatresponse.
rmNode.latestHeartBeatResponse = statusEvent.getLatestResponse();
-
- if (statusEvent.getNodeHealthStatus().getIsNodeHealthy()) {
+ NodeHealthStatus remoteNodeHealthStatus = statusEvent.getNodeHealthStatus();
+ rmNode.setNodeHealthStatus(remoteNodeHealthStatus);
+ if (remoteNodeHealthStatus.getIsNodeHealthy()) {
rmNode.context.getDispatcher().getEventHandler().handle(
new NodeAddedSchedulerEvent(rmNode));
return RMNodeState.RUNNING;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
index 60a227bc6d..536aa672d7 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
@@ -18,12 +18,16 @@
package org.apache.hadoop.yarn.server.resourcemanager;
+import static org.junit.Assert.assertNotNull;
+
import java.io.IOException;
+import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
@@ -153,6 +157,23 @@ public void testResourceAllocation() throws IOException {
LOG.info("--- END: testResourceAllocation ---");
}
+
+ @Test
+ public void testNodeHealthReportIsNotNull() throws Exception{
+ String host1 = "host1";
+ final int memory = 4 * 1024;
+ org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm1 =
+ registerNode(host1, 1234, 2345, NetworkTopology.DEFAULT_RACK, memory);
+ nm1.heartbeat();
+ nm1.heartbeat();
+ Collection<RMNode> values = resourceManager.getRMContext().getRMNodes().values();
+ for (RMNode ni : values)
+ {
+ NodeHealthStatus nodeHealthStatus = ni.getNodeHealthStatus();
+ String healthReport = nodeHealthStatus.getHealthReport();
+ assertNotNull(healthReport);
+ }
+ }
private void checkResourceUsage(
org.apache.hadoop.yarn.server.resourcemanager.NodeManager... nodes ) {
From c285cf3114ab970bf6ae7f0e5eec993feff9d4c8 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Sun, 25 Sep 2011 09:50:02 +0000
Subject: [PATCH 34/68] MAPREDUCE-3053. Better diagnostic message for unknown
methods in ProtoBuf RPCs. Contributed by Vinod K V.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1175357 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 ++
.../yarn/ipc/ProtoOverHadoopRpcEngine.java | 6 ++++
.../java/org/apache/hadoop/yarn/TestRPC.java | 32 +++++++++++++++++++
3 files changed, 41 insertions(+)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index eee8aa68aa..7166eaa25e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1412,6 +1412,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2990. Fixed display of NodeHealthStatus. (Subroto Sanyal via
acmurthy)
+ MAPREDUCE-3053. Better diagnostic message for unknown methods in ProtoBuf
+ RPCs. (vinodkv via acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java
index 9a623a1a8a..9d8b846a3b 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java
@@ -320,6 +320,12 @@ public Writable call(String protocol, Writable writableRequest,
+ methodName);
MethodDescriptor methodDescriptor = service.getDescriptorForType()
.findMethodByName(methodName);
+ if (methodDescriptor == null) {
+ String msg = "Unknown method " + methodName + " called on "
+ + protocol + " protocol.";
+ LOG.warn(msg);
+ return handleException(new IOException(msg));
+ }
Message prototype = service.getRequestPrototype(methodDescriptor);
Message param = prototype.newBuilderForType()
.mergeFrom(rpcRequest.getRequestProto()).build();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
index 58efcc4230..332d044158 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
@@ -25,9 +25,11 @@
import org.apache.avro.ipc.Server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.yarn.api.ClientRMProtocol;
import org.apache.hadoop.yarn.api.ContainerManager;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
@@ -47,6 +49,7 @@
import org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.util.Records;
import org.junit.Test;
public class TestRPC {
@@ -65,6 +68,35 @@ public class TestRPC {
// test(HadoopYarnRPC.class.getName());
// }
+ @Test
+ public void testUnknownCall() {
+ Configuration conf = new Configuration();
+ conf.set(YarnConfiguration.IPC_RPC_IMPL, HadoopYarnProtoRPC.class
+ .getName());
+ YarnRPC rpc = YarnRPC.create(conf);
+ String bindAddr = "localhost:0";
+ InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
+ Server server = rpc.getServer(ContainerManager.class,
+ new DummyContainerManager(), addr, conf, null, 1);
+ server.start();
+
+ // Any unrelated protocol would do
+ ClientRMProtocol proxy = (ClientRMProtocol) rpc.getProxy(
+ ClientRMProtocol.class, NetUtils.createSocketAddr("localhost:"
+ + server.getPort()), conf);
+
+ try {
+ proxy.getNewApplicationId(Records
+ .newRecord(GetNewApplicationIdRequest.class));
+ Assert.fail("Expected RPC call to fail with unknown method.");
+ } catch (YarnRemoteException e) {
+ Assert.assertEquals("Unknown method getNewApplicationId called on "
+ + "org.apache.hadoop.yarn.proto.ClientRMProtocol"
+ + "$ClientRMProtocolService$BlockingInterface protocol.", e
+ .getMessage());
+ }
+ }
+
@Test
public void testHadoopProtoRPC() throws Exception {
test(HadoopYarnProtoRPC.class.getName());
From a5c9ede1433871fcf4e2e802ee2a65950ecd1e72 Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Sun, 25 Sep 2011 14:46:59 +0000
Subject: [PATCH 35/68] MAPREDUCE-2952. Fixed ResourceManager/MR-client to
consume diagnostics for AM failures in a couple of corner cases. Contributed
by Arun C Murthy.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1175403 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../hadoop/mapreduce/TypeConverter.java | 18 +-
.../mapreduce/v2/api/records/JobReport.java | 4 +
.../api/records/impl/pb/JobReportPBImpl.java | 24 ++
.../src/main/proto/mr_protos.proto | 2 +
.../org/apache/hadoop/mapred/JobStatus.java | 4 +
.../java/org/apache/hadoop/mapreduce/Job.java | 3 +-
.../apache/hadoop/mapreduce/JobStatus.java | 18 +
.../hadoop/mapred/ClientServiceDelegate.java | 34 +-
.../apache/hadoop/mapred/NotRunningJob.java | 45 +-
.../hadoop/mapred/TestClientRedirect.java | 7 +
.../mapred/TestClientServiceDelegate.java | 9 +-
.../dev-support/findbugs-exclude.xml | 4 +
.../yarn/api/records/ApplicationReport.java | 12 +
.../impl/pb/ApplicationReportPBImpl.java | 36 +-
.../src/main/proto/yarn_protos.proto | 1 +
.../apache/hadoop/yarn/util/BuilderUtils.java | 3 +-
.../java/org/apache/hadoop/yarn/MockApps.java | 10 +
.../server/resourcemanager/RMContextImpl.java | 2 -
.../resourcemanager/ResourceManager.java | 4 +
.../amlauncher/ApplicationMasterLauncher.java | 7 +-
.../server/resourcemanager/rmapp/RMApp.java | 1 -
.../rmapp/RMAppFailedAttemptEvent.java | 36 ++
.../resourcemanager/rmapp/RMAppImpl.java | 9 +-
.../rmapp/attempt/RMAppAttempt.java | 2 +-
.../rmapp/attempt/RMAppAttemptImpl.java | 102 +++--
.../resourcetracker/InlineDispatcher.java | 4 +-
.../rmapp/TestRMAppTransitions.java | 109 +++--
.../attempt/TestRMAppAttemptTransitions.java | 403 ++++++++++++++++++
29 files changed, 798 insertions(+), 118 deletions(-)
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppFailedAttemptEvent.java
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 7166eaa25e..39f313ec61 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1415,6 +1415,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3053. Better diagnostic message for unknown methods in ProtoBuf
RPCs. (vinodkv via acmurthy)
+ MAPREDUCE-2952. Fixed ResourceManager/MR-client to consume diagnostics
+ for AM failures in a couple of corner cases. (Arun C Murthy via vinodkv)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
index 9cbc9ad6d4..9f221e6354 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
@@ -281,14 +281,17 @@ public static Counters toYarn(org.apache.hadoop.mapreduce.Counters counters) {
}
public static org.apache.hadoop.mapred.JobStatus fromYarn(
- JobReport jobreport, String jobFile, String trackingUrl) {
+ JobReport jobreport, String jobFile) {
JobPriority jobPriority = JobPriority.NORMAL;
- return new org.apache.hadoop.mapred.JobStatus(fromYarn(jobreport.getJobId()),
- jobreport.getSetupProgress(), jobreport.getMapProgress(),
- jobreport.getReduceProgress(), jobreport.getCleanupProgress(),
- fromYarn(jobreport.getJobState()),
- jobPriority, jobreport.getUser(), jobreport.getJobName(),
- jobFile, trackingUrl);
+ org.apache.hadoop.mapred.JobStatus jobStatus =
+ new org.apache.hadoop.mapred.JobStatus(fromYarn(jobreport.getJobId()),
+ jobreport.getSetupProgress(), jobreport.getMapProgress(),
+ jobreport.getReduceProgress(), jobreport.getCleanupProgress(),
+ fromYarn(jobreport.getJobState()),
+ jobPriority, jobreport.getUser(), jobreport.getJobName(),
+ jobFile, jobreport.getTrackingUrl());
+ jobStatus.setFailureInfo(jobreport.getDiagnostics());
+ return jobStatus;
}
public static org.apache.hadoop.mapreduce.QueueState fromYarn(
@@ -422,6 +425,7 @@ public static JobStatus fromYarn(ApplicationReport application,
);
jobStatus.setSchedulingInfo(trackingUrl); // Set AM tracking url
jobStatus.setStartTime(application.getStartTime());
+ jobStatus.setFailureInfo(application.getDiagnostics());
return jobStatus;
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
index fb585e8dd2..0bfc9db3ed 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
@@ -29,6 +29,8 @@ public interface JobReport {
public abstract long getFinishTime();
public abstract String getUser();
public abstract String getJobName();
+ public abstract String getTrackingUrl();
+ public abstract String getDiagnostics();
public abstract void setJobId(JobId jobId);
public abstract void setJobState(JobState jobState);
@@ -40,4 +42,6 @@ public interface JobReport {
public abstract void setFinishTime(long finishTime);
public abstract void setUser(String user);
public abstract void setJobName(String jobName);
+ public abstract void setTrackingUrl(String trackingUrl);
+ public abstract void setDiagnostics(String diagnostics);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
index a4033e695f..c5d2527a9d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
@@ -206,6 +206,30 @@ public void setJobName(String jobName) {
builder.setJobName((jobName));
}
+ @Override
+ public String getTrackingUrl() {
+ JobReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getTrackingUrl());
+ }
+
+ @Override
+ public void setTrackingUrl(String trackingUrl) {
+ maybeInitBuilder();
+ builder.setTrackingUrl(trackingUrl);
+ }
+
+ @Override
+ public String getDiagnostics() {
+ JobReportProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getDiagnostics();
+ }
+
+ @Override
+ public void setDiagnostics(String diagnostics) {
+ maybeInitBuilder();
+ builder.setDiagnostics(diagnostics);
+ }
+
private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
return new JobIdPBImpl(p);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
index 7d8d1b2e0b..29184da486 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
@@ -143,6 +143,8 @@ message JobReportProto {
optional int64 finish_time = 8;
optional string user = 9;
optional string jobName = 10;
+ optional string trackingUrl = 11;
+ optional string diagnostics = 12;
}
enum TaskAttemptCompletionEventStatusProto {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobStatus.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobStatus.java
index 90b68872ff..e5add2139f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobStatus.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobStatus.java
@@ -321,6 +321,10 @@ protected synchronized void setJobACLs(Map acls) {
super.setJobACLs(acls);
}
+ public synchronized void setFailureInfo(String failureInfo) {
+ super.setFailureInfo(failureInfo);
+ }
+
/**
* Set the priority of the job, defaulting to NORMAL.
* @param jp new job priority
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index c30216e066..f616df80b8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -1239,7 +1239,8 @@ public boolean monitorAndPrintJob()
if (success) {
LOG.info("Job " + jobId + " completed successfully");
} else {
- LOG.info("Job " + jobId + " failed with state " + status.getState());
+ LOG.info("Job " + jobId + " failed with state " + status.getState() +
+ " due to: " + status.getFailureInfo());
}
Counters counters = getCounters();
if (counters != null) {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
index 9e438989cf..6f57f1733a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
@@ -81,6 +81,7 @@ public int getValue() {
private String queue;
private JobPriority priority;
private String schedulingInfo="NA";
+ private String failureInfo = "NA";
private Map jobACLs =
new HashMap();
@@ -278,6 +279,14 @@ protected synchronized void setQueue(String queue) {
this.queue = queue;
}
+ /**
+ * Set diagnostic information.
+ * @param failureInfo diagnostic information
+ */
+ protected synchronized void setFailureInfo(String failureInfo) {
+ this.failureInfo = failureInfo;
+ }
+
/**
* Get queue name
* @return queue name
@@ -359,6 +368,15 @@ public synchronized Map getJobACLs() {
*/
public synchronized JobPriority getPriority() { return priority; }
+ /**
+ * Gets any available info on the reason of failure of the job.
+ * @return diagnostic information on why a job might have failed.
+ */
+ public synchronized String getFailureInfo() {
+ return this.failureInfo;
+ }
+
+
/**
* Returns true if the status is for a completed job.
*/
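With getFailureInfo() on the new-API JobStatus, a client can surface AM diagnostics directly when a job ends badly. A hedged usage sketch, assuming an already-submitted org.apache.hadoop.mapreduce.Job instance named job:

    // Usage sketch only; 'job' is an assumed, already-submitted Job.
    JobStatus status = job.getStatus();
    if (status.getState() == JobStatus.State.FAILED) {
      System.err.println("Job failed: " + status.getFailureInfo());
    }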
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 605c44e5ed..429d350c5a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
@@ -101,16 +101,20 @@ class ClientServiceDelegate {
// Get the instance of the NotRunningJob corresponding to the specified
// user and state
- private NotRunningJob getNotRunningJob(String user, JobState state) {
+ private NotRunningJob getNotRunningJob(ApplicationReport applicationReport,
+ JobState state) {
synchronized (notRunningJobs) {
HashMap map = notRunningJobs.get(state);
if (map == null) {
map = new HashMap();
notRunningJobs.put(state, map);
}
+ String user =
+ (applicationReport == null) ?
+ UNKNOWN_USER : applicationReport.getUser();
NotRunningJob notRunningJob = map.get(user);
if (notRunningJob == null) {
- notRunningJob = new NotRunningJob(user, state);
+ notRunningJob = new NotRunningJob(applicationReport, state);
map.put(user, notRunningJob);
}
return notRunningJob;
@@ -130,7 +134,7 @@ private MRClientProtocol getProxy() throws YarnRemoteException {
if (application == null) {
LOG.info("Could not get Job info from RM for job " + jobId
+ ". Redirecting to job history server.");
- return checkAndGetHSProxy(UNKNOWN_USER, JobState.NEW);
+ return checkAndGetHSProxy(null, JobState.NEW);
}
try {
if (application.getHost() == null || "".equals(application.getHost())) {
@@ -171,7 +175,7 @@ private MRClientProtocol getProxy() throws YarnRemoteException {
if (application == null) {
LOG.info("Could not get Job info from RM for job " + jobId
+ ". Redirecting to job history server.");
- return checkAndGetHSProxy(UNKNOWN_USER, JobState.RUNNING);
+ return checkAndGetHSProxy(null, JobState.RUNNING);
}
} catch (InterruptedException e) {
LOG.warn("getProxy() call interruped", e);
@@ -191,17 +195,17 @@ private MRClientProtocol getProxy() throws YarnRemoteException {
if (application.getState() == ApplicationState.NEW ||
application.getState() == ApplicationState.SUBMITTED) {
realProxy = null;
- return getNotRunningJob(user, JobState.NEW);
+ return getNotRunningJob(application, JobState.NEW);
}
if (application.getState() == ApplicationState.FAILED) {
realProxy = null;
- return getNotRunningJob(user, JobState.FAILED);
+ return getNotRunningJob(application, JobState.FAILED);
}
if (application.getState() == ApplicationState.KILLED) {
realProxy = null;
- return getNotRunningJob(user, JobState.KILLED);
+ return getNotRunningJob(application, JobState.KILLED);
}
//History server can serve a job only if application
@@ -209,15 +213,16 @@ private MRClientProtocol getProxy() throws YarnRemoteException {
if (application.getState() == ApplicationState.SUCCEEDED) {
LOG.info("Application state is completed. " +
"Redirecting to job history server");
- realProxy = checkAndGetHSProxy(user, JobState.SUCCEEDED);
+ realProxy = checkAndGetHSProxy(application, JobState.SUCCEEDED);
}
return realProxy;
}
- private MRClientProtocol checkAndGetHSProxy(String user, JobState state) {
+ private MRClientProtocol checkAndGetHSProxy(
+ ApplicationReport applicationReport, JobState state) {
if (null == historyServerProxy) {
LOG.warn("Job History Server is not configured.");
- return getNotRunningJob(user, state);
+ return getNotRunningJob(applicationReport, state);
}
return historyServerProxy;
}
@@ -324,21 +329,22 @@ String[] getTaskDiagnostics(org.apache.hadoop.mapreduce.TaskAttemptID arg0)
JobStatus getJobStatus(JobID oldJobID) throws YarnRemoteException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
TypeConverter.toYarn(oldJobID);
- GetJobReportRequest request = recordFactory.newRecordInstance(GetJobReportRequest.class);
+ GetJobReportRequest request =
+ recordFactory.newRecordInstance(GetJobReportRequest.class);
request.setJobId(jobId);
JobReport report = ((GetJobReportResponse) invoke("getJobReport",
GetJobReportRequest.class, request)).getJobReport();
String jobFile = MRApps.getJobFile(conf, report.getUser(), oldJobID);
- //TODO: add tracking url in JobReport
- return TypeConverter.fromYarn(report, jobFile, "");
+ return TypeConverter.fromYarn(report, jobFile);
}
org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
throws YarnRemoteException, YarnRemoteException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
TypeConverter.toYarn(oldJobID);
- GetTaskReportsRequest request = recordFactory.newRecordInstance(GetTaskReportsRequest.class);
+ GetTaskReportsRequest request =
+ recordFactory.newRecordInstance(GetTaskReportsRequest.class);
request.setJobId(jobId);
request.setTaskType(TypeConverter.toYarn(taskType));
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
index a40fcedda3..17ad9f62aa 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
@@ -22,6 +22,8 @@
import java.util.HashMap;
import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
@@ -53,20 +55,41 @@
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
public class NotRunningJob implements MRClientProtocol {
+ private static final Log LOG = LogFactory.getLog(NotRunningJob.class);
+
private RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
private final JobState jobState;
- private final String user;
-
- NotRunningJob(String username, JobState jobState) {
- this.user = username;
+ private final ApplicationReport applicationReport;
+
+
+ private ApplicationReport getUnknownApplicationReport() {
+ ApplicationReport unknown =
+ recordFactory.newRecordInstance(ApplicationReport.class);
+ unknown.setUser("N/A");
+ unknown.setHost("N/A");
+ unknown.setName("N/A");
+ unknown.setQueue("N/A");
+ unknown.setStartTime(0);
+ unknown.setFinishTime(0);
+ unknown.setTrackingUrl("N/A");
+ unknown.setDiagnostics("N/A");
+ LOG.info("getUnknownApplicationReport");
+ return unknown;
+ }
+
+ NotRunningJob(ApplicationReport applicationReport, JobState jobState) {
+ this.applicationReport =
+ (applicationReport == null) ?
+ getUnknownApplicationReport() : applicationReport;
this.jobState = jobState;
}
@@ -101,15 +124,19 @@ public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request)
@Override
public GetJobReportResponse getJobReport(GetJobReportRequest request)
throws YarnRemoteException {
- GetJobReportResponse resp =
- recordFactory.newRecordInstance(GetJobReportResponse.class);
JobReport jobReport =
recordFactory.newRecordInstance(JobReport.class);
jobReport.setJobId(request.getJobId());
- jobReport.setJobState(this.jobState);
+ jobReport.setJobState(jobState);
+ jobReport.setUser(applicationReport.getUser());
+ jobReport.setStartTime(applicationReport.getStartTime());
+ jobReport.setDiagnostics(applicationReport.getDiagnostics());
+ jobReport.setJobName(applicationReport.getName());
+ jobReport.setTrackingUrl(applicationReport.getTrackingUrl());
+ jobReport.setFinishTime(applicationReport.getFinishTime());
- jobReport.setUser(this.user);
- // TODO: Add jobName & other job information that is available
+ GetJobReportResponse resp =
+ recordFactory.newRecordInstance(GetJobReportResponse.class);
resp.setJobReport(jobReport);
return resp;
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index e2cb1e05ea..12b1c2cc9c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ -267,6 +267,13 @@ public GetApplicationReportResponse getApplicationReport(
application.setHost(split[0]);
application.setRpcPort(Integer.parseInt(split[1]));
application.setUser("TestClientRedirect-user");
+ application.setName("N/A");
+ application.setQueue("N/A");
+ application.setStartTime(0);
+ application.setFinishTime(0);
+ application.setTrackingUrl("N/A");
+ application.setDiagnostics("N/A");
+
GetApplicationReportResponse response = recordFactory
.newRecordInstance(GetApplicationReportResponse.class);
response.setApplicationReport(application);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
index b7fd6c9475..5b07d4997d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
@@ -109,7 +109,7 @@ public void testHistoryServerNotConfigured() throws Exception {
ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(
null, getRMDelegate());
JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
- Assert.assertEquals("Unknown User", jobStatus.getUsername());
+ Assert.assertEquals("N/A", jobStatus.getUsername());
Assert.assertEquals(JobStatus.State.PREP, jobStatus.getState());
//RM has app report and job History Server is not configured
@@ -145,6 +145,13 @@ private ApplicationReport getApplicationReport() {
.newRecord(ApplicationReport.class);
applicationReport.setState(ApplicationState.SUCCEEDED);
applicationReport.setUser("root");
+ applicationReport.setHost("N/A");
+ applicationReport.setName("N/A");
+ applicationReport.setQueue("N/A");
+ applicationReport.setStartTime(0);
+ applicationReport.setFinishTime(0);
+ applicationReport.setTrackingUrl("N/A");
+ applicationReport.setDiagnostics("N/A");
return applicationReport;
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-mapreduce-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 219fd1eb57..7e34ff5487 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -49,6 +49,10 @@
+
+
+
+
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
index ca7a6f415a..ffb920d5b9 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
@@ -186,4 +186,16 @@ public interface ApplicationReport {
@Private
@Unstable
void setStartTime(long startTime);
+
+ /**
+ * Get the finish time of the application.
+ * @return finish time of the application
+ */
+ @Public
+ @Stable
+ long getFinishTime();
+
+ @Private
+ @Unstable
+ void setFinishTime(long finishTime);
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
index b1e80fc759..2ea2ddbcdb 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
@@ -240,6 +240,30 @@ public ApplicationReportProto getProto() {
return proto;
}
+ @Override
+ public long getStartTime() {
+ ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getStartTime();
+ }
+
+ @Override
+ public void setStartTime(long startTime) {
+ maybeInitBuilder();
+ builder.setStartTime(startTime);
+ }
+
+ @Override
+ public long getFinishTime() {
+ ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getFinishTime();
+ }
+
+ @Override
+ public void setFinishTime(long finishTime) {
+ maybeInitBuilder();
+ builder.setFinishTime(finishTime);
+ }
+
private void mergeLocalToBuilder() {
if (this.applicationId != null
&& !((ApplicationIdPBImpl) this.applicationId).getProto().equals(
@@ -279,16 +303,4 @@ private ApplicationIdPBImpl convertFromProtoFormat(
ApplicationIdProto applicationId) {
return new ApplicationIdPBImpl(applicationId);
}
-
- @Override
- public long getStartTime() {
- ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
- return p.getStartTime();
- }
-
- @Override
- public void setStartTime(long startTime) {
- maybeInitBuilder();
- builder.setStartTime(startTime);
- }
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index cdcd1a747b..cd29a9431c 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -140,6 +140,7 @@ message ApplicationReportProto {
optional string trackingUrl = 11;
optional string diagnostics = 12 [default = "N/A"];
optional int64 startTime = 13;
+ optional int64 finishTime = 14;
}
message NodeIdProto {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
index 4eb63c0447..2caafdc19a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
@@ -242,7 +242,7 @@ public static ResourceRequest newResourceRequest(ResourceRequest r) {
public static ApplicationReport newApplicationReport(
ApplicationId applicationId, String user, String queue, String name,
String host, int rpcPort, String clientToken, ApplicationState state,
- String diagnostics, String url, long startTime) {
+ String diagnostics, String url, long startTime, long finishTime) {
ApplicationReport report = recordFactory
.newRecordInstance(ApplicationReport.class);
report.setApplicationId(applicationId);
@@ -256,6 +256,7 @@ public static ApplicationReport newApplicationReport(
report.setDiagnostics(diagnostics);
report.setTrackingUrl(url);
report.setStartTime(startTime);
+ report.setFinishTime(finishTime);
return report;
}
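
A hedged sketch of a call site adapted to the widened factory signature; the
queue, host, port, token and URL values are placeholders, and
ApplicationState.RUNNING is only an example state.

    static ApplicationReport exampleReport(ApplicationId appId, long startTime) {
      return BuilderUtils.newApplicationReport(
          appId, "user", "default", "sleep-job",
          "host.example.com", 4344, "clientToken",
          ApplicationState.RUNNING, "", "http://host.example.com:8088/proxy/",
          startTime, 0L /* finishTime: still running */);
    }
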
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java
index 65f6c548fb..7d233e2d9f 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java
@@ -167,6 +167,16 @@ public void setStartTime(long startTime) {
// TODO Auto-generated method stub
}
+ @Override
+ public long getFinishTime() {
+ // TODO Auto-generated method stub
+ return 0;
+ }
+ @Override
+ public void setFinishTime(long finishTime) {
+ // TODO Auto-generated method stub
+
+ }
};
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
index 85cd8825da..997906a62e 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
@@ -22,7 +22,6 @@
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore;
@@ -31,7 +30,6 @@
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
-import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
public class RMContextImpl implements RMContext {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 179b56a4af..8bd45dff4d 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
@@ -186,6 +187,9 @@ public synchronized void init(Configuration conf) {
addService(adminService);
this.applicationMasterLauncher = createAMLauncher();
+ this.rmDispatcher.register(AMLauncherEventType.class,
+ this.applicationMasterLauncher);
+
addService(applicationMasterLauncher);
super.init(conf);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
index 67f0c8a016..a25a4312b1 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
@@ -46,13 +46,12 @@ public class ApplicationMasterLauncher extends AbstractService implements
private ClientToAMSecretManager clientToAMSecretManager;
protected final RMContext context;
- public ApplicationMasterLauncher(ApplicationTokenSecretManager
- applicationTokenSecretManager, ClientToAMSecretManager clientToAMSecretManager,
+ public ApplicationMasterLauncher(
+ ApplicationTokenSecretManager applicationTokenSecretManager,
+ ClientToAMSecretManager clientToAMSecretManager,
RMContext context) {
super(ApplicationMasterLauncher.class.getName());
this.context = context;
- /* register to dispatcher */
- this.context.getDispatcher().register(AMLauncherEventType.class, this);
this.launcherPool = new ThreadPoolExecutor(1, 10, 1,
TimeUnit.HOURS, new LinkedBlockingQueue());
this.launcherHandlingThread = new LauncherThread();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
index 2e739a98b9..484a7a38ba 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
@@ -24,7 +24,6 @@
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppFailedAttemptEvent.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppFailedAttemptEvent.java
new file mode 100644
index 0000000000..111c6acc41
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppFailedAttemptEvent.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+public class RMAppFailedAttemptEvent extends RMAppEvent {
+
+ private final String diagnostics;
+
+ public RMAppFailedAttemptEvent(ApplicationId appId, RMAppEventType event,
+ String diagnostics) {
+ super(appId, event);
+ this.diagnostics = diagnostics;
+ }
+
+ public String getDiagnostics() {
+ return this.diagnostics;
+ }
+}
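
A sketch of the intended usage, assuming the attempt implementation forwards
its diagnostics to the application when it fails; the eventHandler variable
and the diagnostic text are illustrative, not the patch's exact code.

    eventHandler.handle(new RMAppFailedAttemptEvent(
        applicationAttemptId.getApplicationId(),
        RMAppEventType.ATTEMPT_FAILED,
        "AM container exited with a non-zero exit code"));
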
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 9246d1838c..838dfc0b08 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -310,7 +310,8 @@ public ApplicationReport createAndGetApplicationReport() {
return BuilderUtils.newApplicationReport(this.applicationId, this.user,
this.queue, this.name, host, rpcPort, clientToken,
createApplicationState(this.stateMachine.getCurrentState()),
- this.diagnostics.toString(), trackingUrl, this.startTime);
+ this.diagnostics.toString(), trackingUrl,
+ this.startTime, this.finishTime);
} finally {
this.readLock.unlock();
}
@@ -470,11 +471,13 @@ public AttemptFailedTransition(RMAppState initialState) {
@Override
public RMAppState transition(RMAppImpl app, RMAppEvent event) {
-
+
+ RMAppFailedAttemptEvent failedEvent = ((RMAppFailedAttemptEvent)event);
if (app.attempts.size() == app.maxRetries) {
String msg = "Application " + app.getApplicationId()
+ " failed " + app.maxRetries
- + " times. Failing the application.";
+ + " times due to " + failedEvent.getDiagnostics()
+ + ". Failing the application.";
LOG.info(msg);
app.diagnostics.append(msg);
// Inform the node for app-finish
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
index 70747deacb..3164602f59 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
@@ -79,7 +79,7 @@ public interface RMAppAttempt extends EventHandler{
* Diagnostics information for the application attempt.
* @return diagnostics information for the application attempt.
*/
- StringBuilder getDiagnostics();
+ String getDiagnostics();
/**
* Progress for the application attempt.
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 7c6357defa..7f7f050bc4 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -31,6 +31,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -47,6 +48,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppFailedAttemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRejectedEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAcquiredEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
@@ -104,10 +106,10 @@ public class RMAppAttemptImpl implements RMAppAttempt {
private Container masterContainer;
private float progress = 0;
- private String host;
+ private String host = "N/A";
private int rpcPort;
- private String trackingUrl;
- private String finalState;
+ private String trackingUrl = "N/A";
+ private String finalState = "N/A";
private final StringBuilder diagnostics = new StringBuilder();
private static final StateMachineFactory 0);
Assert.assertTrue("application start time is before currentTime",
@@ -202,7 +204,8 @@ private static void assertFailed(RMApp application, String regex) {
protected RMApp testCreateAppSubmitted() throws IOException {
RMApp application = createNewTestApp();
// NEW => SUBMITTED event RMAppEventType.START
- RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.START);
+ RMAppEvent event =
+ new RMAppEvent(application.getApplicationId(), RMAppEventType.START);
application.handle(event);
assertStartTimeSet(application);
assertAppState(RMAppState.SUBMITTED, application);
@@ -212,7 +215,9 @@ protected RMApp testCreateAppSubmitted() throws IOException {
protected RMApp testCreateAppAccepted() throws IOException {
RMApp application = testCreateAppSubmitted();
// SUBMITTED => ACCEPTED event RMAppEventType.APP_ACCEPTED
- RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.APP_ACCEPTED);
+ RMAppEvent event =
+ new RMAppEvent(application.getApplicationId(),
+ RMAppEventType.APP_ACCEPTED);
application.handle(event);
assertStartTimeSet(application);
assertAppState(RMAppState.ACCEPTED, application);
@@ -222,7 +227,9 @@ protected RMApp testCreateAppAccepted() throws IOException {
protected RMApp testCreateAppRunning() throws IOException {
RMApp application = testCreateAppAccepted();
// ACCEPTED => RUNNING event RMAppEventType.ATTEMPT_REGISTERED
- RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_REGISTERED);
+ RMAppEvent event =
+ new RMAppEvent(application.getApplicationId(),
+ RMAppEventType.ATTEMPT_REGISTERED);
application.handle(event);
assertStartTimeSet(application);
assertAppState(RMAppState.RUNNING, application);
@@ -232,7 +239,9 @@ protected RMApp testCreateAppRunning() throws IOException {
protected RMApp testCreateAppFinished() throws IOException {
RMApp application = testCreateAppRunning();
// RUNNING => FINISHED event RMAppEventType.ATTEMPT_FINISHED
- RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FINISHED);
+ RMAppEvent event =
+ new RMAppEvent(application.getApplicationId(),
+ RMAppEventType.ATTEMPT_FINISHED);
application.handle(event);
assertAppState(RMAppState.FINISHED, application);
assertTimesAtFinish(application);
@@ -251,7 +260,8 @@ public void testAppNewKill() throws IOException {
RMApp application = createNewTestApp();
// NEW => KILLED event RMAppEventType.KILL
- RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ RMAppEvent event =
+ new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
application.handle(event);
assertKilled(application);
}
@@ -263,7 +273,8 @@ public void testAppNewReject() throws IOException {
RMApp application = createNewTestApp();
// NEW => FAILED event RMAppEventType.APP_REJECTED
String rejectedText = "Test Application Rejected";
- RMAppEvent event = new RMAppRejectedEvent(application.getApplicationId(), rejectedText);
+ RMAppEvent event =
+ new RMAppRejectedEvent(application.getApplicationId(), rejectedText);
application.handle(event);
assertFailed(application, rejectedText);
}
@@ -275,7 +286,8 @@ public void testAppSubmittedRejected() throws IOException {
RMApp application = testCreateAppSubmitted();
// SUBMITTED => FAILED event RMAppEventType.APP_REJECTED
String rejectedText = "app rejected";
- RMAppEvent event = new RMAppRejectedEvent(application.getApplicationId(), rejectedText);
+ RMAppEvent event =
+ new RMAppRejectedEvent(application.getApplicationId(), rejectedText);
application.handle(event);
assertFailed(application, rejectedText);
}
@@ -286,7 +298,8 @@ public void testAppSubmittedKill() throws IOException {
RMApp application = testCreateAppAccepted();
// SUBMITTED => KILLED event RMAppEventType.KILL
- RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ RMAppEvent event =
+ new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
application.handle(event);
assertKilled(application);
}
@@ -298,18 +311,26 @@ public void testAppAcceptedFailed() throws IOException {
RMApp application = testCreateAppAccepted();
// ACCEPTED => ACCEPTED event RMAppEventType.RMAppEventType.ATTEMPT_FAILED
for (int i=1; i FAILED event RMAppEventType.RMAppEventType.ATTEMPT_FAILED after max retries
- RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FAILED);
+ // ACCEPTED => FAILED event RMAppEventType.RMAppEventType.ATTEMPT_FAILED
+ // after max retries
+ String message = "Test fail";
+ RMAppEvent event =
+ new RMAppFailedAttemptEvent(application.getApplicationId(),
+ RMAppEventType.ATTEMPT_FAILED, message);
application.handle(event);
- assertFailed(application, ".*Failing the application.*");
+ assertFailed(application, ".*" + message + ".*Failing the application.*");
}
@Test
@@ -318,7 +339,8 @@ public void testAppAcceptedKill() throws IOException {
RMApp application = testCreateAppAccepted();
// ACCEPTED => KILLED event RMAppEventType.KILL
- RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ RMAppEvent event =
+ new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
application.handle(event);
assertKilled(application);
}
@@ -329,7 +351,8 @@ public void testAppRunningKill() throws IOException {
RMApp application = testCreateAppRunning();
// RUNNING => KILLED event RMAppEventType.KILL
- RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ RMAppEvent event =
+ new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
application.handle(event);
assertKilled(application);
}
@@ -341,25 +364,35 @@ public void testAppRunningFailed() throws IOException {
RMApp application = testCreateAppRunning();
RMAppAttempt appAttempt = application.getCurrentAppAttempt();
int expectedAttemptId = 1;
- Assert.assertEquals(expectedAttemptId, appAttempt.getAppAttemptId().getAttemptId());
+ Assert.assertEquals(expectedAttemptId,
+ appAttempt.getAppAttemptId().getAttemptId());
// RUNNING => FAILED/RESTARTING event RMAppEventType.ATTEMPT_FAILED
for (int i=1; i FAILED/RESTARTING event RMAppEventType.ATTEMPT_FAILED after max retries
- RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FAILED);
+ // RUNNING => FAILED/RESTARTING event RMAppEventType.ATTEMPT_FAILED
+ // after max retries
+ RMAppEvent event =
+ new RMAppFailedAttemptEvent(application.getApplicationId(),
+ RMAppEventType.ATTEMPT_FAILED, "");
application.handle(event);
assertFailed(application, ".*Failing the application.*");
@@ -376,7 +409,8 @@ public void testAppFinishedFinished() throws IOException {
RMApp application = testCreateAppFinished();
// FINISHED => FINISHED event RMAppEventType.KILL
- RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ RMAppEvent event =
+ new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
application.handle(event);
assertTimesAtFinish(application);
assertAppState(RMAppState.FINISHED, application);
@@ -392,25 +426,32 @@ public void testAppKilledKilled() throws IOException {
RMApp application = testCreateAppRunning();
// RUNNING => KILLED event RMAppEventType.KILL
- RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ RMAppEvent event =
+ new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
application.handle(event);
assertTimesAtFinish(application);
assertAppState(RMAppState.KILLED, application);
// KILLED => KILLED event RMAppEventType.ATTEMPT_FINISHED
- event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FINISHED);
+ event =
+ new RMAppEvent(application.getApplicationId(),
+ RMAppEventType.ATTEMPT_FINISHED);
application.handle(event);
assertTimesAtFinish(application);
assertAppState(RMAppState.KILLED, application);
// KILLED => KILLED event RMAppEventType.ATTEMPT_FAILED
- event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FAILED);
+ event =
+ new RMAppFailedAttemptEvent(application.getApplicationId(),
+ RMAppEventType.ATTEMPT_FAILED, "");
application.handle(event);
assertTimesAtFinish(application);
assertAppState(RMAppState.KILLED, application);
// KILLED => KILLED event RMAppEventType.ATTEMPT_KILLED
- event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_KILLED);
+ event =
+ new RMAppEvent(application.getApplicationId(),
+ RMAppEventType.ATTEMPT_KILLED);
application.handle(event);
assertTimesAtFinish(application);
assertAppState(RMAppState.KILLED, application);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
new file mode 100644
index 0000000000..03a4ba0744
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
@@ -0,0 +1,403 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Matchers.*;
+import static org.mockito.Mockito.*;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.MockApps;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
+import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppFailedAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRejectedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAllocatedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRejectedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestRMAppAttemptTransitions {
+
+ private static final Log LOG =
+ LogFactory.getLog(TestRMAppAttemptTransitions.class);
+
+ private static final String EMPTY_DIAGNOSTICS = "";
+
+ private RMContext rmContext;
+ private YarnScheduler scheduler;
+ private ApplicationMasterService masterService;
+ private ApplicationMasterLauncher applicationMasterLauncher;
+
+ private RMApp application;
+ private RMAppAttempt applicationAttempt;
+
+ private final class TestApplicationAttemptEventDispatcher implements
+      EventHandler<RMAppAttemptEvent> {
+
+ @Override
+ public void handle(RMAppAttemptEvent event) {
+ ApplicationAttemptId appID = event.getApplicationAttemptId();
+ assertEquals(applicationAttempt.getAppAttemptId(), appID);
+ try {
+ applicationAttempt.handle(event);
+ } catch (Throwable t) {
+ LOG.error("Error in handling event type " + event.getType()
+ + " for application " + appID, t);
+ }
+ }
+ }
+
+ // handle all the RM application events - same as in ResourceManager.java
+ private final class TestApplicationEventDispatcher implements
+      EventHandler<RMAppEvent> {
+ @Override
+ public void handle(RMAppEvent event) {
+ assertEquals(application.getApplicationId(), event.getApplicationId());
+ try {
+ application.handle(event);
+ } catch (Throwable t) {
+ LOG.error("Error in handling event type " + event.getType()
+ + " for application " + application.getApplicationId(), t);
+ }
+ }
+ }
+
+ private final class TestSchedulerEventDispatcher implements
+      EventHandler<SchedulerEvent> {
+ @Override
+ public void handle(SchedulerEvent event) {
+ scheduler.handle(event);
+ }
+ }
+
+ private final class TestAMLauncherEventDispatcher implements
+      EventHandler<AMLauncherEvent> {
+ @Override
+ public void handle(AMLauncherEvent event) {
+ applicationMasterLauncher.handle(event);
+ }
+ }
+
+ private static int appId = 1;
+
+ @Before
+ public void setUp() throws Exception {
+ InlineDispatcher rmDispatcher = new InlineDispatcher();
+
+ ContainerAllocationExpirer containerAllocationExpirer =
+ mock(ContainerAllocationExpirer.class);
+ AMLivelinessMonitor amLivelinessMonitor = mock(AMLivelinessMonitor.class);
+ rmContext = new RMContextImpl(new MemStore(), rmDispatcher,
+ containerAllocationExpirer, amLivelinessMonitor);
+
+ scheduler = mock(YarnScheduler.class);
+ masterService = mock(ApplicationMasterService.class);
+ applicationMasterLauncher = mock(ApplicationMasterLauncher.class);
+
+ rmDispatcher.register(RMAppAttemptEventType.class,
+ new TestApplicationAttemptEventDispatcher());
+
+ rmDispatcher.register(RMAppEventType.class,
+ new TestApplicationEventDispatcher());
+
+ rmDispatcher.register(SchedulerEventType.class,
+ new TestSchedulerEventDispatcher());
+
+ rmDispatcher.register(AMLauncherEventType.class,
+ new TestAMLauncherEventDispatcher());
+
+ rmDispatcher.init(new Configuration());
+ rmDispatcher.start();
+
+
+ ApplicationId applicationId = MockApps.newAppID(appId++);
+ ApplicationAttemptId applicationAttemptId =
+ MockApps.newAppAttemptID(applicationId, 0);
+
+ final String user = MockApps.newUserName();
+ final String queue = MockApps.newQueue();
+ ApplicationSubmissionContext submissionContext =
+ mock(ApplicationSubmissionContext.class);
+ when(submissionContext.getUser()).thenReturn(user);
+ when(submissionContext.getQueue()).thenReturn(queue);
+ ContainerLaunchContext amContainerSpec = mock(ContainerLaunchContext.class);
+ Resource resource = mock(Resource.class);
+ when(amContainerSpec.getResource()).thenReturn(resource);
+ when(submissionContext.getAMContainerSpec()).thenReturn(amContainerSpec);
+
+ application = mock(RMApp.class);
+ applicationAttempt =
+ new RMAppAttemptImpl(applicationAttemptId, null, rmContext, scheduler,
+ masterService, submissionContext);
+ when(application.getCurrentAppAttempt()).thenReturn(applicationAttempt);
+ when(application.getApplicationId()).thenReturn(applicationId);
+
+ testAppAttemptNewState();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ((AsyncDispatcher)this.rmContext.getDispatcher()).stop();
+ }
+
+
+ /**
+ * {@link RMAppAttemptState#NEW}
+ */
+ private void testAppAttemptNewState() {
+ assertEquals(RMAppAttemptState.NEW,
+ applicationAttempt.getAppAttemptState());
+ assertEquals(0, applicationAttempt.getDiagnostics().length());
+ assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
+ assertNull(applicationAttempt.getMasterContainer());
+ assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001);
+ assertEquals(0, applicationAttempt.getRanNodes().size());
+ }
+
+ /**
+ * {@link RMAppAttemptState#SUBMITTED}
+ */
+ private void testAppAttemptSubmittedState() {
+ assertEquals(RMAppAttemptState.SUBMITTED,
+ applicationAttempt.getAppAttemptState());
+ assertEquals(0, applicationAttempt.getDiagnostics().length());
+ assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
+ assertNull(applicationAttempt.getMasterContainer());
+ assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001);
+ assertEquals(0, applicationAttempt.getRanNodes().size());
+
+ // Check events
+ verify(masterService).
+ registerAppAttempt(applicationAttempt.getAppAttemptId());
+ verify(scheduler).handle(any(AppAddedSchedulerEvent.class));
+ }
+
+ /**
+ * {@link RMAppAttemptState#SUBMITTED} -> {@link RMAppAttemptState#FAILED}
+ */
+ private void testAppAttemptSubmittedToFailedState(String diagnostics) {
+ assertEquals(RMAppAttemptState.FAILED,
+ applicationAttempt.getAppAttemptState());
+ assertEquals(diagnostics, applicationAttempt.getDiagnostics());
+ assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
+ assertNull(applicationAttempt.getMasterContainer());
+ assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001);
+ assertEquals(0, applicationAttempt.getRanNodes().size());
+
+ // Check events
+ verify(application).handle(any(RMAppRejectedEvent.class));
+ }
+
+ /**
+ * {@link RMAppAttemptState#KILLED}
+ */
+ private void testAppAttemptKilledState(Container amContainer,
+ String diagnostics) {
+ assertEquals(RMAppAttemptState.KILLED,
+ applicationAttempt.getAppAttemptState());
+ assertEquals(diagnostics, applicationAttempt.getDiagnostics());
+ assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
+ assertEquals(amContainer, applicationAttempt.getMasterContainer());
+ assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001);
+ assertEquals(0, applicationAttempt.getRanNodes().size());
+ }
+
+ /**
+ * {@link RMAppAttemptState#SCHEDULED}
+ */
+ private void testAppAttemptScheduledState() {
+ assertEquals(RMAppAttemptState.SCHEDULED,
+ applicationAttempt.getAppAttemptState());
+ assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
+ assertNull(applicationAttempt.getMasterContainer());
+ assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001);
+ assertEquals(0, applicationAttempt.getRanNodes().size());
+
+ // Check events
+ verify(application).handle(any(RMAppEvent.class));
+ verify(scheduler).
+ allocate(any(ApplicationAttemptId.class),
+ any(List.class), any(List.class));
+ }
+
+ /**
+ * {@link RMAppAttemptState#ALLOCATED}
+ */
+ private void testAppAttemptAllocatedState(Container amContainer) {
+ assertEquals(RMAppAttemptState.ALLOCATED,
+ applicationAttempt.getAppAttemptState());
+ assertEquals(amContainer, applicationAttempt.getMasterContainer());
+
+ // Check events
+ verify(applicationMasterLauncher).handle(any(AMLauncherEvent.class));
+ verify(scheduler, times(2)).
+ allocate(
+ any(ApplicationAttemptId.class), any(List.class), any(List.class));
+ }
+
+ /**
+ * {@link RMAppAttemptState#FAILED}
+ */
+ private void testAppAttemptFailedState(Container container,
+ String diagnostics) {
+ assertEquals(RMAppAttemptState.FAILED,
+ applicationAttempt.getAppAttemptState());
+ assertEquals(diagnostics, applicationAttempt.getDiagnostics());
+ assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
+ assertEquals(container, applicationAttempt.getMasterContainer());
+ assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001);
+ assertEquals(0, applicationAttempt.getRanNodes().size());
+
+ // Check events
+ verify(application, times(2)).handle(any(RMAppFailedAttemptEvent.class));
+ }
+
+ private void submitApplicationAttempt() {
+ ApplicationAttemptId appAttemptId = applicationAttempt.getAppAttemptId();
+ applicationAttempt.handle(
+ new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.START));
+ testAppAttemptSubmittedState();
+ }
+
+ private void scheduleApplicationAttempt() {
+ submitApplicationAttempt();
+ applicationAttempt.handle(
+ new RMAppAttemptEvent(
+ applicationAttempt.getAppAttemptId(),
+ RMAppAttemptEventType.APP_ACCEPTED));
+ testAppAttemptScheduledState();
+ }
+
+ private Container allocateApplicationAttempt() {
+ scheduleApplicationAttempt();
+
+ // Mock the allocation of AM container
+ Container container = mock(Container.class);
+ Allocation allocation = mock(Allocation.class);
+ when(allocation.getContainers()).
+ thenReturn(Collections.singletonList(container));
+ when(
+ scheduler.allocate(
+ any(ApplicationAttemptId.class),
+ any(List.class),
+ any(List.class))).
+ thenReturn(allocation);
+
+ applicationAttempt.handle(
+ new RMAppAttemptContainerAllocatedEvent(
+ applicationAttempt.getAppAttemptId(),
+ container));
+
+ testAppAttemptAllocatedState(container);
+
+ return container;
+ }
+
+ @Test
+ public void testNewToKilled() {
+ applicationAttempt.handle(
+ new RMAppAttemptEvent(
+ applicationAttempt.getAppAttemptId(),
+ RMAppAttemptEventType.KILL));
+ testAppAttemptKilledState(null, EMPTY_DIAGNOSTICS);
+ }
+
+ @Test
+ public void testSubmittedToFailed() {
+ submitApplicationAttempt();
+ String message = "Rejected";
+ applicationAttempt.handle(
+ new RMAppAttemptRejectedEvent(
+ applicationAttempt.getAppAttemptId(), message));
+ testAppAttemptSubmittedToFailedState(message);
+ }
+
+ @Test
+ public void testSubmittedToKilled() {
+ submitApplicationAttempt();
+ applicationAttempt.handle(
+ new RMAppAttemptEvent(
+ applicationAttempt.getAppAttemptId(),
+ RMAppAttemptEventType.KILL));
+ testAppAttemptKilledState(null, EMPTY_DIAGNOSTICS);
+ }
+
+ @Test
+ public void testScheduledToKilled() {
+ scheduleApplicationAttempt();
+ applicationAttempt.handle(
+ new RMAppAttemptEvent(
+ applicationAttempt.getAppAttemptId(),
+ RMAppAttemptEventType.KILL));
+ testAppAttemptKilledState(null, EMPTY_DIAGNOSTICS);
+ }
+
+ @Test
+ public void testAllocatedToKilled() {
+ Container amContainer = allocateApplicationAttempt();
+ applicationAttempt.handle(
+ new RMAppAttemptEvent(
+ applicationAttempt.getAppAttemptId(),
+ RMAppAttemptEventType.KILL));
+ testAppAttemptKilledState(amContainer, EMPTY_DIAGNOSTICS);
+ }
+
+ @Test
+ public void testAllocatedToFailed() {
+ Container amContainer = allocateApplicationAttempt();
+ String diagnostics = "Launch Failed";
+ applicationAttempt.handle(
+ new RMAppAttemptLaunchFailedEvent(
+ applicationAttempt.getAppAttemptId(),
+ diagnostics));
+ testAppAttemptFailedState(amContainer, diagnostics);
+ }
+
+}
From 5a3040cad4b8c75bb06bb878584e88ced0ed8524 Mon Sep 17 00:00:00 2001
From: Mahadev Konar
Date: Sun, 25 Sep 2011 18:02:51 +0000
Subject: [PATCH 36/68] MAPREDUCE-3064. 27 unit test failures with Invalid
mapreduce.jobtracker.address configuration value for JobTracker: local (Venu
Gopala Rao via mahadev)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1175449 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 5 +++++
.../hadoop/mapred/JobTrackerClientProtocolProvider.java | 2 +-
.../test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java | 1 +
.../org/apache/hadoop/mapred/QueueManagerTestUtils.java | 2 ++
.../mapred/org/apache/hadoop/mapred/TestMiniMRClasspath.java | 2 ++
.../hadoop/mapred/TestSpecialCharactersInOutputPath.java | 4 +++-
6 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 39f313ec61..59d2ab64e8 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1409,6 +1409,7 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2691. Increase threadpool size for launching containers in
MapReduce ApplicationMaster. (vinodkv via acmurthy)
+
MAPREDUCE-2990. Fixed display of NodeHealthStatus. (Subroto Sanyal via
acmurthy)
@@ -1418,6 +1419,10 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2952. Fixed ResourceManager/MR-client to consume diagnostics
for AM failures in a couple of corner cases. (Arun C Murthy via vinodkv)
+ MAPREDUCE-3064. 27 unit test failures with Invalid
+ "mapreduce.jobtracker.address" configuration value for
+ JobTracker: "local" (Venu Gopala Rao via mahadev)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobTrackerClientProtocolProvider.java b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobTrackerClientProtocolProvider.java
index d12132c68d..c695816e41 100644
--- a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobTrackerClientProtocolProvider.java
+++ b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobTrackerClientProtocolProvider.java
@@ -37,7 +37,7 @@ public class JobTrackerClientProtocolProvider extends ClientProtocolProvider {
@Override
public ClientProtocol create(Configuration conf) throws IOException {
String framework = conf.get(MRConfig.FRAMEWORK_NAME);
- if (framework != null && !framework.equals("classic")) {
+ if (!MRConfig.CLASSIC_FRAMEWORK_NAME.equals(framework)) {
return null;
}
String tracker = conf.get(JTConfig.JT_IPC_ADDRESS, "local");
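
Putting the constant first makes the provider decline any configuration that
does not set a framework name at all, which is why the tests below now opt in
explicitly. A hedged sketch of the opt-in; the job-tracker address is assumed.

    static ClientProtocol classicClient() throws IOException {
      Configuration conf = new Configuration();
      conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
      conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:54311");
      // With the framework name unset, create() now returns null rather than
      // falling through to the classic JobTracker client.
      return new JobTrackerClientProtocolProvider().create(conf);
    }
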
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java
index 7581f8bc7b..86980bb73d 100644
--- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java
+++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java
@@ -382,6 +382,7 @@ static JobConf configureJobConf(JobConf conf, String namenode,
UserGroupInformation ugi) {
JobConf result = new JobConf(conf);
FileSystem.setDefaultUri(result, namenode);
+ result.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
result.set(JTConfig.JT_IPC_ADDRESS, "localhost:"+jobTrackerPort);
result.set(JTConfig.JT_HTTP_ADDRESS,
"127.0.0.1:" + jobTrackerInfoPort);
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java
index dee6f57b72..4cb0fee616 100644
--- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java
+++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.QueueState;
import org.apache.hadoop.mapreduce.SleepJob;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
@@ -314,6 +315,7 @@ static Job submitSleepJob(final int numMappers, final int numReducers, final lon
final long reduceSleepTime, boolean shouldComplete, String userInfo,
String queueName, Configuration clientConf) throws IOException,
InterruptedException, ClassNotFoundException {
+ clientConf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
clientConf.set(JTConfig.JT_IPC_ADDRESS, "localhost:"
+ miniMRCluster.getJobTrackerPort());
UserGroupInformation ugi;
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRClasspath.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRClasspath.java
index 911aa2cf7c..2563902d4b 100644
--- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRClasspath.java
+++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRClasspath.java
@@ -55,6 +55,7 @@ static void configureWordCount(FileSystem fs,
file.close();
}
FileSystem.setDefaultUri(conf, fs.getUri());
+ conf.set(JTConfig.FRAMEWORK_NAME, JTConfig.CLASSIC_FRAMEWORK_NAME);
conf.set(JTConfig.JT_IPC_ADDRESS, jobTracker);
conf.setJobName("wordcount");
conf.setInputFormat(TextInputFormat.class);
@@ -121,6 +122,7 @@ static String launchExternal(URI uri, String jobTracker, JobConf conf,
file.close();
}
FileSystem.setDefaultUri(conf, uri);
+ conf.set(JTConfig.FRAMEWORK_NAME, JTConfig.CLASSIC_FRAMEWORK_NAME);
conf.set(JTConfig.JT_IPC_ADDRESS, jobTracker);
conf.setJobName("wordcount");
conf.setInputFormat(TextInputFormat.class);
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSpecialCharactersInOutputPath.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSpecialCharactersInOutputPath.java
index dc3355bb4b..5e510094ce 100644
--- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSpecialCharactersInOutputPath.java
+++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSpecialCharactersInOutputPath.java
@@ -27,13 +27,14 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
+import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.Progressable;
@@ -67,6 +68,7 @@ public static boolean launchJob(URI fileSys,
// use WordCount example
FileSystem.setDefaultUri(conf, fileSys);
+ conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
conf.set(JTConfig.JT_IPC_ADDRESS, jobTracker);
conf.setJobName("foo");
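
The same two-line fixture change recurs across these tests; a consolidated
sketch of the pattern, with the job-tracker address left as a parameter.

    static JobConf classicJobConf(String jobTrackerAddress) {
      JobConf conf = new JobConf();
      conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
      conf.set(JTConfig.JT_IPC_ADDRESS, jobTrackerAddress);
      return conf;
    }
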
From c9a7d3dbf902244902b636bf566154c09ecd1116 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Mon, 26 Sep 2011 08:44:41 +0000
Subject: [PATCH 37/68] MAPREDUCE-3090. Fix MR AM to use ApplicationAttemptId
rather than (ApplicationId, startCount) consistently.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1175718 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 ++
.../hadoop/mapreduce/v2/app/MRAppMaster.java | 34 +++++++------------
.../mapreduce/v2/app/job/impl/JobImpl.java | 25 ++++++++------
.../v2/app/recover/RecoveryService.java | 17 +++++-----
.../apache/hadoop/mapreduce/v2/app/MRApp.java | 26 ++++++++++----
5 files changed, 58 insertions(+), 47 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 59d2ab64e8..eaca930fd4 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1423,6 +1423,9 @@ Release 0.23.0 - Unreleased
"mapreduce.jobtracker.address" configuration value for
JobTracker: "local" (Venu Gopala Rao via mahadev)
+ MAPREDUCE-3090. Fix MR AM to use ApplicationAttemptId rather than
+ (ApplicationId, startCount) consistently. (acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index ab8dec169c..6bd1c47133 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -115,8 +115,6 @@ public class MRAppMaster extends CompositeService {
private Clock clock;
private final long startTime = System.currentTimeMillis();
private String appName;
- private final int startCount;
- private final ApplicationId appID;
private final ApplicationAttemptId appAttemptID;
protected final MRAppMetrics metrics;
private Set completedTasksFromPreviousRun;
@@ -134,21 +132,16 @@ public class MRAppMaster extends CompositeService {
private Job job;
- public MRAppMaster(ApplicationId applicationId, int startCount) {
- this(applicationId, new SystemClock(), startCount);
+ public MRAppMaster(ApplicationAttemptId applicationAttemptId) {
+ this(applicationAttemptId, new SystemClock());
}
- public MRAppMaster(ApplicationId applicationId, Clock clock, int startCount) {
+ public MRAppMaster(ApplicationAttemptId applicationAttemptId, Clock clock) {
super(MRAppMaster.class.getName());
this.clock = clock;
- this.appID = applicationId;
- this.appAttemptID = RecordFactoryProvider.getRecordFactory(null)
- .newRecordInstance(ApplicationAttemptId.class);
- this.appAttemptID.setApplicationId(appID);
- this.appAttemptID.setAttemptId(startCount);
- this.startCount = startCount;
+ this.appAttemptID = applicationAttemptId;
this.metrics = MRAppMetrics.create();
- LOG.info("Created MRAppMaster for application " + applicationId);
+ LOG.info("Created MRAppMaster for application " + applicationAttemptId);
}
@Override
@@ -160,9 +153,9 @@ public void init(final Configuration conf) {
appName = conf.get(MRJobConfig.JOB_NAME, "");
if (conf.getBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, false)
- && startCount > 1) {
+ && appAttemptID.getAttemptId() > 1) {
LOG.info("Recovery is enabled. Will try to recover from previous life.");
- Recovery recoveryServ = new RecoveryService(appID, clock, startCount);
+ Recovery recoveryServ = new RecoveryService(appAttemptID, clock);
addIfService(recoveryServ);
dispatcher = recoveryServ.getDispatcher();
clock = recoveryServ.getClock();
@@ -265,8 +258,8 @@ protected Job createJob(Configuration conf) {
// ////////// End of obtaining the tokens needed by the job. //////////
// create single job
- Job newJob = new JobImpl(appID, conf, dispatcher.getEventHandler(),
- taskAttemptListener, jobTokenSecretManager, fsTokens, clock, startCount,
+ Job newJob = new JobImpl(appAttemptID, conf, dispatcher.getEventHandler(),
+ taskAttemptListener, jobTokenSecretManager, fsTokens, clock,
completedTasksFromPreviousRun, metrics, currentUser.getUserName());
((RunningAppContext) context).jobs.put(newJob.getID(), newJob);
@@ -377,11 +370,11 @@ protected ClientService createClientService(AppContext context) {
}
public ApplicationId getAppID() {
- return appID;
+ return appAttemptID.getApplicationId();
}
public int getStartCount() {
- return startCount;
+ return appAttemptID.getAttemptId();
}
public AppContext getContext() {
@@ -506,7 +499,7 @@ public ApplicationAttemptId getApplicationAttemptId() {
@Override
public ApplicationId getApplicationID() {
- return appID;
+ return appAttemptID.getApplicationId();
}
@Override
@@ -659,8 +652,7 @@ public static void main(String[] args) {
}
ApplicationAttemptId applicationAttemptId = ConverterUtils
.toApplicationAttemptId(applicationAttemptIdStr);
- MRAppMaster appMaster = new MRAppMaster(applicationAttemptId
- .getApplicationId(), applicationAttemptId.getAttemptId());
+ MRAppMaster appMaster = new MRAppMaster(applicationAttemptId);
Runtime.getRuntime().addShutdownHook(
new CompositeServiceShutdownHook(appMaster));
YarnConfiguration conf = new YarnConfiguration(new JobConf());
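
For callers that previously passed (ApplicationId, startCount), a hedged
sketch of building the ApplicationAttemptId the constructor now expects,
mirroring the record-factory code this patch removes; applicationId is
assumed to be obtained elsewhere.

    ApplicationAttemptId attemptId = RecordFactoryProvider.getRecordFactory(null)
        .newRecordInstance(ApplicationAttemptId.class);
    attemptId.setApplicationId(applicationId);
    attemptId.setAttemptId(2);   // an attempt id > 1 enables recovery when configured
    MRAppMaster appMaster = new MRAppMaster(attemptId);
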
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index e822cab80d..a3f067d14c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -42,7 +42,6 @@
import org.apache.hadoop.mapred.FileOutputCommitter;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.MapReduceChildJVM;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -101,6 +100,7 @@
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -129,11 +129,11 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
RecordFactoryProvider.getRecordFactory(null);
//final fields
+ private final ApplicationAttemptId applicationAttemptId;
private final Clock clock;
private final JobACLsManager aclsManager;
private final String username;
private final Map jobACLs;
- private final int startCount;
private final Set completedTasksFromPreviousRun;
private final Lock readLock;
private final Lock writeLock;
@@ -365,26 +365,26 @@ JobEventType.JOB_KILL, new KillTasksTransition())
private Token jobToken;
private JobTokenSecretManager jobTokenSecretManager;
- public JobImpl(ApplicationId appID, Configuration conf,
+ public JobImpl(ApplicationAttemptId applicationAttemptId, Configuration conf,
EventHandler eventHandler, TaskAttemptListener taskAttemptListener,
JobTokenSecretManager jobTokenSecretManager,
- Credentials fsTokenCredentials, Clock clock, int startCount,
+ Credentials fsTokenCredentials, Clock clock,
Set completedTasksFromPreviousRun, MRAppMetrics metrics,
String userName) {
-
+ this.applicationAttemptId = applicationAttemptId;
this.jobId = recordFactory.newRecordInstance(JobId.class);
this.jobName = conf.get(JobContext.JOB_NAME, "");
this.conf = conf;
this.metrics = metrics;
this.clock = clock;
this.completedTasksFromPreviousRun = completedTasksFromPreviousRun;
- this.startCount = startCount;
this.userName = userName;
- jobId.setAppId(appID);
- jobId.setId(appID.getId());
+ ApplicationId applicationId = applicationAttemptId.getApplicationId();
+ jobId.setAppId(applicationId);
+ jobId.setId(applicationId.getId());
oldJobId = TypeConverter.fromYarn(jobId);
LOG.info("Job created" +
- " appId=" + appID +
+ " appId=" + applicationId +
" jobId=" + jobId +
" oldJobId=" + oldJobId);
@@ -1078,7 +1078,8 @@ private void createMapTasks(JobImpl job, long inputLength,
job.conf, splits[i],
job.taskAttemptListener,
job.committer, job.jobToken, job.fsTokens.getAllTokens(),
- job.clock, job.completedTasksFromPreviousRun, job.startCount,
+ job.clock, job.completedTasksFromPreviousRun,
+ job.applicationAttemptId.getAttemptId(),
job.metrics);
job.addTask(task);
}
@@ -1095,7 +1096,9 @@ private void createReduceTasks(JobImpl job) {
job.conf, job.numMapTasks,
job.taskAttemptListener, job.committer, job.jobToken,
job.fsTokens.getAllTokens(), job.clock,
- job.completedTasksFromPreviousRun, job.startCount, job.metrics);
+ job.completedTasksFromPreviousRun,
+ job.applicationAttemptId.getAttemptId(),
+ job.metrics);
job.addTask(task);
}
LOG.info("Number of reduces for job " + job.jobId + " = "
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
index 073411c9b4..ca213f17f8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
@@ -58,7 +58,7 @@
import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanupEvent;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.apache.hadoop.yarn.Clock;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
@@ -92,10 +92,9 @@ public class RecoveryService extends CompositeService implements Recovery {
private static final Log LOG = LogFactory.getLog(RecoveryService.class);
- private final ApplicationId appID;
+ private final ApplicationAttemptId applicationAttemptId;
private final Dispatcher dispatcher;
private final ControlledClock clock;
- private final int startCount;
private JobInfo jobInfo = null;
private final Map completedTasks =
@@ -106,10 +105,10 @@ public class RecoveryService extends CompositeService implements Recovery {
private volatile boolean recoveryMode = false;
- public RecoveryService(ApplicationId appID, Clock clock, int startCount) {
+ public RecoveryService(ApplicationAttemptId applicationAttemptId,
+ Clock clock) {
super("RecoveringDispatcher");
- this.appID = appID;
- this.startCount = startCount;
+ this.applicationAttemptId = applicationAttemptId;
this.dispatcher = new RecoveryDispatcher();
this.clock = new ControlledClock(clock);
addService((Service) dispatcher);
@@ -152,7 +151,8 @@ public Set getCompletedTasks() {
private void parse() throws IOException {
// TODO: parse history file based on startCount
- String jobName = TypeConverter.fromYarn(appID).toString();
+ String jobName =
+ TypeConverter.fromYarn(applicationAttemptId.getApplicationId()).toString();
String jobhistoryDir = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(getConfig());
FSDataInputStream in = null;
Path historyFile = null;
@@ -160,8 +160,9 @@ private void parse() throws IOException {
new Path(jobhistoryDir));
FileContext fc = FileContext.getFileContext(histDirPath.toUri(),
getConfig());
+ //read the previous history file
historyFile = fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(
- histDirPath, jobName, startCount - 1)); //read the previous history file
+ histDirPath, jobName, (applicationAttemptId.getAttemptId() - 1)));
in = fc.open(historyFile);
JobHistoryParser parser = new JobHistoryParser(in);
jobInfo = parser.parse();
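
For illustration, a minimal sketch (not part of the patch) of the lookup the hunk above performs: the previous attempt's history file is derived from the ApplicationAttemptId alone, using the same helpers that appear in the hunk; the wrapper class and method names are assumed.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.TypeConverter;
    import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;

    class PreviousAttemptHistory {
      /** Attempts are numbered from 1, so attempt N recovers the file written by N - 1. */
      static Path previousHistoryFile(Path stagingDirPath, ApplicationAttemptId attemptId) {
        String jobName =
            TypeConverter.fromYarn(attemptId.getApplicationId()).toString();
        return JobHistoryUtils.getStagingJobHistoryFile(
            stagingDirPath, jobName, attemptId.getAttemptId() - 1);
      }
    }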
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
index 548d754a6c..d6e2d96817 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
@@ -66,6 +66,7 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -91,7 +92,7 @@ public class MRApp extends MRAppMaster {
private File testWorkDir;
private Path testAbsPath;
- private final RecordFactory recordFactory =
+ private static final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
//if true, tasks complete automatically as soon as they are launched
@@ -100,7 +101,7 @@ public class MRApp extends MRAppMaster {
static ApplicationId applicationId;
static {
- applicationId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class);
+ applicationId = recordFactory.newRecordInstance(ApplicationId.class);
applicationId.setClusterTimestamp(0);
applicationId.setId(0);
}
@@ -108,9 +109,19 @@ public class MRApp extends MRAppMaster {
public MRApp(int maps, int reduces, boolean autoComplete, String testName, boolean cleanOnStart) {
this(maps, reduces, autoComplete, testName, cleanOnStart, 1);
}
+
+ private static ApplicationAttemptId getApplicationAttemptId(
+ ApplicationId applicationId, int startCount) {
+ ApplicationAttemptId applicationAttemptId =
+ recordFactory.newRecordInstance(ApplicationAttemptId.class);
+ applicationAttemptId.setApplicationId(applicationId);
+ applicationAttemptId.setAttemptId(startCount);
+ return applicationAttemptId;
+ }
- public MRApp(int maps, int reduces, boolean autoComplete, String testName, boolean cleanOnStart, int startCount) {
- super(applicationId, startCount);
+ public MRApp(int maps, int reduces, boolean autoComplete, String testName,
+ boolean cleanOnStart, int startCount) {
+ super(getApplicationAttemptId(applicationId, startCount));
this.testWorkDir = new File("target", testName);
testAbsPath = new Path(testWorkDir.getAbsolutePath());
LOG.info("PathUsed: " + testAbsPath);
@@ -391,11 +402,12 @@ protected StateMachine getStateMachine() {
return localStateMachine;
}
- public TestJob(Configuration conf, ApplicationId appID,
+ public TestJob(Configuration conf, ApplicationId applicationId,
EventHandler eventHandler, TaskAttemptListener taskAttemptListener,
Clock clock, String user) {
- super(appID, conf, eventHandler, taskAttemptListener,
- new JobTokenSecretManager(), new Credentials(), clock, getStartCount(),
+ super(getApplicationAttemptId(applicationId, getStartCount()),
+ conf, eventHandler, taskAttemptListener,
+ new JobTokenSecretManager(), new Credentials(), clock,
getCompletedTaskFromPreviousRun(), metrics, user);
// This "this leak" is okay because the retained pointer is in an
From 1e6dfa7472ad78a252d05c8ebffe086d938b61fa Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Mon, 26 Sep 2011 13:25:27 +0000
Subject: [PATCH 38/68] MAPREDUCE-2646. Fixed AMRMProtocol to return containers
based on priority. Contributed by Sharad Agarwal and Arun C Murthy.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1175859 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 ++
.../v2/app/rm/RMContainerAllocator.java | 40 ++++++-------------
.../hadoop/yarn/api/records/Container.java | 13 ++++++
.../api/records/impl/pb/ContainerPBImpl.java | 39 ++++++++++++++++++
.../src/main/proto/yarn_protos.proto | 15 +++----
.../apache/hadoop/yarn/util/BuilderUtils.java | 20 +++-------
.../scheduler/capacity/LeafQueue.java | 13 +++---
.../scheduler/fifo/FifoScheduler.java | 3 +-
.../server/resourcemanager/NodeManager.java | 5 ++-
.../scheduler/capacity/TestLeafQueue.java | 7 +++-
.../scheduler/capacity/TestUtils.java | 4 +-
11 files changed, 102 insertions(+), 60 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index eaca930fd4..a35fc51cd3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1426,6 +1426,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3090. Fix MR AM to use ApplicationAttemptId rather than
(ApplicationId, startCount) consistently. (acmurthy)
+ MAPREDUCE-2646. Fixed AMRMProtocol to return containers based on
+ priority. (Sharad Agarwal and Arun C Murthy via vinodkv)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index ff232104bd..7b75cd1fbd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -586,37 +586,21 @@ private void assign(List allocatedContainers) {
private ContainerRequest assign(Container allocated) {
ContainerRequest assigned = null;
- if (mapResourceReqt != reduceResourceReqt) {
- //assign based on size
- LOG.info("Assigning based on container size");
- if (allocated.getResource().getMemory() == mapResourceReqt) {
- assigned = assignToFailedMap(allocated);
- if (assigned == null) {
- assigned = assignToMap(allocated);
- }
- } else if (allocated.getResource().getMemory() == reduceResourceReqt) {
- assigned = assignToReduce(allocated);
- }
-
- return assigned;
- }
-
- //container can be given to either map or reduce
- //assign based on priority
-
- //try to assign to earlierFailedMaps if present
- assigned = assignToFailedMap(allocated);
-
- //Assign to reduces before assigning to maps ?
- if (assigned == null) {
+ Priority priority = allocated.getPriority();
+ if (PRIORITY_FAST_FAIL_MAP.equals(priority)) {
+ LOG.info("Assigning container " + allocated + " to fast fail map");
+ assigned = assignToFailedMap(allocated);
+ } else if (PRIORITY_REDUCE.equals(priority)) {
+ LOG.info("Assigning container " + allocated + " to reduce");
assigned = assignToReduce(allocated);
- }
-
- //try to assign to maps if present
- if (assigned == null) {
+ } else if (PRIORITY_MAP.equals(priority)) {
+ LOG.info("Assigning container " + allocated + " to map");
assigned = assignToMap(allocated);
+ } else {
+ LOG.warn("Container allocated at unwanted priority: " + priority +
+ ". Returning to RM...");
}
-
+
return assigned;
}
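
For illustration, a minimal sketch (not part of the patch) of how a Priority record such as the PRIORITY_FAST_FAIL_MAP, PRIORITY_MAP and PRIORITY_REDUCE constants compared above can be built; the integer value is illustrative, and the equals() checks above rely on record equality over this single field.

    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.util.Records;

    class PrioritySketch {
      static Priority priorityOf(int value) {
        Priority priority = Records.newRecord(Priority.class);
        priority.setPriority(value);   // the only field carried by a Priority record
        return priority;
      }
    }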
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
index 97c84e4d10..ff054b22ac 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
@@ -43,6 +43,7 @@
*
*
HTTP uri of the node.
*
{@link Resource} allocated to the container.
+ *
{@link Priority} at which the container was allocated.
*
{@link ContainerState} of the container.
*
* {@link ContainerToken} of the container, used to securely verify
@@ -111,6 +112,18 @@ public interface Container extends Comparable {
@Private
@Unstable
void setResource(Resource resource);
+
+ /**
+ * Get the Priority at which the Container was
+ * allocated.
+ * @return Priority at which the Container was
+ * allocated
+ */
+ Priority getPriority();
+
+ @Private
+ @Unstable
+ void setPriority(Priority priority);
/**
* Get the current ContainerState of the container.
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
index 388cad0f4d..39b15e0cef 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.ContainerToken;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.ProtoBase;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
@@ -34,6 +35,7 @@
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerTokenProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
import org.apache.hadoop.yarn.util.ProtoUtils;
@@ -48,6 +50,7 @@ public class ContainerPBImpl extends ProtoBase implements Contai
private ContainerId containerId = null;
private NodeId nodeId = null;
private Resource resource = null;
+ private Priority priority = null;
private ContainerToken containerToken = null;
private ContainerStatus containerStatus = null;
@@ -84,6 +87,11 @@ private void mergeLocalToBuilder() {
builder.getResource())) {
builder.setResource(convertToProtoFormat(this.resource));
}
+ if (this.priority != null &&
+ !((PriorityPBImpl) this.priority).getProto().equals(
+ builder.getPriority())) {
+ builder.setPriority(convertToProtoFormat(this.priority));
+ }
if (this.containerToken != null
&& !((ContainerTokenPBImpl) this.containerToken).getProto().equals(
builder.getContainerToken())) {
@@ -211,6 +219,29 @@ public void setResource(Resource resource) {
builder.clearResource();
this.resource = resource;
}
+
+ @Override
+ public Priority getPriority() {
+ ContainerProtoOrBuilder p = viaProto ? proto : builder;
+ if (this.priority != null) {
+ return this.priority;
+ }
+ if (!p.hasPriority()) {
+ return null;
+ }
+ this.priority = convertFromProtoFormat(p.getPriority());
+ return this.priority;
+ }
+
+ @Override
+ public void setPriority(Priority priority) {
+ maybeInitBuilder();
+ if (priority == null) {
+ builder.clearPriority();
+ }
+ this.priority = priority;
+ }
+
@Override
public ContainerToken getContainerToken() {
ContainerProtoOrBuilder p = viaProto ? proto : builder;
@@ -285,6 +316,14 @@ private ResourceProto convertToProtoFormat(Resource t) {
return ((ResourcePBImpl)t).getProto();
}
+ private PriorityPBImpl convertFromProtoFormat(PriorityProto p) {
+ return new PriorityPBImpl(p);
+ }
+
+ private PriorityProto convertToProtoFormat(Priority p) {
+ return ((PriorityPBImpl)p).getProto();
+ }
+
private ContainerTokenPBImpl convertFromProtoFormat(ContainerTokenProto p) {
return new ContainerTokenPBImpl(p);
}
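
For illustration, a minimal sketch (not part of the patch) of how the new accessors behave from a caller's point of view: the PB implementation caches the Priority locally and only merges it into the protobuf builder on serialization, so a set followed by a get is served from the local field. Class and method names are illustrative.

    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.util.Records;

    class ContainerPrioritySketch {
      static Priority roundTrip(Priority priority) {
        Container container = Records.newRecord(Container.class);
        container.setPriority(priority);   // cleared in the builder when null
        return container.getPriority();    // returned from the cached local field
      }
    }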
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index cd29a9431c..704c710996 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -48,6 +48,10 @@ message ResourceProto {
optional int32 memory = 1;
}
+message PriorityProto {
+ optional int32 priority = 1;
+}
+
enum ContainerStateProto {
C_NEW = 1;
C_RUNNING = 2;
@@ -66,9 +70,10 @@ message ContainerProto {
optional NodeIdProto nodeId = 2;
optional string node_http_address = 3;
optional ResourceProto resource = 4;
- optional ContainerStateProto state = 5;
- optional ContainerTokenProto container_token = 6;
- optional ContainerStatusProto container_status = 7;
+ optional PriorityProto priority = 5;
+ optional ContainerStateProto state = 6;
+ optional ContainerTokenProto container_token = 7;
+ optional ContainerStatusProto container_status = 8;
}
enum ApplicationStateProto {
@@ -253,10 +258,6 @@ message ContainerStatusProto {
////////////////////////////////////////////////////////////////////////
////// From common//////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
-message PriorityProto {
- optional int32 priority = 1;
-}
-
message StringURLMapProto {
optional string key = 1;
optional URLProto value = 2;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
index 2caafdc19a..9df37ee03a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
@@ -184,32 +184,24 @@ public static ContainerId newContainerId(RecordFactory recordFactory,
return id;
}
- public static Container clone(Container c) {
- Container container = recordFactory.newRecordInstance(Container.class);
- container.setId(c.getId());
- container.setContainerToken(c.getContainerToken());
- container.setNodeId(c.getNodeId());
- container.setNodeHttpAddress(c.getNodeHttpAddress());
- container.setResource(c.getResource());
- container.setState(c.getState());
- return container;
- }
-
public static Container newContainer(RecordFactory recordFactory,
ApplicationAttemptId appAttemptId, int containerId, NodeId nodeId,
- String nodeHttpAddress, Resource resource) {
+ String nodeHttpAddress, Resource resource, Priority priority) {
ContainerId containerID =
newContainerId(recordFactory, appAttemptId, containerId);
- return newContainer(containerID, nodeId, nodeHttpAddress, resource);
+ return newContainer(containerID, nodeId, nodeHttpAddress,
+ resource, priority);
}
public static Container newContainer(ContainerId containerId,
- NodeId nodeId, String nodeHttpAddress, Resource resource) {
+ NodeId nodeId, String nodeHttpAddress,
+ Resource resource, Priority priority) {
Container container = recordFactory.newRecordInstance(Container.class);
container.setId(containerId);
container.setNodeId(nodeId);
container.setNodeHttpAddress(nodeHttpAddress);
container.setResource(resource);
+ container.setPriority(priority);
container.setState(ContainerState.NEW);
ContainerStatus containerStatus = Records.newRecord(ContainerStatus.class);
containerStatus.setContainerId(containerId);
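
For illustration, a minimal sketch (not part of the patch) of the new call shape: schedulers now hand the request priority straight to BuilderUtils.newContainer, and the old clone(Container) helper is gone. Parameter and class names are illustrative.

    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.BuilderUtils;

    class NewContainerSketch {
      static Container allocated(ContainerId id, NodeId nodeId, String nodeHttpAddress,
          Resource capability, Priority priority) {
        // The returned container now records the priority it was allocated at.
        return BuilderUtils.newContainer(id, nodeId, nodeHttpAddress, capability, priority);
      }
    }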
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 0753e3795c..e67d371ee6 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -1046,19 +1046,20 @@ boolean canAssign(SchedulerApp application, Priority priority,
}
private Container getContainer(RMContainer rmContainer,
- SchedulerApp application, SchedulerNode node, Resource capability) {
+ SchedulerApp application, SchedulerNode node,
+ Resource capability, Priority priority) {
return (rmContainer != null) ? rmContainer.getContainer() :
- createContainer(application, node, capability);
+ createContainer(application, node, capability, priority);
}
public Container createContainer(SchedulerApp application, SchedulerNode node,
- Resource capability) {
+ Resource capability, Priority priority) {
Container container =
BuilderUtils.newContainer(this.recordFactory,
application.getApplicationAttemptId(),
application.getNewContainerId(),
- node.getNodeID(),
- node.getHttpAddress(), capability);
+ node.getNodeID(), node.getHttpAddress(),
+ capability, priority);
// If security is enabled, send the container-tokens too.
if (UserGroupInformation.isSecurityEnabled()) {
@@ -1099,7 +1100,7 @@ private Resource assignContainer(Resource clusterResource, SchedulerNode node,
// Create the container if necessary
Container container =
- getContainer(rmContainer, application, node, capability);
+ getContainer(rmContainer, application, node, capability, priority);
// Can we allocate a container on this node?
int availableContainers =
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index 752b81ce5d..dfa4965d5d 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -528,7 +528,8 @@ private int assignContainer(SchedulerNode node, SchedulerApp application,
application.getApplicationAttemptId(),
application.getNewContainerId(),
node.getRMNode().getNodeID(),
- node.getRMNode().getHttpAddress(), capability);
+ node.getRMNode().getHttpAddress(),
+ capability, priority);
// If security is enabled, send the container-tokens too.
if (UserGroupInformation.isSecurityEnabled()) {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java
index a7b5d02c91..72ade5c1da 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java
@@ -45,6 +45,7 @@
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -184,7 +185,9 @@ synchronized public StartContainerResponse startContainer(
Container container =
BuilderUtils.newContainer(containerLaunchContext.getContainerId(),
this.nodeId, nodeHttpAddress,
- containerLaunchContext.getResource());
+ containerLaunchContext.getResource(),
+ null // priority: doesn't matter for this test NodeManager
+ );
applicationContainers.add(container);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 70c4d1a1f4..639daf9e5a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -135,7 +135,8 @@ public Container answer(InvocationOnMock invocation)
Container container = TestUtils.getMockContainer(
containerId,
((SchedulerNode)(invocation.getArguments()[1])).getNodeID(),
- (Resource)(invocation.getArguments()[2]));
+ (Resource)(invocation.getArguments()[2]),
+ ((Priority)invocation.getArguments()[3]));
return container;
}
}
@@ -143,7 +144,9 @@ public Container answer(InvocationOnMock invocation)
when(queue).createContainer(
any(SchedulerApp.class),
any(SchedulerNode.class),
- any(Resource.class));
+ any(Resource.class),
+ any(Priority.class)
+ );
// 2. Stub out LeafQueue.parent.completedContainer
CSQueue parent = queue.getParent();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
index 84dbbac867..8459e51d5c 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
@@ -161,11 +161,13 @@ public static ContainerId getMockContainerId(SchedulerApp application) {
}
public static Container getMockContainer(
- ContainerId containerId, NodeId nodeId, Resource resource) {
+ ContainerId containerId, NodeId nodeId,
+ Resource resource, Priority priority) {
Container container = mock(Container.class);
when(container.getId()).thenReturn(containerId);
when(container.getNodeId()).thenReturn(nodeId);
when(container.getResource()).thenReturn(resource);
+ when(container.getPriority()).thenReturn(priority);
return container;
}
}
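
For illustration, a minimal sketch (not part of the patch) of the extra stubbing tests need now that Container exposes a priority, mirroring the getMockContainer change above; the class name is illustrative.

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.Priority;

    class MockContainerSketch {
      static Container mockWithPriority(Priority priority) {
        Container container = mock(Container.class);
        when(container.getPriority()).thenReturn(priority);
        return container;
      }
    }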
From eff931a1b1b9e98a74ff4afae2e1d63f9ba231c4 Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Mon, 26 Sep 2011 17:27:15 +0000
Subject: [PATCH 39/68] MAPREDUCE-3031. Proper handling of killed containers to
prevent stuck containers/AMs on an external kill signal. Contributed by
Siddharth Seth.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1175960 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../container/ContainerImpl.java | 59 ++++++++++++++++---
.../container/TestContainer.java | 24 +++++++-
3 files changed, 75 insertions(+), 11 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index a35fc51cd3..6f44623b6a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1429,6 +1429,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2646. Fixed AMRMProtocol to return containers based on
priority. (Sharad Agarwal and Arun C Murthy via vinodkv)
+ MAPREDUCE-3031. Proper handling of killed containers to prevent stuck
+ containers/AMs on an external kill signal. (Siddharth Seth via vinodkv)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 4e02c3aded..8d673fbf9c 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -158,10 +158,12 @@ ContainerEventType.RESOURCE_LOCALIZED, new LocalizedTransition())
ContainerEventType.CONTAINER_LAUNCHED, new LaunchTransition())
.addTransition(ContainerState.LOCALIZED, ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
- new ExitedWithFailureTransition())
+ new ExitedWithFailureTransition(true))
.addTransition(ContainerState.LOCALIZED, ContainerState.LOCALIZED,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
+ // TODO race: Can lead to a CONTAINER_LAUNCHED event at state KILLING,
+ // and a container which will never be killed by the NM.
.addTransition(ContainerState.LOCALIZED, ContainerState.KILLING,
ContainerEventType.KILL_CONTAINER, new KillTransition())
@@ -169,16 +171,19 @@ ContainerEventType.KILL_CONTAINER, new KillTransition())
.addTransition(ContainerState.RUNNING,
ContainerState.EXITED_WITH_SUCCESS,
ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
- new ExitedWithSuccessTransition())
+ new ExitedWithSuccessTransition(true))
.addTransition(ContainerState.RUNNING,
ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
- new ExitedWithFailureTransition())
+ new ExitedWithFailureTransition(true))
.addTransition(ContainerState.RUNNING, ContainerState.RUNNING,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.RUNNING, ContainerState.KILLING,
ContainerEventType.KILL_CONTAINER, new KillTransition())
+ .addTransition(ContainerState.RUNNING, ContainerState.EXITED_WITH_FAILURE,
+ ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
+ new KilledExternallyTransition())
// From CONTAINER_EXITED_WITH_SUCCESS State
.addTransition(ContainerState.EXITED_WITH_SUCCESS, ContainerState.DONE,
@@ -220,10 +225,10 @@ ContainerEventType.KILL_CONTAINER, new KillTransition())
ContainerEventType.KILL_CONTAINER)
.addTransition(ContainerState.KILLING, ContainerState.EXITED_WITH_SUCCESS,
ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
- new ExitedWithSuccessTransition())
+ new ExitedWithSuccessTransition(false))
.addTransition(ContainerState.KILLING, ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
- new ExitedWithFailureTransition())
+ new ExitedWithFailureTransition(false))
.addTransition(ContainerState.KILLING,
ContainerState.DONE,
ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP,
@@ -551,18 +556,38 @@ public void transition(ContainerImpl container, ContainerEvent event) {
}
}
+ @SuppressWarnings("unchecked") // dispatcher not typed
static class ExitedWithSuccessTransition extends ContainerTransition {
+
+ boolean clCleanupRequired;
+
+ public ExitedWithSuccessTransition(boolean clCleanupRequired) {
+ this.clCleanupRequired = clCleanupRequired;
+ }
+
@Override
public void transition(ContainerImpl container, ContainerEvent event) {
// TODO: Add containerWorkDir to the deletion service.
- // Inform the localizer to decrement reference counts and cleanup
- // resources.
+ if (clCleanupRequired) {
+ container.dispatcher.getEventHandler().handle(
+ new ContainersLauncherEvent(container,
+ ContainersLauncherEventType.CLEANUP_CONTAINER));
+ }
+
container.cleanup();
}
}
+ @SuppressWarnings("unchecked") // dispatcher not typed
static class ExitedWithFailureTransition extends ContainerTransition {
+
+ boolean clCleanupRequired;
+
+ public ExitedWithFailureTransition(boolean clCleanupRequired) {
+ this.clCleanupRequired = clCleanupRequired;
+ }
+
@Override
public void transition(ContainerImpl container, ContainerEvent event) {
ContainerExitEvent exitEvent = (ContainerExitEvent) event;
@@ -571,12 +596,28 @@ public void transition(ContainerImpl container, ContainerEvent event) {
// TODO: Add containerWorkDir to the deletion service.
// TODO: Add containerOuputDir to the deletion service.
- // Inform the localizer to decrement reference counts and cleanup
- // resources.
+ if (clCleanupRequired) {
+ container.dispatcher.getEventHandler().handle(
+ new ContainersLauncherEvent(container,
+ ContainersLauncherEventType.CLEANUP_CONTAINER));
+ }
+
container.cleanup();
}
}
+ static class KilledExternallyTransition extends ExitedWithFailureTransition {
+ KilledExternallyTransition() {
+ super(true);
+ }
+
+ @Override
+ public void transition(ContainerImpl container, ContainerEvent event) {
+ super.transition(container, event);
+ container.diagnostics.append("Killed by external signal\n");
+ }
+ }
+
static class ResourceFailedTransition implements
SingleArcTransition<ContainerImpl, ContainerEvent> {
@Override
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
index 04d400ad18..48c745457a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
@@ -38,8 +38,6 @@
import java.util.Random;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
@@ -135,6 +133,28 @@ public boolean matches(Object o) {
}
}
+ @Test
+ @SuppressWarnings("unchecked") // mocked generic
+ public void testExternalKill() throws Exception {
+ WrappedContainer wc = null;
+ try {
+ wc = new WrappedContainer(13, 314159265358979L, 4344, "yak");
+ wc.initContainer();
+ wc.localizeResources();
+ wc.launchContainer();
+ reset(wc.localizerBus);
+ wc.containerKilledOnRequest();
+ assertEquals(ContainerState.EXITED_WITH_FAILURE,
+ wc.c.getContainerState());
+ verifyCleanupCall(wc);
+ }
+ finally {
+ if (wc != null) {
+ wc.finished();
+ }
+ }
+ }
+
@Test
@SuppressWarnings("unchecked") // mocked generic
public void testCleanupOnFailure() throws Exception {
From 81926396f69c36bc2420841d2b1fed69d78934ad Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Mon, 26 Sep 2011 20:45:18 +0000
Subject: [PATCH 40/68] MAPREDUCE-2984. Better error message for displaying
completed containers. Contributed by Devaraj K.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176045 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../yarn/server/nodemanager/webapp/ContainerPage.java | 8 ++++++++
2 files changed, 11 insertions(+)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 6f44623b6a..7e80d0e241 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1432,6 +1432,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3031. Proper handling of killed containers to prevent stuck
containers/AMs on an external kill signal. (Siddharth Seth via vinodkv)
+ MAPREDUCE-2984. Better error message for displaying completed containers.
+ (Devaraj K via acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
index 5425032eec..de76b84e27 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
@@ -31,6 +31,8 @@
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;
@@ -69,7 +71,13 @@ protected void render(Block html) {
return;
}
+ DIV<Hamlet> div = html.div("#content");
Container container = this.nmContext.getContainers().get(containerID);
+ if (container == null) {
+ div.h1("Unknown Container. Container might have completed, "
+ + "please go back to the previous page and retry.")._();
+ return;
+ }
ContainerStatus containerData = container.cloneAndGetContainerStatus();
int exitCode = containerData.getExitStatus();
String exiStatus =
From bf78f15ffb438cc13546328b2e85cba6f51b9422 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Tue, 27 Sep 2011 02:43:37 +0000
Subject: [PATCH 41/68] HDFS-2366. Initialize WebHdfsFileSystem.ugi in object
construction.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176178 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java | 11 +++++++++--
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f39834f0e3..e1c3993e59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -69,6 +69,9 @@ Trunk (unreleased changes)
HDFS-46. Change default namespace quota of root directory from
Integer.MAX_VALUE to Long.MAX_VALUE. (Uma Maheswara Rao G via szetszwo)
+ HDFS-2366. Initialize WebHdfsFileSystem.ugi in object construction.
+ (szetszwo)
+
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index b2b1fac75e..35c325281b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -91,17 +91,24 @@ public class WebHdfsFileSystem extends HftpFileSystem {
private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
- private UserGroupInformation ugi;
+ private final UserGroupInformation ugi;
private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
protected Path workingDir;
+ {
+ try {
+ ugi = UserGroupInformation.getCurrentUser();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
@Override
public synchronized void initialize(URI uri, Configuration conf
) throws IOException {
super.initialize(uri, conf);
setConf(conf);
- ugi = UserGroupInformation.getCurrentUser();
this.workingDir = getHomeDirectory();
}
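
For illustration, a minimal sketch (not part of the patch) of the Java idiom the fix relies on: an instance-initializer block runs before every constructor body, so it can assign a blank final exactly once regardless of which constructor is used, and a checked exception thrown there has to be wrapped unless every constructor declares it. Names below are illustrative.

    import java.io.IOException;

    class InstanceInitializerSketch {
      private final String currentUser;   // stands in for the final ugi field

      {
        try {
          currentUser = lookupCurrentUser();   // stands in for UserGroupInformation.getCurrentUser()
        } catch (IOException e) {
          // Checked exceptions cannot escape an initializer unless all constructors declare them.
          throw new RuntimeException(e);
        }
      }

      private static String lookupCurrentUser() throws IOException {
        return System.getProperty("user.name");
      }
    }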
From 70388cffe0f91f02d7bca4fed726b228710adaa6 Mon Sep 17 00:00:00 2001
From: Mahadev Konar
Date: Tue, 27 Sep 2011 05:30:01 +0000
Subject: [PATCH 42/68] MAPREDUCE-3071. app master configuration web UI link
under the Job menu opens up application menu. (thomas graves via mahadev)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176203 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java | 1 +
2 files changed, 4 insertions(+)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 7e80d0e241..4a6aa0e6eb 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1435,6 +1435,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2984. Better error message for displaying completed containers.
(Devaraj K via acmurthy)
+ MAPREDUCE-3071. app master configuration web UI link under the Job menu
+ opens up application menu. (thomas graves via mahadev)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java
index 8bf2ce1955..983859e7d6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java
@@ -44,6 +44,7 @@ public class JobConfPage extends AppView {
set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID"
: join("Configuration for MapReduce Job ", $(JOB_ID)));
commonPreHead(html);
+ set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}");
set(DATATABLES_ID, "conf");
set(initID(DATATABLES, "conf"), confTableInit());
set(postInitID(DATATABLES, "conf"), confPostTableInit());
From 17da0bdb279d30a0b1ded402de52d10b40bbf2d7 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Tue, 27 Sep 2011 06:56:50 +0000
Subject: [PATCH 43/68] MAPREDUCE-3067. Ensure exit-code is set correctly for
containers. Contributed by Hitesh Shah.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176235 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../container/ContainerImpl.java | 3 +
.../TestContainerManagerWithLCE.java | 24 +++++
.../TestContainerManager.java | 88 +++++++++++++++++++
4 files changed, 118 insertions(+)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 4a6aa0e6eb..ce14210eac 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1438,6 +1438,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3071. app master configuration web UI link under the Job menu
opens up application menu. (thomas graves via mahadev)
+ MAPREDUCE-3067. Ensure exit-code is set correctly for containers. (Hitesh
+ Shah via acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 8d673fbf9c..8d3f3fe084 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -567,6 +567,9 @@ public ExitedWithSuccessTransition(boolean clCleanupRequired) {
@Override
public void transition(ContainerImpl container, ContainerEvent event) {
+ // Set exit code to 0 on success
+ container.exitCode = 0;
+
// TODO: Add containerWorkDir to the deletion service.
if (clCleanupRequired) {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
index 6ee220b674..44328dbe0a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
@@ -105,7 +105,31 @@ public void testContainerLaunchAndStop() throws IOException,
LOG.info("Running testContainerLaunchAndStop");
super.testContainerLaunchAndStop();
}
+
+ @Override
+ public void testContainerLaunchAndExitSuccess() throws IOException,
+ InterruptedException {
+ // Don't run the test if the binary is not available.
+ if (!shouldRunTest()) {
+ LOG.info("LCE binary path is not passed. Not running the test");
+ return;
+ }
+ LOG.info("Running testContainerLaunchAndExitSuccess");
+ super.testContainerLaunchAndExitSuccess();
+ }
+ @Override
+ public void testContainerLaunchAndExitFailure() throws IOException,
+ InterruptedException {
+ // Don't run the test if the binary is not available.
+ if (!shouldRunTest()) {
+ LOG.info("LCE binary path is not passed. Not running the test");
+ return;
+ }
+ LOG.info("Running testContainerLaunchAndExitFailure");
+ super.testContainerLaunchAndExitFailure();
+ }
+
@Override
public void testLocalFilesCleanup() throws InterruptedException,
IOException {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index 87460d045c..2de0428cb6 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -287,7 +287,95 @@ public void testContainerLaunchAndStop() throws IOException,
exec.signalContainer(user,
pid, Signal.NULL));
}
+
+ private void testContainerLaunchAndExit(int exitCode) throws IOException, InterruptedException {
+ File scriptFile = new File(tmpDir, "scriptFile.sh");
+ PrintWriter fileWriter = new PrintWriter(scriptFile);
+ File processStartFile =
+ new File(tmpDir, "start_file.txt").getAbsoluteFile();
+ fileWriter.write("\numask 0"); // So that start file is readable by the test
+ fileWriter.write("\necho Hello World! > " + processStartFile);
+ fileWriter.write("\necho $$ >> " + processStartFile);
+
+ // Have script throw an exit code at the end
+ if (exitCode != 0) {
+ fileWriter.write("\nexit "+exitCode);
+ }
+
+ fileWriter.close();
+
+ ContainerLaunchContext containerLaunchContext =
+ recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+ // ////// Construct the Container-id
+ ContainerId cId = createContainerId();
+ containerLaunchContext.setContainerId(cId);
+
+ containerLaunchContext.setUser(user);
+
+ URL resource_alpha =
+ ConverterUtils.getYarnUrlFromPath(localFS
+ .makeQualified(new Path(scriptFile.getAbsolutePath())));
+ LocalResource rsrc_alpha =
+ recordFactory.newRecordInstance(LocalResource.class);
+ rsrc_alpha.setResource(resource_alpha);
+ rsrc_alpha.setSize(-1);
+ rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
+ rsrc_alpha.setType(LocalResourceType.FILE);
+ rsrc_alpha.setTimestamp(scriptFile.lastModified());
+ String destinationFile = "dest_file";
+ Map<String, LocalResource> localResources =
+ new HashMap<String, LocalResource>();
+ localResources.put(destinationFile, rsrc_alpha);
+ containerLaunchContext.setLocalResources(localResources);
+ containerLaunchContext.setUser(containerLaunchContext.getUser());
+ List<String> commands = new ArrayList<String>();
+ commands.add("/bin/bash");
+ commands.add(scriptFile.getAbsolutePath());
+ containerLaunchContext.setCommands(commands);
+ containerLaunchContext.setResource(recordFactory
+ .newRecordInstance(Resource.class));
+ containerLaunchContext.getResource().setMemory(100 * 1024 * 1024);
+
+ StartContainerRequest startRequest = recordFactory.newRecordInstance(StartContainerRequest.class);
+ startRequest.setContainerLaunchContext(containerLaunchContext);
+ containerManager.startContainer(startRequest);
+
+ BaseContainerManagerTest.waitForContainerState(containerManager, cId,
+ ContainerState.COMPLETE);
+
+ GetContainerStatusRequest gcsRequest =
+ recordFactory.newRecordInstance(GetContainerStatusRequest.class);
+ gcsRequest.setContainerId(cId);
+ ContainerStatus containerStatus =
+ containerManager.getContainerStatus(gcsRequest).getStatus();
+
+ // Verify exit status matches exit state of script
+ Assert.assertEquals(exitCode,
+ containerStatus.getExitStatus());
+ }
+
+ @Test
+ public void testContainerLaunchAndExitSuccess() throws IOException, InterruptedException {
+ containerManager.start();
+ int exitCode = 0;
+
+ // launch context for a command that will return exit code 0
+ // and verify exit code returned
+ testContainerLaunchAndExit(exitCode);
+ }
+
+ @Test
+ public void testContainerLaunchAndExitFailure() throws IOException, InterruptedException {
+ containerManager.start();
+ int exitCode = 50;
+
+ // launch context for a command that will return exit code 50
+ // and verify exit code returned
+ testContainerLaunchAndExit(exitCode);
+ }
+
@Test
public void testLocalFilesCleanup() throws InterruptedException,
IOException {
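
For illustration, a minimal sketch (not part of the patch) of what the fix guarantees to callers: the exit status reported in the ContainerStatus matches the container process's exit code, 0 for the success case and the script's code (50 above) for the failure case. The helper class is illustrative.

    import org.apache.hadoop.yarn.api.records.ContainerStatus;

    class ExitStatusSketch {
      static boolean exitedWith(ContainerStatus status, int expectedExitCode) {
        // Success now explicitly reports 0; non-zero codes are propagated unchanged.
        return status.getExitStatus() == expectedExitCode;
      }
    }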
From 6507a0bc35b55fd41aa0bc7f34f005c6aa67006b Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Tue, 27 Sep 2011 16:11:54 +0000
Subject: [PATCH 44/68] MAPREDUCE-3092. Removed a special comparator for JobIDs
in JobHistory as JobIDs are already comparable. Contributed by Devaraj K.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176453 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 ++
.../hadoop/mapreduce/v2/hs/JobHistory.java | 28 +++----------------
2 files changed, 7 insertions(+), 24 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index ce14210eac..61cebeeff6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -313,6 +313,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3055. Simplified ApplicationAttemptId passing to
ApplicationMaster via environment variable. (vinodkv)
+ MAPREDUCE-3092. Removed a special comparator for JobIDs in JobHistory as
+ JobIDs are already comparable. (Devaraj K via vinodkv)
+
OPTIMIZATIONS
MAPREDUCE-2026. Make JobTracker.getJobCounters() and
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java
index c9f90b9e79..7e9e67c3c3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java
@@ -22,7 +22,6 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
-import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -84,25 +83,6 @@ public class JobHistory extends AbstractService implements HistoryContext {
private static final Log SUMMARY_LOG = LogFactory.getLog(JobSummary.class);
- /*
- * TODO Get rid of this once JobId has it's own comparator
- */
- private static final Comparator<JobId> JOB_ID_COMPARATOR =
- new Comparator<JobId>() {
- @Override
- public int compare(JobId o1, JobId o2) {
- if (o1.getAppId().getClusterTimestamp() >
- o2.getAppId().getClusterTimestamp()) {
- return 1;
- } else if (o1.getAppId().getClusterTimestamp() <
- o2.getAppId().getClusterTimestamp()) {
- return -1;
- } else {
- return o1.getId() - o2.getId();
- }
- }
- };
-
private static String DONE_BEFORE_SERIAL_TAIL =
JobHistoryUtils.doneSubdirsBeforeSerialTail();
@@ -118,19 +98,19 @@ public int compare(JobId o1, JobId o2) {
//Maintains minimal details for recent jobs (parsed from history file name).
//Sorted on Job Completion Time.
private final SortedMap jobListCache =
- new ConcurrentSkipListMap(JOB_ID_COMPARATOR);
+ new ConcurrentSkipListMap();
// Re-use exisiting MetaInfo objects if they exist for the specific JobId. (synchronization on MetaInfo)
// Check for existance of the object when using iterators.
private final SortedMap intermediateListCache =
- new ConcurrentSkipListMap(JOB_ID_COMPARATOR);
+ new ConcurrentSkipListMap();
//Maintains a list of known done subdirectories. Not currently used.
private final Set existingDoneSubdirs = new HashSet();
private final SortedMap loadedJobCache =
- new ConcurrentSkipListMap(JOB_ID_COMPARATOR);
+ new ConcurrentSkipListMap();
/**
* Maintains a mapping between intermediate user directories and the last
@@ -673,7 +653,7 @@ private Job loadJob(MetaInfo metaInfo) {
private Map getAllJobsInternal() {
//TODO This should ideally be using getAllJobsMetaInfo
// or get rid of that method once Job has APIs for user, finishTime etc.
- SortedMap result = new TreeMap(JOB_ID_COMPARATOR);
+ SortedMap result = new TreeMap();
try {
scanIntermediateDirectory();
} catch (IOException e) {
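
For illustration, a minimal sketch (not part of the patch): with JobId comparable, the history caches fall back to natural ordering of the keys, which, for this to be a pure simplification, has to order by cluster timestamp and then job number just as the removed comparator did. Object stands in for the cache's value type, whose generics are not visible here.

    import java.util.concurrent.ConcurrentSkipListMap;

    import org.apache.hadoop.mapreduce.v2.api.records.JobId;

    class JobIdOrderingSketch {
      // Natural ordering of the keys; no explicit Comparator any more.
      private final ConcurrentSkipListMap<JobId, Object> cache =
          new ConcurrentSkipListMap<JobId, Object>();

      void remember(JobId jobId, Object info) {
        cache.put(jobId, info);
      }
    }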
From 87b969c83541c6719abcc1dabc38dc41704876ee Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Tue, 27 Sep 2011 17:03:19 +0000
Subject: [PATCH 45/68] MAPREDUCE-2999. Fix YARN webapp framework to properly
filter servlet paths. Contributed by Thomas Graves.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176469 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../apache/hadoop/yarn/webapp/Dispatcher.java | 9 +++++++
.../org/apache/hadoop/yarn/webapp/WebApp.java | 25 ++++++++++++++++-
.../apache/hadoop/yarn/webapp/WebApps.java | 11 +++++++-
.../apache/hadoop/yarn/webapp/TestWebApp.java | 27 +++++++++++++++++++
.../resourcemanager/rmnode/RMNodeImpl.java | 2 +-
6 files changed, 74 insertions(+), 3 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 61cebeeff6..31b90d0a97 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1444,6 +1444,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3067. Ensure exit-code is set correctly for containers. (Hitesh
Shah via acmurthy)
+ MAPREDUCE-2999. Fix YARN webapp framework to properly filter servlet
+ paths. (Thomas Graves via vinodkv)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
index ef8ab976ef..e404fe5a72 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
@@ -84,6 +84,15 @@ public void service(HttpServletRequest req, HttpServletResponse res)
prepareToExit();
return;
}
+ // if they provide a redirectPath go there instead of going to
+ // "/" so that filters can differentiate the webapps.
+ if (uri.equals("/")) {
+ String redirectPath = webApp.getRedirectPath();
+ if (redirectPath != null && !redirectPath.isEmpty()) {
+ res.sendRedirect(redirectPath);
+ return;
+ }
+ }
String method = req.getMethod();
if (method.equals("OPTIONS")) {
doOptions(req, res);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java
index b9afe81ca8..f83843e97e 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java
@@ -26,6 +26,7 @@
import com.google.inject.servlet.GuiceFilter;
import com.google.inject.servlet.ServletModule;
+import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
@@ -44,6 +45,9 @@ public abstract class WebApp extends ServletModule {
public enum HTTP { GET, POST, HEAD, PUT, DELETE };
private volatile String name;
+ private volatile List<String> servePathSpecs = new ArrayList<String>();
+ // path to redirect to if user goes to "/"
+ private volatile String redirectPath;
private volatile Configuration conf;
private volatile HttpServer httpServer;
private volatile GuiceFilter guiceFilter;
@@ -98,6 +102,22 @@ public void joinThread() {
public String name() { return this.name; }
+ void addServePathSpec(String path) { this.servePathSpecs.add(path); }
+
+ public String[] getServePathSpecs() {
+ return this.servePathSpecs.toArray(new String[this.servePathSpecs.size()]);
+ }
+
+ /**
+ * Set a path to redirect the user to if they just go to "/". For
+ * instance "/" goes to "/yarn/apps". This allows the filters to
+ * more easily differentiate the different webapps.
+ * @param path the path to redirect to
+ */
+ void setRedirectPath(String path) { this.redirectPath = path; }
+
+ public String getRedirectPath() { return this.redirectPath; }
+
void setHostClass(Class<?> cls) {
router.setHostClass(cls);
}
@@ -109,7 +129,10 @@ void setGuiceFilter(GuiceFilter instance) {
@Override
public void configureServlets() {
setup();
- serve("/", "/__stop", StringHelper.join('/', name, '*')).with(Dispatcher.class);
+ serve("/", "/__stop").with(Dispatcher.class);
+ for (String path : this.servePathSpecs) {
+ serve(path).with(Dispatcher.class);
+ }
}
/**
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
index 85b88d16cc..b521799968 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
@@ -113,6 +113,14 @@ public void setup() {
};
}
webapp.setName(name);
+ String basePath = "/" + name;
+ webapp.setRedirectPath(basePath);
+ if (basePath.equals("/")) {
+ webapp.addServePathSpec("/*");
+ } else {
+ webapp.addServePathSpec(basePath);
+ webapp.addServePathSpec(basePath + "/*");
+ }
if (conf == null) {
conf = new Configuration();
}
@@ -142,7 +150,8 @@ public void setup() {
}
}
HttpServer server =
- new HttpServer(name, bindAddress, port, findPort, conf);
+ new HttpServer(name, bindAddress, port, findPort, conf,
+ webapp.getServePathSpecs());
server.addGlobalFilter("guice", GuiceFilter.class.getName(), null);
webapp.setConf(conf);
webapp.setHttpServer(server);
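
For illustration only (not part of the patch): the setup above derives both the root redirect target and the serve path specs from the web app name, and those specs are then handed to the new HttpServer constructor so the filters cover them. A self-contained sketch of that derivation (class and method names invented), matching the expectations in the tests below:

    import java.util.ArrayList;
    import java.util.List;

    public class ServePathSpecSketch {
      // Mirrors the logic in WebApps' setup(): derive the servlet path specs
      // from the web app name; the redirect target is "/" + name.
      static List<String> pathSpecsFor(String name) {
        List<String> specs = new ArrayList<String>();
        String basePath = "/" + name;
        if (basePath.equals("/")) {
          specs.add("/*");                 // unnamed app: serve everything
        } else {
          specs.add(basePath);             // e.g. "/test"
          specs.add(basePath + "/*");      // e.g. "/test/*"
        }
        return specs;
      }

      public static void main(String[] args) {
        System.out.println(pathSpecsFor("test"));  // [/test, /test/*]
        System.out.println(pathSpecsFor(""));      // [/*]
      }
    }
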
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
index db84f32cf6..31b2aaa2ed 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.webapp;
+import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.yarn.MockApps;
import org.apache.hadoop.yarn.webapp.Controller;
import org.apache.hadoop.yarn.webapp.WebApp;
@@ -148,6 +149,32 @@ public void render(Page.HTML<_> html) {
app.stop();
}
+ @Test public void testServePaths() {
+ WebApp app = WebApps.$for("test", this).start();
+ assertEquals("/test", app.getRedirectPath());
+ String[] expectedPaths = { "/test", "/test/*" };
+ String[] pathSpecs = app.getServePathSpecs();
+
+ assertEquals(2, pathSpecs.length);
+ for(int i = 0; i < expectedPaths.length; i++) {
+ assertTrue(ArrayUtils.contains(pathSpecs, expectedPaths[i]));
+ }
+ app.stop();
+ }
+
+ @Test public void testServePathsNoName() {
+ WebApp app = WebApps.$for("", this).start();
+ assertEquals("/", app.getRedirectPath());
+ String[] expectedPaths = { "/*" };
+ String[] pathSpecs = app.getServePathSpecs();
+
+ assertEquals(1, pathSpecs.length);
+ for(int i = 0; i < expectedPaths.length; i++) {
+ assertTrue(ArrayUtils.contains(pathSpecs, expectedPaths[i]));
+ }
+ app.stop();
+ }
+
@Test public void testDefaultRoutes() throws Exception {
WebApp app = WebApps.$for("test", this).start();
String baseUrl = baseUrl(app);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 81de047bc0..3b3864a541 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -144,7 +144,7 @@ public RMNodeImpl(NodeId nodeId, RMContext context, String hostName,
this.httpPort = httpPort;
this.totalCapability = capability;
this.nodeAddress = hostName + ":" + cmPort;
- this.httpAddress = hostName + ":" + httpPort;;
+ this.httpAddress = hostName + ":" + httpPort;
this.node = node;
this.nodeHealthStatus.setIsNodeHealthy(true);
this.nodeHealthStatus.setHealthReport("Healthy");
From 98cc2007003ce7c5dd10714b539e4e0b8f7277b1 Mon Sep 17 00:00:00 2001
From: Giridharan Kesavan
Date: Tue, 27 Sep 2011 19:35:29 +0000
Subject: [PATCH 46/68] MAPREDUCE-3081. Fix vaidya startup script. Contributed
by Suhas
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176550 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 2 +
.../java/org/apache/hadoop/vaidya/vaidya.sh | 73 +++++++++++++++++--
2 files changed, 69 insertions(+), 6 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 31b90d0a97..789caf642a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -29,6 +29,8 @@ Trunk (unreleased changes)
findBugs, correct links to findBugs artifacts and no links to the
artifacts when there are no warnings. (Tom White via vinodkv).
+ MAPREDUCE-3081. Fix vaidya startup script. (gkesavan via suhas).
+
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/vaidya.sh b/hadoop-mapreduce-project/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/vaidya.sh
index 8ac5b61a5d..2a32cbd1c9 100644
--- a/hadoop-mapreduce-project/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/vaidya.sh
+++ b/hadoop-mapreduce-project/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/vaidya.sh
@@ -31,17 +31,78 @@ script=`basename "$this"`
bin=`cd "$bin"; pwd`
this="$bin/$script"
-# Check if HADOOP_PREFIX AND JAVA_HOME is set.
-if [ -z $HADOOP_PREFIX ] ; then
- echo "HADOOP_PREFIX environment variable not defined"
+# Check if HADOOP_HOME and JAVA_HOME are set.
+if [ -z "$HADOOP_HOME" ] && [ -z "$HADOOP_PREFIX" ] ; then
+ echo "HADOOP_HOME or HADOOP_PREFIX environment variable should be defined"
exit -1;
fi
-if [ -z $JAVA_HOME ] ; then
+if [ -z "$JAVA_HOME" ] ; then
echo "JAVA_HOME environment variable not defined"
exit -1;
fi
-hadoopVersion=`$HADOOP_PREFIX/bin/hadoop version | grep Hadoop | awk '{print $2}'`
+if [ -z "$HADOOP_PREFIX" ]; then
+ hadoopVersion=`$HADOOP_HOME/bin/hadoop version | awk 'BEGIN { RS = "" ; FS = "\n" } ; { print $1 }' | awk '{print $2}'`
+else
+ hadoopVersion=`$HADOOP_PREFIX/bin/hadoop version | awk 'BEGIN { RS = "" ; FS = "\n" } ; { print $1 }' | awk '{print $2}'`
+fi
-$JAVA_HOME/bin/java -Xmx1024m -classpath $HADOOP_PREFIX/hadoop-${hadoopVersion}-core.jar:$HADOOP_PREFIX/contrib/vaidya/hadoop-${hadoopVersion}-vaidya.jar:$HADOOP_PREFIX/lib/commons-logging-1.0.4.jar:${CLASSPATH} org.apache.hadoop.vaidya.postexdiagnosis.PostExPerformanceDiagnoser $@
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+# for releases, add core hadoop jar to CLASSPATH
+if [ -e $HADOOP_PREFIX/share/hadoop/hadoop-core-* ]; then
+ for f in $HADOOP_PREFIX/share/hadoop/hadoop-core-*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+
+ # add libs to CLASSPATH
+ for f in $HADOOP_PREFIX/share/hadoop/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+else
+ # tarball layout
+ if [ -e $HADOOP_HOME/hadoop-core-* ]; then
+ for f in $HADOOP_HOME/hadoop-core-*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+ fi
+ if [ -e $HADOOP_HOME/build/hadoop-core-* ]; then
+ for f in $HADOOP_HOME/build/hadoop-core-*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+ fi
+ for f in $HADOOP_HOME/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+
+ if [ -d "$HADOOP_HOME/build/ivy/lib/Hadoop/common" ]; then
+ for f in $HADOOP_HOME/build/ivy/lib/Hadoop/common/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+ fi
+fi
+
+# Set the Vaidya home
+if [ -d "$HADOOP_PREFIX/share/hadoop/contrib/vaidya/" ]; then
+ VAIDYA_HOME=$HADOOP_PREFIX/share/hadoop/contrib/vaidya/
+fi
+if [ -d "$HADOOP_HOME/contrib/vaidya" ]; then
+ VAIDYA_HOME=$HADOOP_HOME/contrib/vaidya/
+fi
+if [ -d "$HADOOP_HOME/build/contrib/vaidya" ]; then
+ VAIDYA_HOME=$HADOOP_HOME/build/contrib/vaidya/
+fi
+
+# add user-specified CLASSPATH last
+if [ "$HADOOP_USER_CLASSPATH_FIRST" = "" ] && [ "$HADOOP_CLASSPATH" != "" ]; then
+ CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+echo "$CLASSPATH"
+
+$JAVA_HOME/bin/java -Xmx1024m -classpath $VAIDYA_HOME/hadoop-vaidya-${hadoopVersion}.jar:${CLASSPATH} org.apache.hadoop.vaidya.postexdiagnosis.PostExPerformanceDiagnoser $@
From 359c17071bc99bdd277d97915e263c553dac6234 Mon Sep 17 00:00:00 2001
From: Mahadev Konar
Date: Tue, 27 Sep 2011 19:44:56 +0000
Subject: [PATCH 47/68] MAPREDUCE-3095. fairscheduler ivy including wrong
version for hdfs. (John George via mahadev)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176566 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
hadoop-mapreduce-project/src/contrib/fairscheduler/ivy.xml | 4 ++--
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 789caf642a..f77c80c644 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1449,6 +1449,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2999. Fix YARN webapp framework to properly filter servlet
paths. (Thomas Graves via vinodkv)
+ MAPREDUCE-3095. fairscheduler ivy including wrong version for hdfs.
+ (John George via mahadev)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/src/contrib/fairscheduler/ivy.xml b/hadoop-mapreduce-project/src/contrib/fairscheduler/ivy.xml
index e927032d7d..0b910158df 100644
--- a/hadoop-mapreduce-project/src/contrib/fairscheduler/ivy.xml
+++ b/hadoop-mapreduce-project/src/contrib/fairscheduler/ivy.xml
@@ -48,9 +48,9 @@
+ rev="${hadoop-hdfs.version}" conf="common->default"/>
+ rev="${hadoop-hdfs.version}" conf="test->default">
Date: Tue, 27 Sep 2011 20:30:24 +0000
Subject: [PATCH 48/68] MAPREDUCE-3054. Unable to kill submitted jobs.
(mahadev)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176600 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 2 +
.../org/apache/hadoop/mapred/ClientCache.java | 58 ++++-----
.../hadoop/mapred/ClientServiceDelegate.java | 18 +--
.../hadoop/mapred/ResourceMgrDelegate.java | 20 ++-
.../org/apache/hadoop/mapred/YARNRunner.java | 46 ++++++-
.../hadoop/mapred/TestClientRedirect.java | 12 +-
.../hadoop/mapreduce/v2/TestYARNRunner.java | 122 +++++++++++++++++-
.../hadoop/yarn/api/ClientRMProtocol.java | 10 +-
...quest.java => KillApplicationRequest.java} | 4 +-
...onse.java => KillApplicationResponse.java} | 4 +-
...java => KillApplicationRequestPBImpl.java} | 24 ++--
...ava => KillApplicationResponsePBImpl.java} | 20 +--
.../src/main/proto/client_RM_protocol.proto | 2 +-
.../src/main/proto/yarn_service_protos.proto | 4 +-
.../client/ClientRMProtocolPBClientImpl.java | 23 ++--
.../ClientRMProtocolPBServiceImpl.java | 20 +--
.../resourcemanager/ClientRMService.java | 12 +-
.../resourcemanager/rmapp/RMAppImpl.java | 32 +++--
.../yarn/server/resourcemanager/MockRM.java | 6 +-
.../resourcetracker/InlineDispatcher.java | 65 +++++-----
.../rmapp/TestRMAppTransitions.java | 90 ++++++++-----
21 files changed, 402 insertions(+), 192 deletions(-)
rename hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/{FinishApplicationRequest.java => KillApplicationRequest.java} (94%)
rename hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/{FinishApplicationResponse.java => KillApplicationResponse.java} (91%)
rename hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/{FinishApplicationRequestPBImpl.java => KillApplicationRequestPBImpl.java} (74%)
rename hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/{FinishApplicationResponsePBImpl.java => KillApplicationResponsePBImpl.java} (62%)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index f77c80c644..77aab5002a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1452,6 +1452,8 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3095. fairscheduler ivy including wrong version for hdfs.
(John George via mahadev)
+ MAPREDUCE-3054. Unable to kill submitted jobs. (mahadev)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
index 80c8d91a1b..20c6ce7c00 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
@@ -1,20 +1,20 @@
/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package org.apache.hadoop.mapred;
@@ -42,29 +42,29 @@ public class ClientCache {
private final Configuration conf;
private final ResourceMgrDelegate rm;
-
+
private static final Log LOG = LogFactory.getLog(ClientCache.class);
private Map<JobID, ClientServiceDelegate> cache =
- new HashMap<JobID, ClientServiceDelegate>();
-
+ new HashMap<JobID, ClientServiceDelegate>();
+
private MRClientProtocol hsProxy;
- ClientCache(Configuration conf, ResourceMgrDelegate rm) {
+ public ClientCache(Configuration conf, ResourceMgrDelegate rm) {
this.conf = conf;
this.rm = rm;
}
//TODO: evict from the cache on some threshold
- synchronized ClientServiceDelegate getClient(JobID jobId) {
- if (hsProxy == null) {
+ public synchronized ClientServiceDelegate getClient(JobID jobId) {
+ if (hsProxy == null) {
try {
- hsProxy = instantiateHistoryProxy();
- } catch (IOException e) {
- LOG.warn("Could not connect to History server.", e);
- throw new YarnException("Could not connect to History server.", e);
- }
- }
+ hsProxy = instantiateHistoryProxy();
+ } catch (IOException e) {
+ LOG.warn("Could not connect to History server.", e);
+ throw new YarnException("Could not connect to History server.", e);
+ }
+ }
ClientServiceDelegate client = cache.get(jobId);
if (client == null) {
client = new ClientServiceDelegate(conf, rm, jobId, hsProxy);
@@ -74,7 +74,7 @@ synchronized ClientServiceDelegate getClient(JobID jobId) {
}
private MRClientProtocol instantiateHistoryProxy()
- throws IOException {
+ throws IOException {
final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);
if (StringUtils.isEmpty(serviceAddr)) {
return null;
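
For illustration only (not part of the patch): aside from the widened visibility, ClientCache keeps one lazily created history-server proxy and one delegate per job id. A self-contained sketch of that caching pattern with deliberately invented types (HistoryProxy, Delegate):

    import java.util.HashMap;
    import java.util.Map;

    public class PerJobCacheSketch {
      interface HistoryProxy {}
      static class Delegate {
        Delegate(String jobId, HistoryProxy hs) { /* would wrap an RPC client */ }
      }

      private HistoryProxy hsProxy;
      private final Map<String, Delegate> cache = new HashMap<String, Delegate>();

      // Mirrors ClientCache.getClient(): create the shared history proxy once,
      // then hand out (and remember) one delegate per job id.
      public synchronized Delegate getClient(String jobId) {
        if (hsProxy == null) {
          hsProxy = new HistoryProxy() {};   // stands in for instantiateHistoryProxy()
        }
        Delegate client = cache.get(jobId);
        if (client == null) {
          client = new Delegate(jobId, hsProxy);
          cache.put(jobId, client);
        }
        return client;
      }
    }
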
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 429d350c5a..341e17e951 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
@@ -70,7 +70,7 @@
import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
import org.apache.hadoop.yarn.security.SchedulerSecurityInfo;
-class ClientServiceDelegate {
+public class ClientServiceDelegate {
private static final Log LOG = LogFactory.getLog(ClientServiceDelegate.class);
// Caches for per-user NotRunningJobs
@@ -87,7 +87,7 @@ class ClientServiceDelegate {
private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
private static String UNKNOWN_USER = "Unknown User";
- ClientServiceDelegate(Configuration conf, ResourceMgrDelegate rm,
+ public ClientServiceDelegate(Configuration conf, ResourceMgrDelegate rm,
JobID jobId, MRClientProtocol historyServerProxy) {
this.conf = new Configuration(conf); // Cloning for modifying.
// For faster redirects from AM to HS.
@@ -279,7 +279,7 @@ private synchronized Object invoke(String method, Class argClass,
}
}
- org.apache.hadoop.mapreduce.Counters getJobCounters(JobID arg0) throws IOException,
+ public org.apache.hadoop.mapreduce.Counters getJobCounters(JobID arg0) throws IOException,
InterruptedException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobID = TypeConverter.toYarn(arg0);
GetCountersRequest request = recordFactory.newRecordInstance(GetCountersRequest.class);
@@ -290,7 +290,7 @@ org.apache.hadoop.mapreduce.Counters getJobCounters(JobID arg0) throws IOExcepti
}
- TaskCompletionEvent[] getTaskCompletionEvents(JobID arg0, int arg1, int arg2)
+ public TaskCompletionEvent[] getTaskCompletionEvents(JobID arg0, int arg1, int arg2)
throws IOException, InterruptedException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobID = TypeConverter
.toYarn(arg0);
@@ -308,7 +308,7 @@ TaskCompletionEvent[] getTaskCompletionEvents(JobID arg0, int arg1, int arg2)
.toArray(new org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent[0]));
}
- String[] getTaskDiagnostics(org.apache.hadoop.mapreduce.TaskAttemptID arg0)
+ public String[] getTaskDiagnostics(org.apache.hadoop.mapreduce.TaskAttemptID arg0)
throws IOException, InterruptedException {
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter
@@ -326,7 +326,7 @@ String[] getTaskDiagnostics(org.apache.hadoop.mapreduce.TaskAttemptID arg0)
return result;
}
- JobStatus getJobStatus(JobID oldJobID) throws YarnRemoteException {
+ public JobStatus getJobStatus(JobID oldJobID) throws YarnRemoteException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
TypeConverter.toYarn(oldJobID);
GetJobReportRequest request =
@@ -339,7 +339,7 @@ JobStatus getJobStatus(JobID oldJobID) throws YarnRemoteException {
return TypeConverter.fromYarn(report, jobFile);
}
- org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
+ public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
throws YarnRemoteException, YarnRemoteException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
TypeConverter.toYarn(oldJobID);
@@ -356,7 +356,7 @@ org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType
(taskReports).toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
- boolean killTask(TaskAttemptID taskAttemptID, boolean fail)
+ public boolean killTask(TaskAttemptID taskAttemptID, boolean fail)
throws YarnRemoteException {
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID
= TypeConverter.toYarn(taskAttemptID);
@@ -372,7 +372,7 @@ boolean killTask(TaskAttemptID taskAttemptID, boolean fail)
return true;
}
- boolean killJob(JobID oldJobID)
+ public boolean killJob(JobID oldJobID)
throws YarnRemoteException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId
= TypeConverter.toYarn(oldJobID);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index be5b862100..65e51735dd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -44,7 +44,7 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
@@ -79,6 +79,10 @@ public class ResourceMgrDelegate {
private ApplicationId applicationId;
private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+ /**
+ * Delegate responsible for communicating with the Resource Manager's {@link ClientRMProtocol}.
+ * @param conf the configuration object.
+ */
public ResourceMgrDelegate(YarnConfiguration conf) {
this.conf = conf;
YarnRPC rpc = YarnRPC.create(this.conf);
@@ -97,6 +101,16 @@ public ResourceMgrDelegate(YarnConfiguration conf) {
LOG.info("Connected to ResourceManager at " + rmAddress);
}
+ /**
+ * Used for injecting applicationsManager, mostly for testing.
+ * @param conf the configuration object
+ * @param applicationsManager the handle used to talk to the resource manager's {@link ClientRMProtocol}.
+ */
+ public ResourceMgrDelegate(YarnConfiguration conf, ClientRMProtocol applicationsManager) {
+ this.conf = conf;
+ this.applicationsManager = applicationsManager;
+ }
+
public void cancelDelegationToken(Token arg0)
throws IOException, InterruptedException {
return;
@@ -294,9 +308,9 @@ public ApplicationId submitApplication(ApplicationSubmissionContext appContext)
}
public void killApplication(ApplicationId applicationId) throws IOException {
- FinishApplicationRequest request = recordFactory.newRecordInstance(FinishApplicationRequest.class);
+ KillApplicationRequest request = recordFactory.newRecordInstance(KillApplicationRequest.class);
request.setApplicationId(applicationId);
- applicationsManager.finishApplication(request);
+ applicationsManager.forceKillApplication(request);
LOG.info("Killing application " + applicationId);
}
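
For illustration only (not part of the patch): after the rename, a client builds a KillApplicationRequest through the record factory and calls forceKillApplication, exactly as killApplication() above does. A hedged sketch of that call sequence; the helper class ForceKillSketch is invented:

    import org.apache.hadoop.yarn.api.ClientRMProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
    import org.apache.hadoop.yarn.factories.RecordFactory;
    import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

    public class ForceKillSketch {
      private final RecordFactory recordFactory =
          RecordFactoryProvider.getRecordFactory(null);

      public void forceKill(ClientRMProtocol applicationsManager,
                            ApplicationId applicationId) throws YarnRemoteException {
        KillApplicationRequest request =
            recordFactory.newRecordInstance(KillApplicationRequest.class);
        request.setApplicationId(applicationId);
        // finishApplication(FinishApplicationRequest) is now
        // forceKillApplication(KillApplicationRequest) on ClientRMProtocol.
        applicationsManager.forceKillApplication(request);
      }
    }
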
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
index 20bd976b8d..a11968a16f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
@@ -105,10 +105,22 @@ public YARNRunner(Configuration conf) {
* @param resMgrDelegate the resourcemanager client handle.
*/
public YARNRunner(Configuration conf, ResourceMgrDelegate resMgrDelegate) {
+ this(conf, resMgrDelegate, new ClientCache(conf, resMgrDelegate));
+ }
+
+ /**
+ * Similar to {@link YARNRunner#YARNRunner(Configuration, ResourceMgrDelegate)}
+ * but allows injecting a {@link ClientCache}, enabling mocking and testing.
+ * @param conf the configuration object
+ * @param resMgrDelegate the resource manager delegate
+ * @param clientCache the client cache object.
+ */
+ public YARNRunner(Configuration conf, ResourceMgrDelegate resMgrDelegate,
+ ClientCache clientCache) {
this.conf = conf;
try {
this.resMgrDelegate = resMgrDelegate;
- this.clientCache = new ClientCache(this.conf, resMgrDelegate);
+ this.clientCache = clientCache;
this.defaultFileContext = FileContext.getFileContext(this.conf);
} catch (UnsupportedFileSystemException ufe) {
throw new RuntimeException("Error in instantiating YarnClient", ufe);
@@ -429,9 +441,35 @@ public TaskReport[] getTaskReports(JobID jobID, TaskType taskType)
@Override
public void killJob(JobID arg0) throws IOException, InterruptedException {
- if (!clientCache.getClient(arg0).killJob(arg0)) {
- resMgrDelegate.killApplication(TypeConverter.toYarn(arg0).getAppId());
- }
+ /* check the job status; if it is not running, send the kill to the RM directly */
+ JobStatus status = clientCache.getClient(arg0).getJobStatus(arg0);
+ if (status.getState() != JobStatus.State.RUNNING) {
+ resMgrDelegate.killApplication(TypeConverter.toYarn(arg0).getAppId());
+ return;
+ }
+
+ try {
+ /* send a kill to the AM */
+ clientCache.getClient(arg0).killJob(arg0);
+ long currentTimeMillis = System.currentTimeMillis();
+ long timeKillIssued = currentTimeMillis;
+ while ((currentTimeMillis < timeKillIssued + 10000L) && (status.getState()
+ != JobStatus.State.KILLED)) {
+ try {
+ Thread.sleep(1000L);
+ } catch(InterruptedException ie) {
+ /* interrupted, just break */
+ break;
+ }
+ currentTimeMillis = System.currentTimeMillis();
+ status = clientCache.getClient(arg0).getJobStatus(arg0);
+ }
+ } catch(IOException io) {
+ LOG.debug("Error when checking for application status", io);
+ }
+ if (status.getState() != JobStatus.State.KILLED) {
+ resMgrDelegate.killApplication(TypeConverter.toYarn(arg0).getAppId());
+ }
}
@Override
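
For illustration only (not part of the patch): the new killJob() logic above is kill-via-AM first, poll the status for up to ten seconds, then force-kill at the RM if the job still is not KILLED. A self-contained sketch of that pattern with deliberately invented stand-in types (JobClientStub, RmStub are not Hadoop classes):

    public class KillWithFallbackSketch {

      // Invented stand-ins for the AM-side client and the RM-side delegate.
      interface JobClientStub {
        String getState();          // e.g. "PREP", "RUNNING", "KILLED"
        void killJob();             // kill request sent to the ApplicationMaster
      }
      interface RmStub {
        void killApplication();     // force-kill at the ResourceManager
      }

      static void kill(JobClientStub client, RmStub rm) {
        // Not running yet: there may be no AM to ask, so go straight to the RM.
        if (!"RUNNING".equals(client.getState())) {
          rm.killApplication();
          return;
        }
        client.killJob();                           // polite kill via the AM
        long deadline = System.currentTimeMillis() + 10000L;
        while (System.currentTimeMillis() < deadline
            && !"KILLED".equals(client.getState())) {
          try {
            Thread.sleep(1000L);                    // poll once a second
          } catch (InterruptedException ie) {
            break;
          }
        }
        if (!"KILLED".equals(client.getState())) {
          rm.killApplication();                     // AM did not comply: force-kill
        }
      }
    }
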
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index 12b1c2cc9c..54b0422648 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ -68,8 +68,8 @@
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
@@ -288,9 +288,9 @@ public SubmitApplicationResponse submitApplication(
}
@Override
- public FinishApplicationResponse finishApplication(
- FinishApplicationRequest request) throws YarnRemoteException {
- return null;
+ public KillApplicationResponse forceKillApplication(
+ KillApplicationRequest request) throws YarnRemoteException {
+ return recordFactory.newRecordInstance(KillApplicationResponse.class);
}
@Override
@@ -451,7 +451,7 @@ public GetTaskAttemptReportResponse getTaskAttemptReport(
@Override
public KillJobResponse killJob(KillJobRequest request)
throws YarnRemoteException {
- return null;
+ return recordFactory.newRecordInstance(KillJobResponse.class);
}
@Override
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java
index bc0dfe5fa4..8878851d5c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java
@@ -22,6 +22,7 @@
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.File;
@@ -36,15 +37,38 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.ClientCache;
+import org.apache.hadoop.mapred.ClientServiceDelegate;
+import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.ResourceMgrDelegate;
import org.apache.hadoop.mapred.YARNRunner;
import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.JobPriority;
+import org.apache.hadoop.mapreduce.JobStatus.State;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationState;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -54,9 +78,8 @@
import org.mockito.stubbing.Answer;
/**
- * Test if the jobclient shows enough diagnostics
- * on a job failure.
- *
+ * Test YarnRunner and make sure the client side plugin works
+ * fine
*/
public class TestYARNRunner extends TestCase {
private static final Log LOG = LogFactory.getLog(TestYARNRunner.class);
@@ -65,18 +88,22 @@ public class TestYARNRunner extends TestCase {
private YARNRunner yarnRunner;
private ResourceMgrDelegate resourceMgrDelegate;
private YarnConfiguration conf;
+ private ClientCache clientCache;
private ApplicationId appId;
private JobID jobId;
private File testWorkDir =
new File("target", TestYARNRunner.class.getName());
private ApplicationSubmissionContext submissionContext;
+ private ClientServiceDelegate clientDelegate;
private static final String failString = "Rejected job";
@Before
public void setUp() throws Exception {
resourceMgrDelegate = mock(ResourceMgrDelegate.class);
conf = new YarnConfiguration();
- yarnRunner = new YARNRunner(conf, resourceMgrDelegate);
+ clientCache = new ClientCache(conf, resourceMgrDelegate);
+ clientCache = spy(clientCache);
+ yarnRunner = new YARNRunner(conf, resourceMgrDelegate, clientCache);
yarnRunner = spy(yarnRunner);
submissionContext = mock(ApplicationSubmissionContext.class);
doAnswer(
@@ -101,6 +128,31 @@ public ApplicationSubmissionContext answer(InvocationOnMock invocation)
}
+ @Test
+ public void testJobKill() throws Exception {
+ clientDelegate = mock(ClientServiceDelegate.class);
+ when(clientDelegate.getJobStatus(any(JobID.class))).thenReturn(new
+ org.apache.hadoop.mapreduce.JobStatus(jobId, 0f, 0f, 0f, 0f,
+ State.PREP, JobPriority.HIGH, "tmp", "tmp", "tmp", "tmp"));
+ when(clientDelegate.killJob(any(JobID.class))).thenReturn(true);
+ doAnswer(
+ new Answer<ClientServiceDelegate>() {
+ @Override
+ public ClientServiceDelegate answer(InvocationOnMock invocation)
+ throws Throwable {
+ return clientDelegate;
+ }
+ }
+ ).when(clientCache).getClient(any(JobID.class));
+ yarnRunner.killJob(jobId);
+ verify(resourceMgrDelegate).killApplication(appId);
+ when(clientDelegate.getJobStatus(any(JobID.class))).thenReturn(new
+ org.apache.hadoop.mapreduce.JobStatus(jobId, 0f, 0f, 0f, 0f,
+ State.RUNNING, JobPriority.HIGH, "tmp", "tmp", "tmp", "tmp"));
+ yarnRunner.killJob(jobId);
+ verify(clientDelegate).killJob(jobId);
+ }
+
@Test
public void testJobSubmissionFailure() throws Exception {
when(resourceMgrDelegate.submitApplication(any(ApplicationSubmissionContext.class))).
@@ -122,4 +174,66 @@ public void testJobSubmissionFailure() throws Exception {
assertTrue(io.getLocalizedMessage().contains(failString));
}
}
+
+ @Test
+ public void testResourceMgrDelegate() throws Exception {
+ /* we do not want a mock of the resourcemgr delegate */
+ ClientRMProtocol clientRMProtocol = mock(ClientRMProtocol.class);
+ ResourceMgrDelegate delegate = new ResourceMgrDelegate(conf, clientRMProtocol);
+ /* make sure killApplication issues a forceKillApplication call to the RM */
+ when(clientRMProtocol.forceKillApplication(any(KillApplicationRequest.class)))
+ .thenReturn(null);
+ delegate.killApplication(appId);
+ verify(clientRMProtocol).forceKillApplication(any(KillApplicationRequest.class));
+
+ /* make sure getalljobs calls get all applications */
+ when(clientRMProtocol.getAllApplications(any(GetAllApplicationsRequest.class))).
+ thenReturn(recordFactory.newRecordInstance(GetAllApplicationsResponse.class));
+ delegate.getAllJobs();
+ verify(clientRMProtocol).getAllApplications(any(GetAllApplicationsRequest.class));
+
+ /* make sure getapplication report is called */
+ when(clientRMProtocol.getApplicationReport(any(GetApplicationReportRequest.class)))
+ .thenReturn(recordFactory.newRecordInstance(GetApplicationReportResponse.class));
+ delegate.getApplicationReport(appId);
+ verify(clientRMProtocol).getApplicationReport(any(GetApplicationReportRequest.class));
+
+ /* make sure metrics is called */
+ GetClusterMetricsResponse clusterMetricsResponse = recordFactory.newRecordInstance
+ (GetClusterMetricsResponse.class);
+ clusterMetricsResponse.setClusterMetrics(recordFactory.newRecordInstance(
+ YarnClusterMetrics.class));
+ when(clientRMProtocol.getClusterMetrics(any(GetClusterMetricsRequest.class)))
+ .thenReturn(clusterMetricsResponse);
+ delegate.getClusterMetrics();
+ verify(clientRMProtocol).getClusterMetrics(any(GetClusterMetricsRequest.class));
+
+ when(clientRMProtocol.getClusterNodes(any(GetClusterNodesRequest.class))).
+ thenReturn(recordFactory.newRecordInstance(GetClusterNodesResponse.class));
+ delegate.getActiveTrackers();
+ verify(clientRMProtocol).getClusterNodes(any(GetClusterNodesRequest.class));
+
+ GetNewApplicationIdResponse newAppIdResponse = recordFactory.newRecordInstance(
+ GetNewApplicationIdResponse.class);
+ newAppIdResponse.setApplicationId(appId);
+ when(clientRMProtocol.getNewApplicationId(any(GetNewApplicationIdRequest.class))).
+ thenReturn(newAppIdResponse);
+ delegate.getNewJobID();
+ verify(clientRMProtocol).getNewApplicationId(any(GetNewApplicationIdRequest.class));
+
+ GetQueueInfoResponse queueInfoResponse = recordFactory.newRecordInstance(
+ GetQueueInfoResponse.class);
+ queueInfoResponse.setQueueInfo(recordFactory.newRecordInstance(QueueInfo.class));
+ when(clientRMProtocol.getQueueInfo(any(GetQueueInfoRequest.class))).
+ thenReturn(queueInfoResponse);
+ delegate.getQueues();
+ verify(clientRMProtocol).getQueueInfo(any(GetQueueInfoRequest.class));
+
+ GetQueueUserAclsInfoResponse aclResponse = recordFactory.newRecordInstance(
+ GetQueueUserAclsInfoResponse.class);
+ when(clientRMProtocol.getQueueUserAcls(any(GetQueueUserAclsInfoRequest.class)))
+ .thenReturn(aclResponse);
+ delegate.getQueueAclsForCurrentUser();
+ verify(clientRMProtocol).getQueueUserAcls(any(GetQueueUserAclsInfoRequest.class));
+ }
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java
index db4c4790cf..f16cb6da4a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java
@@ -21,8 +21,8 @@
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
@@ -102,7 +102,7 @@ public SubmitApplicationResponse submitApplication(
* <p>The interface used by clients to request the
* ResourceManager to abort submitted application.
*
- * <p>The client, via {@link FinishApplicationRequest} provides the
+ * <p>The client, via {@link KillApplicationRequest} provides the
* {@link ApplicationId} of the application to be aborted.
*
* <p>In secure mode, the ResourceManager verifies access to the
@@ -117,8 +117,8 @@ public SubmitApplicationResponse submitApplication(
* @throws YarnRemoteException
* @see #getQueueUserAcls(GetQueueUserAclsInfoRequest)
*/
- public FinishApplicationResponse finishApplication(
- FinishApplicationRequest request)
+ public KillApplicationResponse forceKillApplication(
+ KillApplicationRequest request)
throws YarnRemoteException;
/**
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/KillApplicationRequest.java
similarity index 94%
rename from hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationRequest.java
rename to hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/KillApplicationRequest.java
index 023ee3c4ac..c033e64bb2 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationRequest.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/KillApplicationRequest.java
@@ -32,11 +32,11 @@
* <p>The request includes the {@link ApplicationId} of the application to be
* aborted.
*
- * @see ClientRMProtocol#finishApplication(FinishApplicationRequest)
+ * @see ClientRMProtocol#forceKillApplication(KillApplicationRequest)
*/
@Public
@Stable
-public interface FinishApplicationRequest {
+public interface KillApplicationRequest {
/**
* Get the ApplicationId of the application to be aborted.
* @return ApplicationId of the application to be aborted
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/KillApplicationResponse.java
similarity index 91%
rename from hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationResponse.java
rename to hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/KillApplicationResponse.java
index cd0c728e53..2a8d0f06d2 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationResponse.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/KillApplicationResponse.java
@@ -28,10 +28,10 @@
*
* <p>Currently it's empty.
*
- * @see ClientRMProtocol#finishApplication(FinishApplicationRequest)
+ * @see ClientRMProtocol#forceKillApplication(KillApplicationRequest)
*/
@Public
@Stable
-public interface FinishApplicationResponse {
+public interface KillApplicationResponse {
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationRequestPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationRequestPBImpl.java
similarity index 74%
rename from hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationRequestPBImpl.java
rename to hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationRequestPBImpl.java
index 044382bdde..e2761a090b 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationRequestPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationRequestPBImpl.java
@@ -19,34 +19,34 @@
package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ProtoBase;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationRequestProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProtoOrBuilder;
-public class FinishApplicationRequestPBImpl extends ProtoBase<FinishApplicationRequestProto> implements FinishApplicationRequest {
- FinishApplicationRequestProto proto = FinishApplicationRequestProto.getDefaultInstance();
- FinishApplicationRequestProto.Builder builder = null;
+public class KillApplicationRequestPBImpl extends ProtoBase<KillApplicationRequestProto> implements KillApplicationRequest {
+ KillApplicationRequestProto proto = KillApplicationRequestProto.getDefaultInstance();
+ KillApplicationRequestProto.Builder builder = null;
boolean viaProto = false;
private ApplicationId applicationId = null;
- public FinishApplicationRequestPBImpl() {
- builder = FinishApplicationRequestProto.newBuilder();
+ public KillApplicationRequestPBImpl() {
+ builder = KillApplicationRequestProto.newBuilder();
}
- public FinishApplicationRequestPBImpl(FinishApplicationRequestProto proto) {
+ public KillApplicationRequestPBImpl(KillApplicationRequestProto proto) {
this.proto = proto;
viaProto = true;
}
- public FinishApplicationRequestProto getProto() {
+ public KillApplicationRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
@@ -69,7 +69,7 @@ private void mergeLocalToProto() {
private void maybeInitBuilder() {
if (viaProto || builder == null) {
- builder = FinishApplicationRequestProto.newBuilder(proto);
+ builder = KillApplicationRequestProto.newBuilder(proto);
}
viaProto = false;
}
@@ -77,7 +77,7 @@ private void maybeInitBuilder() {
@Override
public ApplicationId getApplicationId() {
- FinishApplicationRequestProtoOrBuilder p = viaProto ? proto : builder;
+ KillApplicationRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.applicationId != null) {
return this.applicationId;
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationResponsePBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java
similarity index 62%
rename from hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationResponsePBImpl.java
rename to hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java
index b8ad6dd7ea..61c42fd20d 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationResponsePBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java
@@ -19,27 +19,27 @@
package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
import org.apache.hadoop.yarn.api.records.ProtoBase;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationResponseProto;
-public class FinishApplicationResponsePBImpl extends ProtoBase<FinishApplicationResponseProto> implements FinishApplicationResponse {
- FinishApplicationResponseProto proto = FinishApplicationResponseProto.getDefaultInstance();
- FinishApplicationResponseProto.Builder builder = null;
+public class KillApplicationResponsePBImpl extends ProtoBase<KillApplicationResponseProto> implements KillApplicationResponse {
+ KillApplicationResponseProto proto = KillApplicationResponseProto.getDefaultInstance();
+ KillApplicationResponseProto.Builder builder = null;
boolean viaProto = false;
- public FinishApplicationResponsePBImpl() {
- builder = FinishApplicationResponseProto.newBuilder();
+ public KillApplicationResponsePBImpl() {
+ builder = KillApplicationResponseProto.newBuilder();
}
- public FinishApplicationResponsePBImpl(FinishApplicationResponseProto proto) {
+ public KillApplicationResponsePBImpl(KillApplicationResponseProto proto) {
this.proto = proto;
viaProto = true;
}
- public FinishApplicationResponseProto getProto() {
+ public KillApplicationResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
@@ -47,7 +47,7 @@ public FinishApplicationResponseProto getProto() {
private void maybeInitBuilder() {
if (viaProto || builder == null) {
- builder = FinishApplicationResponseProto.newBuilder(proto);
+ builder = KillApplicationResponseProto.newBuilder(proto);
}
viaProto = false;
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto
index cfb14ff351..760b75c021 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto
@@ -27,7 +27,7 @@ service ClientRMProtocolService {
rpc getNewApplicationId (GetNewApplicationIdRequestProto) returns (GetNewApplicationIdResponseProto);
rpc getApplicationReport (GetApplicationReportRequestProto) returns (GetApplicationReportResponseProto);
rpc submitApplication (SubmitApplicationRequestProto) returns (SubmitApplicationResponseProto);
- rpc finishApplication (FinishApplicationRequestProto) returns (FinishApplicationResponseProto);
+ rpc forceKillApplication (KillApplicationRequestProto) returns (KillApplicationResponseProto);
rpc getClusterMetrics (GetClusterMetricsRequestProto) returns (GetClusterMetricsResponseProto);
rpc getAllApplications (GetAllApplicationsRequestProto) returns (GetAllApplicationsResponseProto);
rpc getClusterNodes (GetClusterNodesRequestProto) returns (GetClusterNodesResponseProto);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index 753c6b8c9a..84003ad439 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -88,11 +88,11 @@ message SubmitApplicationRequestProto {
message SubmitApplicationResponseProto {
}
-message FinishApplicationRequestProto {
+message KillApplicationRequestProto {
optional ApplicationIdProto application_id = 1;
}
-message FinishApplicationResponseProto {
+message KillApplicationResponseProto {
}
message GetClusterMetricsRequestProto {
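
For illustration only (not part of the patch): the renamed messages above generate the KillApplicationRequestProto/KillApplicationResponseProto wire types used by the PB client and service below. A hedged sketch of building the request proto directly; the helper class is invented, and the ApplicationIdProto builder field names (setClusterTimestamp, setId) are assumed from the standard YarnProtos definition:

    import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
    import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto;

    public class KillRequestProtoSketch {
      // Builds the renamed generated protobuf type from yarn_service_protos.proto.
      public static KillApplicationRequestProto build(long clusterTimestamp, int id) {
        ApplicationIdProto appId = ApplicationIdProto.newBuilder()
            .setClusterTimestamp(clusterTimestamp)   // assumed field name
            .setId(id)                               // assumed field name
            .build();
        return KillApplicationRequestProto.newBuilder()
            .setApplicationId(appId)                 // optional application_id = 1
            .build();
      }
    }
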
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java
index 8972c656d9..b06c2caa35 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java
@@ -25,8 +25,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
@@ -41,10 +39,10 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationRequestPBImpl;
-import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllApplicationsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllApplicationsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportRequestPBImpl;
@@ -59,21 +57,22 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
import org.apache.hadoop.yarn.proto.ClientRMProtocol.ClientRMProtocolService;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationIdRequestProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto;
import com.google.protobuf.ServiceException;
@@ -88,11 +87,11 @@ public ClientRMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr,
}
@Override
- public FinishApplicationResponse finishApplication(
- FinishApplicationRequest request) throws YarnRemoteException {
- FinishApplicationRequestProto requestProto = ((FinishApplicationRequestPBImpl)request).getProto();
+ public KillApplicationResponse forceKillApplication(
+ KillApplicationRequest request) throws YarnRemoteException {
+ KillApplicationRequestProto requestProto = ((KillApplicationRequestPBImpl)request).getProto();
try {
- return new FinishApplicationResponsePBImpl(proxy.finishApplication(null, requestProto));
+ return new KillApplicationResponsePBImpl(proxy.forceKillApplication(null, requestProto));
} catch (ServiceException e) {
if (e.getCause() instanceof YarnRemoteException) {
throw (YarnRemoteException)e.getCause();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java
index 35e4be5398..242e9624a1 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.yarn.api.impl.pb.service;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
@@ -27,9 +26,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationRequestPBImpl;
-import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllApplicationsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllApplicationsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportRequestPBImpl;
@@ -44,12 +42,12 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.proto.ClientRMProtocol.ClientRMProtocolService.BlockingInterface;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationRequestProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto;
@@ -64,6 +62,8 @@
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto;
@@ -79,12 +79,12 @@ public ClientRMProtocolPBServiceImpl(ClientRMProtocol impl) {
}
@Override
- public FinishApplicationResponseProto finishApplication(RpcController arg0,
- FinishApplicationRequestProto proto) throws ServiceException {
- FinishApplicationRequestPBImpl request = new FinishApplicationRequestPBImpl(proto);
+ public KillApplicationResponseProto forceKillApplication(RpcController arg0,
+ KillApplicationRequestProto proto) throws ServiceException {
+ KillApplicationRequestPBImpl request = new KillApplicationRequestPBImpl(proto);
try {
- FinishApplicationResponse response = real.finishApplication(request);
- return ((FinishApplicationResponsePBImpl)response).getProto();
+ KillApplicationResponse response = real.forceKillApplication(request);
+ return ((KillApplicationResponsePBImpl)response).getProto();
} catch (YarnRemoteException e) {
throw new ServiceException(e);
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index a31bef8af9..8e29b33ff8 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -36,8 +36,8 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
@@ -228,8 +228,8 @@ public SubmitApplicationResponse submitApplication(
@SuppressWarnings("unchecked")
@Override
- public FinishApplicationResponse finishApplication(
- FinishApplicationRequest request) throws YarnRemoteException {
+ public KillApplicationResponse forceKillApplication(
+ KillApplicationRequest request) throws YarnRemoteException {
ApplicationId applicationId = request.getApplicationId();
@@ -262,8 +262,8 @@ public FinishApplicationResponse finishApplication(
RMAuditLogger.logSuccess(callerUGI.getShortUserName(),
AuditConstants.KILL_APP_REQUEST, "ClientRMService" , applicationId);
- FinishApplicationResponse response = recordFactory
- .newRecordInstance(FinishApplicationResponse.class);
+ KillApplicationResponse response = recordFactory
+ .newRecordInstance(KillApplicationResponse.class);
return response;
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 838dfc0b08..94d04a8d12 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType;
@@ -86,7 +87,8 @@ public class RMAppImpl implements RMApp {
private long startTime;
private long finishTime;
private RMAppAttempt currentAttempt;
-
+ @SuppressWarnings("rawtypes")
+ private EventHandler handler;
private static final FinalTransition FINAL_TRANSITION = new FinalTransition();
  private static final StateMachineFactory<RMAppImpl,
      RMAppState, RMAppEventType, RMAppEvent> stateMachineFactory
                               = new StateMachineFactory<RMAppImpl,
                                   RMAppState, RMAppEventType, RMAppEvent>(RMAppState.NEW)
- // TODO - ATTEMPT_KILLED not sent right now but should handle if
- // attempt starts sending
-
// Transitions from NEW state
.addTransition(RMAppState.NEW, RMAppState.SUBMITTED,
RMAppEventType.START, new StartAppAttemptTransition())
@@ -116,7 +115,7 @@ RMAppEventType.APP_REJECTED, new AppRejectedTransition())
.addTransition(RMAppState.SUBMITTED, RMAppState.ACCEPTED,
RMAppEventType.APP_ACCEPTED)
.addTransition(RMAppState.SUBMITTED, RMAppState.KILLED,
- RMAppEventType.KILL, new AppKilledTransition())
+ RMAppEventType.KILL, new KillAppAndAttemptTransition())
// Transitions from ACCEPTED state
.addTransition(RMAppState.ACCEPTED, RMAppState.RUNNING,
@@ -126,7 +125,7 @@ RMAppEventType.KILL, new AppKilledTransition())
RMAppEventType.ATTEMPT_FAILED,
new AttemptFailedTransition(RMAppState.SUBMITTED))
.addTransition(RMAppState.ACCEPTED, RMAppState.KILLED,
- RMAppEventType.KILL, new AppKilledTransition())
+ RMAppEventType.KILL, new KillAppAndAttemptTransition())
// Transitions from RUNNING state
.addTransition(RMAppState.RUNNING, RMAppState.FINISHED,
@@ -136,7 +135,7 @@ RMAppEventType.KILL, new AppKilledTransition())
RMAppEventType.ATTEMPT_FAILED,
new AttemptFailedTransition(RMAppState.SUBMITTED))
.addTransition(RMAppState.RUNNING, RMAppState.KILLED,
- RMAppEventType.KILL, new AppKilledTransition())
+ RMAppEventType.KILL, new KillAppAndAttemptTransition())
// Transitions from FINISHED state
.addTransition(RMAppState.FINISHED, RMAppState.FINISHED,
@@ -168,6 +167,7 @@ public RMAppImpl(ApplicationId applicationId, RMContext rmContext,
this.name = name;
this.rmContext = rmContext;
this.dispatcher = rmContext.getDispatcher();
+ this.handler = dispatcher.getEventHandler();
this.conf = config;
this.user = user;
this.queue = queue;
@@ -403,7 +403,7 @@ private void createNewAttempt() {
submissionContext);
attempts.put(appAttemptId, attempt);
currentAttempt = attempt;
- dispatcher.getEventHandler().handle(
+ handler.handle(
new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.START));
}
@@ -420,13 +420,23 @@ public void transition(RMAppImpl app, RMAppEvent event) {
};
}
- private static final class AppKilledTransition extends FinalTransition {
+ private static class AppKilledTransition extends FinalTransition {
+ @Override
public void transition(RMAppImpl app, RMAppEvent event) {
app.diagnostics.append("Application killed by user.");
super.transition(app, event);
};
}
+ private static class KillAppAndAttemptTransition extends AppKilledTransition {
+ @SuppressWarnings("unchecked")
+ @Override
+ public void transition(RMAppImpl app, RMAppEvent event) {
+ app.handler.handle(new RMAppAttemptEvent(app.currentAttempt.getAppAttemptId(),
+ RMAppAttemptEventType.KILL));
+ super.transition(app, event);
+ }
+ }
private static final class AppRejectedTransition extends
FinalTransition{
public void transition(RMAppImpl app, RMAppEvent event) {
@@ -450,11 +460,11 @@ private Set getNodesOnWhichAttemptRan(RMAppImpl app) {
public void transition(RMAppImpl app, RMAppEvent event) {
Set nodes = getNodesOnWhichAttemptRan(app);
for (NodeId nodeId : nodes) {
- app.dispatcher.getEventHandler().handle(
+ app.handler.handle(
new RMNodeCleanAppEvent(nodeId, app.applicationId));
}
app.finishTime = System.currentTimeMillis();
- app.dispatcher.getEventHandler().handle(
+ app.handler.handle(
new RMAppManagerEvent(app.applicationId,
RMAppManagerEventType.APP_COMPLETED));
};
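The new handler field threaded through the transitions above is just a cached copy of the dispatcher's event handler. A minimal sketch of that caching pattern, assuming only the Dispatcher, Event and EventHandler interfaces already imported in this file:

    import org.apache.hadoop.yarn.event.Dispatcher;
    import org.apache.hadoop.yarn.event.Event;
    import org.apache.hadoop.yarn.event.EventHandler;

    class CachedHandlerSketch {
      @SuppressWarnings("rawtypes")
      private final EventHandler handler;

      CachedHandlerSketch(Dispatcher dispatcher) {
        // Cache the handler once at construction time ...
        this.handler = dispatcher.getEventHandler();
      }

      @SuppressWarnings("unchecked")
      void send(Event event) {
        // ... and reuse it wherever an event needs to be dispatched.
        handler.handle(event);
      }
    }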
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 2123ee806c..fa2ca44d30 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -22,7 +22,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
@@ -109,9 +109,9 @@ public MockNM registerNode(String nodeIdStr, int memory) throws Exception {
public void killApp(ApplicationId appId) throws Exception {
ClientRMProtocol client = getClientRMService();
- FinishApplicationRequest req = Records.newRecord(FinishApplicationRequest.class);
+ KillApplicationRequest req = Records.newRecord(KillApplicationRequest.class);
req.setApplicationId(appId);
- client.finishApplication(req);
+ client.forceKillApplication(req);
}
//from AMLauncher
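In a test, the renamed path can be exercised end to end through MockRM roughly as below. The submitApp(memory) helper and the literal 1024 are illustrative assumptions, not part of this patch.

    import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
    import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;

    public class KillAppViaMockRMSketch {
      public void run() throws Exception {
        MockRM rm = new MockRM();
        rm.start();
        RMApp app = rm.submitApp(1024);        // assumed helper: submit an app with a 1024 MB AM
        rm.killApp(app.getApplicationId());    // goes through forceKillApplication(...)
        rm.stop();
      }
    }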
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/InlineDispatcher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/InlineDispatcher.java
index 32f6a429ca..d771a61d86 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/InlineDispatcher.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/InlineDispatcher.java
@@ -1,52 +1,57 @@
/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
package org.apache.hadoop.yarn.server.resourcemanager.resourcetracker;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
-@Private
public class InlineDispatcher extends AsyncDispatcher {
- private class InlineEventHandler implements EventHandler {
- private final InlineDispatcher dispatcher;
- public InlineEventHandler(InlineDispatcher dispatcher) {
- this.dispatcher = dispatcher;
- }
+ private static final Log LOG = LogFactory.getLog(InlineDispatcher.class);
+
+ private class TestEventHandler implements EventHandler {
@Override
public void handle(Event event) {
- this.dispatcher.dispatch(event);
+ dispatch(event);
}
}
- public void dispatch(Event event) {
- super.dispatch(event);
+ @Override
+ protected void dispatch(Event event) {
+ LOG.info("Dispatching the event " + event.getClass().getName() + "."
+ + event.toString());
+
+ Class<? extends Enum> type = event.getType().getDeclaringClass();
+ if (eventDispatchers.get(type) != null) {
+ eventDispatchers.get(type).handle(event);
+ }
}
@Override
public EventHandler getEventHandler() {
- return new InlineEventHandler(this);
+ return new TestEventHandler();
}
-
+
static class EmptyEventHandler implements EventHandler {
@Override
public void handle(Event event) {
- ; // ignore
- }
+ //do nothing
+ }
}
}
\ No newline at end of file
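The value of the reworked dispatcher is that events are delivered synchronously, so a test can assert right after handle() returns. A minimal sketch of that usage follows; the anonymous handler and the null ApplicationId are purely for brevity and are not part of the patch.

    import org.apache.hadoop.yarn.event.EventHandler;
    import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher;
    import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
    import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;

    public class InlineDispatchSketch {
      public void run() {
        InlineDispatcher dispatcher = new InlineDispatcher();
        dispatcher.register(RMAppEventType.class, new EventHandler<RMAppEvent>() {
          @Override
          public void handle(RMAppEvent event) {
            System.out.println("handled " + event.getType() + " in the calling thread");
          }
        });
        // Delivered before this call returns, unlike the asynchronous parent class.
        dispatcher.getEventHandler().handle(new RMAppEvent(null, RMAppEventType.KILL));
      }
    }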
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
index 539207db3d..24408821e2 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
@@ -1,26 +1,27 @@
/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
import static org.mockito.Mockito.mock;
import java.io.IOException;
+import java.util.List;
import junit.framework.Assert;
@@ -37,12 +38,14 @@
import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
+import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
@@ -52,27 +55,40 @@
public class TestRMAppTransitions {
- private static final Log LOG = LogFactory.getLog(TestRMAppTransitions.class);
-
+ static final Log LOG = LogFactory.getLog(TestRMAppTransitions.class);
+
private RMContext rmContext;
private static int maxRetries = 4;
private static int appId = 1;
+ private AsyncDispatcher rmDispatcher;
// ignore all the RM application attempt events
private static final class TestApplicationAttemptEventDispatcher implements
- EventHandler<RMAppAttemptEvent> {
+ EventHandler<RMAppAttemptEvent> {
- public TestApplicationAttemptEventDispatcher() {
+ private final RMContext rmContext;
+ public TestApplicationAttemptEventDispatcher(RMContext rmContext) {
+ this.rmContext = rmContext;
}
@Override
public void handle(RMAppAttemptEvent event) {
+ ApplicationId appId = event.getApplicationAttemptId().getApplicationId();
+ RMApp rmApp = this.rmContext.getRMApps().get(appId);
+ if (rmApp != null) {
+ try {
+ rmApp.getRMAppAttempt(event.getApplicationAttemptId()).handle(event);
+ } catch (Throwable t) {
+ LOG.error("Error in handling event type " + event.getType()
+ + " for application " + appId, t);
+ }
+ }
}
}
// handle all the RM application events - same as in ResourceManager.java
private static final class TestApplicationEventDispatcher implements
- EventHandler<RMAppEvent> {
+ EventHandler<RMAppEvent> {
private final RMContext rmContext;
public TestApplicationEventDispatcher(RMContext rmContext) {
@@ -97,18 +113,22 @@ public void handle(RMAppEvent event) {
@Before
public void setUp() throws Exception {
AsyncDispatcher rmDispatcher = new AsyncDispatcher();
+ Configuration conf = new Configuration();
+ rmDispatcher = new InlineDispatcher();
ContainerAllocationExpirer containerAllocationExpirer =
mock(ContainerAllocationExpirer.class);
AMLivelinessMonitor amLivelinessMonitor = mock(AMLivelinessMonitor.class);
this.rmContext = new RMContextImpl(new MemStore(), rmDispatcher,
- containerAllocationExpirer, amLivelinessMonitor);
+ containerAllocationExpirer, amLivelinessMonitor);
rmDispatcher.register(RMAppAttemptEventType.class,
- new TestApplicationAttemptEventDispatcher());
+ new TestApplicationAttemptEventDispatcher(this.rmContext));
rmDispatcher.register(RMAppEventType.class,
new TestApplicationEventDispatcher(rmContext));
+ rmDispatcher.init(conf);
+ rmDispatcher.start();
}
protected RMApp createNewTestApp() {
@@ -128,10 +148,10 @@ protected RMApp createNewTestApp() {
new ApplicationTokenSecretManager(), scheduler);
RMApp application = new RMAppImpl(applicationId, rmContext,
- conf, name, user,
- queue, submissionContext, clientTokenStr,
- appStore, scheduler,
- masterService);
+ conf, name, user,
+ queue, submissionContext, clientTokenStr,
+ appStore, scheduler,
+ masterService);
testAppStartState(applicationId, user, name, queue, application);
return application;
@@ -193,6 +213,14 @@ private static void assertKilled(RMApp application) {
"Application killed by user.", diag.toString());
}
+ private static void assertAppAndAttemptKilled(RMApp application) {
+ assertKilled(application);
+ /* also check if the attempt is killed */
+ Assert.assertEquals( RMAppAttemptState.KILLED,
+ application.getCurrentAppAttempt().getAppAttemptState()
+ );
+ }
+
private static void assertFailed(RMApp application, String regex) {
assertTimesAtFinish(application);
assertAppState(RMAppState.FAILED, application);
@@ -298,10 +326,10 @@ public void testAppSubmittedKill() throws IOException {
RMApp application = testCreateAppAccepted();
// SUBMITTED => KILLED event RMAppEventType.KILL
- RMAppEvent event =
- new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL);
+ this.rmContext.getRMApps().putIfAbsent(application.getApplicationId(), application);
application.handle(event);
- assertKilled(application);
+ assertAppAndAttemptKilled(application);
}
@Test
From 8e4c70fb20f90aa0613d262725c2eaa878d331d4 Mon Sep 17 00:00:00 2001
From: Mahadev Konar
Date: Tue, 27 Sep 2011 20:41:56 +0000
Subject: [PATCH 49/68] MAPREDUCE-3099. Add docs for setting up a single node
MRv2 cluster. (mahadev)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176607 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../src/site/apt/SingleCluster.apt.vm | 180 ++++++++++++++++++
.../hadoop-yarn/src/site/apt/index.apt.vm | 39 ++++
.../hadoop-yarn/src/site/site.xml | 34 ++++
4 files changed, 256 insertions(+)
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/src/site/apt/SingleCluster.apt.vm
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/src/site/apt/index.apt.vm
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/src/site/site.xml
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 77aab5002a..ca69f28166 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -318,6 +318,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3092. Removed a special comparator for JobIDs in JobHistory as
JobIDs are already comparable. (Devaraj K via vinodkv)
+ MAPREDUCE-3099. Add docs for setting up a single node MRv2 cluster.
+ (mahadev)
+
OPTIMIZATIONS
MAPREDUCE-2026. Make JobTracker.getJobCounters() and
diff --git a/hadoop-mapreduce-project/hadoop-yarn/src/site/apt/SingleCluster.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/src/site/apt/SingleCluster.apt.vm
new file mode 100644
index 0000000000..affb277b7f
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/src/site/apt/SingleCluster.apt.vm
@@ -0,0 +1,180 @@
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License. See accompanying LICENSE file.
+
+ ---
+ Hadoop MapReduce Next Generation ${project.version} - Setting up a Single Node Cluster.
+ ---
+ ---
+ ${maven.build.timestamp}
+
+Hadoop MapReduce Next Generation - Setting up a Single Node Cluster.
+
+ \[ {{{./index.html}Go Back}} \]
+
+* Mapreduce Tarball
+
+ You should be able to obtain the MapReduce tarball from the release.
+ If not, you should be able to create a tarball from the source.
+
++---+
+$ mvn clean install -DskipTests
+$ cd hadoop-mapreduce-project
+$ mvn clean install assembly:assembly
++---+
+ <<Note:>> You will need protoc 2.4.1 or greater installed.
+
+ To ignore the native builds in mapreduce you can use the <<<-P-cbuild>>> argument
+ for maven. The tarball should be available in the <<<target>>> directory.
+
+
+* Setting up the environment.
+
+ Assuming you have installed hadoop-common/hadoop-hdfs and exported
+ <<$HADOOP_COMMON_HOME>>/<<$HADOOP_HDFS_HOME>>, untar the hadoop mapreduce
+ tarball and set the environment variable <<$HADOOP_MAPRED_HOME>> to the
+ untarred directory. Set <<$YARN_HOME>> the same as <<$HADOOP_MAPRED_HOME>>.
+
+ <<Note:>> The following instructions assume you have HDFS running.
+
+* Setting up Configuration.
+
+ To start the ResourceManager and NodeManager, you will have to update the configs.
+ Assuming your $HADOOP_CONF_DIR is the configuration directory with the installed
+ configs for HDFS and <<<core-site.xml>>>, there are 2 config files you will have
+ to set up: <<<mapred-site.xml>>> and <<<yarn-site.xml>>>.
+
+** Setting up <<<mapred-site.xml>>>
+
+ Add the following configs to your <<<mapred-site.xml>>>.
+
++---+
+ <property>
+   <name>mapreduce.cluster.temp.dir</name>
+   <value></value>
+   <description>No description</description>
+   <final>true</final>
+ </property>
+
+ <property>
+   <name>mapreduce.cluster.local.dir</name>
+   <value></value>
+   <description>No description</description>
+   <final>true</final>
+ </property>
++---+
+
+** Setting up <<<yarn-site.xml>>>
+
+ Add the following configs to your <<<yarn-site.xml>>>
+
++---+
+ <property>
+   <name>yarn.resourcemanager.resource-tracker.address</name>
+   <value>host:port</value>
+   <description>host is the hostname of the resource manager and
+   port is the port on which the NodeManagers contact the Resource Manager.
+   </description>
+ </property>
+
+ <property>
+   <name>yarn.resourcemanager.scheduler.address</name>
+   <value>host:port</value>
+   <description>host is the hostname of the resourcemanager and port is the port
+   on which the Applications in the cluster talk to the Resource Manager.
+   </description>
+ </property>
+
+ <property>
+   <name>yarn.resourcemanager.scheduler.class</name>
+   <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
+   <description>In case you do not want to use the default scheduler</description>
+ </property>
+
+ <property>
+   <name>yarn.resourcemanager.address</name>
+   <value>host:port</value>
+   <description>the host is the hostname of the ResourceManager and the port is the port on
+   which the clients can talk to the Resource Manager.</description>
+ </property>
+
+ <property>
+   <name>yarn.nodemanager.local-dirs</name>
+   <value></value>
+   <description>the local directories used by the nodemanager</description>
+ </property>
+
+ <property>
+   <name>yarn.nodemanager.address</name>
+   <value>0.0.0.0:port</value>
+   <description>the nodemanagers bind to this port</description>
+ </property>
+
+ <property>
+   <name>yarn.nodemanager.resource.memory-gb</name>
+   <value>10</value>
+   <description>the amount of memory on the NodeManager in GB</description>
+ </property>
+
+ <property>
+   <name>yarn.nodemanager.remote-app-log-dir</name>
+   <value>/app-logs</value>
+   <description>directory on hdfs where the application logs are moved to</description>
+ </property>
+
+ <property>
+   <name>yarn.nodemanager.log-dirs</name>
+   <value></value>
+   <description>the directories used by Nodemanagers as log directories</description>
+ </property>
+
+ <property>
+   <name>yarn.nodemanager.aux-services</name>
+   <value>mapreduce.shuffle</value>
+   <description>shuffle service that needs to be set for Map Reduce to run</description>
+ </property>
++---+
+
+* Create Symlinks.
+
+ You will have to create the following symlinks:
+
++---+
+$ cd $HADOOP_COMMON_HOME/share/hadoop/common/lib/
+$ ln -s $HADOOP_MAPRED_HOME/modules/hadoop-mapreduce-client-app-*-SNAPSHOT.jar .
+$ ln -s $HADOOP_MAPRED_HOME/modules/hadoop-mapreduce-client-jobclient-*-SNAPSHOT.jar .
+$ ln -s $HADOOP_MAPRED_HOME/modules/hadoop-mapreduce-client-common-*-SNAPSHOT.jar .
+$ ln -s $HADOOP_MAPRED_HOME/modules/hadoop-mapreduce-client-shuffle-*-SNAPSHOT.jar .
+$ ln -s $HADOOP_MAPRED_HOME/modules/hadoop-mapreduce-client-core-*-SNAPSHOT.jar .
+$ ln -s $HADOOP_MAPRED_HOME/modules/hadoop-yarn-common-*-SNAPSHOT.jar .
+$ ln -s $HADOOP_MAPRED_HOME/modules/hadoop-yarn-api-*-SNAPSHOT.jar .
++---+
+* Running daemons.
+
+ Assuming that the environment variables <<$HADOOP_COMMON_HOME>>, <<$HADOOP_HDFS_HOME>>, <<$HADOOP_MAPRED_HOME>>,
+ <<$YARN_HOME>>, <<$JAVA_HOME>> and <<$HADOOP_CONF_DIR>> have been set appropriately,
+ set <<$YARN_CONF_DIR>> the same as <<$HADOOP_CONF_DIR>>.
+
+ Run ResourceManager and NodeManager as:
+
++---+
+$ cd $HADOOP_MAPRED_HOME
+$ bin/yarn-daemon.sh start resourcemanager
+$ bin/yarn-daemon.sh start nodemanager
++---+
+
+ You should be up and running. You can run randomwriter as:
+
++---+
+$ $HADOOP_COMMON_HOME/bin/hadoop jar hadoop-examples.jar randomwriter out
++---+
+
+Good luck.
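The yarn-site.xml keys shown above are read back by the daemons through YarnConfiguration. A rough sketch of reading them follows; the key names are taken from the snippet above, and it is assumed that yarn-site.xml from $HADOOP_CONF_DIR is on the classpath so YarnConfiguration can load it.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class ReadYarnSiteSketch {
      public static void main(String[] args) {
        // yarn-site.xml from the classpath is loaded here.
        Configuration conf = new YarnConfiguration();
        // get(...) returns null when the key is not set in yarn-site.xml.
        System.out.println("RM client address : " + conf.get("yarn.resourcemanager.address"));
        System.out.println("NM local dirs     : " + conf.get("yarn.nodemanager.local-dirs"));
        System.out.println("Aux services      : " + conf.get("yarn.nodemanager.aux-services"));
      }
    }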
diff --git a/hadoop-mapreduce-project/hadoop-yarn/src/site/apt/index.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/src/site/apt/index.apt.vm
new file mode 100644
index 0000000000..db9fe87034
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/src/site/apt/index.apt.vm
@@ -0,0 +1,39 @@
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License. See accompanying LICENSE file.
+
+ ---
+ Hadoop MapReduce Next Generation ${project.version}
+ ---
+ ---
+ ${maven.build.timestamp}
+
+Hadoop MapReduce Next Generation
+
+* Architecture
+
+ The new architecture introduced in 0.23 divides the two major functions
+ of the JobTracker, resource management and job scheduling/monitoring, into separate
+ components.
+ The new ResourceManager manages the global assignment of compute resources to applications
+ and the per-application ApplicationMaster manages the application’s scheduling and coordination.
+ An application is either a single job in the classical sense of MapReduce jobs or a DAG of such jobs.
+ The ResourceManager and per-machine NodeManager server, which manages the user processes on that
+ machine, form the computation fabric. The per-application ApplicationMaster is, in effect, a
+ framework-specific library and is tasked with negotiating resources from the ResourceManager
+ and working with the NodeManager(s) to execute and monitor the tasks.
+
+* User Documentation
+
+ * {{{./SingleCluster.html}SingleCluster}}
+
+ * {{{./apidocs/index.html}JavaDocs}}
+
diff --git a/hadoop-mapreduce-project/hadoop-yarn/src/site/site.xml b/hadoop-mapreduce-project/hadoop-yarn/src/site/site.xml
new file mode 100644
index 0000000000..35a75cb2e5
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/src/site/site.xml
@@ -0,0 +1,34 @@
+
+
+
+
+
+
+
+
+
+ <skin>
+   <groupId>org.apache.maven.skins</groupId>
+   <artifactId>maven-stylus-skin</artifactId>
+   <version>1.1</version>
+ </skin>
+
+
+
+
+
+
+
+
From 201b7879ba6994400c1aa955f242665c16c2c0ea Mon Sep 17 00:00:00 2001
From: Suresh Srinivas
Date: Tue, 27 Sep 2011 23:16:17 +0000
Subject: [PATCH 50/68] HDFS-2373. Commands using webhdfs and hftp print
unnecessary debug info on the console with security enabled. Contributed by
Arpit Gupta.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176654 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../hdfs/tools/DelegationTokenFetcher.java | 45 +++++++++++++------
2 files changed, 35 insertions(+), 13 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e1c3993e59..40a95c746c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -72,6 +72,9 @@ Trunk (unreleased changes)
HDFS-2366. Initialize WebHdfsFileSystem.ugi in object construction.
(szetszwo)
+ HDFS-2373. Commands using webhdfs and hftp print unnecessary debug
+ info on the console with security enabled. (Arpit Gupta via suresh)
+
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
index d085534e11..1e85393343 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
@@ -149,7 +149,9 @@ public Object run() throws Exception {
DataInputStream in = new DataInputStream(
new ByteArrayInputStream(token.getIdentifier()));
id.readFields(in);
- System.out.println("Token (" + id + ") for " + token.getService());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Token (" + id + ") for " + token.getService());
+ }
}
return null;
}
@@ -160,22 +162,28 @@ public Object run() throws Exception {
for (Token<?> token : readTokens(tokenFile, conf)) {
result = renewDelegationToken(webUrl,
(Token) token);
- System.out.println("Renewed token via " + webUrl + " for "
- + token.getService() + " until: " + new Date(result));
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Renewed token via " + webUrl + " for "
+ + token.getService() + " until: " + new Date(result));
+ }
}
} else if (cancel) {
for (Token<?> token : readTokens(tokenFile, conf)) {
cancelDelegationToken(webUrl,
(Token) token);
- System.out.println("Cancelled token via " + webUrl + " for "
- + token.getService());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Cancelled token via " + webUrl + " for "
+ + token.getService());
+ }
}
} else {
Credentials creds = getDTfromRemote(webUrl, renewer);
creds.writeTokenStorageFile(tokenFile, conf);
for (Token<?> token : creds.getAllTokens()) {
- System.out.println("Fetched token via " + webUrl + " for "
- + token.getService() + " into " + tokenFile);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Fetched token via " + webUrl + " for "
+ + token.getService() + " into " + tokenFile);
+ }
}
}
} else {
@@ -184,24 +192,30 @@ public Object run() throws Exception {
for (Token<?> token : readTokens(tokenFile, conf)) {
((DistributedFileSystem) fs)
.cancelDelegationToken((Token) token);
- System.out.println("Cancelled token for "
- + token.getService());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Cancelled token for "
+ + token.getService());
+ }
}
} else if (renew) {
long result;
for (Token<?> token : readTokens(tokenFile, conf)) {
result = ((DistributedFileSystem) fs)
.renewDelegationToken((Token) token);
- System.out.println("Renewed token for " + token.getService()
- + " until: " + new Date(result));
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Renewed token for " + token.getService()
+ + " until: " + new Date(result));
+ }
}
} else {
Token<?> token = fs.getDelegationToken(renewer);
Credentials cred = new Credentials();
cred.addToken(token.getService(), token);
cred.writeTokenStorageFile(tokenFile, conf);
- System.out.println("Fetched token for " + token.getService()
- + " into " + tokenFile);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Fetched token for " + token.getService()
+ + " into " + tokenFile);
+ }
}
}
return null;
@@ -221,6 +235,11 @@ static public Credentials getDTfromRemote(String nnAddr,
} else {
url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC);
}
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Retrieving token from: " + url);
+ }
+
URL remoteURL = new URL(url.toString());
SecurityUtil.fetchServiceTicket(remoteURL);
URLConnection connection = remoteURL.openConnection();
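All of the hunks above apply the same guard idiom: build the log message only when debug logging is on, which keeps both the string concatenation and the token details off the console during normal runs. A standalone sketch of the idiom, with illustrative method and parameter names:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class GuardedDebugSketch {
      private static final Log LOG = LogFactory.getLog(GuardedDebugSketch.class);

      void report(String service, String file) {
        // The message is only constructed when DEBUG is enabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Fetched token for " + service + " into " + file);
        }
      }
    }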
From 5f6adcf396b78934d7da2bba0d052038ff962f26 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Wed, 28 Sep 2011 01:23:04 +0000
Subject: [PATCH 51/68] MAPREDUCE-3021. Change base urls for RM web-ui.
Contributed by Thomas Graves.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176681 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 ++
.../v2/app/client/MRClientService.java | 2 +-
.../mapreduce/v2/app/webapp/NavBlock.java | 6 +--
.../mapreduce/v2/app/webapp/TaskPage.java | 2 +-
.../v2/jobhistory/JobHistoryUtils.java | 2 +-
.../mapreduce/v2/hs/HistoryClientService.java | 2 +-
.../hadoop/yarn/conf/YarnConfiguration.java | 2 +-
.../yarn/conf/TestYarnConfiguration.java | 54 +++++++++++++++++++
.../server/nodemanager/webapp/WebServer.java | 2 +-
.../resourcemanager/ResourceManager.java | 2 +-
.../resourcemanager/webapp/RmController.java | 2 +-
11 files changed, 68 insertions(+), 11 deletions(-)
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index ca69f28166..63e6b3d44e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1457,6 +1457,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3054. Unable to kill submitted jobs. (mahadev)
+ MAPREDUCE-3021. Change base urls for RM web-ui. (Thomas Graves via
+ acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
index 73359bb12a..f84a4d9dbe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
@@ -149,7 +149,7 @@ public void start() {
+ ":" + server.getPort());
LOG.info("Instantiated MRClientService at " + this.bindAddress);
try {
- webApp = WebApps.$for("yarn", AppContext.class, appContext).with(conf).
+ webApp = WebApps.$for("mapreduce", AppContext.class, appContext).with(conf).
start(new AMWebApp());
} catch (Exception e) {
LOG.error("Webapps failed to start. Ignoring for now:", e);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
index bb4e2390a7..8b4524ad11 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
@@ -38,9 +38,9 @@ public class NavBlock extends HtmlBlock {
div("#nav").
h3("Cluster").
ul().
- li().a(url(rmweb, prefix(), "cluster"), "About")._().
- li().a(url(rmweb, prefix(), "apps"), "Applications")._().
- li().a(url(rmweb, prefix(), "scheduler"), "Scheduler")._()._().
+ li().a(url(rmweb, "cluster", "cluster"), "About")._().
+ li().a(url(rmweb, "cluster", "apps"), "Applications")._().
+ li().a(url(rmweb, "cluster", "scheduler"), "Scheduler")._()._().
h3("Application").
ul().
li().a(url("app/info"), "About")._().
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
index d9884d146a..736bef639e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
@@ -85,7 +85,7 @@ protected void render(Block html) {
if (containerId != null) {
String containerIdStr = ConverterUtils.toString(containerId);
nodeTd._(" ").
- a(".logslink", url("http://", nodeHttpAddr, "yarn", "containerlogs",
+ a(".logslink", url("http://", nodeHttpAddr, "node", "containerlogs",
containerIdStr), "logs");
}
nodeTd._().
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
index dcddd126cc..e57cf8d3c6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
@@ -489,7 +489,7 @@ public static String getHistoryUrl(Configuration conf, ApplicationId appId)
sb.append(address.getHostName());
}
sb.append(":").append(address.getPort());
- sb.append("/yarn/job/"); // TODO This will change when the history server
+ sb.append("/jobhistory/job/"); // TODO This will change when the history server
// understands apps.
// TOOD Use JobId toString once UI stops using _id_id
sb.append("job_").append(appId.getClusterTimestamp());
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
index 56f114adc5..e86eb279e9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
@@ -135,7 +135,7 @@ private void initializeWebApp(Configuration conf) {
webApp = new HsWebApp(history);
String bindAddress = conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
- WebApps.$for("yarn", this).at(bindAddress).start(webApp);
+ WebApps.$for("jobhistory", this).at(bindAddress).start(webApp);
}
@Override
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f34830c605..cb955af8c4 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -386,6 +386,6 @@ public static String getRMWebAppURL(Configuration conf) {
// Use apps manager address to figure out the host for webapp
addr = conf.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS);
String host = ADDR_SPLITTER.split(addr).iterator().next();
- return JOINER.join("http://", host, ":", port, "/");
+ return JOINER.join("http://", host, ":", port);
}
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java
new file mode 100644
index 0000000000..3d2a576909
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java
@@ -0,0 +1,54 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.conf;
+
+import java.net.InetSocketAddress;
+
+import junit.framework.Assert;
+
+import org.apache.avro.ipc.Server;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Test;
+
+public class TestYarnConfiguration {
+
+ @Test
+ public void testDefaultRMWebUrl() throws Exception {
+ YarnConfiguration conf = new YarnConfiguration();
+ String rmWebUrl = YarnConfiguration.getRMWebAppURL(conf);
+ // shouldn't have a "/" on the end of the url as all the other uri routines
+ // specifically add slashes and Jetty doesn't handle double slashes.
+ Assert.assertEquals("RM Web Url is not correct", "http://0.0.0.0:8088",
+ rmWebUrl);
+ }
+
+ @Test
+ public void testRMWebUrlSpecified() throws Exception {
+ YarnConfiguration conf = new YarnConfiguration();
+ // seems a bit odd but right now we are forcing webapp for RM to be RM_ADDRESS
+ // for host and use the port from the RM_WEBAPP_ADDRESS
+ conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "footesting:99110");
+ conf.set(YarnConfiguration.RM_ADDRESS, "rmtesting:9999");
+ String rmWebUrl = YarnConfiguration.getRMWebAppURL(conf);
+ Assert.assertEquals("RM Web Url is not correct", "http://rmtesting:99110",
+ rmWebUrl);
+ }
+
+}
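The trailing slash matters because callers append their own path segments; with the slash removed, no "//" shows up in the final URLs. A small sketch of the intended usage, with an illustrative path suffix:

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class RmWebUrlSketch {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        String base = YarnConfiguration.getRMWebAppURL(conf);  // e.g. http://0.0.0.0:8088
        // Downstream code supplies the leading "/" itself.
        System.out.println(base + "/cluster/apps");
      }
    }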
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
index 307e87eccd..a043a37f59 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
@@ -57,7 +57,7 @@ public synchronized void start() {
LOG.info("Instantiating NMWebApp at " + bindAddress);
try {
this.webApp =
- WebApps.$for("yarn", Context.class, this.nmContext)
+ WebApps.$for("node", Context.class, this.nmContext)
.at(bindAddress).with(getConfig())
.start(new NMWebApp(this.resourceView));
} catch (Exception e) {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 8bd45dff4d..d1515e4fb5 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -385,7 +385,7 @@ public void handle(RMNodeEvent event) {
}
protected void startWepApp() {
- webApp = WebApps.$for("yarn", masterService).at(
+ webApp = WebApps.$for("cluster", masterService).at(
conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS,
YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS)).
start(new RMWebApp(this));
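The first argument to WebApps.$for(...) becomes the path prefix the pages are served under, which is what this patch changes from the shared "yarn" to "cluster", "node", "mapreduce" and "jobhistory". A rough sketch of the builder usage; the service object, bind address and web app instance here are placeholders, not part of the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.webapp.WebApp;
    import org.apache.hadoop.yarn.webapp.WebApps;

    public class WebAppPrefixSketch {
      // 'service' and 'ui' stand in for the daemon-specific objects used above.
      WebApp startClusterUi(Object service, WebApp ui, Configuration conf) {
        return WebApps.$for("cluster", service)   // pages end up under http://host:port/cluster
            .at("0.0.0.0:8088")                   // placeholder bind address
            .with(conf)
            .start(ui);
      }
    }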
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
index 234f93e2f0..698bc3c933 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
@@ -102,7 +102,7 @@ public void app() {
.getMasterContainer();
if (masterContainer != null) {
String url = join("http://", masterContainer.getNodeHttpAddress(),
- "/yarn", "/containerlogs/",
+ "/node", "/containerlogs/",
ConverterUtils.toString(masterContainer.getId()));
info._("AM container logs:", url, url);
} else {
From eb3a692a57614065dd441c16430326ab63df1dc7 Mon Sep 17 00:00:00 2001
From: Todd Lipcon
Date: Wed, 28 Sep 2011 03:00:59 +0000
Subject: [PATCH 52/68] HDFS-2332. Add test for HADOOP-7629 (using an immutable
FsPermission object as an RPC parameter fails). Contributed by Todd Lipcon.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176692 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../apache/hadoop/hdfs/TestDFSPermission.java | 50 +++++++++++--------
2 files changed, 33 insertions(+), 20 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 40a95c746c..ebc0c9bf0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -757,6 +757,9 @@ Release 0.23.0 - Unreleased
HDFS-1217. Change some NameNode methods from public to package private.
(Laxman via szetszwo)
+ HDFS-2332. Add test for HADOOP-7629 (using an immutable FsPermission
+ object as an RPC parameter fails). (todd)
+
OPTIMIZATIONS
HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index 2d50ce440f..495e8e191a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -72,6 +72,7 @@ public class TestDFSPermission extends TestCase {
final private static Path NON_EXISTENT_FILE = new Path("/NonExistentFile");
private FileSystem fs;
+ private MiniDFSCluster cluster;
private static Random r;
static {
@@ -105,18 +106,25 @@ public class TestDFSPermission extends TestCase {
}
}
+ @Override
+ public void setUp() throws IOException {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+ cluster.waitActive();
+ }
+
+ @Override
+ public void tearDown() throws IOException {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
/** This tests if permission setting in create, mkdir, and
* setPermission works correctly
*/
public void testPermissionSetting() throws Exception {
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
- try {
- cluster.waitActive();
- testPermissionSetting(OpType.CREATE); // test file creation
- testPermissionSetting(OpType.MKDIRS); // test directory creation
- } finally {
- cluster.shutdown();
- }
+ testPermissionSetting(OpType.CREATE); // test file creation
+ testPermissionSetting(OpType.MKDIRS); // test directory creation
}
private void initFileSystem(short umask) throws Exception {
@@ -245,17 +253,22 @@ private void checkPermission(Path name, short expectedPermission,
}
}
+ /**
+ * check that ImmutableFsPermission can be used as the argument
+ * to setPermission
+ */
+ public void testImmutableFsPermission() throws IOException {
+ fs = FileSystem.get(conf);
+
+ // set the permission of the root to be world-wide rwx
+ fs.setPermission(new Path("/"),
+ FsPermission.createImmutable((short)0777));
+ }
+
/* check if the ownership of a file/directory is set correctly */
public void testOwnership() throws Exception {
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
- try {
- cluster.waitActive();
- testOwnership(OpType.CREATE); // test file creation
- testOwnership(OpType.MKDIRS); // test directory creation
- } finally {
- fs.close();
- cluster.shutdown();
- }
+ testOwnership(OpType.CREATE); // test file creation
+ testOwnership(OpType.MKDIRS); // test directory creation
}
/* change a file/directory's owner and group.
@@ -342,9 +355,7 @@ private enum OpType {CREATE, MKDIRS, OPEN, SET_REPLICATION,
/* Check if namenode performs permission checking correctly for
* superuser, file owner, group owner, and other users */
public void testPermissionChecking() throws Exception {
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
- cluster.waitActive();
fs = FileSystem.get(conf);
// set the permission of the root to be world-wide rwx
@@ -401,7 +412,6 @@ public void testPermissionChecking() throws Exception {
parentPermissions, permissions, parentPaths, filePaths, dirPaths);
} finally {
fs.close();
- cluster.shutdown();
}
}
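The reshaped test now brings up one MiniDFSCluster per test case in setUp()/tearDown(), and the new testImmutableFsPermission covers the HADOOP-7629 regression. A condensed, self-contained sketch of that pattern; the single datanode is chosen only to keep the example small.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class ImmutablePermissionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
          cluster.waitActive();
          FileSystem fs = cluster.getFileSystem();
          // An immutable FsPermission must still serialize correctly when sent over RPC.
          fs.setPermission(new Path("/"), FsPermission.createImmutable((short) 0777));
        } finally {
          cluster.shutdown();
        }
      }
    }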
From d06916f03382a2fae25553d739b5e7231d8bd091 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Wed, 28 Sep 2011 04:02:34 +0000
Subject: [PATCH 53/68] MAPREDUCE-3021. Adding additional files needed for new
web-ui.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176708 13f79535-47bb-0310-9956-ffa450edef68
---
.../hadoop-yarn-common/src/main/resources/webapps/cluster/.keep | 0
.../src/main/resources/webapps/jobhistory/.keep | 0
.../hadoop-yarn-common/src/main/resources/webapps/mapreduce/.keep | 0
.../hadoop-yarn-common/src/main/resources/webapps/node/.keep | 0
4 files changed, 0 insertions(+), 0 deletions(-)
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/cluster/.keep
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/jobhistory/.keep
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/mapreduce/.keep
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/node/.keep
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/cluster/.keep b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/cluster/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/jobhistory/.keep b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/jobhistory/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/mapreduce/.keep b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/mapreduce/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/node/.keep b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/node/.keep
new file mode 100644
index 0000000000..e69de29bb2
From eda0ad4fd0a42e40e6013c747c350a8e078d342f Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Wed, 28 Sep 2011 04:20:16 +0000
Subject: [PATCH 54/68] MAPREDUCE-3041. Fixed ClientRMProtocol to provide
min/max resource capabilities along-with new ApplicationId for application
submission. Contributed by Hitesh Shah.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176715 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 4 +
.../hadoop/mapred/ResourceMgrDelegate.java | 6 +-
.../hadoop/mapred/TestClientRedirect.java | 6 +-
.../hadoop/mapreduce/v2/TestYARNRunner.java | 17 +-
.../hadoop/yarn/api/ClientRMProtocol.java | 16 +-
...est.java => GetNewApplicationRequest.java} | 4 +-
...se.java => GetNewApplicationResponse.java} | 31 +++-
.../pb/GetNewApplicationIdResponsePBImpl.java | 109 -----------
...va => GetNewApplicationRequestPBImpl.java} | 21 +--
.../pb/GetNewApplicationResponsePBImpl.java | 173 ++++++++++++++++++
.../src/main/proto/client_RM_protocol.proto | 2 +-
.../src/main/proto/yarn_service_protos.proto | 6 +-
.../client/ClientRMProtocolPBClientImpl.java | 18 +-
.../ClientRMProtocolPBServiceImpl.java | 20 +-
.../java/org/apache/hadoop/yarn/TestRPC.java | 8 +-
.../resourcemanager/ClientRMService.java | 18 +-
.../yarn/server/resourcemanager/MockRM.java | 14 +-
.../yarn/server/resourcemanager/TestRM.java | 16 ++
18 files changed, 308 insertions(+), 181 deletions(-)
rename hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/{GetNewApplicationIdRequest.java => GetNewApplicationRequest.java} (91%)
rename hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/{GetNewApplicationIdResponse.java => GetNewApplicationResponse.java} (66%)
delete mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationIdResponsePBImpl.java
rename hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/{GetNewApplicationIdRequestPBImpl.java => GetNewApplicationRequestPBImpl.java} (68%)
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationResponsePBImpl.java
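
Taken together, the renamed RPC gives a client the new ApplicationId and the cluster's minimum and maximum container capabilities in one call. A hedged usage sketch follows; the helper class and method are illustrative only, and the ClientRMProtocol proxy is assumed to have been created elsewhere (for example through YarnRPC against the ResourceManager address).

    import org.apache.hadoop.yarn.api.ClientRMProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.Records;

    public class GetNewApplicationSketch {
      // "client" is a ClientRMProtocol proxy obtained elsewhere (e.g. via YarnRPC).
      static ApplicationId askForApplication(ClientRMProtocol client) throws Exception {
        GetNewApplicationResponse resp =
            client.getNewApplication(Records.newRecord(GetNewApplicationRequest.class));
        ApplicationId appId = resp.getApplicationId();
        Resource min = resp.getMinimumResourceCapability();
        Resource max = resp.getMaximumResourceCapability();
        // A submission can now size its AM container within the [min, max] range.
        System.out.println("appId=" + appId
            + " minMB=" + min.getMemory() + " maxMB=" + max.getMemory());
        return appId;
      }
    }
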
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 63e6b3d44e..e922974a61 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1460,6 +1460,10 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3021. Change base urls for RM web-ui. (Thomas Graves via
acmurthy)
+ MAPREDUCE-3041. Fixed ClientRMProtocol to provide min/max resource
+ capabilities along-with new ApplicationId for application submission.
+ (Hitesh Shah via acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 65e51735dd..8b7c818b1e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -53,7 +53,7 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
@@ -169,8 +169,8 @@ public String getFilesystemName() throws IOException, InterruptedException {
}
public JobID getNewJobID() throws IOException, InterruptedException {
- GetNewApplicationIdRequest request = recordFactory.newRecordInstance(GetNewApplicationIdRequest.class);
- applicationId = applicationsManager.getNewApplicationId(request).getApplicationId();
+ GetNewApplicationRequest request = recordFactory.newRecordInstance(GetNewApplicationRequest.class);
+ applicationId = applicationsManager.getNewApplication(request).getApplicationId();
return TypeConverter.fromYarn(applicationId);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index 54b0422648..d90e721694 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ -78,8 +78,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
@@ -245,7 +245,7 @@ public void start() {
}
@Override
- public GetNewApplicationIdResponse getNewApplicationId(GetNewApplicationIdRequest request) throws YarnRemoteException {
+ public GetNewApplicationResponse getNewApplication(GetNewApplicationRequest request) throws YarnRemoteException {
return null;
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java
index 8878851d5c..346ccd2f0d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java
@@ -39,7 +39,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.ClientCache;
import org.apache.hadoop.mapred.ClientServiceDelegate;
-import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.ResourceMgrDelegate;
import org.apache.hadoop.mapred.YARNRunner;
import org.apache.hadoop.mapreduce.JobID;
@@ -48,6 +47,8 @@
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
@@ -57,8 +58,6 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
@@ -213,13 +212,13 @@ public void testResourceMgrDelegate() throws Exception {
delegate.getActiveTrackers();
verify(clientRMProtocol).getClusterNodes(any(GetClusterNodesRequest.class));
- GetNewApplicationIdResponse newAppIdResponse = recordFactory.newRecordInstance(
- GetNewApplicationIdResponse.class);
- newAppIdResponse.setApplicationId(appId);
- when(clientRMProtocol.getNewApplicationId(any(GetNewApplicationIdRequest.class))).
- thenReturn(newAppIdResponse);
+ GetNewApplicationResponse newAppResponse = recordFactory.newRecordInstance(
+ GetNewApplicationResponse.class);
+ newAppResponse.setApplicationId(appId);
+ when(clientRMProtocol.getNewApplication(any(GetNewApplicationRequest.class))).
+ thenReturn(newAppResponse);
delegate.getNewJobID();
- verify(clientRMProtocol).getNewApplicationId(any(GetNewApplicationIdRequest.class));
+ verify(clientRMProtocol).getNewApplication(any(GetNewApplicationRequest.class));
GetQueueInfoResponse queueInfoResponse = recordFactory.newRecordInstance(
GetQueueInfoResponse.class);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java
index f16cb6da4a..fb93459135 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java
@@ -31,8 +31,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
@@ -62,14 +62,18 @@ public interface ClientRMProtocol {
* The ResourceManager responds with a new, monotonically
* increasing, {@link ApplicationId} which is used by the client to submit
* a new application.
- *
+ *
+ * The ResourceManager also responds with details such
+ * as minimum and maximum resource capabilities in the cluster as specified in
+ * {@link GetNewApplicationResponse}.
+ *
* @param request request to get a new ApplicationId
* @return new ApplicationId to be used to submit an application
* @throws YarnRemoteException
* @see #submitApplication(SubmitApplicationRequest)
*/
- public GetNewApplicationIdResponse getNewApplicationId(
- GetNewApplicationIdRequest request)
+ public GetNewApplicationResponse getNewApplication(
+ GetNewApplicationRequest request)
throws YarnRemoteException;
/**
@@ -92,7 +96,7 @@ public GetNewApplicationIdResponse getNewApplicationId(
* @param request request to submit a new application
* @return (empty) response on accepting the submission
* @throws YarnRemoteException
- * @see #getNewApplicationId(GetNewApplicationIdRequest)
+ * @see #getNewApplication(GetNewApplicationRequest)
*/
public SubmitApplicationResponse submitApplication(
SubmitApplicationRequest request)
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationRequest.java
similarity index 91%
rename from hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdRequest.java
rename to hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationRequest.java
index c841070080..a70989f1aa 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdRequest.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationRequest.java
@@ -27,10 +27,10 @@
* The request sent by clients to get a new {@link ApplicationId} for
* submitting an application.
The response sent by the ResourceManager to the client for
* a request to a new {@link ApplicationId} for submitting applications.
*
- * @see ClientRMProtocol#getNewApplicationId(GetNewApplicationIdRequest)
+ * @see ClientRMProtocol#getNewApplication(GetNewApplicationRequest)
*/
@Public
@Stable
-public interface GetNewApplicationIdResponse {
+public interface GetNewApplicationResponse {
/**
* Get the new ApplicationId allocated by the
* ResourceManager.
@@ -47,4 +48,30 @@ public interface GetNewApplicationIdResponse {
@Private
@Unstable
public abstract void setApplicationId(ApplicationId applicationId);
+
+ /**
+ * Get the minimum capability for any {@link Resource} allocated by the
+ * ResourceManager in the cluster.
+ * @return minimum capability of allocated resources in the cluster
+ */
+ @Public
+ @Stable
+ public Resource getMinimumResourceCapability();
+
+ @Private
+ @Unstable
+ public void setMinimumResourceCapability(Resource capability);
+
+ /**
+ * Get the maximum capability for any {@link Resource} allocated by the
+ * ResourceManager in the cluster.
+ * @return maximum capability of allocated resources in the cluster
+ */
+ @Public
+ @Stable
+ public Resource getMaximumResourceCapability();
+
+ @Private
+ @Unstable
+ public void setMaximumResourceCapability(Resource capability);
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationIdResponsePBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationIdResponsePBImpl.java
deleted file mode 100644
index 45fefd390e..0000000000
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationIdResponsePBImpl.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
-
-
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ProtoBase;
-import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
-import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationIdResponseProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationIdResponseProtoOrBuilder;
-
-
-
- public class GetNewApplicationIdResponsePBImpl extends ProtoBase<GetNewApplicationIdResponseProto> implements GetNewApplicationIdResponse {
- GetNewApplicationIdResponseProto proto = GetNewApplicationIdResponseProto.getDefaultInstance();
- GetNewApplicationIdResponseProto.Builder builder = null;
- boolean viaProto = false;
-
- private ApplicationId applicationId = null;
-
-
- public GetNewApplicationIdResponsePBImpl() {
- builder = GetNewApplicationIdResponseProto.newBuilder();
- }
-
- public GetNewApplicationIdResponsePBImpl(GetNewApplicationIdResponseProto proto) {
- this.proto = proto;
- viaProto = true;
- }
-
- public GetNewApplicationIdResponseProto getProto() {
- mergeLocalToProto();
- proto = viaProto ? proto : builder.build();
- viaProto = true;
- return proto;
- }
-
- private void mergeLocalToBuilder() {
- if (applicationId != null) {
- builder.setApplicationId(convertToProtoFormat(this.applicationId));
- }
- }
-
- private void mergeLocalToProto() {
- if (viaProto)
- maybeInitBuilder();
- mergeLocalToBuilder();
- proto = builder.build();
- viaProto = true;
- }
-
- private void maybeInitBuilder() {
- if (viaProto || builder == null) {
- builder = GetNewApplicationIdResponseProto.newBuilder(proto);
- }
- viaProto = false;
- }
-
-
- @Override
- public ApplicationId getApplicationId() {
- GetNewApplicationIdResponseProtoOrBuilder p = viaProto ? proto : builder;
- if (this.applicationId != null) {
- return this.applicationId;
- }
- if (!p.hasApplicationId()) {
- return null;
- }
- this.applicationId = convertFromProtoFormat(p.getApplicationId());
- return this.applicationId;
- }
-
- @Override
- public void setApplicationId(ApplicationId applicationId) {
- maybeInitBuilder();
- if (applicationId == null)
- builder.clearApplicationId();
- this.applicationId = applicationId;
- }
-
- private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
- return new ApplicationIdPBImpl(p);
- }
-
- private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
- return ((ApplicationIdPBImpl)t).getProto();
- }
-
-
-
-}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationIdRequestPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationRequestPBImpl.java
similarity index 68%
rename from hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationIdRequestPBImpl.java
rename to hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationRequestPBImpl.java
index 0d318674d5..90eae07839 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationIdRequestPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationRequestPBImpl.java
@@ -19,27 +19,26 @@
package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.records.ProtoBase;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationIdRequestProto;
-
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationRequestProto;
-public class GetNewApplicationIdRequestPBImpl extends ProtoBase<GetNewApplicationIdRequestProto> implements GetNewApplicationIdRequest {
- GetNewApplicationIdRequestProto proto = GetNewApplicationIdRequestProto.getDefaultInstance();
- GetNewApplicationIdRequestProto.Builder builder = null;
+public class GetNewApplicationRequestPBImpl extends ProtoBase<GetNewApplicationRequestProto> implements GetNewApplicationRequest {
+ GetNewApplicationRequestProto proto = GetNewApplicationRequestProto.getDefaultInstance();
+ GetNewApplicationRequestProto.Builder builder = null;
boolean viaProto = false;
- public GetNewApplicationIdRequestPBImpl() {
- builder = GetNewApplicationIdRequestProto.newBuilder();
+ public GetNewApplicationRequestPBImpl() {
+ builder = GetNewApplicationRequestProto.newBuilder();
}
- public GetNewApplicationIdRequestPBImpl(GetNewApplicationIdRequestProto proto) {
+ public GetNewApplicationRequestPBImpl(GetNewApplicationRequestProto proto) {
this.proto = proto;
viaProto = true;
}
- public GetNewApplicationIdRequestProto getProto() {
+ public GetNewApplicationRequestProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
@@ -47,7 +46,7 @@ public GetNewApplicationIdRequestProto getProto() {
private void maybeInitBuilder() {
if (viaProto || builder == null) {
- builder = GetNewApplicationIdRequestProto.newBuilder(proto);
+ builder = GetNewApplicationRequestProto.newBuilder(proto);
}
viaProto = false;
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationResponsePBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationResponsePBImpl.java
new file mode 100644
index 0000000000..d15f1b7527
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationResponsePBImpl.java
@@ -0,0 +1,173 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationResponseProtoOrBuilder;
+
+public class GetNewApplicationResponsePBImpl extends ProtoBase<GetNewApplicationResponseProto> implements GetNewApplicationResponse {
+ GetNewApplicationResponseProto proto = GetNewApplicationResponseProto.getDefaultInstance();
+ GetNewApplicationResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationId applicationId = null;
+ private Resource minimumResourceCapability = null;
+ private Resource maximumResourceCapability = null;
+
+ public GetNewApplicationResponsePBImpl() {
+ builder = GetNewApplicationResponseProto.newBuilder();
+ }
+
+ public GetNewApplicationResponsePBImpl(GetNewApplicationResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetNewApplicationResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (applicationId != null) {
+ builder.setApplicationId(convertToProtoFormat(this.applicationId));
+ }
+ if (minimumResourceCapability != null) {
+ builder.setMinimumCapability(convertToProtoFormat(this.minimumResourceCapability));
+ }
+ if (maximumResourceCapability != null) {
+ builder.setMaximumCapability(convertToProtoFormat(this.maximumResourceCapability));
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetNewApplicationResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+
+ @Override
+ public ApplicationId getApplicationId() {
+ if (this.applicationId != null) {
+ return this.applicationId;
+ }
+
+ GetNewApplicationResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasApplicationId()) {
+ return null;
+ }
+
+ this.applicationId = convertFromProtoFormat(p.getApplicationId());
+ return this.applicationId;
+ }
+
+ @Override
+ public void setApplicationId(ApplicationId applicationId) {
+ maybeInitBuilder();
+ if (applicationId == null)
+ builder.clearApplicationId();
+ this.applicationId = applicationId;
+ }
+
+ @Override
+ public Resource getMaximumResourceCapability() {
+ if (this.maximumResourceCapability != null) {
+ return this.maximumResourceCapability;
+ }
+
+ GetNewApplicationResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasMaximumCapability()) {
+ return null;
+ }
+
+ this.maximumResourceCapability = convertFromProtoFormat(p.getMaximumCapability());
+ return this.maximumResourceCapability;
+ }
+
+ @Override
+ public Resource getMinimumResourceCapability() {
+ if (this.minimumResourceCapability != null) {
+ return this.minimumResourceCapability;
+ }
+
+ GetNewApplicationResponseProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasMinimumCapability()) {
+ return null;
+ }
+
+ this.minimumResourceCapability = convertFromProtoFormat(p.getMinimumCapability());
+ return this.minimumResourceCapability;
+ }
+
+ @Override
+ public void setMaximumResourceCapability(Resource capability) {
+ maybeInitBuilder();
+ if(maximumResourceCapability == null) {
+ builder.clearMaximumCapability();
+ }
+ this.maximumResourceCapability = capability;
+ }
+
+ @Override
+ public void setMinimumResourceCapability(Resource capability) {
+ maybeInitBuilder();
+ if(minimumResourceCapability == null) {
+ builder.clearMinimumCapability();
+ }
+ this.minimumResourceCapability = capability;
+ }
+
+ private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
+ return new ApplicationIdPBImpl(p);
+ }
+
+ private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+ return ((ApplicationIdPBImpl)t).getProto();
+ }
+
+ private Resource convertFromProtoFormat(ResourceProto resource) {
+ return new ResourcePBImpl(resource);
+ }
+
+ private ResourceProto convertToProtoFormat(Resource resource) {
+ return ((ResourcePBImpl)resource).getProto();
+ }
+
+}
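
A round trip through the new record, showing how the locally cached Resource is folded into the protobuf form by getProto() and read back from a proto-backed copy, might look like the sketch below. The class name and main() wrapper are illustrative only and not part of the patch; the calls themselves are the ones added above.

    import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
    import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationResponsePBImpl;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationResponseProto;
    import org.apache.hadoop.yarn.util.Records;

    public class ResponseRoundTripSketch {
      public static void main(String[] args) {
        GetNewApplicationResponsePBImpl response = new GetNewApplicationResponsePBImpl();
        Resource min = Records.newRecord(Resource.class);
        min.setMemory(1024);
        response.setMinimumResourceCapability(min);

        // getProto() merges the locally cached Resource into the protobuf builder.
        GetNewApplicationResponseProto proto = response.getProto();
        GetNewApplicationResponse copy = new GetNewApplicationResponsePBImpl(proto);
        System.out.println("min memory = " + copy.getMinimumResourceCapability().getMemory());
      }
    }
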
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto
index 760b75c021..fb5f5f6e74 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto
@@ -24,7 +24,7 @@ option java_generate_equals_and_hash = true;
import "yarn_service_protos.proto";
service ClientRMProtocolService {
- rpc getNewApplicationId (GetNewApplicationIdRequestProto) returns (GetNewApplicationIdResponseProto);
+ rpc getNewApplication (GetNewApplicationRequestProto) returns (GetNewApplicationResponseProto);
rpc getApplicationReport (GetApplicationReportRequestProto) returns (GetApplicationReportResponseProto);
rpc submitApplication (SubmitApplicationRequestProto) returns (SubmitApplicationResponseProto);
rpc forceKillApplication (KillApplicationRequestProto) returns (KillApplicationResponseProto);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index 84003ad439..1a992ad578 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -66,11 +66,13 @@ message AllocateResponseProto {
/////// client_RM_Protocol ///////////////////////////
//////////////////////////////////////////////////////
-message GetNewApplicationIdRequestProto {
+message GetNewApplicationRequestProto {
}
-message GetNewApplicationIdResponseProto {
+message GetNewApplicationResponseProto {
optional ApplicationIdProto application_id = 1;
+ optional ResourceProto minimumCapability = 2;
+ optional ResourceProto maximumCapability = 3;
}
message GetApplicationReportRequestProto {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java
index b06c2caa35..b4f2dc46e0 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java
@@ -33,8 +33,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
@@ -51,8 +51,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesResponsePBImpl;
-import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationIdRequestPBImpl;
-import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationIdResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoRequestPBImpl;
@@ -68,7 +68,7 @@
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationIdRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto;
@@ -138,11 +138,11 @@ public GetClusterMetricsResponse getClusterMetrics(
}
@Override
- public GetNewApplicationIdResponse getNewApplicationId(
- GetNewApplicationIdRequest request) throws YarnRemoteException {
- GetNewApplicationIdRequestProto requestProto = ((GetNewApplicationIdRequestPBImpl)request).getProto();
+ public GetNewApplicationResponse getNewApplication(
+ GetNewApplicationRequest request) throws YarnRemoteException {
+ GetNewApplicationRequestProto requestProto = ((GetNewApplicationRequestPBImpl)request).getProto();
try {
- return new GetNewApplicationIdResponsePBImpl(proxy.getNewApplicationId(null, requestProto));
+ return new GetNewApplicationResponsePBImpl(proxy.getNewApplication(null, requestProto));
} catch (ServiceException e) {
if (e.getCause() instanceof YarnRemoteException) {
throw (YarnRemoteException)e.getCause();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java
index 242e9624a1..342d864ca5 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java
@@ -23,7 +23,7 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
@@ -36,8 +36,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesResponsePBImpl;
-import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationIdRequestPBImpl;
-import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationIdResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoRequestPBImpl;
@@ -56,8 +56,8 @@
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesResponseProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationIdRequestProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationIdResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto;
@@ -116,13 +116,13 @@ public GetClusterMetricsResponseProto getClusterMetrics(RpcController arg0,
}
@Override
- public GetNewApplicationIdResponseProto getNewApplicationId(
- RpcController arg0, GetNewApplicationIdRequestProto proto)
+ public GetNewApplicationResponseProto getNewApplication(
+ RpcController arg0, GetNewApplicationRequestProto proto)
throws ServiceException {
- GetNewApplicationIdRequestPBImpl request = new GetNewApplicationIdRequestPBImpl(proto);
+ GetNewApplicationRequestPBImpl request = new GetNewApplicationRequestPBImpl(proto);
try {
- GetNewApplicationIdResponse response = real.getNewApplicationId(request);
- return ((GetNewApplicationIdResponsePBImpl)response).getProto();
+ GetNewApplicationResponse response = real.getNewApplication(request);
+ return ((GetNewApplicationResponsePBImpl)response).getProto();
} catch (YarnRemoteException e) {
throw new ServiceException(e);
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
index 332d044158..82b90d2f5a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
@@ -29,7 +29,7 @@
import org.apache.hadoop.yarn.api.ContainerManager;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
@@ -86,11 +86,11 @@ public void testUnknownCall() {
+ server.getPort()), conf);
try {
- proxy.getNewApplicationId(Records
- .newRecord(GetNewApplicationIdRequest.class));
+ proxy.getNewApplication(Records
+ .newRecord(GetNewApplicationRequest.class));
Assert.fail("Excepted RPC call to fail with unknown method.");
} catch (YarnRemoteException e) {
- Assert.assertEquals("Unknown method getNewApplicationId called on "
+ Assert.assertEquals("Unknown method getNewApplication called on "
+ "org.apache.hadoop.yarn.proto.ClientRMProtocol"
+ "$ClientRMProtocolService$BlockingInterface protocol.", e
.getMessage());
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 8e29b33ff8..2cf1900098 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -46,8 +46,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
@@ -165,11 +165,17 @@ public ApplicationId getNewApplicationId() {
}
@Override
- public GetNewApplicationIdResponse getNewApplicationId(
- GetNewApplicationIdRequest request) throws YarnRemoteException {
- GetNewApplicationIdResponse response = recordFactory
- .newRecordInstance(GetNewApplicationIdResponse.class);
+ public GetNewApplicationResponse getNewApplication(
+ GetNewApplicationRequest request) throws YarnRemoteException {
+ GetNewApplicationResponse response = recordFactory
+ .newRecordInstance(GetNewApplicationResponse.class);
response.setApplicationId(getNewApplicationId());
+ // Pick up min/max resource from scheduler...
+ response.setMinimumResourceCapability(scheduler
+ .getMinimumResourceCapability());
+ response.setMaximumResourceCapability(scheduler
+ .getMaximumResourceCapability());
+
return response;
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index fa2ca44d30..d2a9a11182 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -23,8 +23,8 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -74,11 +74,17 @@ public void waitForState(ApplicationId appId, RMAppState finalState)
Assert.assertEquals("App state is not correct (timedout)",
finalState, app.getState());
}
+
+ // get new application id
+ public GetNewApplicationResponse getNewAppId() throws Exception {
+ ClientRMProtocol client = getClientRMService();
+ return client.getNewApplication(Records.newRecord(GetNewApplicationRequest.class));
+ }
//client
public RMApp submitApp(int masterMemory) throws Exception {
ClientRMProtocol client = getClientRMService();
- GetNewApplicationIdResponse resp = client.getNewApplicationId(Records.newRecord(GetNewApplicationIdRequest.class));
+ GetNewApplicationResponse resp = client.getNewApplication(Records.newRecord(GetNewApplicationRequest.class));
ApplicationId appId = resp.getApplicationId();
SubmitApplicationRequest req = Records.newRecord(SubmitApplicationRequest.class);
@@ -89,7 +95,7 @@ public RMApp submitApp(int masterMemory) throws Exception {
sub.setUser("");
ContainerLaunchContext clc =
Records.newRecord(ContainerLaunchContext.class);
- Resource capability = Records.newRecord(Resource.class);
+ Resource capability = Records.newRecord(Resource.class);
capability.setMemory(masterMemory);
clc.setResource(capability);
sub.setAMContainerSpec(clc);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
index 03941e3625..3bba11e1fb 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
@@ -25,6 +25,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -40,6 +41,20 @@ public class TestRM {
private static final Log LOG = LogFactory.getLog(TestRM.class);
+ @Test
+ public void testGetNewAppId() throws Exception {
+ Logger rootLogger = LogManager.getRootLogger();
+ rootLogger.setLevel(Level.DEBUG);
+ MockRM rm = new MockRM();
+ rm.start();
+
+ GetNewApplicationResponse resp = rm.getNewAppId();
+ assert (resp.getApplicationId().getId() != 0);
+ assert (resp.getMinimumResourceCapability().getMemory() > 0);
+ assert (resp.getMaximumResourceCapability().getMemory() > 0);
+ rm.stop();
+ }
+
@Test
public void testAppWithNoContainers() throws Exception {
Logger rootLogger = LogManager.getRootLogger();
@@ -119,6 +134,7 @@ public void testAppOnMultiNode() throws Exception {
public static void main(String[] args) throws Exception {
TestRM t = new TestRM();
+ t.testGetNewAppId();
t.testAppWithNoContainers();
t.testAppOnMultiNode();
}
From 59265d6ed8c12876ec0687bac0032e40683fdd83 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Wed, 28 Sep 2011 04:51:29 +0000
Subject: [PATCH 55/68] HDFS-2368. Move SPNEGO conf properties from
hdfs-default.xml to hdfs-site.xml.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176719 13f79535-47bb-0310-9956-ffa450edef68
---
.../packages/templates/conf/hdfs-site.xml | 20 +++++++++++++++++++
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../src/main/resources/hdfs-default.xml | 20 -------------------
3 files changed, 23 insertions(+), 20 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml
index 69e078380c..ffec60355a 100644
--- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml
+++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml
@@ -144,6 +144,26 @@
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@${local.realm}</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPENGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/etc/security/keytabs/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
  <property>
    <name>dfs.namenode.keytab.file</name>
    <value>/etc/security/keytabs/nn.service.keytab</value>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ebc0c9bf0f..f1bbd49ed1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -44,6 +44,9 @@ Trunk (unreleased changes)
HDFS-2356. Support case insensitive query parameter names in webhdfs.
(szetszwo)
+ HDFS-2368. Move SPNEGO conf properties from hdfs-default.xml to
+ hdfs-site.xml. (szetszwo)
+
BUG FIXES
HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 9fae462f04..59a8ff645f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -683,24 +683,4 @@ creations/deletions), or "all".
-
-<property>
-  <name>dfs.web.authentication.kerberos.principal</name>
-  <value>HTTP/${dfs.web.hostname}@${kerberos.realm}</value>
-  <description>
-    The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-
-    The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-    HTTP SPENGO specification.
-  </description>
-</property>
-
-<property>
-  <name>dfs.web.authentication.kerberos.keytab</name>
-  <value>${user.home}/dfs.web.keytab</value>
-  <description>
-    The Kerberos keytab file with the credentials for the
-    HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-  </description>
-</property>
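
With the keys gone from hdfs-default.xml, they resolve only if the cluster's hdfs-site.xml provides them. A minimal sketch of reading them back, assuming an hdfs-site.xml is on the classpath; the class name is illustrative and not part of the patch.

    import org.apache.hadoop.conf.Configuration;

    public class SpnegoConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // hdfs-default.xml no longer carries these keys, so without a site file
        // the lookups below return null.
        conf.addResource("hdfs-site.xml");
        System.out.println("SPNEGO principal: "
            + conf.get("dfs.web.authentication.kerberos.principal"));
        System.out.println("SPNEGO keytab:    "
            + conf.get("dfs.web.authentication.kerberos.keytab"));
      }
    }
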
From 825f9c80a4f2788040cc17455bea96ffaa08cb5a Mon Sep 17 00:00:00 2001
From: Jitendra Nath Pandey
Date: Wed, 28 Sep 2011 05:29:09 +0000
Subject: [PATCH 56/68] HDFS-2361. hftp is broken, fixed username checks in
JspHelper.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176729 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
.../hadoop/hdfs/server/common/JspHelper.java | 20 +++++++++++++------
2 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f1bbd49ed1..c6a78355aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -78,6 +78,8 @@ Trunk (unreleased changes)
HDFS-2373. Commands using webhdfs and hftp print unnecessary debug
info on the console with security enabled. (Arpit Gupta via suresh)
+ HDFS-2361. hftp is broken, fixed username checks in JspHelper. (jitendra)
+
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index 82ec3bd771..67f67c0395 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -60,6 +60,7 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.Token;
@@ -552,7 +553,8 @@ public static UserGroupInformation getUGI(ServletContext context,
DelegationTokenIdentifier id = new DelegationTokenIdentifier();
id.readFields(in);
ugi = id.getUser();
- checkUsername(ugi.getUserName(), user);
+ checkUsername(ugi.getShortUserName(), usernameFromQuery);
+ checkUsername(ugi.getShortUserName(), user);
ugi.addToken(token);
ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
} else {
@@ -561,13 +563,11 @@ public static UserGroupInformation getUGI(ServletContext context,
"authenticated by filter");
}
ugi = UserGroupInformation.createRemoteUser(user);
+ checkUsername(ugi.getShortUserName(), usernameFromQuery);
// This is not necessarily true, could have been auth'ed by user-facing
// filter
ugi.setAuthenticationMethod(secureAuthMethod);
}
-
- checkUsername(user, usernameFromQuery);
-
} else { // Security's not on, pull from url
ugi = usernameFromQuery == null?
getDefaultWebUser(conf) // not specified in request
@@ -580,10 +580,18 @@ public static UserGroupInformation getUGI(ServletContext context,
return ugi;
}
+ /**
+ * Expected user name should be a short name.
+ */
private static void checkUsername(final String expected, final String name
) throws IOException {
- if (name != null && !name.equals(expected)) {
- throw new IOException("Usernames not matched: name=" + name
+ if (name == null) {
+ return;
+ }
+ KerberosName u = new KerberosName(name);
+ String shortName = u.getShortName();
+ if (!shortName.equals(expected)) {
+ throw new IOException("Usernames not matched: name=" + shortName
+ " != expected=" + expected);
}
}
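
The reworked check normalizes the incoming name to a Kerberos short name before comparing, so a query-string principal such as user/host@REALM no longer fails against a short expected name. A minimal standalone sketch of the same normalization is shown below; the class and method names are illustrative, and the auth_to_local rules are assumed to be configured, as the servlet filters normally ensure.

    import java.io.IOException;
    import org.apache.hadoop.security.authentication.util.KerberosName;

    public class ShortNameCheckSketch {
      // Mirrors the patched checkUsername(): "expected" is already a short name,
      // while "name" may arrive as a full principal such as user/host@REALM.
      static void check(String expected, String name) throws IOException {
        if (name == null) {
          return; // nothing supplied in the query string, nothing to verify
        }
        String shortName = new KerberosName(name).getShortName();
        if (!shortName.equals(expected)) {
          throw new IOException("Usernames not matched: name=" + shortName
              + " != expected=" + expected);
        }
      }
    }
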
From cc42ccf02acca7b5a971fa5ca0986e075169fc8c Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Wed, 28 Sep 2011 05:35:43 +0000
Subject: [PATCH 57/68] MAPREDUCE-2843. Fixed the node-table to be completely
displayed and making node entries on RM UI to be sortable. Contributed by
Abhijit Suresh Shingate.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176730 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../resourcemanager/webapp/NodesPage.java | 4 +-
.../resourcemanager/webapp/TestNodesPage.java | 55 +++++++++++++++++++
3 files changed, 60 insertions(+), 2 deletions(-)
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index e922974a61..fd7227078a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1464,6 +1464,9 @@ Release 0.23.0 - Unreleased
capabilities along-with new ApplicationId for application submission.
(Hitesh Shah via acmurthy)
+ MAPREDUCE-2843. Fixed the node-table to be completely displayed and making
+ node entries on RM UI to be sortable. (Abhijit Suresh Shingate via vinodkv)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index a621cc1047..1d074e3160 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -76,7 +76,7 @@ protected void render(Block html) {
// TODO: FIXME Vinodkv
// td(String.valueOf(ni.getUsedResource().getMemory())).
// td(String.valueOf(ni.getAvailableResource().getMemory())).
- _();
+ td("n/a")._();
}
tbody._()._();
}
@@ -100,7 +100,7 @@ private String nodesTableInit() {
// rack, nodeid, host, healthStatus, health update ts, health report,
// containers, memused, memavail
append(", aoColumns:[null, null, null, null, null, null, ").
- append("{bSearchable:false},{bSearchable:false},{bSearchable:false}]}").
+ append("{sType:'title-numeric', bSearchable:false}]}").
toString();
}
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
new file mode 100644
index 0000000000..e0583a2007
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import java.io.PrintWriter;
+
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.NodesPage.NodesBlock;
+import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * Tests that the NodesPage block table contains table body data for every
+ * column specified in the table header.
+ */
+public class TestNodesPage {
+
+ @Test
+ public void testNodesBlockRender() throws Exception {
+ int numberOfRacks = 2;
+ int numberOfNodesPerRack = 2;
+ // Number of Actual Table Headers for NodesPage.NodesBlock might change in
+ // future. In that case this value should be adjusted to the new value.
+ int numberOfActualTableHeaders = 7;
+
+ PrintWriter writer = WebAppTests.testBlock(
+ NodesBlock.class,
+ RMContext.class,
+ TestRMWebApp.mockRMContext(3, numberOfRacks, numberOfNodesPerRack,
+ 8 * TestRMWebApp.GiB)).getInstance(PrintWriter.class);
+
+ Mockito.verify(writer, Mockito.times(numberOfActualTableHeaders)).print(
+ "
Date: Wed, 28 Sep 2011 05:49:17 +0000
Subject: [PATCH 58/68] HDFS-2363. Move datanodes size printing from
FSNamesystem.metasave(..) to BlockManager. Contributed by Uma Maheswara Rao
G
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176733 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../hadoop/hdfs/server/blockmanagement/BlockManager.java | 5 +++++
.../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 5 -----
3 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c6a78355aa..4a37a36de8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -765,6 +765,9 @@ Release 0.23.0 - Unreleased
HDFS-2332. Add test for HADOOP-7629 (using an immutable FsPermission
object as an RPC parameter fails). (todd)
+ HDFS-2363. Move datanodes size printing from FSNamesystem.metasave(..)
+ to BlockManager. (Uma Maheswara Rao G via szetszwo)
+
OPTIMIZATIONS
HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 402e95c3cc..994275aec0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -308,6 +308,11 @@ public void setBlockPlacementPolicy(BlockPlacementPolicy newpolicy) {
/** Dump meta data to out. */
public void metaSave(PrintWriter out) {
assert namesystem.hasWriteLock();
+ final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+ datanodeManager.fetchDatanodes(live, dead, false);
+ out.println("Live Datanodes: " + live.size());
+ out.println("Dead Datanodes: " + dead.size());
//
// Dump contents of neededReplication
//
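For illustration, the metasave dump now begins with the datanode counts ahead of the needed-replication section; a hypothetical excerpt (the counts depend on the cluster):

  Live Datanodes: 3
  Dead Datanodes: 1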
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 116fa4826a..4851796cea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -564,11 +564,6 @@ void metaSave(String filename) throws IOException {
out.println(totalInodes + " files and directories, " + totalBlocks
+ " blocks = " + (totalInodes + totalBlocks) + " total");
- final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
- blockManager.getDatanodeManager().fetchDatanodes(live, dead, false);
- out.println("Live Datanodes: "+live.size());
- out.println("Dead Datanodes: "+dead.size());
blockManager.metaSave(out);
out.flush();
From f4a425e10d3d53ab4fdeba8f914b95d19a34f04f Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Wed, 28 Sep 2011 06:03:18 +0000
Subject: [PATCH 59/68] MAPREDUCE-3110. Fixed TestRPC failure. Contributed by
Vinod Kumar Vavilapalli.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176739 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 2 ++
.../src/test/java/org/apache/hadoop/yarn/TestRPC.java | 8 ++++----
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index fd7227078a..44c7d7c34a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1467,6 +1467,8 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2843. Fixed the node-table to be completely displayed and making
node entries on RM UI to be sortable. (Abhijit Suresh Shingate via vinodkv)
+ MAPREDUCE-3110. Fixed TestRPC failure. (vinodkv)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
index 82b90d2f5a..a855cc6f21 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
@@ -90,10 +90,10 @@ public void testUnknownCall() {
.newRecord(GetNewApplicationRequest.class));
Assert.fail("Excepted RPC call to fail with unknown method.");
} catch (YarnRemoteException e) {
- Assert.assertEquals("Unknown method getNewApplication called on "
- + "org.apache.hadoop.yarn.proto.ClientRMProtocol"
- + "$ClientRMProtocolService$BlockingInterface protocol.", e
- .getMessage());
+ Assert.assertTrue(e.getMessage().matches(
+ "Unknown method getNewApplication called on.*"
+ + "org.apache.hadoop.yarn.proto.ClientRMProtocol"
+ + "\\$ClientRMProtocolService\\$BlockingInterface protocol."));
}
}
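The switch from an exact-match assert to a regex presumably tolerates extra text that the RPC layer now inserts between the method name and the protocol name. A message of roughly the following shape would satisfy the pattern (the word "interface" is illustrative, not taken from the patch):

  Unknown method getNewApplication called on interface org.apache.hadoop.yarn.proto.ClientRMProtocol$ClientRMProtocolService$BlockingInterface protocol.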
From 312a7e71001d55f88781e56b331ab1b40a72a980 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Wed, 28 Sep 2011 07:31:03 +0000
Subject: [PATCH 60/68] MAPREDUCE-3078. Ensure MapReduce AM reports progress
correctly for displaying on the RM Web-UI. Contributed by Vinod K V.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176762 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../hadoop-mapreduce-client-app/pom.xml | 6 +
.../hadoop/mapreduce/v2/app/MRAppMaster.java | 4 +-
.../mapreduce/v2/app/job/impl/JobImpl.java | 29 +-
.../v2/app/local/LocalContainerAllocator.java | 21 +-
.../mapreduce/v2/app/rm/RMCommunicator.java | 54 +-
.../v2/app/rm/RMContainerRequestor.java | 13 +-
.../v2/app/TestRMContainerAllocator.java | 1069 ++++++++++-------
.../mapreduce/v2/util/MRBuilderUtils.java | 32 +-
.../hadoop-mapreduce-client-jobclient/pom.xml | 6 +
.../hadoop-mapreduce-client/pom.xml | 6 +
.../apache/hadoop/yarn/util/BuilderUtils.java | 24 +-
.../pom.xml | 14 +
.../server/resourcemanager/RMAppManager.java | 12 +-
.../resourcemanager/ResourceManager.java | 16 +-
.../server/resourcemanager/rmapp/RMApp.java | 2 +-
.../rmapp/attempt/RMAppAttempt.java | 2 +-
.../rmapp/attempt/RMAppAttemptImpl.java | 2 +
.../scheduler/SchedulerApp.java | 15 +-
.../event/NodeUpdateSchedulerEvent.java | 3 -
.../scheduler/fifo/FifoScheduler.java | 4 +-
.../yarn/server/resourcemanager/MockAM.java | 10 +-
.../TestAMRMRPCResponseId.java | 13 +-
.../resourcemanager/rmapp/MockRMApp.java | 1 +
.../TestContainerTokenSecretManager.java | 9 +-
25 files changed, 822 insertions(+), 548 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 44c7d7c34a..43fed6baa1 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1469,6 +1469,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3110. Fixed TestRPC failure. (vinodkv)
+ MAPREDUCE-3078. Ensure MapReduce AM reports progress correctly for
+ displaying on the RM Web-UI. (vinodkv via acmurthy)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
index 66ac197d84..0f12598fc1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
@@ -55,6 +55,12 @@
<artifactId>hadoop-yarn-server-resourcemanager</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-shuffle</artifactId>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index 6bd1c47133..8b7d578fc9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -549,9 +549,9 @@ public void start() {
// It's more test friendly to put it here.
DefaultMetricsSystem.initialize("MRAppMaster");
- /** create a job event for job intialization */
+ // create a job event for job initialization
JobEvent initJobEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT);
- /** send init to the job (this does NOT trigger job execution) */
+ // Send init to the job (this does NOT trigger job execution)
// This is a synchronous call, not an event through dispatcher. We want
// job-init to be done completely here.
jobEventDispatcher.handle(initJobEvent);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index a3f067d14c..c26bc24695 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -92,6 +92,7 @@
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
@@ -584,25 +585,17 @@ public List getDiagnostics() {
public JobReport getReport() {
readLock.lock();
try {
- JobReport report = recordFactory.newRecordInstance(JobReport.class);
- report.setJobId(jobId);
- report.setJobState(getState());
-
- // TODO - Fix to correctly setup report and to check state
- if (report.getJobState() == JobState.NEW) {
- return report;
- }
-
- report.setStartTime(startTime);
- report.setFinishTime(finishTime);
- report.setSetupProgress(setupProgress);
- report.setCleanupProgress(cleanupProgress);
- report.setMapProgress(computeProgress(mapTasks));
- report.setReduceProgress(computeProgress(reduceTasks));
- report.setJobName(jobName);
- report.setUser(username);
+ JobState state = getState();
- return report;
+ if (getState() == JobState.NEW) {
+ return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
+ startTime, finishTime, setupProgress, 0.0f,
+ 0.0f, cleanupProgress);
+ }
+
+ return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
+ startTime, finishTime, setupProgress, computeProgress(mapTasks),
+ computeProgress(reduceTasks), cleanupProgress);
} finally {
readLock.unlock();
}
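Judging only from the two call sites above, the new MRBuilderUtils factory (its own diff appears near the end of this patch) presumably looks something like this; the parameter names are guesses:

  // Assumed shape, inferred from JobImpl.getReport(); the real body builds a JobReport record.
  public static JobReport newJobReport(JobId jobId, String jobName, String userName,
      JobState state, long startTime, long finishTime, float setupProgress,
      float mapProgress, float reduceProgress, float cleanupProgress) { /* ... */ }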
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
index 18a0f2d5a6..0261e18b56 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.mapreduce.v2.app.local;
+import java.util.ArrayList;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
@@ -30,15 +31,19 @@
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
-import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.AMResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.Records;
/**
@@ -65,6 +70,20 @@ public LocalContainerAllocator(ClientService clientService,
this.appID = context.getApplicationID();
}
+ @Override
+ protected synchronized void heartbeat() throws Exception {
+ AllocateRequest allocateRequest = BuilderUtils.newAllocateRequest(
+ this.applicationAttemptId, this.lastResponseID, super
+ .getApplicationProgress(), new ArrayList<ResourceRequest>(),
+ new ArrayList<ContainerId>());
+ AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
+ AMResponse response = allocateResponse.getAMResponse();
+ if (response.getReboot()) {
+ // TODO
+ LOG.info("Event from RM: shutting down Application Master");
+ }
+ }
+
@Override
public void handle(ContainerAllocatorEvent event) {
if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
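The BuilderUtils.newAllocateRequest helper used in the new heartbeat() appears to bundle the fields the removed hand-built request carried, plus the application progress; an assumed shape, inferred only from its call sites in this patch:

  // Assumed signature; not taken from the BuilderUtils diff itself.
  public static AllocateRequest newAllocateRequest(ApplicationAttemptId appAttemptId,
      int responseId, float appProgress, List<ResourceRequest> resourceAsk,
      List<ContainerId> containersToBeReleased) { /* populates an AllocateRequest record */ }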
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
index db4a60b1dc..15a7e3f6a5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
@@ -20,7 +20,6 @@
import java.io.IOException;
import java.security.PrivilegedAction;
-import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -29,6 +28,7 @@
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
@@ -42,17 +42,12 @@
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.AMRMProtocol;
import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.records.AMResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -64,7 +59,7 @@
/**
* Registers/unregisters to RM and sends heartbeats to RM.
*/
-public class RMCommunicator extends AbstractService {
+public abstract class RMCommunicator extends AbstractService {
private static final Log LOG = LogFactory.getLog(RMContainerAllocator.class);
private int rmPollInterval;//millis
protected ApplicationId applicationId;
@@ -74,7 +69,7 @@ public class RMCommunicator extends AbstractService {
protected EventHandler eventHandler;
protected AMRMProtocol scheduler;
private final ClientService clientService;
- private int lastResponseID;
+ protected int lastResponseID;
private Resource minContainerCapability;
private Resource maxContainerCapability;
@@ -121,6 +116,34 @@ protected Job getJob() {
return job;
}
+ /**
+ * Get the appProgress. Can be used only after this component is started.
+ * @return the appProgress.
+ */
+ protected float getApplicationProgress() {
+ // For now just a single job. In future when we have a DAG, we need an
+ // aggregate progress.
+ JobReport report = this.job.getReport();
+ float setupWeight = 0.05f;
+ float cleanupWeight = 0.05f;
+ float mapWeight = 0.0f;
+ float reduceWeight = 0.0f;
+ int numMaps = this.job.getTotalMaps();
+ int numReduces = this.job.getTotalReduces();
+ if (numMaps == 0 && numReduces == 0) {
+ } else if (numMaps == 0) {
+ reduceWeight = 0.9f;
+ } else if (numReduces == 0) {
+ mapWeight = 0.9f;
+ } else {
+ mapWeight = reduceWeight = 0.45f;
+ }
+ return (report.getSetupProgress() * setupWeight
+ + report.getCleanupProgress() * cleanupWeight
+ + report.getMapProgress() * mapWeight + report.getReduceProgress()
+ * reduceWeight);
+ }
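// Worked check of the weighting: with both maps and reduces present, setup and
// cleanup each weigh 0.05 and maps/reduces 0.45 apiece, so setup done plus maps
// at 10% gives 0.05*1.0 + 0.45*0.1 = 0.095, the 9.5% figure asserted in
// TestRMContainerAllocator further below.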
+
protected void register() {
//Register
String host =
@@ -262,18 +285,5 @@ public AMRMProtocol run() {
});
}
- protected synchronized void heartbeat() throws Exception {
- AllocateRequest allocateRequest =
- recordFactory.newRecordInstance(AllocateRequest.class);
- allocateRequest.setApplicationAttemptId(applicationAttemptId);
- allocateRequest.setResponseId(lastResponseID);
- allocateRequest.addAllAsks(new ArrayList<ResourceRequest>());
- allocateRequest.addAllReleases(new ArrayList<ContainerId>());
- AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
- AMResponse response = allocateResponse.getAMResponse();
- if (response.getReboot()) {
- LOG.info("Event from RM: shutting down Application Master");
- }
- }
-
+ protected abstract void heartbeat() throws Exception;
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
index b9f0c6ee45..cda2ed678a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
@@ -43,6 +43,7 @@
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.util.BuilderUtils;
/**
* Keeps the data structures to send container requests to RM.
@@ -107,15 +108,11 @@ public void init(Configuration conf) {
LOG.info("maxTaskFailuresPerNode is " + maxTaskFailuresPerNode);
}
- protected abstract void heartbeat() throws Exception;
-
protected AMResponse makeRemoteRequest() throws YarnRemoteException {
- AllocateRequest allocateRequest = recordFactory
- .newRecordInstance(AllocateRequest.class);
- allocateRequest.setApplicationAttemptId(applicationAttemptId);
- allocateRequest.setResponseId(lastResponseID);
- allocateRequest.addAllAsks(new ArrayList<ResourceRequest>(ask));
- allocateRequest.addAllReleases(new ArrayList<ContainerId>(release));
+ AllocateRequest allocateRequest = BuilderUtils.newAllocateRequest(
+ applicationAttemptId, lastResponseID, super.getApplicationProgress(),
+ new ArrayList<ResourceRequest>(ask), new ArrayList<ContainerId>(
+ release));
AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
AMResponse response = allocateResponse.getAMResponse();
lastResponseID = response.getResponseId();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
index cbf3ab0a65..a1eb928919 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
@@ -18,12 +18,15 @@
package org.apache.hadoop.mapreduce.v2.app;
+import static org.mockito.Matchers.isA;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
-import java.util.Map;
import java.util.Set;
import junit.framework.Assert;
@@ -32,475 +35,651 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.AMRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.records.AMResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationMaster;
-import org.apache.hadoop.yarn.api.records.ApplicationStatus;
-import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.ipc.RPCUtil;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
-import org.junit.BeforeClass;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.junit.After;
import org.junit.Test;
public class TestRMContainerAllocator {
-// private static final Log LOG = LogFactory.getLog(TestRMContainerAllocator.class);
-// private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
-//
-// @BeforeClass
-// public static void preTests() {
-// DefaultMetricsSystem.shutdown();
-// }
-//
-// @Test
-// public void testSimple() throws Exception {
-// FifoScheduler scheduler = createScheduler();
-// LocalRMContainerAllocator allocator = new LocalRMContainerAllocator(
-// scheduler, new Configuration());
-//
-// //add resources to scheduler
-// RMNode nodeManager1 = addNode(scheduler, "h1", 10240);
-// RMNode nodeManager2 = addNode(scheduler, "h2", 10240);
-// RMNode nodeManager3 = addNode(scheduler, "h3", 10240);
-//
-// //create the container request
-// ContainerRequestEvent event1 =
-// createReq(1, 1024, new String[]{"h1"});
-// allocator.sendRequest(event1);
-//
-// //send 1 more request with different resource req
-// ContainerRequestEvent event2 = createReq(2, 1024, new String[]{"h2"});
-// allocator.sendRequest(event2);
-//
-// //this tells the scheduler about the requests
-// //as nodes are not added, no allocations
-// List assigned = allocator.schedule();
-// Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
-//
-// //send another request with different resource and priority
-// ContainerRequestEvent event3 = createReq(3, 1024, new String[]{"h3"});
-// allocator.sendRequest(event3);
-//
-// //this tells the scheduler about the requests
-// //as nodes are not added, no allocations
-// assigned = allocator.schedule();
-// Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
-//
-// //update resources in scheduler
-// scheduler.nodeUpdate(nodeManager1); // Node heartbeat
-// scheduler.nodeUpdate(nodeManager2); // Node heartbeat
-// scheduler.nodeUpdate(nodeManager3); // Node heartbeat
-//
-//
-// assigned = allocator.schedule();
-// checkAssignments(
-// new ContainerRequestEvent[]{event1, event2, event3}, assigned, false);
-// }
-//
-// //TODO: Currently Scheduler seems to have bug where it does not work
-// //for Application asking for containers with different capabilities.
-// //@Test
-// public void testResource() throws Exception {
-// FifoScheduler scheduler = createScheduler();
-// LocalRMContainerAllocator allocator = new LocalRMContainerAllocator(
-// scheduler, new Configuration());
-//
-// //add resources to scheduler
-// RMNode nodeManager1 = addNode(scheduler, "h1", 10240);
-// RMNode nodeManager2 = addNode(scheduler, "h2", 10240);
-// RMNode nodeManager3 = addNode(scheduler, "h3", 10240);
-//
-// //create the container request
-// ContainerRequestEvent event1 =
-// createReq(1, 1024, new String[]{"h1"});
-// allocator.sendRequest(event1);
-//
-// //send 1 more request with different resource req
-// ContainerRequestEvent event2 = createReq(2, 2048, new String[]{"h2"});
-// allocator.sendRequest(event2);
-//
-// //this tells the scheduler about the requests
-// //as nodes are not added, no allocations
-// List assigned = allocator.schedule();
-// Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
-//
-// //update resources in scheduler
-// scheduler.nodeUpdate(nodeManager1); // Node heartbeat
-// scheduler.nodeUpdate(nodeManager2); // Node heartbeat
-// scheduler.nodeUpdate(nodeManager3); // Node heartbeat
-//
-// assigned = allocator.schedule();
-// checkAssignments(
-// new ContainerRequestEvent[]{event1, event2}, assigned, false);
-// }
-//
-// @Test
-// public void testMapReduceScheduling() throws Exception {
-// FifoScheduler scheduler = createScheduler();
-// Configuration conf = new Configuration();
-// LocalRMContainerAllocator allocator = new LocalRMContainerAllocator(
-// scheduler, conf);
-//
-// //add resources to scheduler
-// RMNode nodeManager1 = addNode(scheduler, "h1", 1024);
-// RMNode nodeManager2 = addNode(scheduler, "h2", 10240);
-// RMNode nodeManager3 = addNode(scheduler, "h3", 10240);
-//
-// //create the container request
-// //send MAP request
-// ContainerRequestEvent event1 =
-// createReq(1, 2048, new String[]{"h1", "h2"}, true, false);
-// allocator.sendRequest(event1);
-//
-// //send REDUCE request
-// ContainerRequestEvent event2 = createReq(2, 3000, new String[]{"h1"}, false, true);
-// allocator.sendRequest(event2);
-//
-// //send MAP request
-// ContainerRequestEvent event3 = createReq(3, 2048, new String[]{"h3"}, false, false);
-// allocator.sendRequest(event3);
-//
-// //this tells the scheduler about the requests
-// //as nodes are not added, no allocations
-// List assigned = allocator.schedule();
-// Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
-//
-// //update resources in scheduler
-// scheduler.nodeUpdate(nodeManager1); // Node heartbeat
-// scheduler.nodeUpdate(nodeManager2); // Node heartbeat
-// scheduler.nodeUpdate(nodeManager3); // Node heartbeat
-//
-// assigned = allocator.schedule();
-// checkAssignments(
-// new ContainerRequestEvent[]{event1, event3}, assigned, false);
-//
-// //validate that no container is assigned to h1 as it doesn't have 2048
-// for (TaskAttemptContainerAssignedEvent assig : assigned) {
-// Assert.assertFalse("Assigned count not correct",
-// "h1".equals(assig.getContainer().getNodeId().getHost()));
-// }
-// }
-//
-//
-//
-// private RMNode addNode(FifoScheduler scheduler,
-// String nodeName, int memory) {
-// NodeId nodeId = recordFactory.newRecordInstance(NodeId.class);
-// nodeId.setHost(nodeName);
-// nodeId.setPort(1234);
-// Resource resource = recordFactory.newRecordInstance(Resource.class);
-// resource.setMemory(memory);
-// RMNode nodeManager = new RMNodeImpl(nodeId, null, nodeName, 0, 0,
-// ResourceTrackerService.resolve(nodeName), resource);
-// scheduler.addNode(nodeManager); // Node registration
-// return nodeManager;
-// }
-//
-// private FifoScheduler createScheduler() throws YarnRemoteException {
-// FifoScheduler fsc = new FifoScheduler() {
-// //override this to copy the objects
-// //otherwise FifoScheduler updates the numContainers in same objects as kept by
-// //RMContainerAllocator
-//
-// @Override
-// public synchronized void allocate(ApplicationAttemptId applicationId,
-// List ask) {
-// List askCopy = new ArrayList();
-// for (ResourceRequest req : ask) {
-// ResourceRequest reqCopy = recordFactory.newRecordInstance(ResourceRequest.class);
-// reqCopy.setPriority(req.getPriority());
-// reqCopy.setHostName(req.getHostName());
-// reqCopy.setCapability(req.getCapability());
-// reqCopy.setNumContainers(req.getNumContainers());
-// askCopy.add(reqCopy);
-// }
-// super.allocate(applicationId, askCopy);
-// }
-// };
-// try {
-// fsc.reinitialize(new Configuration(), new ContainerTokenSecretManager(), null);
-// fsc.addApplication(recordFactory.newRecordInstance(ApplicationId.class),
-// recordFactory.newRecordInstance(ApplicationMaster.class),
-// "test", null, null, StoreFactory.createVoidAppStore());
-// } catch(IOException ie) {
-// LOG.info("add application failed with ", ie);
-// assert(false);
-// }
-// return fsc;
-// }
-//
-// private ContainerRequestEvent createReq(
-// int attemptid, int memory, String[] hosts) {
-// return createReq(attemptid, memory, hosts, false, false);
-// }
-//
-// private ContainerRequestEvent createReq(
-// int attemptid, int memory, String[] hosts, boolean earlierFailedAttempt, boolean reduce) {
-// ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
-// appId.setClusterTimestamp(0);
-// appId.setId(0);
-// JobId jobId = recordFactory.newRecordInstance(JobId.class);
-// jobId.setAppId(appId);
-// jobId.setId(0);
-// TaskId taskId = recordFactory.newRecordInstance(TaskId.class);
-// taskId.setId(0);
-// taskId.setJobId(jobId);
-// if (reduce) {
-// taskId.setTaskType(TaskType.REDUCE);
-// } else {
-// taskId.setTaskType(TaskType.MAP);
-// }
-// TaskAttemptId attemptId = recordFactory.newRecordInstance(TaskAttemptId.class);
-// attemptId.setId(attemptid);
-// attemptId.setTaskId(taskId);
-// Resource containerNeed = recordFactory.newRecordInstance(Resource.class);
-// containerNeed.setMemory(memory);
-// if (earlierFailedAttempt) {
-// return ContainerRequestEvent.
-// createContainerRequestEventForFailedContainer(attemptId, containerNeed);
-// }
-// return new ContainerRequestEvent(attemptId,
-// containerNeed,
-// hosts, new String[] {NetworkTopology.DEFAULT_RACK});
-// }
-//
-// private void checkAssignments(ContainerRequestEvent[] requests,
-// List assignments,
-// boolean checkHostMatch) {
-// Assert.assertNotNull("Container not assigned", assignments);
-// Assert.assertEquals("Assigned count not correct",
-// requests.length, assignments.size());
-//
-// //check for uniqueness of containerIDs
-// Set containerIds = new HashSet();
-// for (TaskAttemptContainerAssignedEvent assigned : assignments) {
-// containerIds.add(assigned.getContainer().getId());
-// }
-// Assert.assertEquals("Assigned containers must be different",
-// assignments.size(), containerIds.size());
-//
-// //check for all assignment
-// for (ContainerRequestEvent req : requests) {
-// TaskAttemptContainerAssignedEvent assigned = null;
-// for (TaskAttemptContainerAssignedEvent ass : assignments) {
-// if (ass.getTaskAttemptID().equals(req.getAttemptID())){
-// assigned = ass;
-// break;
-// }
-// }
-// checkAssignment(req, assigned, checkHostMatch);
-// }
-// }
-//
-// private void checkAssignment(ContainerRequestEvent request,
-// TaskAttemptContainerAssignedEvent assigned, boolean checkHostMatch) {
-// Assert.assertNotNull("Nothing assigned to attempt " + request.getAttemptID(),
-// assigned);
-// Assert.assertEquals("assigned to wrong attempt", request.getAttemptID(),
-// assigned.getTaskAttemptID());
-// if (checkHostMatch) {
-// Assert.assertTrue("Not assigned to requested host", Arrays.asList(
-// request.getHosts()).contains(
-// assigned.getContainer().getNodeId().toString()));
-// }
-//
-// }
-//
-// //Mock RMContainerAllocator
-// //Instead of talking to remote Scheduler,uses the local Scheduler
-// public static class LocalRMContainerAllocator extends RMContainerAllocator {
-// private static final List events =
-// new ArrayList();
-//
-// public static class AMRMProtocolImpl implements AMRMProtocol {
-//
-// private ResourceScheduler resourceScheduler;
-//
-// public AMRMProtocolImpl(ResourceScheduler resourceScheduler) {
-// this.resourceScheduler = resourceScheduler;
-// }
-//
-// @Override
-// public RegisterApplicationMasterResponse registerApplicationMaster(RegisterApplicationMasterRequest request) throws YarnRemoteException {
-// RegisterApplicationMasterResponse response = recordFactory.newRecordInstance(RegisterApplicationMasterResponse.class);
-// return response;
-// }
-//
-// public AllocateResponse allocate(AllocateRequest request) throws YarnRemoteException {
-// List ask = request.getAskList();
-// List release = request.getReleaseList();
-// try {
-// AMResponse response = recordFactory.newRecordInstance(AMResponse.class);
-// Allocation allocation = resourceScheduler.allocate(request.getApplicationAttemptId(), ask);
-// response.addAllNewContainers(allocation.getContainers());
-// response.setAvailableResources(allocation.getResourceLimit());
-// AllocateResponse allocateResponse = recordFactory.newRecordInstance(AllocateResponse.class);
-// allocateResponse.setAMResponse(response);
-// return allocateResponse;
-// } catch(IOException ie) {
-// throw RPCUtil.getRemoteException(ie);
-// }
-// }
-//
-// @Override
-// public FinishApplicationMasterResponse finishApplicationMaster(FinishApplicationMasterRequest request) throws YarnRemoteException {
-// FinishApplicationMasterResponse response = recordFactory.newRecordInstance(FinishApplicationMasterResponse.class);
-// return response;
-// }
-//
-// }
-//
-// private ResourceScheduler scheduler;
-// LocalRMContainerAllocator(ResourceScheduler scheduler, Configuration conf) {
-// super(null, new TestContext(events));
-// this.scheduler = scheduler;
-// super.init(conf);
-// super.start();
-// }
-//
-// protected AMRMProtocol createSchedulerProxy() {
-// return new AMRMProtocolImpl(scheduler);
-// }
-//
-// @Override
-// protected void register() {}
-// @Override
-// protected void unregister() {}
-//
-// @Override
-// protected Resource getMinContainerCapability() {
-// Resource res = recordFactory.newRecordInstance(Resource.class);
-// res.setMemory(1024);
-// return res;
-// }
-//
-// @Override
-// protected Resource getMaxContainerCapability() {
-// Resource res = recordFactory.newRecordInstance(Resource.class);
-// res.setMemory(10240);
-// return res;
-// }
-//
-// public void sendRequest(ContainerRequestEvent req) {
-// sendRequests(Arrays.asList(new ContainerRequestEvent[]{req}));
-// }
-//
-// public void sendRequests(List reqs) {
-// for (ContainerRequestEvent req : reqs) {
-// handle(req);
-// }
-// }
-//
-// //API to be used by tests
-// public List schedule() {
-// //run the scheduler
-// try {
-// heartbeat();
-// } catch (Exception e) {
-// LOG.error("error in heartbeat ", e);
-// throw new YarnException(e);
-// }
-//
-// List result = new ArrayList(events);
-// events.clear();
-// return result;
-// }
-//
-// protected void startAllocatorThread() {
-// //override to NOT start thread
-// }
-//
-// static class TestContext implements AppContext {
-// private List events;
-// TestContext(List events) {
-// this.events = events;
-// }
-// @Override
-// public Map getAllJobs() {
-// return null;
-// }
-// @Override
-// public ApplicationAttemptId getApplicationAttemptId() {
-// return recordFactory.newRecordInstance(ApplicationAttemptId.class);
-// }
-// @Override
-// public ApplicationId getApplicationID() {
-// return recordFactory.newRecordInstance(ApplicationId.class);
-// }
-// @Override
-// public EventHandler getEventHandler() {
-// return new EventHandler() {
-// @Override
-// public void handle(Event event) {
-// events.add((TaskAttemptContainerAssignedEvent) event);
-// }
-// };
-// }
-// @Override
-// public Job getJob(JobId jobID) {
-// return null;
-// }
-//
-// @Override
-// public String getUser() {
-// return null;
-// }
-//
-// @Override
-// public Clock getClock() {
-// return null;
-// }
-//
-// @Override
-// public String getApplicationName() {
-// return null;
-// }
-//
-// @Override
-// public long getStartTime() {
-// return 0;
-// }
-// }
-// }
-//
-// public static void main(String[] args) throws Exception {
-// TestRMContainerAllocator t = new TestRMContainerAllocator();
-// t.testSimple();
-// //t.testResource();
-// t.testMapReduceScheduling();
-// }
+
+ static final Log LOG = LogFactory
+ .getLog(TestRMContainerAllocator.class);
+ static final RecordFactory recordFactory = RecordFactoryProvider
+ .getRecordFactory(null);
+
+ @After
+ public void tearDown() {
+ DefaultMetricsSystem.shutdown();
+ }
+
+ @Test
+ public void testSimple() throws Exception {
+
+ LOG.info("Running testSimple");
+
+ Configuration conf = new Configuration();
+ MyResourceManager rm = new MyResourceManager(conf);
+ rm.start();
+ DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+ .getDispatcher();
+
+ // Submit the application
+ RMApp app = rm.submitApp(1024);
+ dispatcher.await();
+
+ MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+ amNodeManager.nodeHeartbeat(true);
+ dispatcher.await();
+
+ ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+ .getAppAttemptId();
+ rm.sendAMLaunched(appAttemptId);
+ dispatcher.await();
+
+ JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+ Job mockJob = mock(Job.class);
+ when(mockJob.getReport()).thenReturn(
+ MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
+ 0, 0, 0, 0, 0, 0));
+ MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+ appAttemptId, mockJob);
+
+ // add resources to scheduler
+ MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
+ MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
+ MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
+ dispatcher.await();
+
+ // create the container request
+ ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
+ new String[] { "h1" });
+ allocator.sendRequest(event1);
+
+ // send 1 more request with different resource req
+ ContainerRequestEvent event2 = createReq(jobId, 2, 1024,
+ new String[] { "h2" });
+ allocator.sendRequest(event2);
+
+ // this tells the scheduler about the requests
+ // as nodes are not added, no allocations
+ List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
+ // send another request with different resource and priority
+ ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
+ new String[] { "h3" });
+ allocator.sendRequest(event3);
+
+ // this tells the scheduler about the requests
+ // as nodes are not added, no allocations
+ assigned = allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
+ // update resources in scheduler
+ nodeManager1.nodeHeartbeat(true); // Node heartbeat
+ nodeManager2.nodeHeartbeat(true); // Node heartbeat
+ nodeManager3.nodeHeartbeat(true); // Node heartbeat
+ dispatcher.await();
+
+ assigned = allocator.schedule();
+ dispatcher.await();
+ checkAssignments(new ContainerRequestEvent[] { event1, event2, event3 },
+ assigned, false);
+ }
+
+ @Test
+ public void testResource() throws Exception {
+
+ LOG.info("Running testResource");
+
+ Configuration conf = new Configuration();
+ MyResourceManager rm = new MyResourceManager(conf);
+ rm.start();
+ DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+ .getDispatcher();
+
+ // Submit the application
+ RMApp app = rm.submitApp(1024);
+ dispatcher.await();
+
+ MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+ amNodeManager.nodeHeartbeat(true);
+ dispatcher.await();
+
+ ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+ .getAppAttemptId();
+ rm.sendAMLaunched(appAttemptId);
+ dispatcher.await();
+
+ JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+ Job mockJob = mock(Job.class);
+ when(mockJob.getReport()).thenReturn(
+ MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
+ 0, 0, 0, 0, 0, 0));
+ MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+ appAttemptId, mockJob);
+
+ // add resources to scheduler
+ MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
+ MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
+ MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
+ dispatcher.await();
+
+ // create the container request
+ ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
+ new String[] { "h1" });
+ allocator.sendRequest(event1);
+
+ // send 1 more request with different resource req
+ ContainerRequestEvent event2 = createReq(jobId, 2, 2048,
+ new String[] { "h2" });
+ allocator.sendRequest(event2);
+
+ // this tells the scheduler about the requests
+ // as nodes are not added, no allocations
+ List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
+ // update resources in scheduler
+ nodeManager1.nodeHeartbeat(true); // Node heartbeat
+ nodeManager2.nodeHeartbeat(true); // Node heartbeat
+ nodeManager3.nodeHeartbeat(true); // Node heartbeat
+ dispatcher.await();
+
+ assigned = allocator.schedule();
+ dispatcher.await();
+ checkAssignments(new ContainerRequestEvent[] { event1, event2 },
+ assigned, false);
+ }
+
+ @Test
+ public void testMapReduceScheduling() throws Exception {
+
+ LOG.info("Running testMapReduceScheduling");
+
+ Configuration conf = new Configuration();
+ MyResourceManager rm = new MyResourceManager(conf);
+ rm.start();
+ DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+ .getDispatcher();
+
+ // Submit the application
+ RMApp app = rm.submitApp(1024);
+ dispatcher.await();
+
+ MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+ amNodeManager.nodeHeartbeat(true);
+ dispatcher.await();
+
+ ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+ .getAppAttemptId();
+ rm.sendAMLaunched(appAttemptId);
+ dispatcher.await();
+
+ JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+ Job mockJob = mock(Job.class);
+ when(mockJob.getReport()).thenReturn(
+ MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
+ 0, 0, 0, 0, 0, 0));
+ MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+ appAttemptId, mockJob);
+
+ // add resources to scheduler
+ MockNM nodeManager1 = rm.registerNode("h1:1234", 1024);
+ MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
+ MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
+ dispatcher.await();
+
+ // create the container request
+ // send MAP request
+ ContainerRequestEvent event1 = createReq(jobId, 1, 2048, new String[] {
+ "h1", "h2" }, true, false);
+ allocator.sendRequest(event1);
+
+ // send REDUCE request
+ ContainerRequestEvent event2 = createReq(jobId, 2, 3000,
+ new String[] { "h1" }, false, true);
+ allocator.sendRequest(event2);
+
+ // send MAP request
+ ContainerRequestEvent event3 = createReq(jobId, 3, 2048,
+ new String[] { "h3" }, false, false);
+ allocator.sendRequest(event3);
+
+ // this tells the scheduler about the requests
+ // as nodes are not added, no allocations
+ List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
+ // update resources in scheduler
+ nodeManager1.nodeHeartbeat(true); // Node heartbeat
+ nodeManager2.nodeHeartbeat(true); // Node heartbeat
+ nodeManager3.nodeHeartbeat(true); // Node heartbeat
+ dispatcher.await();
+
+ assigned = allocator.schedule();
+ dispatcher.await();
+ checkAssignments(new ContainerRequestEvent[] { event1, event3 },
+ assigned, false);
+
+ // validate that no container is assigned to h1 as it doesn't have 2048
+ for (TaskAttemptContainerAssignedEvent assig : assigned) {
+ Assert.assertFalse("Assigned count not correct", "h1".equals(assig
+ .getContainer().getNodeId().getHost()));
+ }
+ }
+
+ private static class MyResourceManager extends MockRM {
+
+ public MyResourceManager(Configuration conf) {
+ super(conf);
+ }
+
+ @Override
+ protected Dispatcher createDispatcher() {
+ return new DrainDispatcher();
+ }
+
+ @Override
+ protected EventHandler<SchedulerEvent> createSchedulerEventDispatcher() {
+ // Dispatch inline for test sanity
+ return new EventHandler<SchedulerEvent>() {
+ @Override
+ public void handle(SchedulerEvent event) {
+ scheduler.handle(event);
+ }
+ };
+ }
+ @Override
+ protected ResourceScheduler createScheduler() {
+ return new MyFifoScheduler(getRMContext());
+ }
+ }
+
+ private static class FakeJob extends JobImpl {
+
+ public FakeJob(ApplicationAttemptId appAttemptID, Configuration conf,
+ int numMaps, int numReduces) {
+ super(appAttemptID, conf, null, null, null, null, null, null, null,
+ null);
+ this.jobId = MRBuilderUtils
+ .newJobId(appAttemptID.getApplicationId(), 0);
+ this.numMaps = numMaps;
+ this.numReduces = numReduces;
+ }
+
+ private float setupProgress;
+ private float mapProgress;
+ private float reduceProgress;
+ private float cleanupProgress;
+ private final int numMaps;
+ private final int numReduces;
+ private JobId jobId;
+
+ void setProgress(float setupProgress, float mapProgress,
+ float reduceProgress, float cleanupProgress) {
+ this.setupProgress = setupProgress;
+ this.mapProgress = mapProgress;
+ this.reduceProgress = reduceProgress;
+ this.cleanupProgress = cleanupProgress;
+ }
+
+ @Override
+ public int getTotalMaps() { return this.numMaps; }
+ @Override
+ public int getTotalReduces() { return this.numReduces;}
+
+ @Override
+ public JobReport getReport() {
+ return MRBuilderUtils.newJobReport(this.jobId, "job", "user",
+ JobState.RUNNING, 0, 0, this.setupProgress, this.mapProgress,
+ this.reduceProgress, this.cleanupProgress);
+ }
+ }
+
+ @Test
+ public void testReportedAppProgress() throws Exception {
+
+ LOG.info("Running testReportedAppProgress");
+
+ Configuration conf = new Configuration();
+ MyResourceManager rm = new MyResourceManager(conf);
+ rm.start();
+ DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+ .getDispatcher();
+
+ // Submit the application
+ RMApp app = rm.submitApp(1024);
+ dispatcher.await();
+
+ MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+ amNodeManager.nodeHeartbeat(true);
+ dispatcher.await();
+
+ ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+ .getAppAttemptId();
+ rm.sendAMLaunched(appAttemptId);
+ dispatcher.await();
+
+ FakeJob job = new FakeJob(appAttemptId, conf, 2, 2);
+ MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+ appAttemptId, job);
+
+ allocator.schedule(); // Send heartbeat
+ dispatcher.await();
+ Assert.assertEquals(0.0, app.getProgress(), 0.0);
+
+ job.setProgress(100, 10, 0, 0);
+ allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals(9.5f, app.getProgress(), 0.0);
+
+ job.setProgress(100, 80, 0, 0);
+ allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals(41.0f, app.getProgress(), 0.0);
+
+ job.setProgress(100, 100, 20, 0);
+ allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals(59.0f, app.getProgress(), 0.0);
+
+ job.setProgress(100, 100, 100, 100);
+ allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals(100.0f, app.getProgress(), 0.0);
+ }
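// The asserted values follow from the 0.05/0.45/0.45/0.05 weights in
// RMCommunicator.getApplicationProgress above (FakeJob feeds progress in as
// percentages): 0.05*100 + 0.45*10 = 9.5; 0.05*100 + 0.45*80 = 41;
// 0.05*100 + 0.45*100 + 0.45*20 = 59; and 5 + 45 + 45 + 5 = 100.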
+
+ @Test
+ public void testReportedAppProgressWithOnlyMaps() throws Exception {
+
+ LOG.info("Running testReportedAppProgressWithOnlyMaps");
+
+ Configuration conf = new Configuration();
+ MyResourceManager rm = new MyResourceManager(conf);
+ rm.start();
+ DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+ .getDispatcher();
+
+ // Submit the application
+ RMApp app = rm.submitApp(1024);
+ dispatcher.await();
+
+ MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+ amNodeManager.nodeHeartbeat(true);
+ dispatcher.await();
+
+ ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+ .getAppAttemptId();
+ rm.sendAMLaunched(appAttemptId);
+ dispatcher.await();
+
+ FakeJob job = new FakeJob(appAttemptId, conf, 2, 0);
+ MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+ appAttemptId, job);
+
+ allocator.schedule(); // Send heartbeat
+ dispatcher.await();
+ Assert.assertEquals(0.0, app.getProgress(), 0.0);
+
+ job.setProgress(100, 10, 0, 0);
+ allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals(14f, app.getProgress(), 0.0);
+
+ job.setProgress(100, 60, 0, 0);
+ allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals(59.0f, app.getProgress(), 0.0);
+
+ job.setProgress(100, 100, 0, 100);
+ allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals(100.0f, app.getProgress(), 0.0);
+ }
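// With no reduces the map weight becomes 0.9, so the maps-only assertions above
// work out to 5 + 0.9*10 = 14, 5 + 0.9*60 = 59, and 5 + 90 + 5 = 100.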
+
+ private static class MyFifoScheduler extends FifoScheduler {
+
+ public MyFifoScheduler(RMContext rmContext) {
+ super();
+ try {
+ reinitialize(new Configuration(), new ContainerTokenSecretManager(),
+ rmContext);
+ } catch (IOException ie) {
+ LOG.info("add application failed with ", ie);
+ assert (false);
+ }
+ }
+
+ // override this to copy the objects otherwise FifoScheduler updates the
+ // numContainers in same objects as kept by RMContainerAllocator
+ @Override
+ public synchronized Allocation allocate(
+ ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
+ List<ContainerId> release) {
+ List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
+ for (ResourceRequest req : ask) {
+ ResourceRequest reqCopy = BuilderUtils.newResourceRequest(req
+ .getPriority(), req.getHostName(), req.getCapability(), req
+ .getNumContainers());
+ askCopy.add(reqCopy);
+ }
+ return super.allocate(applicationAttemptId, askCopy, release);
+ }
+ }
+
+ private ContainerRequestEvent createReq(JobId jobId, int taskAttemptId,
+ int memory, String[] hosts) {
+ return createReq(jobId, taskAttemptId, memory, hosts, false, false);
+ }
+
+ private ContainerRequestEvent
+ createReq(JobId jobId, int taskAttemptId, int memory, String[] hosts,
+ boolean earlierFailedAttempt, boolean reduce) {
+ TaskId taskId;
+ if (reduce) {
+ taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
+ } else {
+ taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
+ }
+ TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
+ taskAttemptId);
+ Resource containerNeed = BuilderUtils.newResource(memory);
+ if (earlierFailedAttempt) {
+ return ContainerRequestEvent
+ .createContainerRequestEventForFailedContainer(attemptId,
+ containerNeed);
+ }
+ return new ContainerRequestEvent(attemptId, containerNeed, hosts,
+ new String[] { NetworkTopology.DEFAULT_RACK });
+ }
+
+ private void checkAssignments(ContainerRequestEvent[] requests,
+ List<TaskAttemptContainerAssignedEvent> assignments,
+ boolean checkHostMatch) {
+ Assert.assertNotNull("Container not assigned", assignments);
+ Assert.assertEquals("Assigned count not correct", requests.length,
+ assignments.size());
+
+ // check for uniqueness of containerIDs
+ Set<ContainerId> containerIds = new HashSet<ContainerId>();
+ for (TaskAttemptContainerAssignedEvent assigned : assignments) {
+ containerIds.add(assigned.getContainer().getId());
+ }
+ Assert.assertEquals("Assigned containers must be different", assignments
+ .size(), containerIds.size());
+
+ // check for all assignment
+ for (ContainerRequestEvent req : requests) {
+ TaskAttemptContainerAssignedEvent assigned = null;
+ for (TaskAttemptContainerAssignedEvent ass : assignments) {
+ if (ass.getTaskAttemptID().equals(req.getAttemptID())) {
+ assigned = ass;
+ break;
+ }
+ }
+ checkAssignment(req, assigned, checkHostMatch);
+ }
+ }
+
+ private void checkAssignment(ContainerRequestEvent request,
+ TaskAttemptContainerAssignedEvent assigned, boolean checkHostMatch) {
+ Assert.assertNotNull("Nothing assigned to attempt "
+ + request.getAttemptID(), assigned);
+ Assert.assertEquals("assigned to wrong attempt", request.getAttemptID(),
+ assigned.getTaskAttemptID());
+ if (checkHostMatch) {
+ Assert.assertTrue("Not assigned to requested host", Arrays.asList(
+ request.getHosts()).contains(
+ assigned.getContainer().getNodeId().toString()));
+ }
+ }
+
+ // Mock RMContainerAllocator
+ // Instead of talking to the remote Scheduler, uses the local Scheduler
+ private static class MyContainerAllocator extends RMContainerAllocator {
+ static final List<TaskAttemptContainerAssignedEvent> events
+ = new ArrayList<TaskAttemptContainerAssignedEvent>();
+
+ private MyResourceManager rm;
+
+ @SuppressWarnings("rawtypes")
+ private static AppContext createAppContext(
+ ApplicationAttemptId appAttemptId, Job job) {
+ AppContext context = mock(AppContext.class);
+ ApplicationId appId = appAttemptId.getApplicationId();
+ when(context.getApplicationID()).thenReturn(appId);
+ when(context.getApplicationAttemptId()).thenReturn(appAttemptId);
+ when(context.getJob(isA(JobId.class))).thenReturn(job);
+ when(context.getEventHandler()).thenReturn(new EventHandler() {
+ @Override
+ public void handle(Event event) {
+ // Only capture interesting events.
+ if (event instanceof TaskAttemptContainerAssignedEvent) {
+ events.add((TaskAttemptContainerAssignedEvent) event);
+ }
+ }
+ });
+ return context;
+ }
+
+ private static ClientService createMockClientService() {
+ ClientService service = mock(ClientService.class);
+ when(service.getBindAddress()).thenReturn(
+ NetUtils.createSocketAddr("localhost:4567"));
+ when(service.getHttpPort()).thenReturn(890);
+ return service;
+ }
+
+ MyContainerAllocator(MyResourceManager rm, Configuration conf,
+ ApplicationAttemptId appAttemptId, Job job) {
+ super(createMockClientService(), createAppContext(appAttemptId, job));
+ this.rm = rm;
+ super.init(conf);
+ super.start();
+ }
+
+ @Override
+ protected AMRMProtocol createSchedulerProxy() {
+ return this.rm.getApplicationMasterService();
+ }
+
+ @Override
+ protected void register() {
+ super.register();
+ }
+
+ @Override
+ protected void unregister() {
+ }
+
+ @Override
+ protected Resource getMinContainerCapability() {
+ return BuilderUtils.newResource(1024);
+ }
+
+ @Override
+ protected Resource getMaxContainerCapability() {
+ return BuilderUtils.newResource(10240);
+ }
+
+ public void sendRequest(ContainerRequestEvent req) {
+ sendRequests(Arrays.asList(new ContainerRequestEvent[] { req }));
+ }
+
+ public void sendRequests(List<ContainerRequestEvent> reqs) {
+ for (ContainerRequestEvent req : reqs) {
+ super.handle(req);
+ }
+ }
+
+ // API to be used by tests
+ public List<TaskAttemptContainerAssignedEvent> schedule() {
+ // run the scheduler
+ try {
+ super.heartbeat();
+ } catch (Exception e) {
+ LOG.error("error in heartbeat ", e);
+ throw new YarnException(e);
+ }
+
+ List<TaskAttemptContainerAssignedEvent> result
+ = new ArrayList<TaskAttemptContainerAssignedEvent>(events);
+ events.clear();
+ return result;
+ }
+
+ protected void startAllocatorThread() {
+ // override to NOT start thread
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ TestRMContainerAllocator t = new TestRMContainerAllocator();
+ t.testSimple();
+ t.testResource();
+ t.testMapReduceScheduling();
+ t.testReportedAppProgress();
+ t.testReportedAppProgressWithOnlyMaps();
+ }
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java
index c429ca55b5..d710a6f7b8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java
@@ -19,27 +19,25 @@
package org.apache.hadoop.mapreduce.v2.util;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.factories.RecordFactory;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.util.Records;
public class MRBuilderUtils {
- private static final RecordFactory recordFactory = RecordFactoryProvider
- .getRecordFactory(null);
-
public static JobId newJobId(ApplicationId appId, int id) {
- JobId jobId = recordFactory.newRecordInstance(JobId.class);
+ JobId jobId = Records.newRecord(JobId.class);
jobId.setAppId(appId);
jobId.setId(id);
return jobId;
}
public static TaskId newTaskId(JobId jobId, int id, TaskType taskType) {
- TaskId taskId = recordFactory.newRecordInstance(TaskId.class);
+ TaskId taskId = Records.newRecord(TaskId.class);
taskId.setJobId(jobId);
taskId.setId(id);
taskId.setTaskType(taskType);
@@ -48,9 +46,27 @@ public static TaskId newTaskId(JobId jobId, int id, TaskType taskType) {
public static TaskAttemptId newTaskAttemptId(TaskId taskId, int attemptId) {
TaskAttemptId taskAttemptId =
- recordFactory.newRecordInstance(TaskAttemptId.class);
+ Records.newRecord(TaskAttemptId.class);
taskAttemptId.setTaskId(taskId);
taskAttemptId.setId(attemptId);
return taskAttemptId;
}
+
+ public static JobReport newJobReport(JobId jobId, String jobName,
+ String userName, JobState state, long startTime, long finishTime,
+ float setupProgress, float mapProgress, float reduceProgress,
+ float cleanupProgress) {
+ JobReport report = Records.newRecord(JobReport.class);
+ report.setJobId(jobId);
+ report.setJobName(jobName);
+ report.setUser(userName);
+ report.setJobState(state);
+ report.setStartTime(startTime);
+ report.setFinishTime(finishTime);
+ report.setSetupProgress(setupProgress);
+ report.setCleanupProgress(cleanupProgress);
+ report.setMapProgress(mapProgress);
+ report.setReduceProgress(reduceProgress);
+ return report;
+ }
}
\ No newline at end of file
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index 4b605cb2ae..ef388fcd86 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -64,6 +64,12 @@
 <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
 <scope>test</scope>
 </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
 <dependency>
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-yarn-server-common</artifactId>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index ab1ffcca98..2a5cef3cbc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -88,6 +88,12 @@
 <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
 <version>${yarn.version}</version>
 </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+ <version>${yarn.version}</version>
+ <type>test-jar</type>
+ </dependency>
 <dependency>
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-mapreduce-client-core</artifactId>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
index 9df37ee03a..7ec367292e 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
@@ -20,7 +20,9 @@
import java.net.URI;
import java.util.Comparator;
+import java.util.List;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -184,6 +186,13 @@ public static ContainerId newContainerId(RecordFactory recordFactory,
return id;
}
+ public static NodeId newNodeId(String host, int port) {
+ NodeId nodeId = recordFactory.newRecordInstance(NodeId.class);
+ nodeId.setHost(host);
+ nodeId.setPort(port);
+ return nodeId;
+ }
+
public static Container newContainer(RecordFactory recordFactory,
ApplicationAttemptId appAttemptId, int containerId, NodeId nodeId,
String nodeHttpAddress, Resource resource, Priority priority) {
@@ -266,5 +275,18 @@ public static URL newURL(String scheme, String host, int port, String file) {
url.setFile(file);
return url;
}
-
+
+ public static AllocateRequest newAllocateRequest(
+ ApplicationAttemptId applicationAttemptId, int responseID,
+ float appProgress, List<ResourceRequest> resourceAsk,
+ List<ContainerId> containersToBeReleased) {
+ AllocateRequest allocateRequest = recordFactory
+ .newRecordInstance(AllocateRequest.class);
+ allocateRequest.setApplicationAttemptId(applicationAttemptId);
+ allocateRequest.setResponseId(responseID);
+ allocateRequest.setProgress(appProgress);
+ allocateRequest.addAllAsks(resourceAsk);
+ allocateRequest.addAllReleases(containersToBeReleased);
+ return allocateRequest;
+ }
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index e676485e92..d94f597314 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -37,6 +37,20 @@
+
+ <plugin>
+ <artifactId>maven-jar-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>test-jar</goal>
+ </goals>
+ <phase>test-compile</phase>
+ </execution>
+ </executions>
+ </plugin>
+
 <plugin>
 <artifactId>maven-antrun-plugin</artifactId>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index d0cd0a7ff8..3f175a34a0 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
@@ -250,13 +251,10 @@ protected synchronized void submitApplication(
if (rmContext.getRMApps().putIfAbsent(applicationId, application) !=
null) {
- LOG.info("Application with id " + applicationId +
- " is already present! Cannot add a duplicate!");
- // don't send event through dispatcher as it will be handled by app
- // already present with this id.
- application.handle(new RMAppRejectedEvent(applicationId,
- "Application with this id is already present! " +
- "Cannot add a duplicate!"));
+ String message = "Application with id " + applicationId
+ + " is already present! Cannot add a duplicate!";
+ LOG.info(message);
+ throw RPCUtil.getRemoteException(message);
} else {
this.rmContext.getDispatcher().getEventHandler().handle(
new RMAppEvent(applicationId, RMAppEventType.START));
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index d1515e4fb5..8a56d504d6 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -98,7 +98,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
private ContainerAllocationExpirer containerAllocationExpirer;
protected NMLivelinessMonitor nmLivelinessMonitor;
protected NodesListManager nodesListManager;
- private SchedulerEventDispatcher schedulerDispatcher;
+ private EventHandler<SchedulerEvent> schedulerDispatcher;
protected RMAppManager rmAppManager;
private WebApp webApp;
@@ -119,7 +119,7 @@ public RMContext getRMContext() {
@Override
public synchronized void init(Configuration conf) {
- this.rmDispatcher = new AsyncDispatcher();
+ this.rmDispatcher = createDispatcher();
addIfService(this.rmDispatcher);
this.containerAllocationExpirer = new ContainerAllocationExpirer(
@@ -138,8 +138,8 @@ public synchronized void init(Configuration conf) {
this.conf = new YarnConfiguration(conf);
// Initialize the scheduler
this.scheduler = createScheduler();
- this.schedulerDispatcher = new SchedulerEventDispatcher(this.scheduler);
- addService(this.schedulerDispatcher);
+ this.schedulerDispatcher = createSchedulerEventDispatcher();
+ addIfService(this.schedulerDispatcher);
this.rmDispatcher.register(SchedulerEventType.class,
this.schedulerDispatcher);
@@ -195,6 +195,14 @@ public synchronized void init(Configuration conf) {
super.init(conf);
}
+ protected EventHandler<SchedulerEvent> createSchedulerEventDispatcher() {
+ return new SchedulerEventDispatcher(this.scheduler);
+ }
+
+ protected Dispatcher createDispatcher() {
+ return new AsyncDispatcher();
+ }
+
protected void addIfService(Object object) {
if (object instanceof Service) {
addService((Service) object);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
index 484a7a38ba..6e63e2248d 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
@@ -32,7 +32,7 @@
* look at {@link RMAppImpl} for its implementation. This interface
* exposes methods to access various updates in application status/report.
*/
-public interface RMApp extends EventHandler{
+public interface RMApp extends EventHandler<RMAppEvent> {
/**
* The application id for this {@link RMApp}.
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
index 3164602f59..aeb3d2af04 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
@@ -36,7 +36,7 @@
* {@link YarnConfiguration#RM_AM_MAX_RETRIES}. For specific
* implementation take a look at {@link RMAppAttemptImpl}.
*/
-public interface RMAppAttempt extends EventHandler{
+public interface RMAppAttempt extends EventHandler<RMAppAttemptEvent> {
/**
* Get the application attempt id for this {@link RMAppAttempt}.
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 7f7f050bc4..7f8ff82d6a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -685,6 +685,8 @@ public FinalTransition(RMAppAttemptState finalAttemptState) {
public void transition(RMAppAttemptImpl appAttempt,
RMAppAttemptEvent event) {
+ appAttempt.progress = 1.0f;
+
// Tell the app and the scheduler
super.transition(appAttempt, event);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java
index b4037aaeaf..10913e0999 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java
@@ -207,13 +207,18 @@ synchronized public RMContainer allocate(NodeType type, SchedulerNode node,
.getDispatcher().getEventHandler(), this.rmContext
.getContainerAllocationExpirer());
+ // Add it to allContainers list.
+ newlyAllocatedContainers.add(rmContainer);
+ liveContainers.put(container.getId(), rmContainer);
+
// Update consumption and track allocations
-
+ appSchedulingInfo.allocate(type, node, priority, request, container);
+ Resources.addTo(currentConsumption, container.getResource());
+
// Inform the container
rmContainer.handle(
new RMContainerEvent(container.getId(), RMContainerEventType.START));
- Resources.addTo(currentConsumption, container.getResource());
if (LOG.isDebugEnabled()) {
LOG.debug("allocate: applicationAttemptId="
+ container.getId().getApplicationAttemptId()
@@ -223,12 +228,6 @@ synchronized public RMContainer allocate(NodeType type, SchedulerNode node,
RMAuditLogger.logSuccess(getUser(),
AuditConstants.ALLOC_CONTAINER, "SchedulerApp",
getApplicationId(), container.getId());
-
- // Add it to allContainers list.
- newlyAllocatedContainers.add(rmContainer);
- liveContainers.put(container.getId(), rmContainer);
-
- appSchedulingInfo.allocate(type, node, priority, request, container);
return rmContainer;
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java
index 9f3bc1cce7..ff51d62d91 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java
@@ -19,10 +19,7 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
import java.util.List;
-import java.util.Map;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index dfa4965d5d..7a90c5b6fa 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -291,7 +291,7 @@ private SchedulerNode getNode(NodeId nodeId) {
@SuppressWarnings("unchecked")
private synchronized void addApplication(ApplicationAttemptId appAttemptId,
- String queueName, String user) {
+ String user) {
// TODO: Fix store
SchedulerApp schedulerApp =
new SchedulerApp(appAttemptId, user, DEFAULT_QUEUE,
@@ -628,7 +628,7 @@ public void handle(SchedulerEvent event) {
{
AppAddedSchedulerEvent appAddedEvent = (AppAddedSchedulerEvent) event;
addApplication(appAddedEvent.getApplicationAttemptId(), appAddedEvent
- .getQueue(), appAddedEvent.getUser());
+ .getUser());
}
break;
case APP_REMOVED:
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
index 9a9ae2f51c..727cd1a232 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.Records;
public class MockAM {
@@ -128,7 +129,7 @@ public ResourceRequest createResourceReq(String resource, int memory, int priori
req.setHostName(resource);
req.setNumContainers(containers);
Priority pri = Records.newRecord(Priority.class);
- pri.setPriority(1);
+ pri.setPriority(priority);
req.setPriority(pri);
Resource capability = Records.newRecord(Resource.class);
capability.setMemory(memory);
@@ -139,11 +140,8 @@ public ResourceRequest createResourceReq(String resource, int memory, int priori
public AMResponse allocate(
 List<ResourceRequest> resourceRequest, List<ContainerId> releases)
throws Exception {
- AllocateRequest req = Records.newRecord(AllocateRequest.class);
- req.setResponseId(++responseId);
- req.setApplicationAttemptId(attemptId);
- req.addAllAsks(resourceRequest);
- req.addAllReleases(releases);
+ AllocateRequest req = BuilderUtils.newAllocateRequest(attemptId,
+ ++responseId, 0F, resourceRequest, releases);
AllocateResponse resp = amRMProtocol.allocate(req);
return resp.getAMResponse();
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCResponseId.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCResponseId.java
index 61d678ea01..3bc5547342 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCResponseId.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCResponseId.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.util.BuilderUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -77,13 +78,14 @@ public void testARRMResponseId() throws Exception {
am.registerAppAttempt();
- AllocateRequest allocateRequest = recordFactory.newRecordInstance(AllocateRequest.class);
- allocateRequest.setApplicationAttemptId(attempt.getAppAttemptId());
+ AllocateRequest allocateRequest = BuilderUtils.newAllocateRequest(attempt
+ .getAppAttemptId(), 0, 0F, null, null);
AMResponse response = amService.allocate(allocateRequest).getAMResponse();
Assert.assertEquals(1, response.getResponseId());
Assert.assertFalse(response.getReboot());
- allocateRequest.setResponseId(response.getResponseId());
+ allocateRequest = BuilderUtils.newAllocateRequest(attempt
+ .getAppAttemptId(), response.getResponseId(), 0F, null, null);
response = amService.allocate(allocateRequest).getAMResponse();
Assert.assertEquals(2, response.getResponseId());
@@ -91,8 +93,9 @@ public void testARRMResponseId() throws Exception {
response = amService.allocate(allocateRequest).getAMResponse();
Assert.assertEquals(2, response.getResponseId());
- /** try sending old **/
- allocateRequest.setResponseId(0);
+ /** try sending old request again **/
+ allocateRequest = BuilderUtils.newAllocateRequest(attempt
+ .getAppAttemptId(), 0, 0F, null, null);
response = amService.allocate(allocateRequest).getAMResponse();
Assert.assertTrue(response.getReboot());
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
index 4fb6486c2c..03229c34b4 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
@@ -162,6 +162,7 @@ public void setDiagnostics(String diag) {
this.diagnostics = new StringBuilder(diag);
}
+ @Override
public void handle(RMAppEvent event) {
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java
index 989f3483d9..1b681628c9 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java
@@ -83,6 +83,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.junit.BeforeClass;
import org.junit.AfterClass;
@@ -240,12 +241,8 @@ public AMRMProtocol run() {
ask.add(rr);
 ArrayList<ContainerId> release = new ArrayList<ContainerId>();
- AllocateRequest allocateRequest =
- recordFactory.newRecordInstance(AllocateRequest.class);
- allocateRequest.setApplicationAttemptId(appAttempt.getAppAttemptId());
- allocateRequest.setResponseId(0);
- allocateRequest.addAllAsks(ask);
- allocateRequest.addAllReleases(release);
+ AllocateRequest allocateRequest = BuilderUtils.newAllocateRequest(
+ appAttempt.getAppAttemptId(), 0, 0F, ask, release);
 List<Container> allocatedContainers = scheduler.allocate(allocateRequest)
.getAMResponse().getAllocatedContainers();
From 59586d801543a4209f2daf277c07a4f4e55414ba Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Wed, 28 Sep 2011 12:36:30 +0000
Subject: [PATCH 61/68] HADOOP-7662. Fixed logs servlet to use the pathspec
'/*' instead of '/' for correct filtering. Contributed by Thomas Graves.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176849 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../src/main/java/org/apache/hadoop/http/HttpServer.java | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 182344d8d0..336737f0c2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -622,6 +622,9 @@ Release 0.23.0 - Unreleased
HADOOP-7631. Fixes a config problem to do with running streaming jobs
(Eric Yang via ddas)
+ HADOOP-7662. Fixed logs servlet to use the pathspec '/*' instead of '/'
+ for correct filtering. (Thomas Graves via vinodkv)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
index de506c91b2..6ad3703dde 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
@@ -282,7 +282,7 @@ protected void addDefaultApps(ContextHandlerCollection parent,
if (logDir != null) {
Context logContext = new Context(parent, "/logs");
logContext.setResourceBase(logDir);
- logContext.addServlet(AdminAuthorizedServlet.class, "/");
+ logContext.addServlet(AdminAuthorizedServlet.class, "/*");
logContext.setDisplayName("logs");
setContextAttributes(logContext, conf);
defaultContexts.put(logContext, true);
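Jetty treats the bare "/" spec as the default-servlet mapping, while "/*" is a prefix mapping that matches every path under the context, so requests for individual files under /logs now hit the same path spec that the server's injected filters are keyed on. A minimal sketch of the wiring, reusing the Context and AdminAuthorizedServlet from the hunk above (Jetty 6 API assumed):

    import org.mortbay.jetty.handler.ContextHandlerCollection;
    import org.mortbay.jetty.servlet.Context;

    // Sketch only: mirrors what addDefaultApps() does for the /logs context.
    static Context newLogsContext(ContextHandlerCollection parent, String logDir) {
      Context logContext = new Context(parent, "/logs");
      logContext.setResourceBase(logDir);
      // "/*" matches /logs, /logs/app.log, /logs/dir/x.log, ...;
      // the old "/" spec was only the default-servlet mapping.
      logContext.addServlet(AdminAuthorizedServlet.class, "/*");
      return logContext;
    }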
From 9799353ee81405564854458abe4da5e306162eed Mon Sep 17 00:00:00 2001
From: Suresh Srinivas
Date: Wed, 28 Sep 2011 17:47:20 +0000
Subject: [PATCH 62/68] HADOOP-7668. Add a NetUtils method that can tell if an
InetAddress belongs to local host. Contributed by Suresh Srinivas.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176986 13f79535-47bb-0310-9956-ffa450edef68
---
.../hadoop-common/CHANGES.txt | 3 ++
.../java/org/apache/hadoop/net/NetUtils.java | 23 +++++++++++++
.../org/apache/hadoop/net/TestNetUtils.java | 32 +++++++++++++++++++
3 files changed, 58 insertions(+)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 336737f0c2..3960abe922 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -19,6 +19,9 @@ Trunk (unreleased changes)
HADOOP-7635. RetryInvocationHandler should release underlying resources on
close (atm)
+
+ HADOOP-7668. Add a NetUtils method that can tell if an InetAddress
+ belongs to local host. (suresh)
BUGS
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index b22aaa009c..9b744ed3ee 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -516,4 +516,27 @@ public static InetAddress getLocalInetAddress(String host)
} catch (UnknownHostException ignore) { }
return addr;
}
+
+ /**
+ * Given an InetAddress, checks to see if the address is a local address, by
+ * comparing the address with all the interfaces on the node.
+ * @param addr address to check if it is local node's address
+ * @return true if the address corresponds to the local node
+ */
+ public static boolean isLocalAddress(InetAddress addr) {
+ // Check if the address is any local or loop back
+ boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
+ System.out.println("address is any or loopback address " + addr);
+
+ // Check if the address is defined on any interface
+ if (!local) {
+ try {
+ local = NetworkInterface.getByInetAddress(addr) != null;
+ } catch (SocketException e) {
+ local = false;
+ }
+ }
+ System.out.println("address " + addr + " is local " + local);
+ return local;
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index f49d4c886e..7cc6f4d521 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -18,13 +18,17 @@
package org.apache.hadoop.net;
import org.junit.Test;
+
import static org.junit.Assert.*;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
import java.net.Socket;
import java.net.ConnectException;
import java.net.SocketException;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
+import java.util.Enumeration;
import org.apache.hadoop.conf.Configuration;
@@ -88,4 +92,32 @@ public void testVerifyHostnamesNoException() {
fail("NetUtils.verifyHostnames threw unexpected UnknownHostException");
}
}
+
+ /**
+ * Test for {@link NetUtils#isLocalAddress(java.net.InetAddress)}
+ */
+ @Test
+ public void testIsLocalAddress() throws Exception {
+ // Test - local host is local address
+ assertTrue(NetUtils.isLocalAddress(InetAddress.getLocalHost()));
+
+ // Test - all addresses bound network interface is local address
+ Enumeration<NetworkInterface> interfaces = NetworkInterface
+ .getNetworkInterfaces();
+ if (interfaces != null) { // Iterate through all network interfaces
+ while (interfaces.hasMoreElements()) {
+ NetworkInterface i = interfaces.nextElement();
+ Enumeration<InetAddress> addrs = i.getInetAddresses();
+ if (addrs == null) {
+ continue;
+ }
+ // Iterate through all the addresses of a network interface
+ while (addrs.hasMoreElements()) {
+ InetAddress addr = addrs.nextElement();
+ assertTrue(NetUtils.isLocalAddress(addr));
+ }
+ }
+ }
+ assertFalse(NetUtils.isLocalAddress(InetAddress.getByName("8.8.8.8")));
+ }
}
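A hedged usage sketch of the new helper; the class name below is illustrative only, but the calls mirror what the test above exercises:

    import java.net.InetAddress;
    import org.apache.hadoop.net.NetUtils;

    public class LocalAddressCheck {
      public static void main(String[] args) throws Exception {
        // Loopback and the node's own address should report true.
        System.out.println(NetUtils.isLocalAddress(InetAddress.getByName("127.0.0.1")));
        System.out.println(NetUtils.isLocalAddress(InetAddress.getLocalHost()));
        // An arbitrary external address should report false.
        System.out.println(NetUtils.isLocalAddress(InetAddress.getByName("8.8.8.8")));
      }
    }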
From 08db2a139d90f00b05975d02ab4eb5963d722a68 Mon Sep 17 00:00:00 2001
From: Aaron Myers
Date: Wed, 28 Sep 2011 18:06:26 +0000
Subject: [PATCH 63/68] HDFS-2346. TestHost2NodesMap & TestReplicasMap will
fail depending upon execution order of test methods (Laxman via atm)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1176994 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../blockmanagement/TestHost2NodesMap.java | 26 ++++++++++++-------
.../hdfs/server/datanode/TestReplicasMap.java | 17 +++++++-----
3 files changed, 29 insertions(+), 17 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4a37a36de8..d4d3d3c6bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -80,6 +80,9 @@ Trunk (unreleased changes)
HDFS-2361. hftp is broken, fixed username checks in JspHelper. (jitendra)
+ HDFS-2346. TestHost2NodesMap & TestReplicasMap will fail depending upon
+ execution order of test methods (Laxman via atm)
+
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
index 7b9126f7de..d34cf1c4c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
@@ -18,31 +18,34 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
-import junit.framework.TestCase;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap;
+import org.junit.Before;
+import org.junit.Test;
-public class TestHost2NodesMap extends TestCase {
- static private Host2NodesMap map = new Host2NodesMap();
- private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
+public class TestHost2NodesMap {
+ private Host2NodesMap map = new Host2NodesMap();
+ private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
};
- private final static DatanodeDescriptor NULL_NODE = null;
- private final static DatanodeDescriptor NODE =
- new DatanodeDescriptor(new DatanodeID("h3:5040"), "/d1/r4");
+ private final DatanodeDescriptor NULL_NODE = null;
+ private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3:5040"),
+ "/d1/r4");
- static {
+ @Before
+ public void setup() {
for(DatanodeDescriptor node:dataNodes) {
map.add(node);
}
map.add(NULL_NODE);
}
+ @Test
public void testContains() throws Exception {
for(int i=0; i
Date: Wed, 28 Sep 2011 18:33:00 +0000
Subject: [PATCH 64/68] HADOOP-7687 Make getProtocolSignature public
(sanjay)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1177002 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
.../src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3960abe922..08d8d2904c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -23,6 +23,8 @@ Trunk (unreleased changes)
HADOOP-7668. Add a NetUtils method that can tell if an InetAddress
belongs to local host. (suresh)
+ HADOOP-7687 Make getProtocolSignature public (sanjay)
+
BUGS
HADOOP-7606. Upgrade Jackson to version 1.7.1 to match the version required
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java
index a055a7fd46..04d08c5142 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java
@@ -199,7 +199,7 @@ private static ProtocolSigFingerprint getSigFingerprint(
* @param protocol protocol
* @return the server's protocol signature
*/
- static ProtocolSignature getProtocolSignature(
+ public static ProtocolSignature getProtocolSignature(
int clientMethodsHashCode,
long serverVersion,
 Class<? extends VersionedProtocol> protocol) {
From 484649acfe3774682423eafd86fab6c486ad8491 Mon Sep 17 00:00:00 2001
From: Aaron Myers
Date: Wed, 28 Sep 2011 19:01:32 +0000
Subject: [PATCH 65/68] Removing entry for CHANGES.txt since this was
back-ported to 0.23.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1177020 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ---
1 file changed, 3 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d4d3d3c6bb..4a37a36de8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -80,9 +80,6 @@ Trunk (unreleased changes)
HDFS-2361. hftp is broken, fixed username checks in JspHelper. (jitendra)
- HDFS-2346. TestHost2NodesMap & TestReplicasMap will fail depending upon
- execution order of test methods (Laxman via atm)
-
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
From 05aeb2d9fd9c456d98fc61249c72ea19234ff2d5 Mon Sep 17 00:00:00 2001
From: Steve Loughran
Date: Wed, 28 Sep 2011 20:01:28 +0000
Subject: [PATCH 66/68] HADOOP-7668 revert the System.out() calls
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1177035 13f79535-47bb-0310-9956-ffa450edef68
---
.../src/main/java/org/apache/hadoop/net/NetUtils.java | 2 --
1 file changed, 2 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index 9b744ed3ee..d94b69f183 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -526,7 +526,6 @@ public static InetAddress getLocalInetAddress(String host)
public static boolean isLocalAddress(InetAddress addr) {
// Check if the address is any local or loop back
boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
- System.out.println("address is any or loopback address " + addr);
// Check if the address is defined on any interface
if (!local) {
@@ -536,7 +535,6 @@ public static boolean isLocalAddress(InetAddress addr) {
local = false;
}
}
- System.out.println("address " + addr + " is local " + local);
return local;
}
}
From c5179b16ecc2c26f693eed692a6c556b6ac2e845 Mon Sep 17 00:00:00 2001
From: Steve Loughran
Date: Wed, 28 Sep 2011 20:37:37 +0000
Subject: [PATCH 67/68] HADOOP-6220
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1177051 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-common-project/hadoop-common/CHANGES.txt | 4 ++++
.../src/main/java/org/apache/hadoop/http/HttpServer.java | 4 ++++
2 files changed, 8 insertions(+)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 08d8d2904c..4a0f0057f1 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -39,6 +39,10 @@ Trunk (unreleased changes)
HADOOP-7669 Fix newly introduced release audit warning.
(Uma Maheswara Rao G via stevel)
+
+ HADOOP-6220. HttpServer wraps InterruptedExceptions by IOExceptions if interrupted
+ in startup (stevel)
+
Release 0.23.0 - Unreleased
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
index 6ad3703dde..c526e10286 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
@@ -20,6 +20,7 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
+import java.io.InterruptedIOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.URL;
@@ -683,6 +684,9 @@ public void start() throws IOException {
}
} catch (IOException e) {
throw e;
+ } catch (InterruptedException e) {
+ throw (IOException) new InterruptedIOException(
+ "Interrupted while starting HTTP server").initCause(e);
} catch (Exception e) {
throw new IOException("Problem starting http server", e);
}
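For callers, the practical effect is that an interrupted startup now surfaces as an InterruptedIOException whose cause is the original InterruptedException, rather than the generic "Problem starting http server" wrapper. A hedged caller-side sketch (constructor arguments are illustrative):

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer;

    public class StartHttpServer {
      public static void main(String[] args) throws IOException {
        HttpServer server = new HttpServer("test", "0.0.0.0", 0, true, new Configuration());
        try {
          server.start();
        } catch (InterruptedIOException e) {
          // Startup was interrupted; the original InterruptedException is the cause.
          Thread.currentThread().interrupt();
        }
        // Any other IOException still means the server genuinely failed to start.
      }
    }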
From e9dd78d9fede044101627786d991bec3265205a4 Mon Sep 17 00:00:00 2001
From: Suresh Srinivas
Date: Wed, 28 Sep 2011 22:58:10 +0000
Subject: [PATCH 68/68] HDFS-2355. Federation: enable using the same
configuration file across all the nodes in the cluster. Contributed by Suresh
Srinivas.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1177100 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 5 +-
.../java/org/apache/hadoop/hdfs/DFSUtil.java | 85 +++++++-
.../hdfs/server/namenode/BackupNode.java | 6 +
.../hadoop/hdfs/server/namenode/NameNode.java | 21 +-
.../server/namenode/SecondaryNameNode.java | 9 +-
.../org/apache/hadoop/hdfs/TestDFSUtil.java | 200 +++++++++++-------
.../TestMulitipleNNDataBlockScanner.java | 5 +-
7 files changed, 240 insertions(+), 91 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4a37a36de8..43c360fcb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -39,7 +39,7 @@ Trunk (unreleased changes)
via szetszwo)
HDFS-2351 Change Namenode and Datanode to register each of their protocols
- seperately (Sanjay Radia)
+ seperately. (Sanjay Radia)
HDFS-2356. Support case insensitive query parameter names in webhdfs.
(szetszwo)
@@ -47,6 +47,9 @@ Trunk (unreleased changes)
HDFS-2368. Move SPNEGO conf properties from hdfs-default.xml to
hdfs-site.xml. (szetszwo)
+ HDFS-2355. Federation: enable using the same configuration file across
+ all the nodes in the cluster. (suresh)
+
BUG FIXES
HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 538f4ba533..45fb3948e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -38,6 +38,7 @@
import java.util.StringTokenizer;
import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
@@ -576,17 +577,6 @@ public static void setGenericConf(Configuration conf,
}
}
- /**
- * Returns the configured nameservice Id
- *
- * @param conf
- * Configuration object to lookup the nameserviceId
- * @return nameserviceId string from conf
- */
- public static String getNameServiceId(Configuration conf) {
- return conf.get(DFS_FEDERATION_NAMESERVICE_ID);
- }
-
/** Return used as percentage of capacity */
public static float getPercentUsed(long used, long capacity) {
return capacity <= 0 ? 100 : ((float)used * 100.0f)/(float)capacity;
@@ -696,4 +686,77 @@ public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
ClientDatanodeProtocol.versionID, addr, ticket, confWithNoIpcIdle,
NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
+
+ /**
+ * Get name service Id for the {@link NameNode} based on namenode RPC address
+ * matching the local node address.
+ */
+ public static String getNamenodeNameServiceId(Configuration conf) {
+ return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
+ }
+
+ /**
+ * Get name service Id for the BackupNode based on backup node RPC address
+ * matching the local node address.
+ */
+ public static String getBackupNameServiceId(Configuration conf) {
+ return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+ }
+
+ /**
+ * Get name service Id for the secondary node based on secondary http address
+ * matching the local node address.
+ */
+ public static String getSecondaryNameServiceId(Configuration conf) {
+ return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+ }
+
+ /**
+ * Get the nameservice Id by matching the {@code addressKey} with the
+ * the address of the local node.
+ *
+ * If {@link DFSConfigKeys#DFS_FEDERATION_NAMESERVICE_ID} is not specifically
+ * configured, this method determines the nameservice Id by matching the local
+ * nodes address with the configured addresses. When a match is found, it
+ * returns the nameservice Id from the corresponding configuration key.
+ *
+ * @param conf Configuration
+ * @param addressKey configuration key to get the address.
+ * @return name service Id on success, null on failure.
+ * @throws HadoopIllegalArgumentException on error
+ */
+ private static String getNameServiceId(Configuration conf, String addressKey) {
+ String nameserviceId = conf.get(DFS_FEDERATION_NAMESERVICE_ID);
+ if (nameserviceId != null) {
+ return nameserviceId;
+ }
+
+ Collection<String> ids = getNameServiceIds(conf);
+ if (ids == null || ids.size() == 0) {
+ // Not federation configuration, hence no nameservice Id
+ return null;
+ }
+
+ // Match the rpc address with that of local address
+ int found = 0;
+ for (String id : ids) {
+ String addr = conf.get(getNameServiceIdKey(addressKey, id));
+ InetSocketAddress s = NetUtils.createSocketAddr(addr);
+ if (NetUtils.isLocalAddress(s.getAddress())) {
+ nameserviceId = id;
+ found++;
+ }
+ }
+ if (found > 1) { // Only one address must match the local address
+ throw new HadoopIllegalArgumentException(
+ "Configuration has multiple RPC addresses that matches "
+ + "the local node's address. Please configure the system with "
+ + "the parameter " + DFS_FEDERATION_NAMESERVICE_ID);
+ }
+ if (found == 0) {
+ throw new HadoopIllegalArgumentException("Configuration address "
+ + addressKey + " is missing in configuration with name service Id");
+ }
+ return nameserviceId;
+ }
}
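Taken together, these helpers are what let every node in a federated cluster share one configuration file: each daemon works out its own nameservice Id by matching its local addresses against the per-nameservice address keys. A minimal sketch under assumed host names (nn-host1 and nn-host2 are made up):

    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class NameServiceIdExample {
      public static void main(String[] args) {
        HdfsConfiguration conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
        conf.set(DFSUtil.getNameServiceIdKey(
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1"), "nn-host1:9000");
        conf.set(DFSUtil.getNameServiceIdKey(
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2"), "nn-host2:9000");
        // Resolves to "ns1" on nn-host1 and "ns2" on nn-host2; a node matching
        // no address (or more than one) gets a HadoopIllegalArgumentException.
        System.out.println(DFSUtil.getNamenodeNameServiceId(conf));
      }
    }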
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index 5579fa2b02..0a600f8023 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@@ -388,4 +389,9 @@ String getBlockPoolId() {
String getClusterId() {
return clusterId;
}
+
+ @Override
+ protected String getNameServiceId(Configuration conf) {
+ return DFSUtil.getBackupNameServiceId(conf);
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 3d17afaec4..740a2b7b11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -30,6 +30,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -453,11 +454,14 @@ protected NameNode(Configuration conf, NamenodeRole role)
throws IOException {
this.role = role;
try {
- initializeGenericKeys(conf);
+ initializeGenericKeys(conf, getNameServiceId(conf));
initialize(conf);
} catch (IOException e) {
this.stop();
throw e;
+ } catch (HadoopIllegalArgumentException e) {
+ this.stop();
+ throw e;
}
}
@@ -762,16 +766,16 @@ public static NameNode createNameNode(String argv[], Configuration conf)
* @param conf
* Configuration object to lookup specific key and to set the value
* to the key passed. Note the conf object is modified
+ * @param nameserviceId name service Id
* @see DFSUtil#setGenericConf(Configuration, String, String...)
*/
- public static void initializeGenericKeys(Configuration conf) {
- final String nameserviceId = DFSUtil.getNameServiceId(conf);
+ public static void initializeGenericKeys(Configuration conf, String
+ nameserviceId) {
if ((nameserviceId == null) || nameserviceId.isEmpty()) {
return;
}
DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
-
if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+ conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
@@ -779,6 +783,14 @@ public static void initializeGenericKeys(Configuration conf) {
}
}
+ /**
+ * Get the name service Id for the node
+ * @return name service Id or null if federation is not configured
+ */
+ protected String getNameServiceId(Configuration conf) {
+ return DFSUtil.getNamenodeNameServiceId(conf);
+ }
+
/**
*/
public static void main(String argv[]) throws Exception {
@@ -792,5 +804,4 @@ public static void main(String argv[]) throws Exception {
System.exit(-1);
}
}
-
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 9c5ef6f2c3..d403629146 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -38,10 +38,12 @@
import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -173,12 +175,17 @@ public SecondaryNameNode(Configuration conf) throws IOException {
public SecondaryNameNode(Configuration conf,
CommandLineOpts commandLineOpts) throws IOException {
try {
- NameNode.initializeGenericKeys(conf);
+ NameNode.initializeGenericKeys(conf,
+ DFSUtil.getSecondaryNameServiceId(conf));
initialize(conf, commandLineOpts);
} catch(IOException e) {
shutdown();
LOG.fatal("Failed to start secondary namenode. ", e);
throw e;
+ } catch(HadoopIllegalArgumentException e) {
+ shutdown();
+ LOG.fatal("Failed to start secondary namenode. ", e);
+ throw e;
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index fc883118f8..f154ff7d20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -29,8 +29,7 @@
import java.util.Iterator;
import java.util.List;
-import junit.framework.Assert;
-
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -40,8 +39,7 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
-
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
public class TestDFSUtil {
/**
@@ -76,79 +74,141 @@ public void testLocatedBlocks2Locations() {
}
}
- assertTrue("expected 1 corrupt files but got " + corruptCount,
- corruptCount == 1);
-
+ assertTrue("expected 1 corrupt files but got " + corruptCount,
+ corruptCount == 1);
+
// test an empty location
bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
assertEquals(0, bs.length);
}
- /**
- * Test for
- * {@link DFSUtil#getNameServiceIds(Configuration)}
- * {@link DFSUtil#getNameServiceId(Configuration)}
- * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
+
+ private Configuration setupAddress(String key) {
+ HdfsConfiguration conf = new HdfsConfiguration();
+ conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+ conf.set(DFSUtil.getNameServiceIdKey(key, "nn1"), "localhost:9000");
+ return conf;
+ }
+
+ /**
+   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
+   * that the nameserviceId set in the configuration is returned
*/
@Test
- public void testMultipleNamenodes() throws IOException {
+ public void getNameServiceId() {
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
-
- // Test - The configured nameserviceIds are returned
+ conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+ assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
+ }
+
+ /**
+   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
+   * that the nameserviceId for a namenode is determined by matching its RPC
+   * address with the local node's address
+ */
+ @Test
+ public void getNameNodeNameServiceId() {
+ Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
+ assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
+ }
+
+ /**
+   * Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure that
+   * the nameserviceId for a backup node is determined by matching its address
+   * with the local node's address
+ */
+ @Test
+ public void getBackupNameServiceId() {
+ Configuration conf = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+ assertEquals("nn1", DFSUtil.getBackupNameServiceId(conf));
+ }
+
+ /**
+   * Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure
+   * that the nameserviceId for a secondary namenode is determined by matching
+   * its address with the local node's address
+ */
+ @Test
+ public void getSecondaryNameServiceId() {
+ Configuration conf = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+ assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(conf));
+ }
+
+ /**
+   * Test {@link DFSUtil#getNameServiceId(Configuration, String)} to ensure
+   * that an exception is thrown when multiple RPC addresses match the local
+   * node's address
+ */
+ @Test(expected = HadoopIllegalArgumentException.class)
+ public void testGetNameServiceIdException() {
+ HdfsConfiguration conf = new HdfsConfiguration();
+ conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+ conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
+ "localhost:9000");
+ conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
+ "localhost:9001");
+ DFSUtil.getNamenodeNameServiceId(conf);
+ fail("Expected exception is not thrown");
+ }
+
+ /**
+ * Test {@link DFSUtil#getNameServiceIds(Configuration)}
+ */
+ @Test
+ public void testGetNameServiceIds() {
+ HdfsConfiguration conf = new HdfsConfiguration();
+ conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
Collection nameserviceIds = DFSUtil.getNameServiceIds(conf);
Iterator it = nameserviceIds.iterator();
assertEquals(2, nameserviceIds.size());
assertEquals("nn1", it.next().toString());
assertEquals("nn2", it.next().toString());
-
- // Tests default nameserviceId is returned
- conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
- assertEquals("nn1", DFSUtil.getNameServiceId(conf));
-
+ }
+
+ /**
+   * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)} and
+   * {@link DFSUtil#getNameServiceIdFromAddress(Configuration,
+   * InetSocketAddress, String...)}
+ */
+ @Test
+ public void testMultipleNamenodes() throws IOException {
+ HdfsConfiguration conf = new HdfsConfiguration();
+ conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
// Test - configured list of namenodes are returned
final String NN1_ADDRESS = "localhost:9000";
final String NN2_ADDRESS = "localhost:9001";
final String NN3_ADDRESS = "localhost:9002";
- conf.set(DFSUtil.getNameServiceIdKey(
- DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
- conf.set(DFSUtil.getNameServiceIdKey(
- DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
-
- Collection nnAddresses =
- DFSUtil.getNNServiceRpcAddresses(conf);
+ conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
+ NN1_ADDRESS);
+ conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
+ NN2_ADDRESS);
+
+ Collection nnAddresses = DFSUtil
+ .getNNServiceRpcAddresses(conf);
assertEquals(2, nnAddresses.size());
Iterator iterator = nnAddresses.iterator();
- assertEquals(2, nameserviceIds.size());
InetSocketAddress addr = iterator.next();
assertEquals("localhost", addr.getHostName());
assertEquals(9000, addr.getPort());
addr = iterator.next();
assertEquals("localhost", addr.getHostName());
assertEquals(9001, addr.getPort());
-
+
// Test - can look up nameservice ID from service address
- InetSocketAddress testAddress1 = NetUtils.createSocketAddr(NN1_ADDRESS);
- String nameserviceId = DFSUtil.getNameServiceIdFromAddress(
- conf, testAddress1,
- DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
- DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
- assertEquals("nn1", nameserviceId);
- InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
- nameserviceId = DFSUtil.getNameServiceIdFromAddress(
- conf, testAddress2,
- DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
- DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
- assertEquals("nn2", nameserviceId);
- InetSocketAddress testAddress3 = NetUtils.createSocketAddr(NN3_ADDRESS);
- nameserviceId = DFSUtil.getNameServiceIdFromAddress(
- conf, testAddress3,
- DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
- DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
- assertNull(nameserviceId);
+ checkNameServiceId(conf, NN1_ADDRESS, "nn1");
+ checkNameServiceId(conf, NN2_ADDRESS, "nn2");
+ checkNameServiceId(conf, NN3_ADDRESS, null);
}
-
- /**
+
+ public void checkNameServiceId(Configuration conf, String addr,
+ String expectedNameServiceId) {
+ InetSocketAddress s = NetUtils.createSocketAddr(addr);
+ String nameserviceId = DFSUtil.getNameServiceIdFromAddress(conf, s,
+ DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
+ assertEquals(expectedNameServiceId, nameserviceId);
+ }
+
+ /**
* Test for
* {@link DFSUtil#isDefaultNamenodeAddress(Configuration, InetSocketAddress, String...)}
*/
@@ -157,27 +217,25 @@ public void testSingleNamenode() {
HdfsConfiguration conf = new HdfsConfiguration();
final String DEFAULT_ADDRESS = "localhost:9000";
final String NN2_ADDRESS = "localhost:9001";
- conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
-
+ conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
+
InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
- DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
- DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+ DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
assertTrue(isDefault);
InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
- DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
- DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+ DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
assertFalse(isDefault);
}
-
+
/** Tests to ensure default namenode is used as fallback */
@Test
public void testDefaultNamenode() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
final String hdfs_default = "hdfs://localhost:9999/";
- conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, hdfs_default);
- // If DFSConfigKeys.DFS_FEDERATION_NAMESERVICES is not set, verify that
+ conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
+ // If DFS_FEDERATION_NAMESERVICES is not set, verify that
// default namenode address is returned.
List addrList = DFSUtil.getNNServiceRpcAddresses(conf);
assertEquals(1, addrList.size());
@@ -191,26 +249,26 @@ public void testDefaultNamenode() throws IOException {
@Test
public void testConfModification() throws IOException {
final HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1");
- conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
- final String nameserviceId = DFSUtil.getNameServiceId(conf);
-
+ conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+ conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+ final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
+
// Set the nameservice specific keys with nameserviceId in the config key
for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
// Note: value is same as the key
conf.set(DFSUtil.getNameServiceIdKey(key, nameserviceId), key);
}
-
+
// Initialize generic keys from specific keys
- NameNode.initializeGenericKeys(conf);
-
+ NameNode.initializeGenericKeys(conf, nameserviceId);
+
// Retrieve the keys without nameserviceId and Ensure generic keys are set
// to the correct value
for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
assertEquals(key, conf.get(key));
}
}
-
+
/**
* Tests for empty configuration, an exception is thrown from
* {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
@@ -238,16 +296,16 @@ public void testEmptyConf() {
} catch (IOException expected) {
}
}
-
+
@Test
- public void testGetServerInfo(){
+ public void testGetServerInfo() {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
String httpsport = DFSUtil.getInfoServer(null, conf, true);
- Assert.assertEquals("0.0.0.0:50470", httpsport);
+ assertEquals("0.0.0.0:50470", httpsport);
String httpport = DFSUtil.getInfoServer(null, conf, false);
- Assert.assertEquals("0.0.0.0:50070", httpport);
+ assertEquals("0.0.0.0:50070", httpport);
}
}
\ No newline at end of file
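
Beyond resolving an id for the local node, the TestDFSUtil changes above also exercise the reverse lookup: DFSUtil.getNameServiceIdFromAddress maps a socket address back to the nameservice whose configured address matches it, and returns null when nothing matches (the NN3 case). A hedged example of that lookup with two illustrative nameservices; the class name and addresses are invented, the DFSUtil and NetUtils calls are the ones used in the tests:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.net.NetUtils;

    public class NameServiceLookupSketch {
      public static void main(String[] args) {
        // Two illustrative nameservices with distinct rpc addresses.
        Configuration conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
        conf.set(DFSUtil.getNameServiceIdKey(
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), "localhost:9000");
        conf.set(DFSUtil.getNameServiceIdKey(
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), "localhost:9001");

        // Match the address against the listed keys of every nameservice.
        InetSocketAddress addr = NetUtils.createSocketAddr("localhost:9001");
        String nsId = DFSUtil.getNameServiceIdFromAddress(conf, addr,
            DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);

        System.out.println(nsId);   // expected "nn2"; null for an unknown address
      }
    }
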
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
index 78ff00288b..1b9a19c649 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
@@ -96,7 +96,8 @@ public void testBlockScannerAfterRefresh() throws IOException,
String bpidToShutdown = cluster.getNamesystem(2).getBlockPoolId();
for (int i = 0; i < 2; i++) {
- String nsId = DFSUtil.getNameServiceId(cluster.getConfiguration(i));
+ String nsId = DFSUtil.getNamenodeNameServiceId(cluster
+ .getConfiguration(i));
namenodesBuilder.append(nsId);
namenodesBuilder.append(",");
}
@@ -116,7 +117,7 @@ public void testBlockScannerAfterRefresh() throws IOException,
LOG.info(ex.getMessage());
}
- namenodesBuilder.append(DFSUtil.getNameServiceId(cluster
+ namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
.getConfiguration(2)));
conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
.toString());