MAPREDUCE-3068. Added a whitelist of environment variables for containers from the NodeManager and set MALLOC_ARENA_MAX for all daemons and containers. Contributed by Chris Riccomini.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1185447 13f79535-47bb-0310-9956-ffa450edef68
Arun Murthy 2011-10-18 01:22:14 +00:00
parent 955d8eca21
commit c1d90772b6
11 changed files with 294 additions and 71 deletions

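In short, this change adds two NodeManager settings: yarn.nodemanager.env-whitelist, listing variables a container may set for itself (otherwise defaulted from the NodeManager's environment), and yarn.nodemanager.admin-env, listing variables the NodeManager forces into every container. A minimal sketch of reading the effective values through the YarnConfiguration constants introduced below; the standalone main() harness is illustrative, not part of the commit:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ShowContainerEnvConfig {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Variables a container may override; the NodeManager fills them in only if absent.
    String whitelist = conf.get(
        YarnConfiguration.NM_ENV_WHITELIST,
        YarnConfiguration.DEFAULT_NM_ENV_WHITELIST);
    // Variables forced into every container, by default MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX.
    String adminEnv = conf.get(
        YarnConfiguration.NM_ADMIN_USER_ENV,
        YarnConfiguration.DEFAULT_NM_ADMIN_USER_ENV);
    System.out.println("env-whitelist: " + whitelist);
    System.out.println("admin-env: " + adminEnv);
  }
}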

@@ -397,6 +397,10 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3136. Added documentation for setting up Hadoop clusters in both
non-secure and secure mode for both HDFS & YARN. (acmurthy)
+MAPREDUCE-3068. Added a whitelist of environment variables for containers
+from the NodeManager and set MALLOC_ARENA_MAX for all daemons and
+containers. (Chris Riccomini via acmurthy)
OPTIMIZATIONS
MAPREDUCE-2026. Make JobTracker.getJobCounters() and


@@ -28,7 +28,7 @@
import org.apache.hadoop.mapred.TaskLog.LogName;
import org.apache.hadoop.mapreduce.ID;
import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.util.Apps;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.util.StringUtils;
@@ -78,15 +78,15 @@ public static void setVMEnv(Map<String, String> environment,
);
// Add pwd to LD_LIBRARY_PATH, add this before adding anything else
-MRApps.addToEnvironment(
+Apps.addToEnvironment(
environment,
Environment.LD_LIBRARY_PATH.name(),
Environment.PWD.$());
// Add the env variables passed by the user & admin
String mapredChildEnv = getChildEnv(conf, task.isMapTask());
-MRApps.setEnvFromInputString(environment, mapredChildEnv);
-MRApps.setEnvFromInputString(
+Apps.setEnvFromInputString(environment, mapredChildEnv);
+Apps.setEnvFromInputString(
environment,
conf.get(
MRJobConfig.MAPRED_ADMIN_USER_ENV,


@@ -93,6 +93,7 @@
import org.apache.hadoop.mapreduce.v2.app.speculate.SpeculatorEvent;
import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanupEvent;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.util.Apps;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
@@ -616,7 +617,7 @@ private ContainerLaunchContext createContainerLaunchContext() {
serviceData.put(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID,
ShuffleHandler.serializeServiceData(jobToken));
-MRApps.addToEnvironment(
+Apps.addToEnvironment(
environment,
Environment.CLASSPATH.name(),
getInitialClasspath());


@@ -182,17 +182,17 @@ private static void setMRFrameworkClasspath(
reader = new BufferedReader(new InputStreamReader(classpathFileStream));
String cp = reader.readLine();
if (cp != null) {
-addToEnvironment(environment, Environment.CLASSPATH.name(), cp.trim());
+Apps.addToEnvironment(environment, Environment.CLASSPATH.name(), cp.trim());
}
// Put the file itself on classpath for tasks.
-addToEnvironment(
+Apps.addToEnvironment(
environment,
Environment.CLASSPATH.name(),
thisClassLoader.getResource(mrAppGeneratedClasspathFile).getFile());
// Add standard Hadoop classes
for (String c : ApplicationConstants.APPLICATION_CLASSPATH) {
-addToEnvironment(environment, Environment.CLASSPATH.name(), c);
+Apps.addToEnvironment(environment, Environment.CLASSPATH.name(), c);
}
} finally {
if (classpathFileStream != null) {
@@ -205,28 +205,13 @@ private static void setMRFrameworkClasspath(
// TODO: Remove duplicates.
}
-private static final String SYSTEM_PATH_SEPARATOR =
-System.getProperty("path.separator");
-public static void addToEnvironment(
-Map<String, String> environment,
-String variable, String value) {
-String val = environment.get(variable);
-if (val == null) {
-val = value;
-} else {
-val = val + SYSTEM_PATH_SEPARATOR + value;
-}
-environment.put(variable, val);
-}
public static void setClasspath(Map<String, String> environment)
throws IOException {
-MRApps.addToEnvironment(
+Apps.addToEnvironment(
environment,
Environment.CLASSPATH.name(),
MRJobConfig.JOB_JAR);
-MRApps.addToEnvironment(
+Apps.addToEnvironment(
environment,
Environment.CLASSPATH.name(),
Environment.PWD.$() + Path.SEPARATOR + "*");
@@ -355,43 +340,4 @@ private static long[] getFileSizes(Configuration conf, String key) {
}
return result;
}
-public static void setEnvFromInputString(Map<String, String> env,
-String envString) {
-if (envString != null && envString.length() > 0) {
-String childEnvs[] = envString.split(",");
-for (String cEnv : childEnvs) {
-String[] parts = cEnv.split("="); // split on '='
-String value = env.get(parts[0]);
-if (value != null) {
-// Replace $env with the child's env constructed by the NM
-// For example: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
-value = parts[1].replace("$" + parts[0], value);
-} else {
-// example PATH=$PATH:/tmp
-value = System.getenv(parts[0]);
-if (value != null) {
-// the env key is present in the tt's env
-value = parts[1].replace("$" + parts[0], value);
-} else {
-// check for simple variable substitution
-// e.g. ROOT=$HOME
-String envValue = System.getenv(parts[1].substring(1));
-if (envValue != null) {
-value = envValue;
-} else {
-// the env key is not present anywhere; simply set it
-// example X=$X:/tmp or X=/tmp
-value = parts[1].replace("$" + parts[0], "");
-}
-}
-}
-addToEnvironment(env, parts[0], value);
-}
-}
-}
}


@@ -39,6 +39,10 @@ this="$bin/$script"
# the root of the Hadoop installation
export YARN_HOME=`dirname "$this"`/..
+# Same glibc bug that was discovered in Hadoop.
+# Without this you can see very large vmem settings on containers.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
#check to see if the conf dir is given as an optional argument
if [ $# -gt 1 ]
then


@@ -140,6 +140,11 @@ public enum Environment {
*/
HADOOP_HDFS_HOME("HADOOP_HDFS_HOME"),
+/**
+* $MALLOC_ARENA_MAX
+*/
+MALLOC_ARENA_MAX("MALLOC_ARENA_MAX"),
/**
* $YARN_HOME
*/


@@ -213,6 +213,14 @@ public class YarnConfiguration extends Configuration {
/** Prefix for all node manager configs.*/
public static final String NM_PREFIX = "yarn.nodemanager.";
+/** Environment variables that will be sent to containers.*/
+public static final String NM_ADMIN_USER_ENV = NM_PREFIX + "admin-env";
+public static final String DEFAULT_NM_ADMIN_USER_ENV = "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX";
+/** Environment variables that containers may override rather than use the NodeManager's defaults.*/
+public static final String NM_ENV_WHITELIST = NM_PREFIX + "env-whitelist";
+public static final String DEFAULT_NM_ENV_WHITELIST = "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME";
/** address of node manager IPC.*/
public static final String NM_ADDRESS = NM_PREFIX + "address";


@@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.util;
import java.util.Iterator;
+import java.util.Map;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -59,4 +60,55 @@ public static void shouldHaveNext(String prefix, String s, Iterator<String> it)
public static void throwParseException(String name, String s) {
throw new YarnException(join("Error parsing ", name, ": ", s));
}
+public static void setEnvFromInputString(Map<String, String> env,
+String envString) {
+if (envString != null && envString.length() > 0) {
+String childEnvs[] = envString.split(",");
+for (String cEnv : childEnvs) {
+String[] parts = cEnv.split("="); // split on '='
+String value = env.get(parts[0]);
+if (value != null) {
+// Replace $env with the child's env constructed by the NM
+// For example: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
+value = parts[1].replace("$" + parts[0], value);
+} else {
+// example PATH=$PATH:/tmp
+value = System.getenv(parts[0]);
+if (value != null) {
+// the env key is present in the tt's env
+value = parts[1].replace("$" + parts[0], value);
+} else {
+// check for simple variable substitution
+// e.g. ROOT=$HOME
+String envValue = System.getenv(parts[1].substring(1));
+if (envValue != null) {
+value = envValue;
+} else {
+// the env key is not present anywhere; simply set it
+// example X=$X:/tmp or X=/tmp
+value = parts[1].replace("$" + parts[0], "");
+}
+}
+}
+addToEnvironment(env, parts[0], value);
+}
+}
+}
+private static final String SYSTEM_PATH_SEPARATOR =
+System.getProperty("path.separator");
+public static void addToEnvironment(
+Map<String, String> environment,
+String variable, String value) {
+String val = environment.get(variable);
+if (val == null) {
+val = value;
+} else {
+val = val + SYSTEM_PATH_SEPARATOR + value;
+}
+environment.put(variable, val);
+}
}

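Taken together, the helpers relocated into Apps behave as sketched below, assuming a Unix path separator and that X is absent from the surrounding process environment:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.yarn.util.Apps;

public class AppsEnvExample {
  public static void main(String[] args) {
    Map<String, String> env = new HashMap<String, String>();

    // "X=/tmp" contains no "$X" reference that resolves anywhere, so the
    // "$X" substitution is a no-op and X is simply set to "/tmp".
    Apps.setEnvFromInputString(env, "X=/tmp");

    // "$X" now resolves against the map entry above, giving "/tmp:/bin";
    // addToEnvironment then appends that to the existing entry, so X ends
    // up as "/tmp:/tmp:/bin".
    Apps.setEnvFromInputString(env, "X=$X:/bin");

    // addToEnvironment appends with the platform path separator when a key
    // already has a value, and sets it otherwise.
    Apps.addToEnvironment(env, "CLASSPATH", "job.jar");
    Apps.addToEnvironment(env, "CLASSPATH", "lib/*");
    // CLASSPATH -> "job.jar:lib/*"
  }
}

Note that setEnvFromInputString funnels every expanded entry through addToEnvironment, so an entry that references its own variable both expands and appends to any value already in the map.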

@@ -194,6 +194,18 @@
<value>0.0.0.0:45454</value>
</property>
+<property>
+<description>Environment variables that should be forwarded from the NodeManager's environment to the container's.</description>
+<name>yarn.nodemanager.admin-env</name>
+<value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
+</property>
+<property>
+<description>Environment variables that containers may override rather than use the NodeManager's defaults.</description>
+<name>yarn.nodemanager.env-whitelist</name>
+<value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME</value>
+</property>
<property>
<description>Who will execute (launch) the containers.</description>
<name>yarn.nodemanager.container-executor.class</name>

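These defaults would normally be overridden in yarn-site.xml; the programmatic equivalent is sketched below with a hypothetical extra whitelist entry (HADOOP_MAPRED_HOME is illustrative, not part of this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ExtendWhitelist {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Let containers override one more variable instead of always
    // inheriting the NodeManager's value.
    conf.set(YarnConfiguration.NM_ENV_WHITELIST,
        YarnConfiguration.DEFAULT_NM_ENV_WHITELIST + ",HADOOP_MAPRED_HOME");
  }
}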

@@ -57,7 +57,7 @@
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.Apps;
public class ContainerLaunch implements Callable<Integer> {
private static final Log LOG = LogFactory.getLog(ContainerLaunch.class);
@@ -309,7 +309,7 @@ public void sanitizeEnv(Map<String, String> environment,
/**
* Non-modifiable environment variables
*/
putEnvIfNotNull(environment, Environment.USER.name(), container.getUser());
putEnvIfNotNull(environment,
@@ -343,11 +343,20 @@ public void sanitizeEnv(Map<String, String> environment,
* Modifiable environment variables
*/
-putEnvIfAbsent(environment, Environment.JAVA_HOME.name());
-putEnvIfAbsent(environment, Environment.HADOOP_COMMON_HOME.name());
-putEnvIfAbsent(environment, Environment.HADOOP_HDFS_HOME.name());
-putEnvIfAbsent(environment, Environment.YARN_HOME.name());
+// allow containers to override these variables
+String[] whitelist = conf.get(YarnConfiguration.NM_ENV_WHITELIST, YarnConfiguration.DEFAULT_NM_ENV_WHITELIST).split(",");
+for(String whitelistEnvVariable : whitelist) {
+putEnvIfAbsent(environment, whitelistEnvVariable.trim());
+}
+// variables here will be forced in, even if the container has specified them.
+Apps.setEnvFromInputString(
+environment,
+conf.get(
+YarnConfiguration.NM_ADMIN_USER_ENV,
+YarnConfiguration.DEFAULT_NM_ADMIN_USER_ENV)
+);
}
static void writeLaunchEnv(OutputStream out,

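The ordering in sanitizeEnv is the contract: whitelisted variables are defaulted from the NodeManager's environment only when the container did not set them, and admin-env entries are applied afterwards regardless. A condensed sketch follows; putEnvIfAbsent's body is not shown in this diff, so the version here is an assumption based on its name and call sites:

import java.util.HashMap;
import java.util.Map;

public class SanitizeEnvSketch {
  // Assumed behavior: copy the NodeManager's value only when the container
  // did not provide one, mirroring how sanitizeEnv uses it above.
  static void putEnvIfAbsent(Map<String, String> environment, String variable) {
    if (environment.get(variable) == null && System.getenv(variable) != null) {
      environment.put(variable, System.getenv(variable));
    }
  }

  public static void main(String[] args) {
    Map<String, String> env = new HashMap<String, String>();
    env.put("JAVA_HOME", "/opt/container-jdk"); // set by the container itself

    // Whitelist pass: JAVA_HOME survives because the container set it; the
    // others are filled in from the NodeManager's environment if present.
    String whitelist = "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME";
    for (String v : whitelist.split(",")) {
      putEnvIfAbsent(env, v.trim());
    }

    // Admin pass (applied last): Apps.setEnvFromInputString(env,
    // "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX") would then pick up the
    // NodeManager's MALLOC_ARENA_MAX when the container did not set one.
  }
}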

@@ -20,21 +20,64 @@
import static org.junit.Assert.*;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintWriter;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin;
import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.junit.Before;
import org.junit.Test;
import junit.framework.Assert;
-public class TestContainerLaunch {
+public class TestContainerLaunch extends BaseContainerManagerTest {
public TestContainerLaunch() throws UnsupportedFileSystemException {
super();
}
@Before
public void setup() throws IOException {
conf.setClass(
YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR,
LinuxResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
super.setup();
}
@Test
public void testSpecialCharSymlinks() throws IOException {
@@ -96,5 +139,144 @@ public void testSpecialCharSymlinks() throws IOException {
}
}
}
// this is a dirty hack - but should be ok for a unittest.
public static void setNewEnvironmentHack(Map<String, String> newenv) throws Exception {
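// NOTE: relies on JDK internals (java.util.Collections$UnmodifiableMap's
// private "m" field) to mutate the map behind System.getenv(); this may
// break on other JVMs or future JDK releases.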
Class[] classes = Collections.class.getDeclaredClasses();
Map<String, String> env = System.getenv();
for (Class cl : classes) {
if ("java.util.Collections$UnmodifiableMap".equals(cl.getName())) {
Field field = cl.getDeclaredField("m");
field.setAccessible(true);
Object obj = field.get(env);
Map<String, String> map = (Map<String, String>) obj;
map.clear();
map.putAll(newenv);
}
}
}
/**
* See if environment variable is forwarded using sanitizeEnv.
* @throws Exception
*/
@Test
public void testContainerEnvVariables() throws Exception {
int exitCode = 0;
containerManager.start();
Map<String, String> envWithDummy = new HashMap<String, String>();
envWithDummy.putAll(System.getenv());
envWithDummy.put(Environment.MALLOC_ARENA_MAX.name(), "99");
setNewEnvironmentHack(envWithDummy);
String malloc = System.getenv(Environment.MALLOC_ARENA_MAX.name());
File scriptFile = new File(tmpDir, "scriptFile.sh");
PrintWriter fileWriter = new PrintWriter(scriptFile);
File processStartFile =
new File(tmpDir, "env_vars.txt").getAbsoluteFile();
fileWriter.write("\numask 0"); // So that start file is readable by the test
fileWriter.write("\necho $" + Environment.MALLOC_ARENA_MAX.name() + " > " + processStartFile);
fileWriter.write("\necho $$ >> " + processStartFile);
fileWriter.write("\nexec sleep 100");
fileWriter.close();
assert(malloc != null && !"".equals(malloc));
ContainerLaunchContext containerLaunchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
// ////// Construct the Container-id
ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
appId.setClusterTimestamp(0);
appId.setId(0);
ApplicationAttemptId appAttemptId =
recordFactory.newRecordInstance(ApplicationAttemptId.class);
appAttemptId.setApplicationId(appId);
appAttemptId.setAttemptId(1);
ContainerId cId =
recordFactory.newRecordInstance(ContainerId.class);
cId.setApplicationAttemptId(appAttemptId);
containerLaunchContext.setContainerId(cId);
containerLaunchContext.setUser(user);
// upload the script file so that the container can run it
URL resource_alpha =
ConverterUtils.getYarnUrlFromPath(localFS
.makeQualified(new Path(scriptFile.getAbsolutePath())));
LocalResource rsrc_alpha =
recordFactory.newRecordInstance(LocalResource.class);
rsrc_alpha.setResource(resource_alpha);
rsrc_alpha.setSize(-1);
rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
rsrc_alpha.setType(LocalResourceType.FILE);
rsrc_alpha.setTimestamp(scriptFile.lastModified());
String destinationFile = "dest_file";
Map<String, LocalResource> localResources =
new HashMap<String, LocalResource>();
localResources.put(destinationFile, rsrc_alpha);
containerLaunchContext.setLocalResources(localResources);
// set up the rest of the container
containerLaunchContext.setUser(containerLaunchContext.getUser());
List<String> commands = new ArrayList<String>();
commands.add("/bin/bash");
commands.add(scriptFile.getAbsolutePath());
containerLaunchContext.setCommands(commands);
containerLaunchContext.setResource(recordFactory
.newRecordInstance(Resource.class));
containerLaunchContext.getResource().setMemory(100 * 1024 * 1024);
StartContainerRequest startRequest = recordFactory.newRecordInstance(StartContainerRequest.class);
startRequest.setContainerLaunchContext(containerLaunchContext);
containerManager.startContainer(startRequest);
int timeoutSecs = 0;
while (!processStartFile.exists() && timeoutSecs++ < 20) {
Thread.sleep(1000);
LOG.info("Waiting for process start-file to be created");
}
Assert.assertTrue("ProcessStartFile doesn't exist!",
processStartFile.exists());
// Now verify the contents of the file
BufferedReader reader =
new BufferedReader(new FileReader(processStartFile));
Assert.assertEquals(malloc, reader.readLine());
// Get the pid of the process
String pid = reader.readLine().trim();
// No more lines
Assert.assertEquals(null, reader.readLine());
// Now test the stop functionality.
// Assert that the process is alive
Assert.assertTrue("Process is not alive!",
exec.signalContainer(user,
pid, Signal.NULL));
// Once more
Assert.assertTrue("Process is not alive!",
exec.signalContainer(user,
pid, Signal.NULL));
StopContainerRequest stopRequest = recordFactory.newRecordInstance(StopContainerRequest.class);
stopRequest.setContainerId(cId);
containerManager.stopContainer(stopRequest);
BaseContainerManagerTest.waitForContainerState(containerManager, cId,
ContainerState.COMPLETE);
GetContainerStatusRequest gcsRequest =
recordFactory.newRecordInstance(GetContainerStatusRequest.class);
gcsRequest.setContainerId(cId);
ContainerStatus containerStatus =
containerManager.getContainerStatus(gcsRequest).getStatus();
Assert.assertEquals(ExitCode.KILLED.getExitCode(),
containerStatus.getExitStatus());
// Assert that the process is not alive anymore
Assert.assertFalse("Process is still alive!",
exec.signalContainer(user,
pid, Signal.NULL));
}
}