HADOOP-11393. Revert HADOOP_PREFIX, go back to HADOOP_HOME (aw)

Allen Wittenauer 2016-03-24 08:47:00 -07:00
parent 0064cba169
commit 0a74610d1c
67 changed files with 211 additions and 208 deletions

View File

@ -47,8 +47,8 @@ function hadoop_usage
# This script runs the hadoop core commands.
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
@ -84,9 +84,9 @@ case ${COMMAND} in
# shellcheck disable=SC2086
exec "${HADOOP_HDFS_HOME}/bin/hdfs" \
--config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
elif [[ -f "${HADOOP_PREFIX}/bin/hdfs" ]]; then
elif [[ -f "${HADOOP_HOME}/bin/hdfs" ]]; then
# shellcheck disable=SC2086
exec "${HADOOP_PREFIX}/bin/hdfs" \
exec "${HADOOP_HOME}/bin/hdfs" \
--config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
else
hadoop_error "HADOOP_HDFS_HOME not found!"
@ -104,8 +104,8 @@ case ${COMMAND} in
if [[ -f "${HADOOP_MAPRED_HOME}/bin/mapred" ]]; then
exec "${HADOOP_MAPRED_HOME}/bin/mapred" \
--config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
elif [[ -f "${HADOOP_PREFIX}/bin/mapred" ]]; then
exec "${HADOOP_PREFIX}/bin/mapred" \
elif [[ -f "${HADOOP_HOME}/bin/mapred" ]]; then
exec "${HADOOP_HOME}/bin/mapred" \
--config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
else
hadoop_error "HADOOP_MAPRED_HOME not found!"

View File

@ -63,6 +63,8 @@ else
exit 1
fi
hadoop_deprecate_envvar HADOOP_PREFIX HADOOP_HOME
# allow overrides of the above and pre-defines of the below
if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
[[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh" ]]; then
@ -128,8 +130,8 @@ fi
hadoop_shellprofiles_init
# get the native libs in there pretty quick
hadoop_add_javalibpath "${HADOOP_PREFIX}/build/native"
hadoop_add_javalibpath "${HADOOP_PREFIX}/${HADOOP_COMMON_LIB_NATIVE_DIR}"
hadoop_add_javalibpath "${HADOOP_HOME}/build/native"
hadoop_add_javalibpath "${HADOOP_HOME}/${HADOOP_COMMON_LIB_NATIVE_DIR}"
hadoop_shellprofiles_nativelib
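The `hadoop_deprecate_envvar HADOOP_PREFIX HADOOP_HOME` line above is what keeps existing `HADOOP_PREFIX`-only environments working after the rename. As a rough illustration, a minimal sketch of such a shim (not the actual hadoop-functions.sh implementation; the conflict handling here is an assumption) might look like:

```bash
#!/usr/bin/env bash
# Sketch of a deprecation shim: if the old variable is set, warn on stderr
# and copy its value into the new variable.
function deprecate_envvar_sketch
{
  local oldvar=$1
  local newvar=$2
  if [[ -n "${!oldvar}" ]]; then
    echo "WARNING: ${oldvar} has been replaced by ${newvar}. Using value of ${oldvar}." >&2
    eval "${newvar}=\"\${${oldvar}}\""
    export "${newvar}"
  fi
}

# A user who still only exports HADOOP_PREFIX keeps working:
HADOOP_PREFIX=/opt/hadoop
deprecate_envvar_sketch HADOOP_PREFIX HADOOP_HOME
echo "${HADOOP_HOME}"   # prints /opt/hadoop
```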

View File

@ -21,8 +21,8 @@ function hadoop_usage
}
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
@ -47,7 +47,7 @@ daemonmode=$1
shift
if [[ -z "${HADOOP_HDFS_HOME}" ]]; then
hdfsscript="${HADOOP_PREFIX}/bin/hdfs"
hdfsscript="${HADOOP_HOME}/bin/hdfs"
else
hdfsscript="${HADOOP_HDFS_HOME}/bin/hdfs"
fi

View File

@ -27,8 +27,8 @@ this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
@ -51,7 +51,7 @@ daemonmode=$1
shift
if [[ -z "${HADOOP_HDFS_HOME}" ]]; then
hdfsscript="${HADOOP_PREFIX}/bin/hdfs"
hdfsscript="${HADOOP_HOME}/bin/hdfs"
else
hdfsscript="${HADOOP_HDFS_HOME}/bin/hdfs"
fi

View File

@ -278,7 +278,7 @@ function hadoop_bootstrap
# By now, HADOOP_LIBEXEC_DIR should have been defined upstream
# We can piggyback off of that to figure out where the default
# HADOOP_HOME should be. This allows us to run without
# HADOOP_PREFIX ever being defined by a human! As a consequence
# HADOOP_HOME ever being defined by a human! As a consequence
# HADOOP_LIBEXEC_DIR now becomes perhaps the single most powerful
# env var within Hadoop.
if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
@ -286,8 +286,8 @@ function hadoop_bootstrap
exit 1
fi
HADOOP_DEFAULT_PREFIX=$(cd -P -- "${HADOOP_LIBEXEC_DIR}/.." >/dev/null && pwd -P)
HADOOP_PREFIX=${HADOOP_PREFIX:-$HADOOP_DEFAULT_PREFIX}
export HADOOP_PREFIX
HADOOP_HOME=${HADOOP_HOME:-$HADOOP_DEFAULT_PREFIX}
export HADOOP_HOME
#
# short-cuts. vendors may redefine these as well, preferably
@ -302,7 +302,7 @@ function hadoop_bootstrap
YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_PREFIX}}
HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}}
HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}
@ -326,12 +326,12 @@ function hadoop_find_confdir
# An attempt at compatibility with some Hadoop 1.x
# installs.
if [[ -e "${HADOOP_PREFIX}/conf/hadoop-env.sh" ]]; then
if [[ -e "${HADOOP_HOME}/conf/hadoop-env.sh" ]]; then
conf_dir="conf"
else
conf_dir="etc/hadoop"
fi
export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_PREFIX}/${conf_dir}}"
export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_HOME}/${conf_dir}}"
hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}"
}
@ -524,8 +524,8 @@ function hadoop_basic_init
hadoop_debug "Initialize CLASSPATH"
if [[ -z "${HADOOP_COMMON_HOME}" ]] &&
[[ -d "${HADOOP_PREFIX}/${HADOOP_COMMON_DIR}" ]]; then
export HADOOP_COMMON_HOME="${HADOOP_PREFIX}"
[[ -d "${HADOOP_HOME}/${HADOOP_COMMON_DIR}" ]]; then
export HADOOP_COMMON_HOME="${HADOOP_HOME}"
fi
# default policy file for service-level authorization
@ -533,20 +533,20 @@ function hadoop_basic_init
# define HADOOP_HDFS_HOME
if [[ -z "${HADOOP_HDFS_HOME}" ]] &&
[[ -d "${HADOOP_PREFIX}/${HDFS_DIR}" ]]; then
export HADOOP_HDFS_HOME="${HADOOP_PREFIX}"
[[ -d "${HADOOP_HOME}/${HDFS_DIR}" ]]; then
export HADOOP_HDFS_HOME="${HADOOP_HOME}"
fi
# define HADOOP_YARN_HOME
if [[ -z "${HADOOP_YARN_HOME}" ]] &&
[[ -d "${HADOOP_PREFIX}/${YARN_DIR}" ]]; then
export HADOOP_YARN_HOME="${HADOOP_PREFIX}"
[[ -d "${HADOOP_HOME}/${YARN_DIR}" ]]; then
export HADOOP_YARN_HOME="${HADOOP_HOME}"
fi
# define HADOOP_MAPRED_HOME
if [[ -z "${HADOOP_MAPRED_HOME}" ]] &&
[[ -d "${HADOOP_PREFIX}/${MAPRED_DIR}" ]]; then
export HADOOP_MAPRED_HOME="${HADOOP_PREFIX}"
[[ -d "${HADOOP_HOME}/${MAPRED_DIR}" ]]; then
export HADOOP_MAPRED_HOME="${HADOOP_HOME}"
fi
if [[ ! -d "${HADOOP_COMMON_HOME}" ]]; then
@ -573,7 +573,7 @@ function hadoop_basic_init
# let's define it as 'hadoop'
HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-hadoop}
HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_PREFIX}/logs"}
HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_HOME}/logs"}
HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
HADOOP_LOGLEVEL=${HADOOP_LOGLEVEL:-INFO}
HADOOP_NICENESS=${HADOOP_NICENESS:-0}
@ -1219,7 +1219,6 @@ function hadoop_finalize_hadoop_opts
hadoop_translate_cygwin_path HADOOP_LOG_DIR
hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR}"
hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE}"
HADOOP_HOME=${HADOOP_PREFIX}
hadoop_translate_cygwin_path HADOOP_HOME
export HADOOP_HOME
hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_HOME}"
@ -1252,11 +1251,11 @@ function hadoop_finalize_catalina_opts
local prefix=${HADOOP_CATALINA_PREFIX}
hadoop_add_param CATALINA_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_PREFIX}"
hadoop_add_param CATALINA_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_HOME}"
if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
hadoop_add_param CATALINA_OPTS java.library.path "-Djava.library.path=${JAVA_LIBRARY_PATH}"
fi
hadoop_add_param CATALINA_OPTS "${prefix}.home.dir" "-D${prefix}.home.dir=${HADOOP_PREFIX}"
hadoop_add_param CATALINA_OPTS "${prefix}.home.dir" "-D${prefix}.home.dir=${HADOOP_HOME}"
hadoop_add_param CATALINA_OPTS "${prefix}.config.dir" "-D${prefix}.config.dir=${HADOOP_CATALINA_CONFIG}"
hadoop_add_param CATALINA_OPTS "${prefix}.log.dir" "-D${prefix}.log.dir=${HADOOP_CATALINA_LOG}"
hadoop_add_param CATALINA_OPTS "${prefix}.temp.dir" "-D${prefix}.temp.dir=${HADOOP_CATALINA_TEMP}"
@ -1282,7 +1281,7 @@ function hadoop_finalize
hadoop_finalize_hadoop_heap
hadoop_finalize_hadoop_opts
hadoop_translate_cygwin_path HADOOP_PREFIX
hadoop_translate_cygwin_path HADOOP_HOME
hadoop_translate_cygwin_path HADOOP_CONF_DIR
hadoop_translate_cygwin_path HADOOP_COMMON_HOME
hadoop_translate_cygwin_path HADOOP_HDFS_HOME
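The net effect of the `hadoop_bootstrap` change is that `HADOOP_HOME` still never has to be exported by hand: the launcher scripts pass `HADOOP_LIBEXEC_DIR`, and the bootstrap derives `HADOOP_HOME` from it, with an explicit export simply taking precedence. A hedged usage sketch (the install path is purely illustrative):

```bash
# Illustrative only: /opt/hadoop-3.0.0 is a hypothetical install location.
unset HADOOP_HOME HADOOP_PREFIX

# bin/hadoop finds its own libexec, sources hadoop-config.sh, and
# hadoop_bootstrap sets HADOOP_HOME to <libexec>/.. automatically.
/opt/hadoop-3.0.0/bin/hadoop version

# Exporting HADOOP_HOME (or, while the deprecation shim lasts, HADOOP_PREFIX)
# overrides that derived default.
export HADOOP_HOME=/opt/hadoop-3.0.0
/opt/hadoop-3.0.0/bin/hadoop classpath
```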

View File

@ -26,8 +26,8 @@
##
## If you move HADOOP_LIBEXEC_DIR from some location that
## isn't bin/../libexec, you MUST define either HADOOP_LIBEXEC_DIR
## or have HADOOP_PREFIX/libexec/hadoop-config.sh and
## HADOOP_PREFIX/libexec/hadoop-layout.sh (this file) exist.
## or have HADOOP_HOME/libexec/hadoop-config.sh and
## HADOOP_HOME/libexec/hadoop-layout.sh (this file) exist.
## NOTE:
##
@ -44,7 +44,7 @@
####
# Default location for the common/core Hadoop project
# export HADOOP_COMMON_HOME=${HADOOP_PREFIX}
# export HADOOP_COMMON_HOME=${HADOOP_HOME}
# Relative locations where components under HADOOP_COMMON_HOME are located
# export HADOOP_COMMON_DIR="share/hadoop/common"
@ -56,7 +56,7 @@
####
# Default location for the HDFS subproject
# export HADOOP_HDFS_HOME=${HADOOP_PREFIX}
# export HADOOP_HDFS_HOME=${HADOOP_HOME}
# Relative locations where components under HADOOP_HDFS_HOME are located
# export HDFS_DIR="share/hadoop/hdfs"
@ -67,7 +67,7 @@
####
# Default location for the YARN subproject
# export HADOOP_YARN_HOME=${HADOOP_PREFIX}
# export HADOOP_YARN_HOME=${HADOOP_HOME}
# Relative locations where components under HADOOP_YARN_HOME are located
# export YARN_DIR="share/hadoop/yarn"
@ -78,7 +78,7 @@
####
# Default location for the MapReduce subproject
# export HADOOP_MAPRED_HOME=${HADOOP_PREFIX}
# export HADOOP_MAPRED_HOME=${HADOOP_HOME}
# Relative locations where components under HADOOP_MAPRED_HOME are located
# export MAPRED_DIR="share/hadoop/mapreduce"
@ -92,6 +92,6 @@
# note that this path only gets added for certain commands and not
# part of the general classpath unless HADOOP_OPTIONAL_TOOLS is used
# to configure them in
# export HADOOP_TOOLS_HOME=${HADOOP_PREFIX}
# export HADOOP_TOOLS_HOME=${HADOOP_HOME}
# export HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
# export HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}

View File

@ -22,7 +22,7 @@
#
# HADOOP_SLAVES File naming remote hosts.
# Default is ${HADOOP_CONF_DIR}/slaves.
# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
# HADOOP_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
# HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
##
@ -33,8 +33,8 @@ function hadoop_usage
}
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

View File

@ -21,8 +21,8 @@ exit 1
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

View File

@ -22,8 +22,8 @@ echo "This script is deprecated. Use stop-dfs.sh and stop-yarn.sh instead."
exit 1
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

View File

@ -55,14 +55,14 @@
# Location of Hadoop. By default, Hadoop will attempt to determine
# this location based upon its execution path.
# export HADOOP_PREFIX=
# export HADOOP_HOME=
# Location of Hadoop's configuration information. i.e., where this
# file is probably living. Many sites will also set this in the
# same location where JAVA_HOME is defined. If this is not defined
# Hadoop will attempt to locate it based upon its execution
# path.
# export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop
# export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
# The maximum amount of heap to use (Java -Xmx). If no unit
# is provided, it will be converted to MB. Daemons will
@ -186,10 +186,10 @@ esac
# non-secure)
#
# Where (primarily) daemon log files are stored. # $HADOOP_PREFIX/logs
# by default.
# Where (primarily) daemon log files are stored.
# ${HADOOP_HOME}/logs by default.
# Java property: hadoop.log.dir
# export HADOOP_LOG_DIR=${HADOOP_PREFIX}/logs
# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
# A string representing this instance of hadoop. $USER by default.
# This is used in writing log and pid files, so keep that in mind!
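Tying those settings together, a purely illustrative hadoop-env.sh fragment using the renamed variable could be (all paths are examples, not shipped defaults):

```bash
# Illustrative hadoop-env.sh fragment; adjust every path for your install.
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk
export HADOOP_HOME=/opt/hadoop
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HADOOP_LOG_DIR=/var/log/hadoop
```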

View File

@ -32,7 +32,7 @@
@InterfaceAudience.Private
public class TraceUtils {
private static List<ConfigurationPair> EMPTY = Collections.emptyList();
static final String DEFAULT_HADOOP_PREFIX = "hadoop.htrace.";
static final String DEFAULT_HADOOP_TRACE_PREFIX = "hadoop.htrace.";
public static HTraceConfiguration wrapHadoopConf(final String prefix,
final Configuration conf) {
@ -52,7 +52,7 @@ public String get(String key) {
if (ret != null) {
return ret;
}
return getInternal(DEFAULT_HADOOP_PREFIX + key);
return getInternal(DEFAULT_HADOOP_TRACE_PREFIX + key);
}
@Override

View File

@ -86,10 +86,10 @@ Other useful configuration parameters that you can customize include:
In most cases, you should specify the `HADOOP_PID_DIR` and `HADOOP_LOG_DIR` directories such that they can only be written to by the users that are going to run the hadoop daemons. Otherwise there is the potential for a symlink attack.
It is also traditional to configure `HADOOP_PREFIX` in the system-wide shell environment configuration. For example, a simple script inside `/etc/profile.d`:
It is also traditional to configure `HADOOP_HOME` in the system-wide shell environment configuration. For example, a simple script inside `/etc/profile.d`:
HADOOP_PREFIX=/path/to/hadoop
export HADOOP_PREFIX
HADOOP_HOME=/path/to/hadoop
export HADOOP_HOME
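In practice the same /etc/profile.d script usually puts the Hadoop launchers on the PATH as well; a hedged variant (the install path is illustrative):

```bash
# Illustrative /etc/profile.d/hadoop.sh
HADOOP_HOME=/path/to/hadoop
export HADOOP_HOME
PATH=${PATH}:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin
export PATH
```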
| Daemon | Environment Variable |
|:---- |:---- |
@ -243,73 +243,73 @@ To start a Hadoop cluster you will need to start both the HDFS and YARN cluster.
The first time you bring up HDFS, it must be formatted. Format a new distributed filesystem as *hdfs*:
[hdfs]$ $HADOOP_PREFIX/bin/hdfs namenode -format <cluster_name>
[hdfs]$ $HADOOP_HOME/bin/hdfs namenode -format <cluster_name>
Start the HDFS NameNode with the following command on the designated node as *hdfs*:
[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start namenode
[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start namenode
Start a HDFS DataNode with the following command on each designated node as *hdfs*:
[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start datanode
[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start datanode
If `etc/hadoop/slaves` and ssh trusted access is configured (see [Single Node Setup](./SingleCluster.html)), all of the HDFS processes can be started with a utility script. As *hdfs*:
[hdfs]$ $HADOOP_PREFIX/sbin/start-dfs.sh
[hdfs]$ $HADOOP_HOME/sbin/start-dfs.sh
Start the YARN with the following command, run on the designated ResourceManager as *yarn*:
[yarn]$ $HADOOP_PREFIX/bin/yarn --daemon start resourcemanager
[yarn]$ $HADOOP_HOME/bin/yarn --daemon start resourcemanager
Run a script to start a NodeManager on each designated host as *yarn*:
[yarn]$ $HADOOP_PREFIX/bin/yarn --daemon start nodemanager
[yarn]$ $HADOOP_HOME/bin/yarn --daemon start nodemanager
Start a standalone WebAppProxy server. Run on the WebAppProxy server as *yarn*. If multiple servers are used with load balancing it should be run on each of them:
[yarn]$ $HADOOP_PREFIX/bin/yarn --daemon start proxyserver
[yarn]$ $HADOOP_HOME/bin/yarn --daemon start proxyserver
If `etc/hadoop/slaves` and ssh trusted access is configured (see [Single Node Setup](./SingleCluster.html)), all of the YARN processes can be started with a utility script. As *yarn*:
[yarn]$ $HADOOP_PREFIX/sbin/start-yarn.sh
[yarn]$ $HADOOP_HOME/sbin/start-yarn.sh
Start the MapReduce JobHistory Server with the following command, run on the designated server as *mapred*:
[mapred]$ $HADOOP_PREFIX/bin/mapred --daemon start historyserver
[mapred]$ $HADOOP_HOME/bin/mapred --daemon start historyserver
### Hadoop Shutdown
Stop the NameNode with the following command, run on the designated NameNode as *hdfs*:
[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon stop namenode
[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon stop namenode
Run a script to stop a DataNode as *hdfs*:
[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon stop datanode
[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon stop datanode
If `etc/hadoop/slaves` and ssh trusted access is configured (see [Single Node Setup](./SingleCluster.html)), all of the HDFS processes may be stopped with a utility script. As *hdfs*:
[hdfs]$ $HADOOP_PREFIX/sbin/stop-dfs.sh
[hdfs]$ $HADOOP_HOME/sbin/stop-dfs.sh
Stop the ResourceManager with the following command, run on the designated ResourceManager as *yarn*:
[yarn]$ $HADOOP_PREFIX/bin/yarn --daemon stop resourcemanager
[yarn]$ $HADOOP_HOME/bin/yarn --daemon stop resourcemanager
Run a script to stop a NodeManager on a slave as *yarn*:
[yarn]$ $HADOOP_PREFIX/bin/yarn --daemon stop nodemanager
[yarn]$ $HADOOP_HOME/bin/yarn --daemon stop nodemanager
If `etc/hadoop/slaves` and ssh trusted access is configured (see [Single Node Setup](./SingleCluster.html)), all of the YARN processes can be stopped with a utility script. As *yarn*:
[yarn]$ $HADOOP_PREFIX/sbin/stop-yarn.sh
[yarn]$ $HADOOP_HOME/sbin/stop-yarn.sh
Stop the WebAppProxy server. Run on the WebAppProxy server as *yarn*. If multiple servers are used with load balancing it should be run on each of them:
[yarn]$ $HADOOP_PREFIX/bin/yarn stop proxyserver
[yarn]$ $HADOOP_HOME/bin/yarn stop proxyserver
Stop the MapReduce JobHistory Server with the following command, run on the designated server as *mapred*:
[mapred]$ $HADOOP_PREFIX/bin/mapred --daemon stop historyserver
[mapred]$ $HADOOP_HOME/bin/mapred --daemon stop historyserver
Web Interfaces
--------------

View File

@ -39,7 +39,7 @@ All of the shell commands will accept a common set of options. For some commands
| SHELL\_OPTION | Description |
|:---- |:---- |
| `--buildpaths` | Enables developer versions of jars. |
| `--config confdir` | Overwrites the default Configuration directory. Default is `$HADOOP_PREFIX/etc/hadoop`. |
| `--config confdir` | Overwrites the default Configuration directory. Default is `$HADOOP_HOME/etc/hadoop`. |
| `--daemon mode` | If the command supports daemonization (e.g., `hdfs namenode`), execute in the appropriate mode. Supported modes are `start` to start the process in daemon mode, `stop` to stop the process, and `status` to determine the active status of the process. `status` will return an [LSB-compliant](http://refspecs.linuxbase.org/LSB_3.0.0/LSB-generic/LSB-generic/iniscrptact.html) result code. If no option is provided, commands that support daemonization will run in the foreground. For commands that do not support daemonization, this option is ignored. |
| `--debug` | Enables shell level configuration debugging information |
| `--help` | Shell script usage information. |

View File

@ -83,7 +83,7 @@ Apache Hadoop allows for third parties to easily add new features through a vari
Core to this functionality is the concept of a shell profile. Shell profiles are shell snippets that can do things such as add jars to the classpath, configure Java system properties and more.
Shell profiles may be installed in either `${HADOOP_CONF_DIR}/shellprofile.d` or `${HADOOP_PREFIX}/libexec/shellprofile.d`. Shell profiles in the `libexec` directory are part of the base installation and cannot be overridden by the user. Shell profiles in the configuration directory may be ignored if the end user changes the configuration directory at runtime.
Shell profiles may be installed in either `${HADOOP_CONF_DIR}/shellprofile.d` or `${HADOOP_HOME}/libexec/shellprofile.d`. Shell profiles in the `libexec` directory are part of the base installation and cannot be overridden by the user. Shell profiles in the configuration directory may be ignored if the end user changes the configuration directory at runtime.
An example of a shell profile is in the libexec directory.
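To make that concrete, a hedged sketch of a shell profile follows; the profile name, jar path, and the `_<name>_hadoop_classpath` hook convention mirror the bundled example, but treat them as assumptions rather than a definitive API:

```bash
# Hypothetical ${HADOOP_CONF_DIR}/shellprofile.d/myextra.sh
# Registers a profile whose classpath hook runs during command initialization.
hadoop_add_profile myextra

function _myextra_hadoop_classpath
{
  # hadoop_add_classpath is provided by hadoop-functions.sh;
  # the jar location below is made up for the example.
  hadoop_add_classpath "${HADOOP_HOME}/extras/myextra/myextra.jar"
}
```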

View File

@ -27,6 +27,7 @@ setup() {
# shellcheck disable=SC2034
HADOOP_SHELL_SCRIPT_DEBUG=true
unset HADOOP_CONF_DIR
# we unset both of these for bw compat
unset HADOOP_HOME
unset HADOOP_PREFIX

View File

@ -45,7 +45,7 @@ basicinitsetup () {
unset ${j}
done
HADOOP_PREFIX=${TMP}
HADOOP_HOME=${TMP}
}
check_var_values () {

View File

@ -22,7 +22,7 @@ load hadoop-functions_test_helper
}
@test "hadoop_bootstrap (libexec)" {
unset HADOOP_PREFIX
unset HADOOP_HOME
unset HADOOP_COMMON_DIR
unset HADOOP_COMMON_LIB_JARS_DIR
unset HDFS_DIR
@ -39,7 +39,7 @@ load hadoop-functions_test_helper
hadoop_bootstrap
# all of these should be set
[ -n ${HADOOP_PREFIX} ]
[ -n ${HADOOP_HOME} ]
[ -n ${HADOOP_COMMON_DIR} ]
[ -n ${HADOOP_COMMON_LIB_JARS_DIR} ]
[ -n ${HDFS_DIR} ]

View File

@ -16,10 +16,10 @@
load hadoop-functions_test_helper
create_fake_dirs () {
HADOOP_PREFIX=${TMP}
HADOOP_HOME=${TMP}
for j in conf etc/hadoop; do
mkdir -p "${HADOOP_PREFIX}/${j}"
echo "unittest=${j}" > "${HADOOP_PREFIX}/${j}/hadoop-env.sh"
mkdir -p "${HADOOP_HOME}/${j}"
echo "unittest=${j}" > "${HADOOP_HOME}/${j}/hadoop-env.sh"
done
}
@ -32,27 +32,27 @@ create_fake_dirs () {
@test "hadoop_find_confdir (bw compat: conf)" {
create_fake_dirs
hadoop_find_confdir
echo ">${HADOOP_CONF_DIR}< >${HADOOP_PREFIX}/conf<"
[ "${HADOOP_CONF_DIR}" = ${HADOOP_PREFIX}/conf ]
echo ">${HADOOP_CONF_DIR}< >${HADOOP_HOME}/conf<"
[ "${HADOOP_CONF_DIR}" = ${HADOOP_HOME}/conf ]
}
@test "hadoop_find_confdir (etc/hadoop)" {
create_fake_dirs
rm -rf "${HADOOP_PREFIX}/conf"
rm -rf "${HADOOP_HOME}/conf"
hadoop_find_confdir
[ "${HADOOP_CONF_DIR}" = ${HADOOP_PREFIX}/etc/hadoop ]
[ "${HADOOP_CONF_DIR}" = ${HADOOP_HOME}/etc/hadoop ]
}
@test "hadoop_verify_confdir (negative) " {
create_fake_dirs
HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
HADOOP_CONF_DIR=${HADOOP_HOME}/conf
run hadoop_verify_confdir
[ -n "${output}" ]
}
@test "hadoop_verify_confdir (positive) " {
create_fake_dirs
HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
HADOOP_CONF_DIR=${HADOOP_HOME}/conf
touch "${HADOOP_CONF_DIR}/log4j.properties"
run hadoop_verify_confdir
[ -z "${output}" ]
@ -60,7 +60,7 @@ create_fake_dirs () {
@test "hadoop_exec_hadoopenv (positive) " {
create_fake_dirs
HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
HADOOP_CONF_DIR=${HADOOP_HOME}/conf
hadoop_exec_hadoopenv
[ -n "${HADOOP_ENV_PROCESSED}" ]
[ "${unittest}" = conf ]
@ -68,7 +68,7 @@ create_fake_dirs () {
@test "hadoop_exec_hadoopenv (negative) " {
create_fake_dirs
HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
HADOOP_CONF_DIR=${HADOOP_HOME}/conf
HADOOP_ENV_PROCESSED=true
hadoop_exec_hadoopenv
[ -z "${unittest}" ]
@ -76,7 +76,7 @@ create_fake_dirs () {
@test "hadoop_exec_userfuncs" {
create_fake_dirs
HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
HADOOP_CONF_DIR=${HADOOP_HOME}/conf
echo "unittest=userfunc" > "${HADOOP_CONF_DIR}/hadoop-user-functions.sh"
hadoop_exec_userfuncs
[ "${unittest}" = "userfunc" ]

View File

@ -100,7 +100,7 @@ load hadoop-functions_test_helper
hadoop_finalize_hadoop_heap () { true; }
hadoop_finalize_hadoop_opts () { true; }
hadoop_translate_cygwin_path () {
if [ $1 = HADOOP_PREFIX ]; then
if [ $1 = HADOOP_HOME ]; then
testvar=prefix;
fi
}

View File

@ -20,7 +20,7 @@
# KMS temporary directory
#
# export KMS_TEMP=${HADOOP_PREFIX}/temp
# export KMS_TEMP=${HADOOP_HOME}/temp
# The HTTP port used by KMS
#
@ -59,7 +59,7 @@
#
# Location of tomcat
#
# export KMS_CATALINA_HOME=${HADOOP_PREFIX}/share/hadoop/kms/tomcat
# export KMS_CATALINA_HOME=${HADOOP_HOME}/share/hadoop/kms/tomcat
# Java System properties for KMS should be specified in this variable.
# The java.library.path and hadoop.home.dir properties are automatically

View File

@ -28,7 +28,7 @@ function hadoop_subproject_init
export HADOOP_CATALINA_PREFIX=kms
export HADOOP_CATALINA_TEMP="${KMS_TEMP:-${HADOOP_PREFIX}/temp}"
export HADOOP_CATALINA_TEMP="${KMS_TEMP:-${HADOOP_HOME}/temp}"
hadoop_deprecate_envvar KMS_CONFIG HADOOP_CONF_DIR
@ -49,7 +49,7 @@ function hadoop_subproject_init
# shellcheck disable=SC2086
export KMS_SSL_TRUSTSTORE_PASS=${KMS_SSL_TRUSTSTORE_PASS:-"$(echo ${CATALINA_OPTS} | grep -o 'trustStorePassword=[^ ]*' | cut -f2 -d= )"}
export CATALINA_BASE="${CATALINA_BASE:-${HADOOP_PREFIX}/share/hadoop/kms/tomcat}"
export CATALINA_BASE="${CATALINA_BASE:-${HADOOP_HOME}/share/hadoop/kms/tomcat}"
export HADOOP_CATALINA_HOME="${KMS_CATALINA_HOME:-${CATALINA_BASE}}"
export CATALINA_OUT="${CATALINA_OUT:-${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-kms-${HOSTNAME}.out}"
@ -69,8 +69,8 @@ if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
elif [[ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]]; then
. "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
elif [[ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]]; then
. "${HADOOP_HOME}/libexec/hadoop-config.sh"
else
echo "ERROR: Hadoop common not found." 2>&1
exit 1

View File

@ -30,8 +30,8 @@ function hadoop_usage
}
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"

View File

@ -20,7 +20,7 @@
# HTTPFS temporary directory
#
# export HTTPFS_TEMP=${HADOOP_PREFIX}/temp
# export HTTPFS_TEMP=${HADOOP_HOME}/temp
# The HTTP port used by HTTPFS
#
@ -53,7 +53,7 @@
#
# Location of tomcat
#
# export HTTPFS_CATALINA_HOME=${HADOOP_PREFIX}/share/hadoop/httpfs/tomcat
# export HTTPFS_CATALINA_HOME=${HADOOP_HOME}/share/hadoop/httpfs/tomcat
# Java System properties for HTTPFS should be specified in this variable.
# The java.library.path and hadoop.home.dir properties are automatically

View File

@ -28,7 +28,7 @@ function hadoop_subproject_init
export HADOOP_CATALINA_PREFIX=httpfs
export HADOOP_CATALINA_TEMP="${HTTPFS_TEMP:-${HADOOP_PREFIX}/temp}"
export HADOOP_CATALINA_TEMP="${HTTPFS_TEMP:-${HADOOP_HOME}/temp}"
hadoop_deprecate_envvar HTTPFS_CONFIG HADOOP_CONF_DIR
@ -47,7 +47,7 @@ function hadoop_subproject_init
export HADOOP_CATALINA_SSL_KEYSTORE_FILE="${HTTPFS_SSL_KEYSTORE_FILE:-${HOME}/.keystore}"
export CATALINA_BASE="${CATALINA_BASE:-${HADOOP_PREFIX}/share/hadoop/httpfs/tomcat}"
export CATALINA_BASE="${CATALINA_BASE:-${HADOOP_HOME}/share/hadoop/httpfs/tomcat}"
export HADOOP_CATALINA_HOME="${HTTPFS_CATALINA_HOME:-${CATALINA_BASE}}"
export CATALINA_OUT="${CATALINA_OUT:-${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-httpfs-${HOSTNAME}.out}"
@ -67,8 +67,8 @@ if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
elif [[ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]]; then
. "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
elif [[ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]]; then
. "${HADOOP_HOME}/libexec/hadoop-config.sh"
else
echo "ERROR: Hadoop common not found." 2>&1
exit 1

View File

@ -30,8 +30,8 @@ function hadoop_usage
}
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"

View File

@ -35,9 +35,9 @@ Requirements
BUILDING
fuse-dfs executable can be built by setting `require.fuse` option to true using Maven. For example:
in HADOOP_PREFIX: `mvn package -Pnative -Drequire.fuse=true -DskipTests -Dmaven.javadoc.skip=true`
in HADOOP_HOME: `mvn package -Pnative -Drequire.fuse=true -DskipTests -Dmaven.javadoc.skip=true`
The executable `fuse_dfs` will be located at HADOOP_PREFIX/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/
The executable `fuse_dfs` will be located at HADOOP_HOME/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/
Common build problems include not finding the libjvm.so in JAVA_HOME/jre/lib/OS_ARCH/server or not finding fuse in FUSE_HOME or /usr/local.
@ -109,7 +109,7 @@ NOTE - you cannot export this with a FUSE module built into the kernel
RECOMMENDATIONS
1. From /bin, `ln -s HADOOP_PREFIX/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/fuse_dfs* .`
1. From /bin, `ln -s HADOOP_HOME/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/fuse_dfs* .`
2. Always start with debug on so you can see if you are missing a classpath or something like that.
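For example, a first debug-mode mount through the wrapper script might look like the sketch below; the NameNode address, port, and mount point are illustrative, and `-d` is the standard FUSE foreground-debug switch:

```bash
# Illustrative: keep fuse_dfs in the foreground with FUSE debug output
# until the classpath and native library paths are known to be correct.
./fuse_dfs_wrapper.sh dfs://namenode.example.com:8020 /mnt/hdfs -d
```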

View File

@ -16,12 +16,12 @@
# limitations under the License.
#
if [ "$HADOOP_PREFIX" = "" ]; then
echo "HADOOP_PREFIX is empty. Set it to the root directory of Hadoop source code"
if [ "$HADOOP_HOME" = "" ]; then
echo "HADOOP_HOME is empty. Set it to the root directory of Hadoop source code"
exit 1
fi
export FUSEDFS_PATH="$HADOOP_PREFIX/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs"
export LIBHDFS_PATH="$HADOOP_PREFIX/hadoop-hdfs-project/hadoop-hdfs-native-client/target/usr/local/lib"
export FUSEDFS_PATH="$HADOOP_HOME/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs"
export LIBHDFS_PATH="$HADOOP_HOME/hadoop-hdfs-project/hadoop-hdfs-native-client/target/usr/local/lib"
if [ "$OS_ARCH" = "" ]; then
export OS_ARCH=amd64
@ -38,12 +38,12 @@ fi
while IFS= read -r -d '' file
do
export CLASSPATH=$CLASSPATH:$file
done < <(find "$HADOOP_PREFIX/hadoop-client" -name "*.jar" -print0)
done < <(find "$HADOOP_HOME/hadoop-client" -name "*.jar" -print0)
while IFS= read -r -d '' file
do
export CLASSPATH=$CLASSPATH:$file
done < <(find "$HADOOP_PREFIX/hhadoop-hdfs-project" -name "*.jar" -print0)
done < <(find "$HADOOP_HOME/hhadoop-hdfs-project" -name "*.jar" -print0)
export CLASSPATH=$HADOOP_CONF_DIR:$CLASSPATH
export PATH=$FUSEDFS_PATH:$PATH

View File

@ -52,8 +52,8 @@ if [ ! -f "$excludeFilenameLocal" ] ; then
exit 1
fi
namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)
excludeFilenameRemote=$("$HADOOP_PREFIX/bin/hdfs" getconf -excludeFile)
namenodes=$("$HADOOP_HOME/bin/hdfs" getconf -namenodes)
excludeFilenameRemote=$("$HADOOP_HOME/bin/hdfs" getconf -excludeFile)
if [ "$excludeFilenameRemote" = '' ] ; then
echo \

View File

@ -60,8 +60,8 @@ function hadoop_usage
}
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"

View File

@ -49,7 +49,7 @@ function hadoop_subproject_init
hadoop_deprecate_envvar HADOOP_HDFS_IDENT_STRING HADOOP_IDENT_STRING
HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_PREFIX}"
HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_HOME}"
# turn on the defaults
export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}
@ -71,8 +71,8 @@ if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
elif [ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]; then
. "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
. "${HADOOP_HOME}/libexec/hadoop-config.sh"
else
echo "ERROR: Hadoop common not found." 2>&1
exit 1

View File

@ -21,8 +21,8 @@
# for dfsadmin to support multiple namenodes.
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

View File

@ -30,8 +30,8 @@ function hadoop_usage
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

View File

@ -29,8 +29,8 @@ this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

View File

@ -26,8 +26,8 @@ this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

View File

@ -28,8 +28,8 @@ function hadoop_usage
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

View File

@ -28,8 +28,8 @@ this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

View File

@ -26,8 +26,8 @@ this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

View File

@ -150,13 +150,13 @@ Here is an example configuration with two Namenodes:
**Step 1**: Format a Namenode using the following command:
[hdfs]$ $HADOOP_PREFIX/bin/hdfs namenode -format [-clusterId <cluster_id>]
[hdfs]$ $HADOOP_HOME/bin/hdfs namenode -format [-clusterId <cluster_id>]
Choose a unique cluster\_id which will not conflict with other clusters in your environment. If a cluster\_id is not provided, then a unique one is auto-generated.
**Step 2**: Format additional Namenodes using the following command:
[hdfs]$ $HADOOP_PREFIX/bin/hdfs namenode -format -clusterId <cluster_id>
[hdfs]$ $HADOOP_HOME/bin/hdfs namenode -format -clusterId <cluster_id>
Note that the cluster\_id in step 2 must be same as that of the cluster\_id in step 1. If they are different, the additional Namenodes will not be part of the federated cluster.
@ -164,7 +164,7 @@ Note that the cluster\_id in step 2 must be same as that of the cluster\_id in s
Older releases only support a single Namenode. Upgrade the cluster to a newer release in order to enable federation. During the upgrade you can provide a ClusterID as follows:
[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start namenode -upgrade -clusterId <cluster_ID>
[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start namenode -upgrade -clusterId <cluster_ID>
If cluster\_id is not provided, it is auto generated.
@ -187,7 +187,7 @@ Perform the following steps:
* Refresh the Datanodes to pick up the newly added Namenode by running
the following command against all the Datanodes in the cluster:
[hdfs]$ $HADOOP_PREFIX/bin/hdfs dfsadmin -refreshNamenodes <datanode_host_name>:<datanode_rpc_port>
[hdfs]$ $HADOOP_HOME/bin/hdfs dfsadmin -refreshNamenodes <datanode_host_name>:<datanode_rpc_port>
Managing the cluster
--------------------
@ -196,11 +196,11 @@ Managing the cluster
To start the cluster run the following command:
[hdfs]$ $HADOOP_PREFIX/sbin/start-dfs.sh
[hdfs]$ $HADOOP_HOME/sbin/start-dfs.sh
To stop the cluster run the following command:
[hdfs]$ $HADOOP_PREFIX/sbin/stop-dfs.sh
[hdfs]$ $HADOOP_HOME/sbin/stop-dfs.sh
These commands can be run from any node where the HDFS configuration is available. The command uses the configuration to determine the Namenodes in the cluster and then starts the Namenode process on those nodes. The Datanodes are started on the nodes specified in the `slaves` file. The script can be used as a reference for building your own scripts to start and stop the cluster.
@ -208,7 +208,7 @@ These commands can be run from any node where the HDFS configuration is availabl
The Balancer has been changed to work with multiple Namenodes. The Balancer can be run using the command:
[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start balancer [-policy <policy>]
[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start balancer [-policy <policy>]
The policy parameter can be any of the following:
@ -227,11 +227,11 @@ Decommissioning is similar to prior releases. The nodes that need to be decomiss
**Step 1**: To distribute an exclude file to all the Namenodes, use the following command:
[hdfs]$ $HADOOP_PREFIX/sbin/distribute-exclude.sh <exclude_file>
[hdfs]$ $HADOOP_HOME/sbin/distribute-exclude.sh <exclude_file>
**Step 2**: Refresh all the Namenodes to pick up the new exclude file:
[hdfs]$ $HADOOP_PREFIX/sbin/refresh-namenodes.sh
[hdfs]$ $HADOOP_HOME/sbin/refresh-namenodes.sh
The above command uses HDFS configuration to determine the configured Namenodes in the cluster and refreshes them to pick up the new exclude file.

View File

@ -475,7 +475,7 @@ There are also several other configuration parameters which may be set to contro
After the configuration keys have been added, the next step is to initialize required state in ZooKeeper. You can do so by running the following command from one of the NameNode hosts.
[hdfs]$ $HADOOP_PREFIX/bin/zkfc -formatZK
[hdfs]$ $HADOOP_HOME/bin/zkfc -formatZK
This will create a znode in ZooKeeper inside of which the automatic failover system stores its data.
@ -487,7 +487,7 @@ Since automatic failover has been enabled in the configuration, the `start-dfs.s
If you manually manage the services on your cluster, you will need to manually start the `zkfc` daemon on each of the machines that runs a NameNode. You can start the daemon by running:
[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start zkfc
[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start zkfc
### Securing access to ZooKeeper

View File

@ -523,7 +523,7 @@ There are also several other configuration parameters which may be set to contro
After the configuration keys have been added, the next step is to initialize required state in ZooKeeper. You can do so by running the following command from one of the NameNode hosts.
[hdfs]$ $HADOOP_PREFIX/bin/hdfs zkfc -formatZK
[hdfs]$ $HADOOP_HOME/bin/hdfs zkfc -formatZK
This will create a znode in ZooKeeper inside of which the automatic failover system stores its data.
@ -535,7 +535,7 @@ Since automatic failover has been enabled in the configuration, the `start-dfs.s
If you manually manage the services on your cluster, you will need to manually start the `zkfc` daemon on each of the machines that runs a NameNode. You can start the daemon by running:
[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start zkfc
[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start zkfc
### Securing access to ZooKeeper

View File

@ -215,7 +215,7 @@ Three daemons are required to provide NFS service: rpcbind (or portmap), mountd
2. Start Hadoop's portmap (needs root privileges):
[root]> $HADOOP_PREFIX/bin/hdfs --daemon start portmap
[root]> $HADOOP_HOME/bin/hdfs --daemon start portmap
3. Start mountd and nfsd.
@ -224,12 +224,12 @@ Three daemons are required to provide NFS service: rpcbind (or portmap), mountd
While in secure mode, any user can start NFS gateway
as long as the user has read access to the Kerberos keytab defined in "nfs.keytab.file".
[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start nfs3
[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start nfs3
4. Stop NFS gateway services.
[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon stop nfs3
[root]> $HADOOP_PREFIX/bin/hdfs --daemon stop portmap
[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon stop nfs3
[root]> $HADOOP_HOME/bin/hdfs --daemon stop portmap
Optionally, you can forgo running the Hadoop-provided portmap daemon and instead use the system portmap daemon on all operating systems if you start the NFS Gateway as root. This will allow the HDFS NFS Gateway to work around the aforementioned bug and still register using the system portmap daemon. To do so, just start the NFS gateway daemon as you normally would, but make sure to do so as the "root" user, and also set the "HADOOP\_PRIVILEGED\_NFS\_USER" environment variable to an unprivileged user. In this mode the NFS Gateway will start as root to perform its initial registration with the system portmap, and then will drop privileges back to the user specified by the HADOOP\_PRIVILEGED\_NFS\_USER afterward and for the rest of the duration of the lifetime of the NFS Gateway process. Note that if you choose this route, you should skip steps 1 and 2 above.

View File

@ -62,7 +62,7 @@ private String getHostPortForNN(MiniDFSCluster cluster) {
public void testCreateAndDestroySpanReceiver() throws Exception {
Configuration conf = new Configuration();
conf = new Configuration();
conf.set(TraceUtils.DEFAULT_HADOOP_PREFIX +
conf.set(TraceUtils.DEFAULT_HADOOP_TRACE_PREFIX +
Tracer.SPAN_RECEIVER_CLASSES_KEY, "");
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

View File

@ -65,10 +65,10 @@ public static void shutdown() throws IOException {
public void testShortCircuitTraceHooks() throws IOException {
assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
conf = new Configuration();
conf.set(TraceUtils.DEFAULT_HADOOP_PREFIX +
conf.set(TraceUtils.DEFAULT_HADOOP_TRACE_PREFIX +
Tracer.SPAN_RECEIVER_CLASSES_KEY,
SetSpanReceiver.class.getName());
conf.set(TraceUtils.DEFAULT_HADOOP_PREFIX +
conf.set(TraceUtils.DEFAULT_HADOOP_TRACE_PREFIX +
Tracer.SAMPLER_CLASSES_KEY,
"AlwaysSampler");
conf.setLong("dfs.blocksize", 100 * 1024);

View File

@ -37,8 +37,8 @@ function hadoop_usage
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

View File

@ -47,7 +47,7 @@ function hadoop_subproject_init
hadoop_deprecate_envvar HADOOP_MAPRED_ROOT_LOGGER HADOOP_ROOT_LOGGER
HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_PREFIX}"
HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_HOME}"
hadoop_deprecate_envvar HADOOP_MAPRED_IDENT_STRING HADOOP_IDENT_STRING
}
@ -62,8 +62,8 @@ if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
elif [ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]; then
. "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
. "${HADOOP_HOME}/libexec/hadoop-config.sh"
else
echo "ERROR: Hadoop common not found." 2>&1
exit 1

View File

@ -21,8 +21,8 @@ function hadoop_usage
}
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

View File

@ -315,7 +315,7 @@ private static void setupPipesJob(JobConf conf) throws IOException {
// <path>#<executable>
if (exec.contains("#")) {
// set default gdb commands for map and reduce task
String defScript = "$HADOOP_PREFIX/src/c++/pipes/debug/pipes-default-script";
String defScript = "$HADOOP_HOME/src/c++/pipes/debug/pipes-default-script";
setIfUnset(conf, MRJobConfig.MAP_DEBUG_SCRIPT,defScript);
setIfUnset(conf, MRJobConfig.REDUCE_DEBUG_SCRIPT,defScript);
}

View File

@ -450,7 +450,7 @@ public static void main(String[] args) {
}
//Copy the executables over to the remote filesystem
String hadoopHome = System.getenv("HADOOP_PREFIX");
String hadoopHome = System.getenv("HADOOP_HOME");
fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/libhdfs.so." + HDFS_LIB_VERSION),
HDFS_SHLIB);
fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/hdfs_read"), HDFS_READ);

View File

@ -179,7 +179,7 @@ private void runSortValidatorTest(final JobClient jc,
private String normalizeCommandPath(String command) {
final String hadoopHome;
if ((hadoopHome = System.getenv("HADOOP_PREFIX")) != null) {
if ((hadoopHome = System.getenv("HADOOP_HOME")) != null) {
command = hadoopHome + "/" + command;
}
return command;

View File

@ -450,7 +450,7 @@ public int compare(AppInfo o1, AppInfo o2) {
fi
export HADOOP_CLIENT_OPTS="-Xmx1024m"
export HADOOP_CLASSPATH=/dist/share/hadoop/tools/lib/hadoop-archive-logs-2.8.0-SNAPSHOT.jar:/dist/share/hadoop/tools/lib/hadoop-archives-2.8.0-SNAPSHOT.jar
"$HADOOP_PREFIX"/bin/hadoop org.apache.hadoop.tools.HadoopArchiveLogsRunner -appId "$appId" -user "$user" -workingDir /tmp/logs/archive-logs-work -remoteRootLogDir /tmp/logs -suffix logs
"$HADOOP_HOME"/bin/hadoop org.apache.hadoop.tools.HadoopArchiveLogsRunner -appId "$appId" -user "$user" -workingDir /tmp/logs/archive-logs-work -remoteRootLogDir /tmp/logs -suffix logs
*/
@VisibleForTesting
void generateScript(File localScript, Path workingDir,
@ -484,7 +484,7 @@ void generateScript(File localScript, Path workingDir,
fw.write("m\"\n");
fw.write("export HADOOP_CLASSPATH=");
fw.write(classpath);
fw.write("\n\"$HADOOP_PREFIX\"/bin/hadoop ");
fw.write("\n\"$HADOOP_HOME\"/bin/hadoop ");
fw.write(HadoopArchiveLogsRunner.class.getName());
fw.write(" -appId \"$appId\" -user \"$user\" -workingDir ");
fw.write(workingDir.toString());

View File

@ -294,14 +294,14 @@ private void _testGenerateScript(boolean proxy) throws Exception {
Assert.assertTrue(lines[14].startsWith("export HADOOP_CLASSPATH="));
if (proxy) {
Assert.assertEquals(
"\"$HADOOP_PREFIX\"/bin/hadoop org.apache.hadoop.tools." +
"\"$HADOOP_HOME\"/bin/hadoop org.apache.hadoop.tools." +
"HadoopArchiveLogsRunner -appId \"$appId\" -user \"$user\" " +
"-workingDir " + workingDir.toString() + " -remoteRootLogDir " +
remoteRootLogDir.toString() + " -suffix " + suffix,
lines[15]);
} else {
Assert.assertEquals(
"\"$HADOOP_PREFIX\"/bin/hadoop org.apache.hadoop.tools." +
"\"$HADOOP_HOME\"/bin/hadoop org.apache.hadoop.tools." +
"HadoopArchiveLogsRunner -appId \"$appId\" -user \"$user\" " +
"-workingDir " + workingDir.toString() + " -remoteRootLogDir " +
remoteRootLogDir.toString() + " -suffix " + suffix + " -noProxy",

View File

@ -20,7 +20,7 @@ B.a31 B.a32
*****************************
*** Invoke SampleDataJoin ***
*****************************
[:~]$ $HADOOP_PREFIX/bin/hadoop jar hadoop-datajoin-examples.jar org.apache.hadoop.contrib.utils.join.DataJoinJob datajoin/input datajoin/output Text 1 org.apache.hadoop.contrib.utils.join.SampleDataJoinMapper org.apache.hadoop.contrib.utils.join.SampleDataJoinReducer org.apache.hadoop.contrib.utils.join.SampleTaggedMapOutput Text
[:~]$ $HADOOP_HOME/bin/hadoop jar hadoop-datajoin-examples.jar org.apache.hadoop.contrib.utils.join.DataJoinJob datajoin/input datajoin/output Text 1 org.apache.hadoop.contrib.utils.join.SampleDataJoinMapper org.apache.hadoop.contrib.utils.join.SampleDataJoinReducer org.apache.hadoop.contrib.utils.join.SampleTaggedMapOutput Text
Using TextInputFormat: Text
Using TextOutputFormat: Text
07/06/01 19:58:23 INFO mapred.FileInputFormat: Total input paths to process : 2

View File

@ -1,3 +1,4 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@ -9,6 +10,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
core=`find . -name 'core*'`
core=$(find . -name 'core*')
#Only pipes programs have 5th argument as program name.
gdb -quiet $5 -c $core -x $HADOOP_PREFIX/src/c++/pipes/debug/pipes-default-gdb-commands.txt
gdb -quiet "${5}" -c "${core}" -x "${HADOOP_HOME}/src/c++/pipes/debug/pipes-default-gdb-commands.txt"

View File

@ -77,8 +77,8 @@ function run_sls_generator()
}
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

View File

@ -71,8 +71,8 @@ function parse_args()
function calculate_classpath
{
hadoop_add_to_classpath_tools hadoop-sls
hadoop_debug "Injecting ${HADOOP_PREFIX}/share/hadoop/tools/sls/html into CLASSPATH"
hadoop_add_classpath "${HADOOP_PREFIX}/share/hadoop/tools/sls/html"
hadoop_debug "Injecting ${HADOOP_TOOLS_DIR}/sls/html into CLASSPATH"
hadoop_add_classpath "${HADOOP_TOOLS_DIR}/sls/html"
}
function run_simulation() {
@ -105,8 +105,8 @@ function run_simulation() {
}
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

View File

@ -91,7 +91,7 @@ public int run(String[] args) throws Exception {
}
private void printUsage() {
System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar"
System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar hadoop-streaming.jar"
+ " dumptb <glob-pattern>");
System.out.println(" Dumps all files that match the given pattern to " +
"standard output as typed bytes.");

View File

@ -56,7 +56,7 @@ public static void main(String[] args) throws Exception {
}
private static void printUsage() {
System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar"
System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar hadoop-streaming.jar"
+ " [options]");
System.out.println("Options:");
System.out.println(" dumptb <glob-pattern> Dumps all files that match the"

View File

@ -89,7 +89,7 @@ public int run(String[] args) throws Exception {
}
private void printUsage() {
System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar"
System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar hadoop-streaming.jar"
+ " loadtb <path>");
System.out.println(" Reads typed bytes from standard input" +
" and stores them in a sequence file in");

View File

@ -502,7 +502,7 @@ public void exitUsage(boolean detailed) {
}
private void printUsage(boolean detailed) {
System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar"
System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar hadoop-streaming.jar"
+ " [options]");
System.out.println("Options:");
System.out.println(" -input <path> DFS input file(s) for the Map"
@ -551,7 +551,7 @@ private void printUsage(boolean detailed) {
System.out.println();
System.out.println("For more details about these options:");
System.out.println("Use " +
"$HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar -info");
"$HADOOP_HOME/bin/hadoop jar hadoop-streaming.jar -info");
return;
}
System.out.println();
@ -611,7 +611,7 @@ private void printUsage(boolean detailed) {
System.out.println(" -D stream.non.zero.exit.is.failure=false");
System.out.println("Use a custom hadoop streaming build along with standard"
+ " hadoop install:");
System.out.println(" $HADOOP_PREFIX/bin/hadoop jar " +
System.out.println(" $HADOOP_HOME/bin/hadoop jar " +
"/path/my-hadoop-streaming.jar [...]\\");
System.out.println(" [...] -D stream.shipped.hadoopstreaming=" +
"/path/my-hadoop-streaming.jar");
@ -625,7 +625,7 @@ private void printUsage(boolean detailed) {
System.out.println(" -cmdenv EXAMPLE_DIR=/home/example/dictionaries/");
System.out.println();
System.out.println("Shortcut:");
System.out.println(" setenv HSTREAMING \"$HADOOP_PREFIX/bin/hadoop jar " +
System.out.println(" setenv HSTREAMING \"$HADOOP_HOME/bin/hadoop jar " +
"hadoop-streaming.jar\"");
System.out.println();
System.out.println("Example: $HSTREAMING -mapper " +
@ -648,9 +648,9 @@ public void fail(String message) {
// --------------------------------------------
protected String getHadoopClientHome() {
String h = env_.getProperty("HADOOP_PREFIX"); // standard Hadoop
String h = env_.getProperty("HADOOP_HOME"); // standard Hadoop
if (h == null) {
//fail("Missing required environment variable: HADOOP_PREFIX");
//fail("Missing required environment variable: HADOOP_HOME");
h = "UNDEF";
}
return h;
@ -674,8 +674,8 @@ protected String packageJobJar() throws IOException {
// usually found in: build/contrib or build/hadoop-<version>-dev-streaming.jar
// First try an explicit spec: it's too hard to find our own location in this case:
// $HADOOP_PREFIX/bin/hadoop jar /not/first/on/classpath/custom-hadoop-streaming.jar
// where findInClasspath() would find the version of hadoop-streaming.jar in $HADOOP_PREFIX
// $HADOOP_HOME/bin/hadoop jar /not/first/on/classpath/custom-hadoop-streaming.jar
// where findInClasspath() would find the version of hadoop-streaming.jar in $HADOOP_HOME
String runtimeClasses = config_.get("stream.shipped.hadoopstreaming"); // jar or class dir
if (runtimeClasses == null) {

View File

@ -26,8 +26,8 @@ function hadoop_usage
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

View File

@ -26,8 +26,8 @@ function hadoop_usage
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

View File

@ -51,8 +51,8 @@ function hadoop_usage
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"

View File

@ -53,7 +53,7 @@ function hadoop_subproject_init
hadoop_deprecate_envvar YARN_SLAVES HADOOP_SLAVES
HADOOP_YARN_HOME="${HADOOP_YARN_HOME:-$HADOOP_PREFIX}"
HADOOP_YARN_HOME="${HADOOP_YARN_HOME:-$HADOOP_HOME}"
# YARN-1429 added the completely superfluous YARN_USER_CLASSPATH
# env var. We're going to override HADOOP_USER_CLASSPATH to keep
@ -74,8 +74,8 @@ if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
elif [ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]; then
. "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
. "${HADOOP_HOME}/libexec/hadoop-config.sh"
else
echo "ERROR: Hadoop common not found." 2>&1
exit 1

View File

@ -21,8 +21,8 @@ function hadoop_usage
}
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

View File

@ -25,8 +25,8 @@ this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

View File

@ -169,7 +169,7 @@ public void testContainerLaunchInvalidImage() throws IOException {
String appSubmitter = "nobody";
String appId = "APP_ID";
String containerId = "CONTAINER_ID";
String testImage = "testrepo.com/test-image rm -rf $HADOOP_PREFIX/*";
String testImage = "testrepo.com/test-image rm -rf $HADOOP_HOME/*";
Container container = mock(Container.class, RETURNS_DEEP_STUBS);
ContainerId cId = mock(ContainerId.class, RETURNS_DEEP_STUBS);

View File

@ -142,7 +142,7 @@ Step 2. Pick a custom Docker image if you want. In this example, we'll use seque
Step 3. Run.
```bash
hadoop jar $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-${project.version}.jar \
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-${project.version}.jar \
teragen \
-Dmapreduce.map.env="yarn.nodemanager.docker-container-executor.image-name=sequenceiq/hadoop-docker:2.4.1" \
-Dyarn.app.mapreduce.am.env="yarn.nodemanager.docker-container-executor.image-name=sequenceiq/hadoop-docker:2.4.1" \